i965/fs: Use regs_read/written for post-RA scheduling in calculate_deps
[mesa.git] / src / mesa / drivers / dri / i965 / brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33 #include "glsl/nir/glsl_types.h"
34 #include "glsl/ir_optimization.h"
35
36 using namespace brw;
37
38 /** @file brw_schedule_instructions.cpp
39 *
40 * List scheduling of FS and vec4 instructions.
41 *
42 * The basic model of the list scheduler is to take a basic block,
43 * compute a DAG of the dependencies (RAW ordering with latency, WAW
44 * ordering with latency, WAR ordering), and make a list of the DAG heads.
45 * Heuristically pick a DAG head, then put all the children that are
46 * now DAG heads into the list of things to schedule.
47 *
48 * The heuristic is the important part. We're trying to be cheap,
49 * since actually computing the optimal scheduling is NP-complete.
50 * What we do is track a "current clock". When we schedule a node, we
51 * update the earliest-unblocked clock time of its children, and
52 * increment the clock. Then, when trying to schedule, we just pick
53 * the earliest-unblocked instruction to schedule.
54 *
55 * Note that often there will be many things which could execute
56 * immediately, and there is a range of heuristic options to choose
57 * from in picking among those.
58 */
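
/* A minimal sketch of the pick loop described above, using hypothetical toy
 * types rather than the scheduler's real schedule_node/exec_list structures:
 * each pass simply takes the unscheduled node with the earliest
 * unblocked time.
 */
struct sched_sketch_node {
   int unblocked_time; /* earliest clock at which this node may issue */
   bool scheduled;
};

static inline int
sched_sketch_pick(const struct sched_sketch_node *nodes, int count)
{
   int chosen = -1;
   for (int i = 0; i < count; i++) {
      if (nodes[i].scheduled)
         continue;
      /* Prefer the node that becomes unblocked earliest. */
      if (chosen == -1 ||
          nodes[i].unblocked_time < nodes[chosen].unblocked_time)
         chosen = i;
   }
   return chosen; /* -1 once everything has been scheduled */
}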
59
60 static bool debug = false;
61
62 class instruction_scheduler;
63
64 class schedule_node : public exec_node
65 {
66 public:
67 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
68 void set_latency_gen4();
69 void set_latency_gen7(bool is_haswell);
70
71 backend_instruction *inst;
72 schedule_node **children;
73 int *child_latency;
74 int child_count;
75 int parent_count;
76 int child_array_size;
77 int unblocked_time;
78 int latency;
79
80 /**
81 * Which iteration of pushing groups of children onto the candidates list
82 * this node was a part of.
83 */
84 unsigned cand_generation;
85
86 /**
87 * This is the instruction's latency plus the maximum delay of its
88 * children, or just the issue_time if it's a leaf node.
89 */
90 int delay;
91 };
92
93 void
94 schedule_node::set_latency_gen4()
95 {
96 int chans = 8;
97 int math_latency = 22;
98
99 switch (inst->opcode) {
100 case SHADER_OPCODE_RCP:
101 this->latency = 1 * chans * math_latency;
102 break;
103 case SHADER_OPCODE_RSQ:
104 this->latency = 2 * chans * math_latency;
105 break;
106 case SHADER_OPCODE_INT_QUOTIENT:
107 case SHADER_OPCODE_SQRT:
108 case SHADER_OPCODE_LOG2:
109 /* full precision log. partial is 2. */
110 this->latency = 3 * chans * math_latency;
111 break;
112 case SHADER_OPCODE_INT_REMAINDER:
113 case SHADER_OPCODE_EXP2:
114 /* full precision. partial is 3, same throughput. */
115 this->latency = 4 * chans * math_latency;
116 break;
117 case SHADER_OPCODE_POW:
118 this->latency = 8 * chans * math_latency;
119 break;
120 case SHADER_OPCODE_SIN:
121 case SHADER_OPCODE_COS:
122 /* minimum latency, max is 12 rounds. */
123 this->latency = 5 * chans * math_latency;
124 break;
125 default:
126 this->latency = 2;
127 break;
128 }
129 }
130
131 void
132 schedule_node::set_latency_gen7(bool is_haswell)
133 {
134 switch (inst->opcode) {
135 case BRW_OPCODE_MAD:
136 /* 2 cycles
137 * (since the last two src operands are in different register banks):
138 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
139 *
140 * 3 cycles on IVB, 4 on HSW
141 * (since the last two src operands are in the same register bank):
142 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
143 *
144 * 18 cycles on IVB, 16 on HSW
145 * (since the last two src operands are in different register banks):
146 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
147 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
148 *
149 * 20 cycles on IVB, 18 on HSW
150 * (since the last two src operands are in the same register bank):
151 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
152 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
153 */
154
155 /* Our register allocator doesn't know about register banks, so use the
156 * higher latency.
157 */
158 latency = is_haswell ? 16 : 18;
159 break;
160
161 case BRW_OPCODE_LRP:
162 /* 2 cycles
163 * (since the last two src operands are in different register banks):
164 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
165 *
166 * 3 cycles on IVB, 4 on HSW
167 * (since the last two src operands are in the same register bank):
168 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
169 *
170 * 16 cycles on IVB, 14 on HSW
171 * (since the last two src operands are in different register banks):
172 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
173 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
174 *
175 * 16 cycles
176 * (since the last two src operands are in the same register bank):
177 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
178 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
179 */
180
181 /* Our register allocator doesn't know about register banks, so use the
182 * higher latency.
183 */
184 latency = 14;
185 break;
186
187 case SHADER_OPCODE_RCP:
188 case SHADER_OPCODE_RSQ:
189 case SHADER_OPCODE_SQRT:
190 case SHADER_OPCODE_LOG2:
191 case SHADER_OPCODE_EXP2:
192 case SHADER_OPCODE_SIN:
193 case SHADER_OPCODE_COS:
194 /* 2 cycles:
195 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
196 *
197 * 18 cycles:
198 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
199 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
200 *
201 * Same for exp2, log2, rsq, sqrt, sin, cos.
202 */
203 latency = is_haswell ? 14 : 16;
204 break;
205
206 case SHADER_OPCODE_POW:
207 /* 2 cycles:
208 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
209 *
210 * 26 cycles:
211 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
212 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
213 */
214 latency = is_haswell ? 22 : 24;
215 break;
216
217 case SHADER_OPCODE_TEX:
218 case SHADER_OPCODE_TXD:
219 case SHADER_OPCODE_TXF:
220 case SHADER_OPCODE_TXL:
221 /* 18 cycles:
222 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
223 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
224 * send(8) g4<1>UW g114<8,8,1>F
225 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
226 *
227 * 697 +/-49 cycles (min 610, n=26):
228 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
229 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
230 * send(8) g4<1>UW g114<8,8,1>F
231 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
232 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
233 *
234 * So our first texture load of the batchbuffer takes ~700 cycles,
235 * since the caches are cold at that point.
236 *
237 * 840 +/- 92 cycles (min 720, n=25):
238 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
239 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
240 * send(8) g4<1>UW g114<8,8,1>F
241 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
242 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
243 * send(8) g4<1>UW g114<8,8,1>F
244 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
245 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
246 *
247 * On the second load, it takes just an extra ~140 cycles, and after
248 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
249 *
250 * 683 +/- 49 cycles (min = 602, n=47):
251 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
252 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
253 * send(8) g4<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * send(8) g50<1>UW g114<8,8,1>F
256 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
257 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
258 *
259 * The unit appears to be pipelined, since this matches up with the
260 * cache-cold case, despite there being two loads here. If you replace
261 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
262 *
263 * So, take some number between the cache-hot 140 cycles and the
264 * cache-cold 700 cycles. No particular tuning was done on this.
265 *
266 * I haven't done significant testing of the non-TEX opcodes. TXL at
267 * least looked about the same as TEX.
268 */
269 latency = 200;
270 break;
271
272 case SHADER_OPCODE_TXS:
273 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
274 * cycles (n=15):
275 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
276 * send(8) g6<1>UW g114<8,8,1>F
277 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
278 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
279 *
280 *
281 * Two loads was 535 +/- 30 cycles (n=19):
282 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
283 * send(16) g6<1>UW g114<8,8,1>F
284 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
285 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
286 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
287 * send(16) g8<1>UW g114<8,8,1>F
288 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
289 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
290 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
291 *
292 * Since the only cache that should matter is the instruction/state
293 * cache containing the surface state, assume that we always have hot
294 * caches.
295 */
296 latency = 100;
297 break;
298
299 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
300 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
301 case VS_OPCODE_PULL_CONSTANT_LOAD:
302 /* Testing with varying-index pull constants:
303 *
304 * 16 cycles:
305 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
306 * send(8) g4<1>F g4<8,8,1>D
307 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
308 *
309 * ~480 cycles:
310 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
311 * send(8) g4<1>F g4<8,8,1>D
312 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
313 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
314 *
315 * ~620 cycles:
316 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
317 * send(8) g4<1>F g4<8,8,1>D
318 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
319 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
320 * send(8) g4<1>F g4<8,8,1>D
321 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
322 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
323 *
324 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
325 * about 460. We expect to mostly be cache hot, so pick something more
326 * in that direction.
327 */
328 latency = 200;
329 break;
330
331 case SHADER_OPCODE_GEN7_SCRATCH_READ:
332 /* Testing a load from offset 0, that had been previously written:
333 *
334 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
335 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
336 *
337 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
338 * then around 140. Presumably this is cache hit vs miss.
339 */
340 latency = 50;
341 break;
342
343 case SHADER_OPCODE_UNTYPED_ATOMIC:
344 case SHADER_OPCODE_TYPED_ATOMIC:
345 /* Test code:
346 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
347 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
348 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
349 * send(8) g4<1>ud g112<8,8,1>ud
350 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
351 *
352 * Running it 100 times as fragment shader on a 128x128 quad
353 * gives an average latency of 13867 cycles per atomic op,
354 * standard deviation 3%. Note that this is a rather
355 * pessimistic estimate, the actual latency in cases with few
356 * collisions between threads and favorable pipelining has been
357 * seen to be reduced by a factor of 100.
358 */
359 latency = 14000;
360 break;
361
362 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
363 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
364 case SHADER_OPCODE_TYPED_SURFACE_READ:
365 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
366 /* Test code:
367 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
368 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
369 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
370 * send(8) g4<1>UD g112<8,8,1>UD
371 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
372 * .
373 * . [repeats 8 times]
374 * .
375 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
376 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
377 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
378 * send(8) g4<1>UD g112<8,8,1>UD
379 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
380 *
381 * Running it 100 times as fragment shader on a 128x128 quad
382 * gives an average latency of 583 cycles per surface read,
383 * standard deviation 0.9%.
384 */
385 latency = is_haswell ? 300 : 600;
386 break;
387
388 default:
389 /* 2 cycles:
390 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
391 *
392 * 16 cycles:
393 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
394 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
395 */
396 latency = 14;
397 break;
398 }
399 }
400
401 class instruction_scheduler {
402 public:
403 instruction_scheduler(backend_shader *s, int grf_count,
404 int hw_reg_count, int block_count,
405 instruction_scheduler_mode mode)
406 {
407 this->bs = s;
408 this->mem_ctx = ralloc_context(NULL);
409 this->grf_count = grf_count;
410 this->hw_reg_count = hw_reg_count;
411 this->instructions.make_empty();
412 this->instructions_to_schedule = 0;
413 this->post_reg_alloc = (mode == SCHEDULE_POST);
414 this->mode = mode;
415 this->time = 0;
416 if (!post_reg_alloc) {
417 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
418
419 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
420 for (int i = 0; i < block_count; i++)
421 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
422 BITSET_WORDS(grf_count));
423
424 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
425 for (int i = 0; i < block_count; i++)
426 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
427 BITSET_WORDS(grf_count));
428
429 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
430 for (int i = 0; i < block_count; i++)
431 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
432 BITSET_WORDS(hw_reg_count));
433
434 this->written = rzalloc_array(mem_ctx, bool, grf_count);
435
436 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
437
438 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
439 } else {
440 this->reg_pressure_in = NULL;
441 this->livein = NULL;
442 this->liveout = NULL;
443 this->hw_liveout = NULL;
444 this->written = NULL;
445 this->reads_remaining = NULL;
446 this->hw_reads_remaining = NULL;
447 }
448 }
449
450 ~instruction_scheduler()
451 {
452 ralloc_free(this->mem_ctx);
453 }
454 void add_barrier_deps(schedule_node *n);
455 void add_dep(schedule_node *before, schedule_node *after, int latency);
456 void add_dep(schedule_node *before, schedule_node *after);
457
458 void run(cfg_t *cfg);
459 void add_insts_from_block(bblock_t *block);
460 void compute_delay(schedule_node *node);
461 virtual void calculate_deps() = 0;
462 virtual schedule_node *choose_instruction_to_schedule() = 0;
463
464 /**
465 * Returns how many cycles it takes the instruction to issue.
466 *
467 * Instructions in Gen hardware are handled one SIMD4 vector at a time,
468 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
469 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
470 */
471 virtual int issue_time(backend_instruction *inst) = 0;
472
473 virtual void count_reads_remaining(backend_instruction *inst) = 0;
474 virtual void setup_liveness(cfg_t *cfg) = 0;
475 virtual void update_register_pressure(backend_instruction *inst) = 0;
476 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
477
478 void schedule_instructions(bblock_t *block);
479
480 void *mem_ctx;
481
482 bool post_reg_alloc;
483 int instructions_to_schedule;
484 int grf_count;
485 int hw_reg_count;
486 int time;
487 int reg_pressure;
488 int block_idx;
489 exec_list instructions;
490 backend_shader *bs;
491
492 instruction_scheduler_mode mode;
493
494 /*
495 * The register pressure at the beginning of each basic block.
496 */
497
498 int *reg_pressure_in;
499
500 /*
501 * The virtual GRFs whose range overlaps the beginning of each basic block.
502 */
503
504 BITSET_WORD **livein;
505
506 /*
507 * The virtual GRFs whose range overlaps the end of each basic block.
508 */
509
510 BITSET_WORD **liveout;
511
512 /*
513 * The hardware GRFs whose range overlaps the end of each basic block.
514 */
515
516 BITSET_WORD **hw_liveout;
517
518 /*
519 * Whether we've scheduled a write for this virtual GRF yet.
520 */
521
522 bool *written;
523
524 /*
525 * How many reads we haven't scheduled for this virtual GRF yet.
526 */
527
528 int *reads_remaining;
529
530 /*
531 * How many reads we haven't scheduled for this hardware GRF yet.
532 */
533
534 int *hw_reads_remaining;
535 };
536
537 class fs_instruction_scheduler : public instruction_scheduler
538 {
539 public:
540 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
541 int block_count,
542 instruction_scheduler_mode mode);
543 void calculate_deps();
544 bool is_compressed(fs_inst *inst);
545 schedule_node *choose_instruction_to_schedule();
546 int issue_time(backend_instruction *inst);
547 fs_visitor *v;
548
549 void count_reads_remaining(backend_instruction *inst);
550 void setup_liveness(cfg_t *cfg);
551 void update_register_pressure(backend_instruction *inst);
552 int get_register_pressure_benefit(backend_instruction *inst);
553 };
554
555 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
556 int grf_count, int hw_reg_count,
557 int block_count,
558 instruction_scheduler_mode mode)
559 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
560 v(v)
561 {
562 }
563
564 static bool
565 is_src_duplicate(fs_inst *inst, int src)
566 {
567 for (int i = 0; i < src; i++)
568 if (inst->src[i].equals(inst->src[src]))
569 return true;
570
571 return false;
572 }
573
574 void
575 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
576 {
577 fs_inst *inst = (fs_inst *)be;
578
579 if (!reads_remaining)
580 return;
581
582 for (int i = 0; i < inst->sources; i++) {
583 if (is_src_duplicate(inst, i))
584 continue;
585
586 if (inst->src[i].file == GRF) {
587 reads_remaining[inst->src[i].reg]++;
588 } else if (inst->src[i].file == HW_REG &&
589 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
590 if (inst->src[i].fixed_hw_reg.nr >= hw_reg_count)
591 continue;
592
593 for (int j = 0; j < inst->regs_read(i); j++)
594 hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + j]++;
595 }
596 }
597 }
598
599 void
600 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
601 {
602 /* First, compute liveness on a per-GRF level using the in/out sets from
603 * the liveness calculation.
604 */
605 for (int block = 0; block < cfg->num_blocks; block++) {
606 for (int i = 0; i < v->live_intervals->num_vars; i++) {
607 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
608 int vgrf = v->live_intervals->vgrf_from_var[i];
609 if (!BITSET_TEST(livein[block], vgrf)) {
610 reg_pressure_in[block] += v->alloc.sizes[vgrf];
611 BITSET_SET(livein[block], vgrf);
612 }
613 }
614
615 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
616 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
617 }
618 }
619
620 /* Now, extend the live in/live out sets for when a range crosses a block
621 * boundary, which matches what our register allocator/interference code
622 * does to account for force_writemask_all and incompatible exec_masks.
623 */
624 for (int block = 0; block < cfg->num_blocks - 1; block++) {
625 for (int i = 0; i < grf_count; i++) {
626 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
627 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
628 if (!BITSET_TEST(livein[block + 1], i)) {
629 reg_pressure_in[block + 1] += v->alloc.sizes[i];
630 BITSET_SET(livein[block + 1], i);
631 }
632
633 BITSET_SET(liveout[block], i);
634 }
635 }
636 }
637
638 int payload_last_use_ip[hw_reg_count];
639 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
640
641 for (int i = 0; i < hw_reg_count; i++) {
642 if (payload_last_use_ip[i] == -1)
643 continue;
644
645 for (int block = 0; block < cfg->num_blocks; block++) {
646 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
647 reg_pressure_in[block]++;
648
649 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
650 BITSET_SET(hw_liveout[block], i);
651 }
652 }
653 }
654
655 void
656 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
657 {
658 fs_inst *inst = (fs_inst *)be;
659
660 if (!reads_remaining)
661 return;
662
663 if (inst->dst.file == GRF) {
664 written[inst->dst.reg] = true;
665 }
666
667 for (int i = 0; i < inst->sources; i++) {
668 if (is_src_duplicate(inst, i))
669 continue;
670
671 if (inst->src[i].file == GRF) {
672 reads_remaining[inst->src[i].reg]--;
673 } else if (inst->src[i].file == HW_REG &&
674 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
675 inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
676 for (int off = 0; off < inst->regs_read(i); off++)
677 hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + off]--;
678 }
679 }
680 }
681
682 int
683 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
684 {
685 fs_inst *inst = (fs_inst *)be;
686 int benefit = 0;
687
688 if (inst->dst.file == GRF) {
689 if (!BITSET_TEST(livein[block_idx], inst->dst.reg) &&
690 !written[inst->dst.reg])
691 benefit -= v->alloc.sizes[inst->dst.reg];
692 }
693
694 for (int i = 0; i < inst->sources; i++) {
695 if (is_src_duplicate(inst, i))
696 continue;
697
698 if (inst->src[i].file == GRF &&
699 !BITSET_TEST(liveout[block_idx], inst->src[i].reg) &&
700 reads_remaining[inst->src[i].reg] == 1)
701 benefit += v->alloc.sizes[inst->src[i].reg];
702
703 if (inst->src[i].file == HW_REG &&
704 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
705 inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
706 for (int off = 0; off < inst->regs_read(i); off++) {
707 int reg = inst->src[i].fixed_hw_reg.nr + off;
708 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
709 hw_reads_remaining[reg] == 1) {
710 benefit++;
711 }
712 }
713 }
714 }
715
716 return benefit;
717 }
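
/* A worked example of the benefit computation above, with hypothetical
 * virtual GRFs and sizes: for "add vgrf7, vgrf3, vgrf5" where vgrf3 and vgrf5
 * each occupy 2 registers, have one read remaining and are not in liveout[],
 * while vgrf7 occupies 2 registers, has not been written yet and is not in
 * livein[], the benefit is +2 + 2 - 2 = +2, i.e. scheduling the instruction
 * is expected to reduce register pressure by two registers.
 */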
718
719 class vec4_instruction_scheduler : public instruction_scheduler
720 {
721 public:
722 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
723 void calculate_deps();
724 schedule_node *choose_instruction_to_schedule();
725 int issue_time(backend_instruction *inst);
726 vec4_visitor *v;
727
728 void count_reads_remaining(backend_instruction *inst);
729 void setup_liveness(cfg_t *cfg);
730 void update_register_pressure(backend_instruction *inst);
731 int get_register_pressure_benefit(backend_instruction *inst);
732 };
733
734 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
735 int grf_count)
736 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
737 v(v)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
748 {
749 }
750
751 void
752 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
753 {
754 }
755
756 int
757 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
758 {
759 return 0;
760 }
761
762 schedule_node::schedule_node(backend_instruction *inst,
763 instruction_scheduler *sched)
764 {
765 const struct brw_device_info *devinfo = sched->bs->devinfo;
766
767 this->inst = inst;
768 this->child_array_size = 0;
769 this->children = NULL;
770 this->child_latency = NULL;
771 this->child_count = 0;
772 this->parent_count = 0;
773 this->unblocked_time = 0;
774 this->cand_generation = 0;
775 this->delay = 0;
776
777 /* We can't measure Gen6 timings directly but expect them to be much
778 * closer to Gen7 than Gen4.
779 */
780 if (!sched->post_reg_alloc)
781 this->latency = 1;
782 else if (devinfo->gen >= 6)
783 set_latency_gen7(devinfo->is_haswell);
784 else
785 set_latency_gen4();
786 }
787
788 void
789 instruction_scheduler::add_insts_from_block(bblock_t *block)
790 {
791 /* Removing the last instruction from a basic block removes the block as
792 * well, so put a NOP at the end to keep it alive.
793 */
794 if (!block->end()->is_control_flow()) {
795 backend_instruction *nop = new(mem_ctx) backend_instruction();
796 nop->opcode = BRW_OPCODE_NOP;
797 block->end()->insert_after(block, nop);
798 }
799
800 foreach_inst_in_block_safe(backend_instruction, inst, block) {
801 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
802 continue;
803
804 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
805
806 this->instructions_to_schedule++;
807
808 inst->remove(block);
809 instructions.push_tail(n);
810 }
811 }
812
813 /** Recursive computation of the delay member of a node. */
814 void
815 instruction_scheduler::compute_delay(schedule_node *n)
816 {
817 if (!n->child_count) {
818 n->delay = issue_time(n->inst);
819 } else {
820 for (int i = 0; i < n->child_count; i++) {
821 if (!n->children[i]->delay)
822 compute_delay(n->children[i]);
823 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
824 }
825 }
826 }
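
/* Worked example with hypothetical latencies: for a dependency chain
 * A -> B -> C where C is a leaf with issue_time 2, B has latency 14 and A has
 * latency 2, the recursion gives delay(C) = 2, delay(B) = 14 + 2 = 16 and
 * delay(A) = 2 + 16 = 18, i.e. each node's delay is the length of the
 * critical path from that node to the end of the block.
 */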
827
828 /**
829 * Add a dependency between two instruction nodes.
830 *
831 * The @after node will be scheduled after @before. We will try to
832 * schedule it @latency cycles after @before, but no guarantees there.
833 */
834 void
835 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
836 int latency)
837 {
838 if (!before || !after)
839 return;
840
841 assert(before != after);
842
843 for (int i = 0; i < before->child_count; i++) {
844 if (before->children[i] == after) {
845 before->child_latency[i] = MAX2(before->child_latency[i], latency);
846 return;
847 }
848 }
849
850 if (before->child_array_size <= before->child_count) {
851 if (before->child_array_size < 16)
852 before->child_array_size = 16;
853 else
854 before->child_array_size *= 2;
855
856 before->children = reralloc(mem_ctx, before->children,
857 schedule_node *,
858 before->child_array_size);
859 before->child_latency = reralloc(mem_ctx, before->child_latency,
860 int, before->child_array_size);
861 }
862
863 before->children[before->child_count] = after;
864 before->child_latency[before->child_count] = latency;
865 before->child_count++;
866 after->parent_count++;
867 }
868
869 void
870 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
871 {
872 if (!before)
873 return;
874
875 add_dep(before, after, before->latency);
876 }
877
878 /**
879 * Sometimes we really want this node to execute after everything that
880 * comes before it and before everything that follows it. This adds
881 * the deps to do so.
882 */
883 void
884 instruction_scheduler::add_barrier_deps(schedule_node *n)
885 {
886 schedule_node *prev = (schedule_node *)n->prev;
887 schedule_node *next = (schedule_node *)n->next;
888
889 if (prev) {
890 while (!prev->is_head_sentinel()) {
891 add_dep(prev, n, 0);
892 prev = (schedule_node *)prev->prev;
893 }
894 }
895
896 if (next) {
897 while (!next->is_tail_sentinel()) {
898 add_dep(n, next, 0);
899 next = (schedule_node *)next->next;
900 }
901 }
902 }
903
904 /* Instruction scheduling needs to be aware of when a compressed MRF write
905 * actually writes two MRFs.
906 */
907 bool
908 fs_instruction_scheduler::is_compressed(fs_inst *inst)
909 {
910 return inst->exec_size == 16;
911 }
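
/* Worked example: a compressed (SIMD16) write to m2 also writes m3, so
 * calculate_deps() below records the write against both MRF slots; with
 * COMPR4 addressing the pair is m2 and m6 (base + 4) instead.
 */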
912
913 void
914 fs_instruction_scheduler::calculate_deps()
915 {
916 /* Pre-register-allocation, this tracks the last write per VGRF offset.
917 * After register allocation, reg_offsets are gone and we track individual
918 * GRF registers.
919 */
920 schedule_node *last_grf_write[grf_count * 16];
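   /* Indexing example: before register allocation a write to vgrf 5 at
    * reg_offset 2 is tracked in slot 5 * 16 + 2, assuming no virtual GRF
    * spans more than 16 registers; after register allocation the slot is
    * simply the hardware register number (e.g. g20 uses slot 20).
    */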
921 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
922 schedule_node *last_conditional_mod[2] = { NULL, NULL };
923 schedule_node *last_accumulator_write = NULL;
924 /* Fixed HW registers are assumed to be separate from the virtual
925 * GRFs, so they can be tracked separately. We don't really write
926 * to fixed GRFs much, so don't bother tracking them on a more
927 * granular level.
928 */
929 schedule_node *last_fixed_grf_write = NULL;
930
931 /* The last instruction always needs to still be the last
932 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
933 * WHILE) and scheduling other things after it would disturb the
934 * basic block, or it's FB_WRITE and we should do a better job at
935 * dead code elimination anyway.
936 */
937 schedule_node *last = (schedule_node *)instructions.get_tail();
938 add_barrier_deps(last);
939
940 memset(last_grf_write, 0, sizeof(last_grf_write));
941 memset(last_mrf_write, 0, sizeof(last_mrf_write));
942
943 /* top-to-bottom dependencies: RAW and WAW. */
944 foreach_in_list(schedule_node, n, &instructions) {
945 fs_inst *inst = (fs_inst *)n->inst;
946
947 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
948 inst->has_side_effects())
949 add_barrier_deps(n);
950
951 /* read-after-write deps. */
952 for (int i = 0; i < inst->sources; i++) {
953 if (inst->src[i].file == GRF) {
954 if (post_reg_alloc) {
955 for (int r = 0; r < inst->regs_read(i); r++)
956 add_dep(last_grf_write[inst->src[i].reg + r], n);
957 } else {
958 for (int r = 0; r < inst->regs_read(i); r++) {
959 add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
960 }
961 }
962 } else if (inst->src[i].file == HW_REG &&
963 (inst->src[i].fixed_hw_reg.file ==
964 BRW_GENERAL_REGISTER_FILE)) {
965 if (post_reg_alloc) {
966 for (int r = 0; r < inst->regs_read(i); r++)
967 add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
968 } else {
969 add_dep(last_fixed_grf_write, n);
970 }
971 } else if (inst->src[i].is_accumulator()) {
972 add_dep(last_accumulator_write, n);
973 } else if (inst->src[i].file != BAD_FILE &&
974 inst->src[i].file != IMM &&
975 inst->src[i].file != UNIFORM &&
976 (inst->src[i].file != HW_REG ||
977 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
978 assert(inst->src[i].file != MRF);
979 add_barrier_deps(n);
980 }
981 }
982
983 if (inst->base_mrf != -1) {
984 for (int i = 0; i < inst->mlen; i++) {
985 /* It looks like the MRF regs are released in the send
986 * instruction once it's sent, not when the result comes
987 * back.
988 */
989 add_dep(last_mrf_write[inst->base_mrf + i], n);
990 }
991 }
992
993 if (inst->reads_flag()) {
994 add_dep(last_conditional_mod[inst->flag_subreg], n);
995 }
996
997 if (inst->reads_accumulator_implicitly()) {
998 add_dep(last_accumulator_write, n);
999 }
1000
1001 /* write-after-write deps. */
1002 if (inst->dst.file == GRF) {
1003 if (post_reg_alloc) {
1004 for (int r = 0; r < inst->regs_written; r++) {
1005 add_dep(last_grf_write[inst->dst.reg + r], n);
1006 last_grf_write[inst->dst.reg + r] = n;
1007 }
1008 } else {
1009 for (int r = 0; r < inst->regs_written; r++) {
1010 add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
1011 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
1012 }
1013 }
1014 } else if (inst->dst.file == MRF) {
1015 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
1016
1017 add_dep(last_mrf_write[reg], n);
1018 last_mrf_write[reg] = n;
1019 if (is_compressed(inst)) {
1020 if (inst->dst.reg & BRW_MRF_COMPR4)
1021 reg += 4;
1022 else
1023 reg++;
1024 add_dep(last_mrf_write[reg], n);
1025 last_mrf_write[reg] = n;
1026 }
1027 } else if (inst->dst.file == HW_REG &&
1028 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1029 if (post_reg_alloc) {
1030 for (int r = 0; r < inst->regs_written; r++)
1031 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
1032 } else {
1033 last_fixed_grf_write = n;
1034 }
1035 } else if (inst->dst.is_accumulator()) {
1036 add_dep(last_accumulator_write, n);
1037 last_accumulator_write = n;
1038 } else if (inst->dst.file != BAD_FILE &&
1039 !inst->dst.is_null()) {
1040 add_barrier_deps(n);
1041 }
1042
1043 if (inst->mlen > 0 && inst->base_mrf != -1) {
1044 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1045 add_dep(last_mrf_write[inst->base_mrf + i], n);
1046 last_mrf_write[inst->base_mrf + i] = n;
1047 }
1048 }
1049
1050 if (inst->writes_flag()) {
1051 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1052 last_conditional_mod[inst->flag_subreg] = n;
1053 }
1054
1055 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1056 !inst->dst.is_accumulator()) {
1057 add_dep(last_accumulator_write, n);
1058 last_accumulator_write = n;
1059 }
1060 }
1061
1062 /* bottom-to-top dependencies: WAR */
1063 memset(last_grf_write, 0, sizeof(last_grf_write));
1064 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1065 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1066 last_accumulator_write = NULL;
1067 last_fixed_grf_write = NULL;
1068
1069 exec_node *node;
1070 exec_node *prev;
1071 for (node = instructions.get_tail(), prev = node->prev;
1072 !node->is_head_sentinel();
1073 node = prev, prev = node->prev) {
1074 schedule_node *n = (schedule_node *)node;
1075 fs_inst *inst = (fs_inst *)n->inst;
1076
1077 /* write-after-read deps. */
1078 for (int i = 0; i < inst->sources; i++) {
1079 if (inst->src[i].file == GRF) {
1080 if (post_reg_alloc) {
1081 for (int r = 0; r < inst->regs_read(i); r++)
1082 add_dep(n, last_grf_write[inst->src[i].reg + r], 0);
1083 } else {
1084 for (int r = 0; r < inst->regs_read(i); r++) {
1085 add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], 0);
1086 }
1087 }
1088 } else if (inst->src[i].file == HW_REG &&
1089 (inst->src[i].fixed_hw_reg.file ==
1090 BRW_GENERAL_REGISTER_FILE)) {
1091 if (post_reg_alloc) {
1092 for (int r = 0; r < inst->regs_read(i); r++)
1093 add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r], 0);
1094 } else {
1095 add_dep(n, last_fixed_grf_write, 0);
1096 }
1097 } else if (inst->src[i].is_accumulator()) {
1098 add_dep(n, last_accumulator_write, 0);
1099 } else if (inst->src[i].file != BAD_FILE &&
1100 inst->src[i].file != IMM &&
1101 inst->src[i].file != UNIFORM &&
1102 (inst->src[i].file != HW_REG ||
1103 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1104 assert(inst->src[i].file != MRF);
1105 add_barrier_deps(n);
1106 }
1107 }
1108
1109 if (inst->base_mrf != -1) {
1110 for (int i = 0; i < inst->mlen; i++) {
1111 /* It looks like the MRF regs are released in the send
1112 * instruction once it's sent, not when the result comes
1113 * back.
1114 */
1115 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1116 }
1117 }
1118
1119 if (inst->reads_flag()) {
1120 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1121 }
1122
1123 if (inst->reads_accumulator_implicitly()) {
1124 add_dep(n, last_accumulator_write);
1125 }
1126
1127 /* Update the things this instruction wrote, so earlier reads
1128 * can mark this as WAR dependency.
1129 */
1130 if (inst->dst.file == GRF) {
1131 if (post_reg_alloc) {
1132 for (int r = 0; r < inst->regs_written; r++)
1133 last_grf_write[inst->dst.reg + r] = n;
1134 } else {
1135 for (int r = 0; r < inst->regs_written; r++) {
1136 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
1137 }
1138 }
1139 } else if (inst->dst.file == MRF) {
1140 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
1141
1142 last_mrf_write[reg] = n;
1143
1144 if (is_compressed(inst)) {
1145 if (inst->dst.reg & BRW_MRF_COMPR4)
1146 reg += 4;
1147 else
1148 reg++;
1149
1150 last_mrf_write[reg] = n;
1151 }
1152 } else if (inst->dst.file == HW_REG &&
1153 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1154 if (post_reg_alloc) {
1155 for (int r = 0; r < inst->regs_written; r++)
1156 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
1157 } else {
1158 last_fixed_grf_write = n;
1159 }
1160 } else if (inst->dst.is_accumulator()) {
1161 last_accumulator_write = n;
1162 } else if (inst->dst.file != BAD_FILE &&
1163 !inst->dst.is_null()) {
1164 add_barrier_deps(n);
1165 }
1166
1167 if (inst->mlen > 0 && inst->base_mrf != -1) {
1168 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1169 last_mrf_write[inst->base_mrf + i] = n;
1170 }
1171 }
1172
1173 if (inst->writes_flag()) {
1174 last_conditional_mod[inst->flag_subreg] = n;
1175 }
1176
1177 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1178 last_accumulator_write = n;
1179 }
1180 }
1181 }
1182
1183 void
1184 vec4_instruction_scheduler::calculate_deps()
1185 {
1186 schedule_node *last_grf_write[grf_count];
1187 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1188 schedule_node *last_conditional_mod = NULL;
1189 schedule_node *last_accumulator_write = NULL;
1190 /* Fixed HW registers are assumed to be separate from the virtual
1191 * GRFs, so they can be tracked separately. We don't really write
1192 * to fixed GRFs much, so don't bother tracking them on a more
1193 * granular level.
1194 */
1195 schedule_node *last_fixed_grf_write = NULL;
1196
1197 /* The last instruction always needs to still be the last instruction.
1198 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1199 * other things after it would disturb the basic block, or it's the EOT
1200 * URB_WRITE and we should do a better job at dead code eliminating
1201 * anything that could have been scheduled after it.
1202 */
1203 schedule_node *last = (schedule_node *)instructions.get_tail();
1204 add_barrier_deps(last);
1205
1206 memset(last_grf_write, 0, sizeof(last_grf_write));
1207 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1208
1209 /* top-to-bottom dependencies: RAW and WAW. */
1210 foreach_in_list(schedule_node, n, &instructions) {
1211 vec4_instruction *inst = (vec4_instruction *)n->inst;
1212
1213 if (inst->has_side_effects())
1214 add_barrier_deps(n);
1215
1216 /* read-after-write deps. */
1217 for (int i = 0; i < 3; i++) {
1218 if (inst->src[i].file == GRF) {
1219 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1220 add_dep(last_grf_write[inst->src[i].reg + j], n);
1221 } else if (inst->src[i].file == HW_REG &&
1222 (inst->src[i].fixed_hw_reg.file ==
1223 BRW_GENERAL_REGISTER_FILE)) {
1224 add_dep(last_fixed_grf_write, n);
1225 } else if (inst->src[i].is_accumulator()) {
1226 assert(last_accumulator_write);
1227 add_dep(last_accumulator_write, n);
1228 } else if (inst->src[i].file != BAD_FILE &&
1229 inst->src[i].file != IMM &&
1230 inst->src[i].file != UNIFORM &&
1231 (inst->src[i].file != HW_REG ||
1232 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1233 /* No reads from MRF, and ATTR is already translated away */
1234 assert(inst->src[i].file != MRF &&
1235 inst->src[i].file != ATTR);
1236 add_barrier_deps(n);
1237 }
1238 }
1239
1240 if (!inst->is_send_from_grf()) {
1241 for (int i = 0; i < inst->mlen; i++) {
1242 /* It looks like the MRF regs are released in the send
1243 * instruction once it's sent, not when the result comes
1244 * back.
1245 */
1246 add_dep(last_mrf_write[inst->base_mrf + i], n);
1247 }
1248 }
1249
1250 if (inst->reads_flag()) {
1251 assert(last_conditional_mod);
1252 add_dep(last_conditional_mod, n);
1253 }
1254
1255 if (inst->reads_accumulator_implicitly()) {
1256 assert(last_accumulator_write);
1257 add_dep(last_accumulator_write, n);
1258 }
1259
1260 /* write-after-write deps. */
1261 if (inst->dst.file == GRF) {
1262 for (unsigned j = 0; j < inst->regs_written; ++j) {
1263 add_dep(last_grf_write[inst->dst.reg + j], n);
1264 last_grf_write[inst->dst.reg + j] = n;
1265 }
1266 } else if (inst->dst.file == MRF) {
1267 add_dep(last_mrf_write[inst->dst.reg], n);
1268 last_mrf_write[inst->dst.reg] = n;
1269 } else if (inst->dst.file == HW_REG &&
1270 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1271 last_fixed_grf_write = n;
1272 } else if (inst->dst.is_accumulator()) {
1273 add_dep(last_accumulator_write, n);
1274 last_accumulator_write = n;
1275 } else if (inst->dst.file != BAD_FILE &&
1276 !inst->dst.is_null()) {
1277 add_barrier_deps(n);
1278 }
1279
1280 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1281 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1282 add_dep(last_mrf_write[inst->base_mrf + i], n);
1283 last_mrf_write[inst->base_mrf + i] = n;
1284 }
1285 }
1286
1287 if (inst->writes_flag()) {
1288 add_dep(last_conditional_mod, n, 0);
1289 last_conditional_mod = n;
1290 }
1291
1292 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1293 !inst->dst.is_accumulator()) {
1294 add_dep(last_accumulator_write, n);
1295 last_accumulator_write = n;
1296 }
1297 }
1298
1299 /* bottom-to-top dependencies: WAR */
1300 memset(last_grf_write, 0, sizeof(last_grf_write));
1301 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1302 last_conditional_mod = NULL;
1303 last_accumulator_write = NULL;
1304 last_fixed_grf_write = NULL;
1305
1306 exec_node *node;
1307 exec_node *prev;
1308 for (node = instructions.get_tail(), prev = node->prev;
1309 !node->is_head_sentinel();
1310 node = prev, prev = node->prev) {
1311 schedule_node *n = (schedule_node *)node;
1312 vec4_instruction *inst = (vec4_instruction *)n->inst;
1313
1314 /* write-after-read deps. */
1315 for (int i = 0; i < 3; i++) {
1316 if (inst->src[i].file == GRF) {
1317 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1318 add_dep(n, last_grf_write[inst->src[i].reg + j]);
1319 } else if (inst->src[i].file == HW_REG &&
1320 (inst->src[i].fixed_hw_reg.file ==
1321 BRW_GENERAL_REGISTER_FILE)) {
1322 add_dep(n, last_fixed_grf_write);
1323 } else if (inst->src[i].is_accumulator()) {
1324 add_dep(n, last_accumulator_write);
1325 } else if (inst->src[i].file != BAD_FILE &&
1326 inst->src[i].file != IMM &&
1327 inst->src[i].file != UNIFORM &&
1328 (inst->src[i].file != HW_REG ||
1329 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1330 assert(inst->src[i].file != MRF &&
1331 inst->src[i].file != ATTR);
1332 add_barrier_deps(n);
1333 }
1334 }
1335
1336 if (!inst->is_send_from_grf()) {
1337 for (int i = 0; i < inst->mlen; i++) {
1338 /* It looks like the MRF regs are released in the send
1339 * instruction once it's sent, not when the result comes
1340 * back.
1341 */
1342 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1343 }
1344 }
1345
1346 if (inst->reads_flag()) {
1347 add_dep(n, last_conditional_mod);
1348 }
1349
1350 if (inst->reads_accumulator_implicitly()) {
1351 add_dep(n, last_accumulator_write);
1352 }
1353
1354 /* Update the things this instruction wrote, so earlier reads
1355 * can mark this as WAR dependency.
1356 */
1357 if (inst->dst.file == GRF) {
1358 for (unsigned j = 0; j < inst->regs_written; ++j)
1359 last_grf_write[inst->dst.reg + j] = n;
1360 } else if (inst->dst.file == MRF) {
1361 last_mrf_write[inst->dst.reg] = n;
1362 } else if (inst->dst.file == HW_REG &&
1363 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1364 last_fixed_grf_write = n;
1365 } else if (inst->dst.is_accumulator()) {
1366 last_accumulator_write = n;
1367 } else if (inst->dst.file != BAD_FILE &&
1368 !inst->dst.is_null()) {
1369 add_barrier_deps(n);
1370 }
1371
1372 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1373 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1374 last_mrf_write[inst->base_mrf + i] = n;
1375 }
1376 }
1377
1378 if (inst->writes_flag()) {
1379 last_conditional_mod = n;
1380 }
1381
1382 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1383 last_accumulator_write = n;
1384 }
1385 }
1386 }
1387
1388 schedule_node *
1389 fs_instruction_scheduler::choose_instruction_to_schedule()
1390 {
1391 schedule_node *chosen = NULL;
1392
1393 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1394 int chosen_time = 0;
1395
1396 /* Of the instructions ready to execute or the closest to
1397 * being ready, choose the oldest one.
1398 */
1399 foreach_in_list(schedule_node, n, &instructions) {
1400 if (!chosen || n->unblocked_time < chosen_time) {
1401 chosen = n;
1402 chosen_time = n->unblocked_time;
1403 }
1404 }
1405 } else {
1406 /* Before register allocation, we don't care about the latencies of
1407 * instructions. All we care about is reducing live intervals of
1408 * variables so that we can avoid register spilling, or get SIMD16
1409 * shaders which naturally do a better job of hiding instruction
1410 * latency.
1411 */
1412 foreach_in_list(schedule_node, n, &instructions) {
1413 fs_inst *inst = (fs_inst *)n->inst;
1414
1415 if (!chosen) {
1416 chosen = n;
1417 continue;
1418 }
1419
1420 /* Most important: If we can definitely reduce register pressure, do
1421 * so immediately.
1422 */
1423 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1424 int chosen_register_pressure_benefit =
1425 get_register_pressure_benefit(chosen->inst);
1426
1427 if (register_pressure_benefit > 0 &&
1428 register_pressure_benefit > chosen_register_pressure_benefit) {
1429 chosen = n;
1430 continue;
1431 } else if (chosen_register_pressure_benefit > 0 &&
1432 (register_pressure_benefit <
1433 chosen_register_pressure_benefit)) {
1434 continue;
1435 }
1436
1437 if (mode == SCHEDULE_PRE_LIFO) {
1438 /* Prefer instructions that recently became available for
1439 * scheduling. These are the things that are most likely to
1440 * (eventually) make a variable dead and reduce register pressure.
1441 * Typical register pressure estimates don't work for us because
1442 * most of our pressure comes from texturing, where no single
1443 * instruction to schedule will make a vec4 value dead.
1444 */
1445 if (n->cand_generation > chosen->cand_generation) {
1446 chosen = n;
1447 continue;
1448 } else if (n->cand_generation < chosen->cand_generation) {
1449 continue;
1450 }
1451
1452 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1453 * do this, then because we prefer instructions that just became
1454 * candidates, we'll end up in a pattern of scheduling a SEND,
1455 * then the MRFs for the next SEND, then the next SEND, then the
1456 * MRFs, etc., without ever consuming the results of a send.
1457 */
1458 if (v->devinfo->gen < 7) {
1459 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1460
1461 /* We use regs_written > exec_size / 8 as our test for the
1462 * kind of send instruction to avoid -- only sends generate
1463 * many regs, and a single-result send is probably actually
1464 * reducing register pressure.
1465 */
1466 if (inst->regs_written <= inst->exec_size / 8 &&
1467 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1468 chosen = n;
1469 continue;
1470 } else if (inst->regs_written > chosen_inst->regs_written) {
1471 continue;
1472 }
1473 }
1474 }
1475
1476 /* For instructions pushed on the cands list at the same time, prefer
1477 * the one with the highest delay to the end of the program. This is
1478 * most likely to have its values able to be consumed first (such as
1479 * for a large tree of lowered ubo loads, which appear reversed in
1480 * the instruction stream with respect to when they can be consumed).
1481 */
1482 if (n->delay > chosen->delay) {
1483 chosen = n;
1484 continue;
1485 } else if (n->delay < chosen->delay) {
1486 continue;
1487 }
1488
1489 /* If all other metrics are equal, we prefer the first instruction in
1490 * the list (program order).
1491 */
1492 }
1493 }
1494
1495 return chosen;
1496 }
1497
1498 schedule_node *
1499 vec4_instruction_scheduler::choose_instruction_to_schedule()
1500 {
1501 schedule_node *chosen = NULL;
1502 int chosen_time = 0;
1503
1504 /* Of the instructions ready to execute or the closest to being ready,
1505 * choose the oldest one.
1506 */
1507 foreach_in_list(schedule_node, n, &instructions) {
1508 if (!chosen || n->unblocked_time < chosen_time) {
1509 chosen = n;
1510 chosen_time = n->unblocked_time;
1511 }
1512 }
1513
1514 return chosen;
1515 }
1516
1517 int
1518 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1519 {
1520 if (is_compressed((fs_inst *)inst))
1521 return 4;
1522 else
1523 return 2;
1524 }
1525
1526 int
1527 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1528 {
1529 /* We always execute as two vec4s in parallel. */
1530 return 2;
1531 }
1532
1533 void
1534 instruction_scheduler::schedule_instructions(bblock_t *block)
1535 {
1536 const struct brw_device_info *devinfo = bs->devinfo;
1537 backend_instruction *inst = block->end();
1538 time = 0;
1539 if (!post_reg_alloc)
1540 reg_pressure = reg_pressure_in[block->num];
1541 block_idx = block->num;
1542
1543 /* Remove non-DAG heads from the list. */
1544 foreach_in_list_safe(schedule_node, n, &instructions) {
1545 if (n->parent_count != 0)
1546 n->remove();
1547 }
1548
1549 unsigned cand_generation = 1;
1550 while (!instructions.is_empty()) {
1551 schedule_node *chosen = choose_instruction_to_schedule();
1552
1553 /* Schedule this instruction. */
1554 assert(chosen);
1555 chosen->remove();
1556 inst->insert_before(block, chosen->inst);
1557 instructions_to_schedule--;
1558
1559 if (!post_reg_alloc) {
1560 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1561 update_register_pressure(chosen->inst);
1562 }
1563
1564 /* If we expected a delay for scheduling, then bump the clock to reflect
1565 * that. In reality, the hardware will switch to another hyperthread
1566 * and may not return to dispatching our thread for a while even after
1567 * we're unblocked. After this, we have the time when the chosen
1568 * instruction will start executing.
1569 */
1570 time = MAX2(time, chosen->unblocked_time);
1571
1572 /* Update the clock for how soon an instruction could start after the
1573 * chosen one.
1574 */
1575 time += issue_time(chosen->inst);
1576
1577 if (debug) {
1578 fprintf(stderr, "clock %4d, scheduled: ", time);
1579 bs->dump_instruction(chosen->inst);
1580 if (!post_reg_alloc)
1581 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1582 }
1583
1584 /* Now that we've scheduled a new instruction, some of its
1585 * children can be promoted to the list of instructions ready to
1586 * be scheduled. Update the children's unblocked time for this
1587 * DAG edge as we do so.
1588 */
1589 for (int i = chosen->child_count - 1; i >= 0; i--) {
1590 schedule_node *child = chosen->children[i];
1591
1592 child->unblocked_time = MAX2(child->unblocked_time,
1593 time + chosen->child_latency[i]);
1594
1595 if (debug) {
1596 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1597 bs->dump_instruction(child->inst);
1598 }
1599
1600 child->cand_generation = cand_generation;
1601 child->parent_count--;
1602 if (child->parent_count == 0) {
1603 if (debug) {
1604 fprintf(stderr, "\t\tnow available\n");
1605 }
1606 instructions.push_head(child);
1607 }
1608 }
1609 cand_generation++;
1610
1611 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1612 * but it's more limited pre-Gen6, so if we send something off to it then
1613 * the next math instruction isn't going to make progress until the first
1614 * is done.
1615 */
1616 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1617 foreach_in_list(schedule_node, n, &instructions) {
1618 if (n->inst->is_math())
1619 n->unblocked_time = MAX2(n->unblocked_time,
1620 time + chosen->latency);
1621 }
1622 }
1623 }
1624
1625 if (block->end()->opcode == BRW_OPCODE_NOP)
1626 block->end()->remove(block);
1627 assert(instructions_to_schedule == 0);
1628
1629 block->cycle_count = time;
1630 }
1631
1632 static unsigned get_cycle_count(cfg_t *cfg)
1633 {
1634 unsigned count = 0, multiplier = 1;
1635 foreach_block(block, cfg) {
1636 if (block->start()->opcode == BRW_OPCODE_DO)
1637 multiplier *= 10; /* assume that loops execute ~10 times */
1638
1639 count += block->cycle_count * multiplier;
1640
1641 if (block->end()->opcode == BRW_OPCODE_WHILE)
1642 multiplier /= 10;
1643 }
1644
1645 return count;
1646 }
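
/* Worked example of the weighting above: a block nested inside two loops
 * (DO ... DO ... WHILE ... WHILE) contributes block->cycle_count * 100 to the
 * estimate, while a straight-line block contributes its cycle count once.
 */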
1647
1648 void
1649 instruction_scheduler::run(cfg_t *cfg)
1650 {
1651 if (debug && !post_reg_alloc) {
1652 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1653 post_reg_alloc);
1654 bs->dump_instructions();
1655 }
1656
1657 if (!post_reg_alloc)
1658 setup_liveness(cfg);
1659
1660 foreach_block(block, cfg) {
1661 if (block->end_ip - block->start_ip <= 1)
1662 continue;
1663
1664 if (reads_remaining) {
1665 memset(reads_remaining, 0,
1666 grf_count * sizeof(*reads_remaining));
1667 memset(hw_reads_remaining, 0,
1668 hw_reg_count * sizeof(*hw_reads_remaining));
1669 memset(written, 0, grf_count * sizeof(*written));
1670
1671 foreach_inst_in_block(fs_inst, inst, block)
1672 count_reads_remaining(inst);
1673 }
1674
1675 add_insts_from_block(block);
1676
1677 calculate_deps();
1678
1679 foreach_in_list(schedule_node, n, &instructions) {
1680 compute_delay(n);
1681 }
1682
1683 schedule_instructions(block);
1684 }
1685
1686 if (debug && !post_reg_alloc) {
1687 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1688 post_reg_alloc);
1689 bs->dump_instructions();
1690 }
1691
1692 cfg->cycle_count = get_cycle_count(cfg);
1693 }
1694
1695 void
1696 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1697 {
1698 if (mode != SCHEDULE_POST)
1699 calculate_live_intervals();
1700
1701 int grf_count;
1702 if (mode == SCHEDULE_POST)
1703 grf_count = grf_used;
1704 else
1705 grf_count = alloc.count;
1706
1707 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1708 cfg->num_blocks, mode);
1709 sched.run(cfg);
1710
1711 if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
1712 fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
1713 stage_abbrev, dispatch_width, sched.time);
1714 }
1715
1716 invalidate_live_intervals();
1717 }
1718
1719 void
1720 vec4_visitor::opt_schedule_instructions()
1721 {
1722 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1723 sched.run(cfg);
1724
1725 if (unlikely(debug_enabled)) {
1726 fprintf(stderr, "%s estimated execution time: %d cycles\n",
1727 stage_abbrev, sched.time);
1728 }
1729
1730 invalidate_live_intervals();
1731 }