Merge remote-tracking branch 'origin/master' into vulkan
[mesa.git] / src / mesa / drivers / dri / i965 / brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
  36 /** @file brw_schedule_instructions.cpp
  37  *
  38  * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
  47  * since actually computing the optimal scheduling is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54 * immediately, and there are a range of heuristic options to choose
55 * from in picking among those.
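 *
 * As an illustrative sketch only (not literal code), the per-block flow
 * implemented by run() below is roughly:
 *
 *    add_insts_from_block(block);   // detach instructions, wrap in nodes
 *    calculate_deps();              // build the dependency DAG
 *    compute_delay(n);              // critical-path estimate for each node
 *    schedule_instructions(block);  // greedily re-emit in the new order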
56 */
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85 * This is the sum of the instruction's latency plus the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89 };
90
91 void
92 schedule_node::set_latency_gen4()
93 {
94 int chans = 8;
95 int math_latency = 22;
96
97 switch (inst->opcode) {
98 case SHADER_OPCODE_RCP:
99 this->latency = 1 * chans * math_latency;
100 break;
101 case SHADER_OPCODE_RSQ:
102 this->latency = 2 * chans * math_latency;
103 break;
104 case SHADER_OPCODE_INT_QUOTIENT:
105 case SHADER_OPCODE_SQRT:
106 case SHADER_OPCODE_LOG2:
107 /* full precision log. partial is 2. */
108 this->latency = 3 * chans * math_latency;
109 break;
110 case SHADER_OPCODE_INT_REMAINDER:
111 case SHADER_OPCODE_EXP2:
112 /* full precision. partial is 3, same throughput. */
113 this->latency = 4 * chans * math_latency;
114 break;
115 case SHADER_OPCODE_POW:
116 this->latency = 8 * chans * math_latency;
117 break;
118 case SHADER_OPCODE_SIN:
119 case SHADER_OPCODE_COS:
120 /* minimum latency, max is 12 rounds. */
121 this->latency = 5 * chans * math_latency;
122 break;
123 default:
124 this->latency = 2;
125 break;
126 }
127 }
128
129 void
130 schedule_node::set_latency_gen7(bool is_haswell)
131 {
132 switch (inst->opcode) {
133 case BRW_OPCODE_MAD:
134 /* 2 cycles
135 * (since the last two src operands are in different register banks):
136 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
137 *
138 * 3 cycles on IVB, 4 on HSW
139 * (since the last two src operands are in the same register bank):
140 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
141 *
142 * 18 cycles on IVB, 16 on HSW
143 * (since the last two src operands are in different register banks):
144 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
145 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
146 *
147 * 20 cycles on IVB, 18 on HSW
148 * (since the last two src operands are in the same register bank):
149 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
150 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
151 */
152
153 /* Our register allocator doesn't know about register banks, so use the
154 * higher latency.
155 */
156 latency = is_haswell ? 16 : 18;
157 break;
158
159 case BRW_OPCODE_LRP:
160 /* 2 cycles
161 * (since the last two src operands are in different register banks):
162 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
163 *
164 * 3 cycles on IVB, 4 on HSW
165 * (since the last two src operands are in the same register bank):
166 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
167 *
168 * 16 cycles on IVB, 14 on HSW
169 * (since the last two src operands are in different register banks):
170 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
171 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
172 *
173 * 16 cycles
174 * (since the last two src operands are in the same register bank):
175 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
176 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
177 */
178
179 /* Our register allocator doesn't know about register banks, so use the
180 * higher latency.
181 */
182 latency = 14;
183 break;
184
185 case SHADER_OPCODE_RCP:
186 case SHADER_OPCODE_RSQ:
187 case SHADER_OPCODE_SQRT:
188 case SHADER_OPCODE_LOG2:
189 case SHADER_OPCODE_EXP2:
190 case SHADER_OPCODE_SIN:
191 case SHADER_OPCODE_COS:
192 /* 2 cycles:
193 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
194 *
195 * 18 cycles:
196 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
197 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
198 *
199 * Same for exp2, log2, rsq, sqrt, sin, cos.
200 */
201 latency = is_haswell ? 14 : 16;
202 break;
203
204 case SHADER_OPCODE_POW:
205 /* 2 cycles:
206 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
207 *
208 * 26 cycles:
209 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
210 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
211 */
212 latency = is_haswell ? 22 : 24;
213 break;
214
215 case SHADER_OPCODE_TEX:
216 case SHADER_OPCODE_TXD:
217 case SHADER_OPCODE_TXF:
218 case SHADER_OPCODE_TXL:
219 /* 18 cycles:
220 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
221 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
222 * send(8) g4<1>UW g114<8,8,1>F
223 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
224 *
225 * 697 +/-49 cycles (min 610, n=26):
226 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
227 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
228 * send(8) g4<1>UW g114<8,8,1>F
229 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
230 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
231 *
 232     * So the latency on our first texture load of the batchbuffer is
233 * ~700 cycles, since the caches are cold at that point.
234 *
235 * 840 +/- 92 cycles (min 720, n=25):
236 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
237 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
238 * send(8) g4<1>UW g114<8,8,1>F
239 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
240 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
241 * send(8) g4<1>UW g114<8,8,1>F
242 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
243 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
244 *
245 * On the second load, it takes just an extra ~140 cycles, and after
246 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
247 *
248 * 683 +/- 49 cycles (min = 602, n=47):
249 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
250 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
251 * send(8) g4<1>UW g114<8,8,1>F
252 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
253 * send(8) g50<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
256 *
257 * The unit appears to be pipelined, since this matches up with the
258 * cache-cold case, despite there being two loads here. If you replace
259 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
260 *
261 * So, take some number between the cache-hot 140 cycles and the
262 * cache-cold 700 cycles. No particular tuning was done on this.
263 *
264 * I haven't done significant testing of the non-TEX opcodes. TXL at
265 * least looked about the same as TEX.
266 */
267 latency = 200;
268 break;
269
270 case SHADER_OPCODE_TXS:
271 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
272 * cycles (n=15):
273 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
274 * send(8) g6<1>UW g114<8,8,1>F
275 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
276 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
277 *
278 *
279 * Two loads was 535 +/- 30 cycles (n=19):
280 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
281 * send(16) g6<1>UW g114<8,8,1>F
282 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
283 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
284 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
285 * send(16) g8<1>UW g114<8,8,1>F
286 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
287 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
288 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
289 *
 290     * Since the only cache that should matter is the instruction/state
 291     * cache containing the surface state, assume that we always have hot
 292     * caches.
293 */
294 latency = 100;
295 break;
296
297 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
298 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
299 case VS_OPCODE_PULL_CONSTANT_LOAD:
 300    /* Testing with varying-index pull constants:
301 *
302 * 16 cycles:
303 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
304 * send(8) g4<1>F g4<8,8,1>D
305 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
306 *
307 * ~480 cycles:
308 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
309 * send(8) g4<1>F g4<8,8,1>D
310 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
311 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
312 *
313 * ~620 cycles:
314 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
315 * send(8) g4<1>F g4<8,8,1>D
316 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
317 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
318 * send(8) g4<1>F g4<8,8,1>D
319 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
320 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
321 *
322 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
323 * about 460. We expect to mostly be cache hot, so pick something more
324 * in that direction.
325 */
326 latency = 200;
327 break;
328
329 case SHADER_OPCODE_GEN7_SCRATCH_READ:
 330    /* Testing a load from offset 0 that had been previously written:
331 *
332 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
333 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
334 *
335 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
336 * then around 140. Presumably this is cache hit vs miss.
337 */
338 latency = 50;
339 break;
340
341 case SHADER_OPCODE_UNTYPED_ATOMIC:
342 case SHADER_OPCODE_TYPED_ATOMIC:
343 /* Test code:
344 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
345 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
346 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
347 * send(8) g4<1>ud g112<8,8,1>ud
348 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
349 *
 350     * Running it 100 times as a fragment shader on a 128x128 quad
 351     * gives an average latency of 13867 cycles per atomic op,
 352     * standard deviation 3%.  Note that this is a rather
 353     * pessimistic estimate; the actual latency in cases with few
354 * collisions between threads and favorable pipelining has been
355 * seen to be reduced by a factor of 100.
356 */
357 latency = 14000;
358 break;
359
360 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
361 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
362 case SHADER_OPCODE_TYPED_SURFACE_READ:
363 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
364 /* Test code:
365 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
366 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
367 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
368 * send(8) g4<1>UD g112<8,8,1>UD
369 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
370 * .
371 * . [repeats 8 times]
372 * .
373 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
374 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
375 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
376 * send(8) g4<1>UD g112<8,8,1>UD
377 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
378 *
 379     * Running it 100 times as a fragment shader on a 128x128 quad
380 * gives an average latency of 583 cycles per surface read,
381 * standard deviation 0.9%.
382 */
383 latency = is_haswell ? 300 : 600;
384 break;
385
386 default:
387 /* 2 cycles:
388 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
389 *
390 * 16 cycles:
391 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
392 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
393 */
394 latency = 14;
395 break;
396 }
397 }
398
399 class instruction_scheduler {
400 public:
401 instruction_scheduler(backend_shader *s, int grf_count,
402 int hw_reg_count, int block_count,
403 instruction_scheduler_mode mode)
404 {
405 this->bs = s;
406 this->mem_ctx = ralloc_context(NULL);
407 this->grf_count = grf_count;
408 this->hw_reg_count = hw_reg_count;
409 this->instructions.make_empty();
410 this->instructions_to_schedule = 0;
411 this->post_reg_alloc = (mode == SCHEDULE_POST);
412 this->mode = mode;
413 this->time = 0;
414 if (!post_reg_alloc) {
415 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
416
417 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
418 for (int i = 0; i < block_count; i++)
419 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
420 BITSET_WORDS(grf_count));
421
422 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
423 for (int i = 0; i < block_count; i++)
424 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
425 BITSET_WORDS(grf_count));
426
427 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
428 for (int i = 0; i < block_count; i++)
429 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
430 BITSET_WORDS(hw_reg_count));
431
432 this->written = rzalloc_array(mem_ctx, bool, grf_count);
433
434 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
435
436 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
437 } else {
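         /* Post-register-allocation scheduling only optimizes for latency,
          * so none of the register-pressure tracking state is needed.
          */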
438 this->reg_pressure_in = NULL;
439 this->livein = NULL;
440 this->liveout = NULL;
441 this->hw_liveout = NULL;
442 this->written = NULL;
443 this->reads_remaining = NULL;
444 this->hw_reads_remaining = NULL;
445 }
446 }
447
448 ~instruction_scheduler()
449 {
450 ralloc_free(this->mem_ctx);
451 }
452 void add_barrier_deps(schedule_node *n);
453 void add_dep(schedule_node *before, schedule_node *after, int latency);
454 void add_dep(schedule_node *before, schedule_node *after);
455
456 void run(cfg_t *cfg);
457 void add_insts_from_block(bblock_t *block);
458 void compute_delay(schedule_node *node);
459 virtual void calculate_deps() = 0;
460 virtual schedule_node *choose_instruction_to_schedule() = 0;
461
462 /**
463 * Returns how many cycles it takes the instruction to issue.
464 *
 465     * Instructions in gen hardware are handled one SIMD4 vector at a time,
466 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
467 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
468 */
469 virtual int issue_time(backend_instruction *inst) = 0;
470
471 virtual void count_reads_remaining(backend_instruction *inst) = 0;
472 virtual void setup_liveness(cfg_t *cfg) = 0;
473 virtual void update_register_pressure(backend_instruction *inst) = 0;
474 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
475
476 void schedule_instructions(bblock_t *block);
477
478 void *mem_ctx;
479
480 bool post_reg_alloc;
481 int instructions_to_schedule;
482 int grf_count;
483 int hw_reg_count;
484 int time;
485 int reg_pressure;
486 int block_idx;
487 exec_list instructions;
488 backend_shader *bs;
489
490 instruction_scheduler_mode mode;
491
492 /*
493 * The register pressure at the beginning of each basic block.
494 */
495
496 int *reg_pressure_in;
497
498 /*
 499     * The virtual GRFs whose ranges overlap the beginning of each basic block.
500 */
501
502 BITSET_WORD **livein;
503
504 /*
 505     * The virtual GRFs whose ranges overlap the end of each basic block.
506 */
507
508 BITSET_WORD **liveout;
509
510 /*
 511     * The hardware GRFs whose ranges overlap the end of each basic block.
512 */
513
514 BITSET_WORD **hw_liveout;
515
516 /*
517 * Whether we've scheduled a write for this virtual GRF yet.
518 */
519
520 bool *written;
521
522 /*
523 * How many reads we haven't scheduled for this virtual GRF yet.
524 */
525
526 int *reads_remaining;
527
528 /*
529 * How many reads we haven't scheduled for this hardware GRF yet.
530 */
531
532 int *hw_reads_remaining;
533 };
534
535 class fs_instruction_scheduler : public instruction_scheduler
536 {
537 public:
538 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
539 int block_count,
540 instruction_scheduler_mode mode);
541 void calculate_deps();
542 bool is_compressed(fs_inst *inst);
543 schedule_node *choose_instruction_to_schedule();
544 int issue_time(backend_instruction *inst);
545 fs_visitor *v;
546
547 void count_reads_remaining(backend_instruction *inst);
548 void setup_liveness(cfg_t *cfg);
549 void update_register_pressure(backend_instruction *inst);
550 int get_register_pressure_benefit(backend_instruction *inst);
551 };
552
553 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
554 int grf_count, int hw_reg_count,
555 int block_count,
556 instruction_scheduler_mode mode)
557 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
558 v(v)
559 {
560 }
561
562 static bool
563 is_src_duplicate(fs_inst *inst, int src)
564 {
565 for (int i = 0; i < src; i++)
566 if (inst->src[i].equals(inst->src[src]))
567 return true;
568
569 return false;
570 }
571
572 void
573 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
574 {
575 fs_inst *inst = (fs_inst *)be;
576
577 if (!reads_remaining)
578 return;
579
580 for (int i = 0; i < inst->sources; i++) {
581 if (is_src_duplicate(inst, i))
582 continue;
583
584 if (inst->src[i].file == VGRF) {
585 reads_remaining[inst->src[i].nr]++;
586 } else if (inst->src[i].file == FIXED_GRF) {
587 if (inst->src[i].nr >= hw_reg_count)
588 continue;
589
590 for (int j = 0; j < inst->regs_read(i); j++)
591 hw_reads_remaining[inst->src[i].nr + j]++;
592 }
593 }
594 }
595
596 void
597 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
598 {
599 /* First, compute liveness on a per-GRF level using the in/out sets from
 600     * the liveness calculation.
601 */
602 for (int block = 0; block < cfg->num_blocks; block++) {
603 for (int i = 0; i < v->live_intervals->num_vars; i++) {
604 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
605 int vgrf = v->live_intervals->vgrf_from_var[i];
606 if (!BITSET_TEST(livein[block], vgrf)) {
607 reg_pressure_in[block] += v->alloc.sizes[vgrf];
608 BITSET_SET(livein[block], vgrf);
609 }
610 }
611
612 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
613 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
614 }
615 }
616
617 /* Now, extend the live in/live out sets for when a range crosses a block
618 * boundary, which matches what our register allocator/interference code
 619     * does to account for force_writemask_all and incompatible exec_masks.
620 */
621 for (int block = 0; block < cfg->num_blocks - 1; block++) {
622 for (int i = 0; i < grf_count; i++) {
623 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
624 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
625 if (!BITSET_TEST(livein[block + 1], i)) {
626 reg_pressure_in[block + 1] += v->alloc.sizes[i];
627 BITSET_SET(livein[block + 1], i);
628 }
629
630 BITSET_SET(liveout[block], i);
631 }
632 }
633 }
634
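   /* Finally, account for the payload (fixed hardware GRF) registers: each
    * one adds to the pressure of every block that starts before its last
    * use, and is marked live-out of every block that ends at or before it.
    */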
635 int payload_last_use_ip[hw_reg_count];
636 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
637
638 for (int i = 0; i < hw_reg_count; i++) {
639 if (payload_last_use_ip[i] == -1)
640 continue;
641
642 for (int block = 0; block < cfg->num_blocks; block++) {
643 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
644 reg_pressure_in[block]++;
645
646 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
647 BITSET_SET(hw_liveout[block], i);
648 }
649 }
650 }
651
652 void
653 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
654 {
655 fs_inst *inst = (fs_inst *)be;
656
657 if (!reads_remaining)
658 return;
659
660 if (inst->dst.file == VGRF) {
661 written[inst->dst.nr] = true;
662 }
663
664 for (int i = 0; i < inst->sources; i++) {
665 if (is_src_duplicate(inst, i))
666 continue;
667
668 if (inst->src[i].file == VGRF) {
669 reads_remaining[inst->src[i].nr]--;
670 } else if (inst->src[i].file == FIXED_GRF &&
671 inst->src[i].nr < hw_reg_count) {
672 for (int off = 0; off < inst->regs_read(i); off++)
673 hw_reads_remaining[inst->src[i].nr + off]--;
674 }
675 }
676 }
677
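/* Estimate the net register-pressure change from scheduling this instruction
 * next: writing a VGRF that is not yet live in this block costs its size
 * (negative benefit), while issuing the last remaining read of a value that
 * is not live-out frees its size (positive benefit).
 */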
678 int
679 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
680 {
681 fs_inst *inst = (fs_inst *)be;
682 int benefit = 0;
683
684 if (inst->dst.file == VGRF) {
685 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
686 !written[inst->dst.nr])
687 benefit -= v->alloc.sizes[inst->dst.nr];
688 }
689
690 for (int i = 0; i < inst->sources; i++) {
691 if (is_src_duplicate(inst, i))
692 continue;
693
694 if (inst->src[i].file == VGRF &&
695 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
696 reads_remaining[inst->src[i].nr] == 1)
697 benefit += v->alloc.sizes[inst->src[i].nr];
698
699 if (inst->src[i].file == FIXED_GRF &&
700 inst->src[i].nr < hw_reg_count) {
701 for (int off = 0; off < inst->regs_read(i); off++) {
702 int reg = inst->src[i].nr + off;
703 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
704 hw_reads_remaining[reg] == 1) {
705 benefit++;
706 }
707 }
708 }
709 }
710
711 return benefit;
712 }
713
714 class vec4_instruction_scheduler : public instruction_scheduler
715 {
716 public:
717 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
718 void calculate_deps();
719 schedule_node *choose_instruction_to_schedule();
720 int issue_time(backend_instruction *inst);
721 vec4_visitor *v;
722
723 void count_reads_remaining(backend_instruction *inst);
724 void setup_liveness(cfg_t *cfg);
725 void update_register_pressure(backend_instruction *inst);
726 int get_register_pressure_benefit(backend_instruction *inst);
727 };
728
729 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
730 int grf_count)
731 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
732 v(v)
733 {
734 }
735
736 void
737 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
748 {
749 }
750
751 int
752 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
753 {
754 return 0;
755 }
756
757 schedule_node::schedule_node(backend_instruction *inst,
758 instruction_scheduler *sched)
759 {
760 const struct brw_device_info *devinfo = sched->bs->devinfo;
761
762 this->inst = inst;
763 this->child_array_size = 0;
764 this->children = NULL;
765 this->child_latency = NULL;
766 this->child_count = 0;
767 this->parent_count = 0;
768 this->unblocked_time = 0;
769 this->cand_generation = 0;
770 this->delay = 0;
771
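   /* Before register allocation we schedule purely for register pressure,
    * so a unit latency is enough; the measured latencies below only matter
    * for the post-register-allocation pass.
    */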
772 /* We can't measure Gen6 timings directly but expect them to be much
 773     * closer to Gen7 than to Gen4.
774 */
775 if (!sched->post_reg_alloc)
776 this->latency = 1;
777 else if (devinfo->gen >= 6)
778 set_latency_gen7(devinfo->is_haswell);
779 else
780 set_latency_gen4();
781 }
782
783 void
784 instruction_scheduler::add_insts_from_block(bblock_t *block)
785 {
786 /* Removing the last instruction from a basic block removes the block as
787 * well, so put a NOP at the end to keep it alive.
788 */
789 if (!block->end()->is_control_flow()) {
790 backend_instruction *nop = new(mem_ctx) backend_instruction();
791 nop->opcode = BRW_OPCODE_NOP;
792 block->end()->insert_after(block, nop);
793 }
794
795 foreach_inst_in_block_safe(backend_instruction, inst, block) {
796 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
797 continue;
798
799 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
800
801 this->instructions_to_schedule++;
802
803 inst->remove(block);
804 instructions.push_tail(n);
805 }
806 }
807
808 /** Recursive computation of the delay member of a node. */
809 void
810 instruction_scheduler::compute_delay(schedule_node *n)
811 {
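   /* A zero delay doubles as a "not yet computed" marker (issue_time() is
    * always nonzero), so each node's delay is computed at most once even
    * when it is reachable through several parents.
    */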
812 if (!n->child_count) {
813 n->delay = issue_time(n->inst);
814 } else {
815 for (int i = 0; i < n->child_count; i++) {
816 if (!n->children[i]->delay)
817 compute_delay(n->children[i]);
818 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
819 }
820 }
821 }
822
823 /**
824 * Add a dependency between two instruction nodes.
825 *
826 * The @after node will be scheduled after @before. We will try to
827 * schedule it @latency cycles after @before, but no guarantees there.
828 */
829 void
830 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
831 int latency)
832 {
833 if (!before || !after)
834 return;
835
836 assert(before != after);
837
838 for (int i = 0; i < before->child_count; i++) {
839 if (before->children[i] == after) {
840 before->child_latency[i] = MAX2(before->child_latency[i], latency);
841 return;
842 }
843 }
844
845 if (before->child_array_size <= before->child_count) {
846 if (before->child_array_size < 16)
847 before->child_array_size = 16;
848 else
849 before->child_array_size *= 2;
850
851 before->children = reralloc(mem_ctx, before->children,
852 schedule_node *,
853 before->child_array_size);
854 before->child_latency = reralloc(mem_ctx, before->child_latency,
855 int, before->child_array_size);
856 }
857
858 before->children[before->child_count] = after;
859 before->child_latency[before->child_count] = latency;
860 before->child_count++;
861 after->parent_count++;
862 }
863
864 void
865 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
866 {
867 if (!before)
868 return;
869
870 add_dep(before, after, before->latency);
871 }
872
873 /**
874 * Sometimes we really want this node to execute after everything that
875 * was before it and before everything that followed it. This adds
876 * the deps to do so.
877 */
878 void
879 instruction_scheduler::add_barrier_deps(schedule_node *n)
880 {
881 schedule_node *prev = (schedule_node *)n->prev;
882 schedule_node *next = (schedule_node *)n->next;
883
884 if (prev) {
885 while (!prev->is_head_sentinel()) {
886 add_dep(prev, n, 0);
887 prev = (schedule_node *)prev->prev;
888 }
889 }
890
891 if (next) {
892 while (!next->is_tail_sentinel()) {
893 add_dep(n, next, 0);
894 next = (schedule_node *)next->next;
895 }
896 }
897 }
898
 899 /* Instruction scheduling needs to be aware of when an MRF write
900 * actually writes 2 MRFs.
901 */
902 bool
903 fs_instruction_scheduler::is_compressed(fs_inst *inst)
904 {
905 return inst->exec_size == 16;
906 }
907
908 void
909 fs_instruction_scheduler::calculate_deps()
910 {
911 /* Pre-register-allocation, this tracks the last write per VGRF offset.
912 * After register allocation, reg_offsets are gone and we track individual
913 * GRF registers.
914 */
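   /* The 16x stride below assumes a VGRF's reg_offset never reaches 16, so
    * (nr * 16 + reg_offset) indexes a unique slot per VGRF offset.
    */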
915 schedule_node *last_grf_write[grf_count * 16];
916 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
917 schedule_node *last_conditional_mod[2] = { NULL, NULL };
918 schedule_node *last_accumulator_write = NULL;
919 /* Fixed HW registers are assumed to be separate from the virtual
920 * GRFs, so they can be tracked separately. We don't really write
921 * to fixed GRFs much, so don't bother tracking them on a more
922 * granular level.
923 */
924 schedule_node *last_fixed_grf_write = NULL;
925
926 /* The last instruction always needs to still be the last
927 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
928 * WHILE) and scheduling other things after it would disturb the
929 * basic block, or it's FB_WRITE and we should do a better job at
930 * dead code elimination anyway.
931 */
932 schedule_node *last = (schedule_node *)instructions.get_tail();
933 add_barrier_deps(last);
934
935 memset(last_grf_write, 0, sizeof(last_grf_write));
936 memset(last_mrf_write, 0, sizeof(last_mrf_write));
937
938 /* top-to-bottom dependencies: RAW and WAW. */
939 foreach_in_list(schedule_node, n, &instructions) {
940 fs_inst *inst = (fs_inst *)n->inst;
941
942 if ((inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
943 inst->has_side_effects()) &&
944 inst->opcode != FS_OPCODE_FB_WRITE)
945 add_barrier_deps(n);
946
947 /* read-after-write deps. */
948 for (int i = 0; i < inst->sources; i++) {
949 if (inst->src[i].file == VGRF) {
950 if (post_reg_alloc) {
951 for (int r = 0; r < inst->regs_read(i); r++)
952 add_dep(last_grf_write[inst->src[i].nr + r], n);
953 } else {
954 for (int r = 0; r < inst->regs_read(i); r++) {
955 add_dep(last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], n);
956 }
957 }
958 } else if (inst->src[i].file == FIXED_GRF) {
959 if (post_reg_alloc) {
960 for (int r = 0; r < inst->regs_read(i); r++)
961 add_dep(last_grf_write[inst->src[i].nr + r], n);
962 } else {
963 add_dep(last_fixed_grf_write, n);
964 }
965 } else if (inst->src[i].is_accumulator()) {
966 add_dep(last_accumulator_write, n);
967 } else if (inst->src[i].file != BAD_FILE &&
968 inst->src[i].file != IMM &&
969 inst->src[i].file != UNIFORM) {
970 assert(inst->src[i].file != MRF);
971 add_barrier_deps(n);
972 }
973 }
974
975 if (inst->base_mrf != -1) {
976 for (int i = 0; i < inst->mlen; i++) {
977 /* It looks like the MRF regs are released in the send
978 * instruction once it's sent, not when the result comes
979 * back.
980 */
981 add_dep(last_mrf_write[inst->base_mrf + i], n);
982 }
983 }
984
985 if (inst->reads_flag()) {
986 add_dep(last_conditional_mod[inst->flag_subreg], n);
987 }
988
989 if (inst->reads_accumulator_implicitly()) {
990 add_dep(last_accumulator_write, n);
991 }
992
993 /* write-after-write deps. */
994 if (inst->dst.file == VGRF) {
995 if (post_reg_alloc) {
996 for (int r = 0; r < inst->regs_written; r++) {
997 add_dep(last_grf_write[inst->dst.nr + r], n);
998 last_grf_write[inst->dst.nr + r] = n;
999 }
1000 } else {
1001 for (int r = 0; r < inst->regs_written; r++) {
1002 add_dep(last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r], n);
1003 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1004 }
1005 }
1006 } else if (inst->dst.file == MRF) {
1007 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1008
1009 add_dep(last_mrf_write[reg], n);
1010 last_mrf_write[reg] = n;
1011 if (is_compressed(inst)) {
1012 if (inst->dst.nr & BRW_MRF_COMPR4)
1013 reg += 4;
1014 else
1015 reg++;
1016 add_dep(last_mrf_write[reg], n);
1017 last_mrf_write[reg] = n;
1018 }
1019 } else if (inst->dst.file == FIXED_GRF) {
1020 if (post_reg_alloc) {
1021 for (int r = 0; r < inst->regs_written; r++)
1022 last_grf_write[inst->dst.nr + r] = n;
1023 } else {
1024 last_fixed_grf_write = n;
1025 }
1026 } else if (inst->dst.is_accumulator()) {
1027 add_dep(last_accumulator_write, n);
1028 last_accumulator_write = n;
1029 } else if (inst->dst.file != BAD_FILE &&
1030 !inst->dst.is_null()) {
1031 add_barrier_deps(n);
1032 }
1033
1034 if (inst->mlen > 0 && inst->base_mrf != -1) {
1035 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1036 add_dep(last_mrf_write[inst->base_mrf + i], n);
1037 last_mrf_write[inst->base_mrf + i] = n;
1038 }
1039 }
1040
1041 if (inst->writes_flag()) {
1042 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1043 last_conditional_mod[inst->flag_subreg] = n;
1044 }
1045
1046 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1047 !inst->dst.is_accumulator()) {
1048 add_dep(last_accumulator_write, n);
1049 last_accumulator_write = n;
1050 }
1051 }
1052
1053 /* bottom-to-top dependencies: WAR */
1054 memset(last_grf_write, 0, sizeof(last_grf_write));
1055 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1056 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1057 last_accumulator_write = NULL;
1058 last_fixed_grf_write = NULL;
1059
1060 exec_node *node;
1061 exec_node *prev;
1062 for (node = instructions.get_tail(), prev = node->prev;
1063 !node->is_head_sentinel();
1064 node = prev, prev = node->prev) {
1065 schedule_node *n = (schedule_node *)node;
1066 fs_inst *inst = (fs_inst *)n->inst;
1067
1068 /* write-after-read deps. */
1069 for (int i = 0; i < inst->sources; i++) {
1070 if (inst->src[i].file == VGRF) {
1071 if (post_reg_alloc) {
1072 for (int r = 0; r < inst->regs_read(i); r++)
1073 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1074 } else {
1075 for (int r = 0; r < inst->regs_read(i); r++) {
1076 add_dep(n, last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], 0);
1077 }
1078 }
1079 } else if (inst->src[i].file == FIXED_GRF) {
1080 if (post_reg_alloc) {
1081 for (int r = 0; r < inst->regs_read(i); r++)
1082 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1083 } else {
1084 add_dep(n, last_fixed_grf_write, 0);
1085 }
1086 } else if (inst->src[i].is_accumulator()) {
1087 add_dep(n, last_accumulator_write, 0);
1088 } else if (inst->src[i].file != BAD_FILE &&
1089 inst->src[i].file != IMM &&
1090 inst->src[i].file != UNIFORM) {
1091 assert(inst->src[i].file != MRF);
1092 add_barrier_deps(n);
1093 }
1094 }
1095
1096 if (inst->base_mrf != -1) {
1097 for (int i = 0; i < inst->mlen; i++) {
1098 /* It looks like the MRF regs are released in the send
1099 * instruction once it's sent, not when the result comes
1100 * back.
1101 */
1102 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1103 }
1104 }
1105
1106 if (inst->reads_flag()) {
1107 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1108 }
1109
1110 if (inst->reads_accumulator_implicitly()) {
1111 add_dep(n, last_accumulator_write);
1112 }
1113
1114 /* Update the things this instruction wrote, so earlier reads
1115     * can mark this as a WAR dependency.
1116 */
1117 if (inst->dst.file == VGRF) {
1118 if (post_reg_alloc) {
1119 for (int r = 0; r < inst->regs_written; r++)
1120 last_grf_write[inst->dst.nr + r] = n;
1121 } else {
1122 for (int r = 0; r < inst->regs_written; r++) {
1123 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1124 }
1125 }
1126 } else if (inst->dst.file == MRF) {
1127 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1128
1129 last_mrf_write[reg] = n;
1130
1131 if (is_compressed(inst)) {
1132 if (inst->dst.nr & BRW_MRF_COMPR4)
1133 reg += 4;
1134 else
1135 reg++;
1136
1137 last_mrf_write[reg] = n;
1138 }
1139 } else if (inst->dst.file == FIXED_GRF) {
1140 if (post_reg_alloc) {
1141 for (int r = 0; r < inst->regs_written; r++)
1142 last_grf_write[inst->dst.nr + r] = n;
1143 } else {
1144 last_fixed_grf_write = n;
1145 }
1146 } else if (inst->dst.is_accumulator()) {
1147 last_accumulator_write = n;
1148 } else if (inst->dst.file != BAD_FILE &&
1149 !inst->dst.is_null()) {
1150 add_barrier_deps(n);
1151 }
1152
1153 if (inst->mlen > 0 && inst->base_mrf != -1) {
1154 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1155 last_mrf_write[inst->base_mrf + i] = n;
1156 }
1157 }
1158
1159 if (inst->writes_flag()) {
1160 last_conditional_mod[inst->flag_subreg] = n;
1161 }
1162
1163 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1164 last_accumulator_write = n;
1165 }
1166 }
1167 }
1168
1169 void
1170 vec4_instruction_scheduler::calculate_deps()
1171 {
1172 schedule_node *last_grf_write[grf_count];
1173 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1174 schedule_node *last_conditional_mod = NULL;
1175 schedule_node *last_accumulator_write = NULL;
1176 /* Fixed HW registers are assumed to be separate from the virtual
1177 * GRFs, so they can be tracked separately. We don't really write
1178 * to fixed GRFs much, so don't bother tracking them on a more
1179 * granular level.
1180 */
1181 schedule_node *last_fixed_grf_write = NULL;
1182
1183 /* The last instruction always needs to still be the last instruction.
1184 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1185 * other things after it would disturb the basic block, or it's the EOT
1186 * URB_WRITE and we should do a better job at dead code eliminating
1187 * anything that could have been scheduled after it.
1188 */
1189 schedule_node *last = (schedule_node *)instructions.get_tail();
1190 add_barrier_deps(last);
1191
1192 memset(last_grf_write, 0, sizeof(last_grf_write));
1193 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1194
1195 /* top-to-bottom dependencies: RAW and WAW. */
1196 foreach_in_list(schedule_node, n, &instructions) {
1197 vec4_instruction *inst = (vec4_instruction *)n->inst;
1198
1199 if (inst->has_side_effects() && inst->opcode != FS_OPCODE_FB_WRITE)
1200 add_barrier_deps(n);
1201
1202 /* read-after-write deps. */
1203 for (int i = 0; i < 3; i++) {
1204 if (inst->src[i].file == VGRF) {
1205 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1206 add_dep(last_grf_write[inst->src[i].nr + j], n);
1207 } else if (inst->src[i].file == FIXED_GRF) {
1208 add_dep(last_fixed_grf_write, n);
1209 } else if (inst->src[i].is_accumulator()) {
1210 assert(last_accumulator_write);
1211 add_dep(last_accumulator_write, n);
1212 } else if (inst->src[i].file != BAD_FILE &&
1213 inst->src[i].file != IMM &&
1214 inst->src[i].file != UNIFORM) {
1215 /* No reads from MRF, and ATTR is already translated away */
1216 assert(inst->src[i].file != MRF &&
1217 inst->src[i].file != ATTR);
1218 add_barrier_deps(n);
1219 }
1220 }
1221
1222 if (!inst->is_send_from_grf()) {
1223 for (int i = 0; i < inst->mlen; i++) {
1224 /* It looks like the MRF regs are released in the send
1225 * instruction once it's sent, not when the result comes
1226 * back.
1227 */
1228 add_dep(last_mrf_write[inst->base_mrf + i], n);
1229 }
1230 }
1231
1232 if (inst->reads_flag()) {
1233 assert(last_conditional_mod);
1234 add_dep(last_conditional_mod, n);
1235 }
1236
1237 if (inst->reads_accumulator_implicitly()) {
1238 assert(last_accumulator_write);
1239 add_dep(last_accumulator_write, n);
1240 }
1241
1242 /* write-after-write deps. */
1243 if (inst->dst.file == VGRF) {
1244 for (unsigned j = 0; j < inst->regs_written; ++j) {
1245 add_dep(last_grf_write[inst->dst.nr + j], n);
1246 last_grf_write[inst->dst.nr + j] = n;
1247 }
1248 } else if (inst->dst.file == MRF) {
1249 add_dep(last_mrf_write[inst->dst.nr], n);
1250 last_mrf_write[inst->dst.nr] = n;
1251 } else if (inst->dst.file == FIXED_GRF) {
1252 last_fixed_grf_write = n;
1253 } else if (inst->dst.is_accumulator()) {
1254 add_dep(last_accumulator_write, n);
1255 last_accumulator_write = n;
1256 } else if (inst->dst.file != BAD_FILE &&
1257 !inst->dst.is_null()) {
1258 add_barrier_deps(n);
1259 }
1260
1261 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1262 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1263 add_dep(last_mrf_write[inst->base_mrf + i], n);
1264 last_mrf_write[inst->base_mrf + i] = n;
1265 }
1266 }
1267
1268 if (inst->writes_flag()) {
1269 add_dep(last_conditional_mod, n, 0);
1270 last_conditional_mod = n;
1271 }
1272
1273 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1274 !inst->dst.is_accumulator()) {
1275 add_dep(last_accumulator_write, n);
1276 last_accumulator_write = n;
1277 }
1278 }
1279
1280 /* bottom-to-top dependencies: WAR */
1281 memset(last_grf_write, 0, sizeof(last_grf_write));
1282 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1283 last_conditional_mod = NULL;
1284 last_accumulator_write = NULL;
1285 last_fixed_grf_write = NULL;
1286
1287 exec_node *node;
1288 exec_node *prev;
1289 for (node = instructions.get_tail(), prev = node->prev;
1290 !node->is_head_sentinel();
1291 node = prev, prev = node->prev) {
1292 schedule_node *n = (schedule_node *)node;
1293 vec4_instruction *inst = (vec4_instruction *)n->inst;
1294
1295 /* write-after-read deps. */
1296 for (int i = 0; i < 3; i++) {
1297 if (inst->src[i].file == VGRF) {
1298 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1299 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1300 } else if (inst->src[i].file == FIXED_GRF) {
1301 add_dep(n, last_fixed_grf_write);
1302 } else if (inst->src[i].is_accumulator()) {
1303 add_dep(n, last_accumulator_write);
1304 } else if (inst->src[i].file != BAD_FILE &&
1305 inst->src[i].file != IMM &&
1306 inst->src[i].file != UNIFORM) {
1307 assert(inst->src[i].file != MRF &&
1308 inst->src[i].file != ATTR);
1309 add_barrier_deps(n);
1310 }
1311 }
1312
1313 if (!inst->is_send_from_grf()) {
1314 for (int i = 0; i < inst->mlen; i++) {
1315 /* It looks like the MRF regs are released in the send
1316 * instruction once it's sent, not when the result comes
1317 * back.
1318 */
1319 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1320 }
1321 }
1322
1323 if (inst->reads_flag()) {
1324 add_dep(n, last_conditional_mod);
1325 }
1326
1327 if (inst->reads_accumulator_implicitly()) {
1328 add_dep(n, last_accumulator_write);
1329 }
1330
1331 /* Update the things this instruction wrote, so earlier reads
1332     * can mark this as a WAR dependency.
1333 */
1334 if (inst->dst.file == VGRF) {
1335 for (unsigned j = 0; j < inst->regs_written; ++j)
1336 last_grf_write[inst->dst.nr + j] = n;
1337 } else if (inst->dst.file == MRF) {
1338 last_mrf_write[inst->dst.nr] = n;
1339 } else if (inst->dst.file == FIXED_GRF) {
1340 last_fixed_grf_write = n;
1341 } else if (inst->dst.is_accumulator()) {
1342 last_accumulator_write = n;
1343 } else if (inst->dst.file != BAD_FILE &&
1344 !inst->dst.is_null()) {
1345 add_barrier_deps(n);
1346 }
1347
1348 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1349 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1350 last_mrf_write[inst->base_mrf + i] = n;
1351 }
1352 }
1353
1354 if (inst->writes_flag()) {
1355 last_conditional_mod = n;
1356 }
1357
1358 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1359 last_accumulator_write = n;
1360 }
1361 }
1362 }
1363
1364 schedule_node *
1365 fs_instruction_scheduler::choose_instruction_to_schedule()
1366 {
1367 schedule_node *chosen = NULL;
1368
1369 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1370 int chosen_time = 0;
1371
1372 /* Of the instructions ready to execute or the closest to
1373 * being ready, choose the oldest one.
1374 */
1375 foreach_in_list(schedule_node, n, &instructions) {
1376 if (!chosen || n->unblocked_time < chosen_time) {
1377 chosen = n;
1378 chosen_time = n->unblocked_time;
1379 }
1380 }
1381 } else {
1382 /* Before register allocation, we don't care about the latencies of
1383 * instructions. All we care about is reducing live intervals of
1384 * variables so that we can avoid register spilling, or get SIMD16
1385 * shaders which naturally do a better job of hiding instruction
1386 * latency.
1387 */
1388 foreach_in_list(schedule_node, n, &instructions) {
1389 fs_inst *inst = (fs_inst *)n->inst;
1390
1391 if (!chosen) {
1392 chosen = n;
1393 continue;
1394 }
1395
1396 /* Most important: If we can definitely reduce register pressure, do
1397 * so immediately.
1398 */
1399 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1400 int chosen_register_pressure_benefit =
1401 get_register_pressure_benefit(chosen->inst);
1402
1403 if (register_pressure_benefit > 0 &&
1404 register_pressure_benefit > chosen_register_pressure_benefit) {
1405 chosen = n;
1406 continue;
1407 } else if (chosen_register_pressure_benefit > 0 &&
1408 (register_pressure_benefit <
1409 chosen_register_pressure_benefit)) {
1410 continue;
1411 }
1412
1413 if (mode == SCHEDULE_PRE_LIFO) {
1414 /* Prefer instructions that recently became available for
1415 * scheduling. These are the things that are most likely to
1416 * (eventually) make a variable dead and reduce register pressure.
1417 * Typical register pressure estimates don't work for us because
1418 * most of our pressure comes from texturing, where no single
1419 * instruction to schedule will make a vec4 value dead.
1420 */
1421 if (n->cand_generation > chosen->cand_generation) {
1422 chosen = n;
1423 continue;
1424 } else if (n->cand_generation < chosen->cand_generation) {
1425 continue;
1426 }
1427
1428 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1429 * do this, then because we prefer instructions that just became
1430 * candidates, we'll end up in a pattern of scheduling a SEND,
1431 * then the MRFs for the next SEND, then the next SEND, then the
1432 * MRFs, etc., without ever consuming the results of a send.
1433 */
1434 if (v->devinfo->gen < 7) {
1435 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1436
1437    /* We treat a regs_written value larger than what a plain write at
1438     * this exec size would produce as our test for the kind of send
1439     * instruction to avoid -- only sends generate many regs, and a
1440     * single-result send is probably actually reducing register pressure.
1441 */
1442 if (inst->regs_written <= inst->exec_size / 8 &&
1443 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1444 chosen = n;
1445 continue;
1446 } else if (inst->regs_written > chosen_inst->regs_written) {
1447 continue;
1448 }
1449 }
1450 }
1451
1452 /* For instructions pushed on the cands list at the same time, prefer
1453 * the one with the highest delay to the end of the program. This is
1454 * most likely to have its values able to be consumed first (such as
1455 * for a large tree of lowered ubo loads, which appear reversed in
1456 * the instruction stream with respect to when they can be consumed).
1457 */
1458 if (n->delay > chosen->delay) {
1459 chosen = n;
1460 continue;
1461 } else if (n->delay < chosen->delay) {
1462 continue;
1463 }
1464
1465 /* If all other metrics are equal, we prefer the first instruction in
1466     * the list (program execution order).
1467 */
1468 }
1469 }
1470
1471 return chosen;
1472 }
1473
1474 schedule_node *
1475 vec4_instruction_scheduler::choose_instruction_to_schedule()
1476 {
1477 schedule_node *chosen = NULL;
1478 int chosen_time = 0;
1479
1480 /* Of the instructions ready to execute or the closest to being ready,
1481 * choose the oldest one.
1482 */
1483 foreach_in_list(schedule_node, n, &instructions) {
1484 if (!chosen || n->unblocked_time < chosen_time) {
1485 chosen = n;
1486 chosen_time = n->unblocked_time;
1487 }
1488 }
1489
1490 return chosen;
1491 }
1492
1493 int
1494 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1495 {
1496 if (is_compressed((fs_inst *)inst))
1497 return 4;
1498 else
1499 return 2;
1500 }
1501
1502 int
1503 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1504 {
1505 /* We always execute as two vec4s in parallel. */
1506 return 2;
1507 }
1508
1509 void
1510 instruction_scheduler::schedule_instructions(bblock_t *block)
1511 {
1512 const struct brw_device_info *devinfo = bs->devinfo;
1513 backend_instruction *inst = block->end();
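   /* All schedulable instructions were detached from the block in
    * add_insts_from_block(), so block->end() here is the control-flow
    * instruction or NOP that stayed behind; chosen instructions are
    * re-inserted in front of it below.
    */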
1514 time = 0;
1515 if (!post_reg_alloc)
1516 reg_pressure = reg_pressure_in[block->num];
1517 block_idx = block->num;
1518
1519 /* Remove non-DAG heads from the list. */
1520 foreach_in_list_safe(schedule_node, n, &instructions) {
1521 if (n->parent_count != 0)
1522 n->remove();
1523 }
1524
1525 unsigned cand_generation = 1;
1526 while (!instructions.is_empty()) {
1527 schedule_node *chosen = choose_instruction_to_schedule();
1528
1529 /* Schedule this instruction. */
1530 assert(chosen);
1531 chosen->remove();
1532 inst->insert_before(block, chosen->inst);
1533 instructions_to_schedule--;
1534
1535 if (!post_reg_alloc) {
1536 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1537 update_register_pressure(chosen->inst);
1538 }
1539
1540 /* If we expected a delay for scheduling, then bump the clock to reflect
1541 * that. In reality, the hardware will switch to another hyperthread
1542 * and may not return to dispatching our thread for a while even after
1543 * we're unblocked. After this, we have the time when the chosen
1544 * instruction will start executing.
1545 */
1546 time = MAX2(time, chosen->unblocked_time);
1547
1548 /* Update the clock for how soon an instruction could start after the
1549 * chosen one.
1550 */
1551 time += issue_time(chosen->inst);
1552
1553 if (debug) {
1554 fprintf(stderr, "clock %4d, scheduled: ", time);
1555 bs->dump_instruction(chosen->inst);
1556 if (!post_reg_alloc)
1557 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1558 }
1559
1560 /* Now that we've scheduled a new instruction, some of its
1561 * children can be promoted to the list of instructions ready to
1562 * be scheduled. Update the children's unblocked time for this
1563 * DAG edge as we do so.
1564 */
1565 for (int i = chosen->child_count - 1; i >= 0; i--) {
1566 schedule_node *child = chosen->children[i];
1567
1568 child->unblocked_time = MAX2(child->unblocked_time,
1569 time + chosen->child_latency[i]);
1570
1571 if (debug) {
1572 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1573 bs->dump_instruction(child->inst);
1574 }
1575
1576 child->cand_generation = cand_generation;
1577 child->parent_count--;
1578 if (child->parent_count == 0) {
1579 if (debug) {
1580 fprintf(stderr, "\t\tnow available\n");
1581 }
1582 instructions.push_head(child);
1583 }
1584 }
1585 cand_generation++;
1586
1587 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1588     * but it's more limited pre-Gen6, so if we send something off to it then
1589 * the next math instruction isn't going to make progress until the first
1590 * is done.
1591 */
1592 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1593 foreach_in_list(schedule_node, n, &instructions) {
1594 if (n->inst->is_math())
1595 n->unblocked_time = MAX2(n->unblocked_time,
1596 time + chosen->latency);
1597 }
1598 }
1599 }
1600
1601 if (block->end()->opcode == BRW_OPCODE_NOP)
1602 block->end()->remove(block);
1603 assert(instructions_to_schedule == 0);
1604
1605 block->cycle_count = time;
1606 }
1607
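/* Estimate a shader-wide cycle count from the per-block counts, weighting
 * each block by an assumed trip count of ~10 for every loop that encloses
 * it (nested loops multiply).
 */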
1608 static unsigned get_cycle_count(cfg_t *cfg)
1609 {
1610 unsigned count = 0, multiplier = 1;
1611 foreach_block(block, cfg) {
1612 if (block->start()->opcode == BRW_OPCODE_DO)
1613 multiplier *= 10; /* assume that loops execute ~10 times */
1614
1615 count += block->cycle_count * multiplier;
1616
1617 if (block->end()->opcode == BRW_OPCODE_WHILE)
1618 multiplier /= 10;
1619 }
1620
1621 return count;
1622 }
1623
1624 void
1625 instruction_scheduler::run(cfg_t *cfg)
1626 {
1627 if (debug && !post_reg_alloc) {
1628 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1629 post_reg_alloc);
1630 bs->dump_instructions();
1631 }
1632
1633 if (!post_reg_alloc)
1634 setup_liveness(cfg);
1635
1636 foreach_block(block, cfg) {
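      /* A block with at most two instructions has nothing useful to
       * reorder (the last instruction has to stay last anyway), so skip it.
       */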
1637 if (block->end_ip - block->start_ip <= 1)
1638 continue;
1639
1640 if (reads_remaining) {
1641 memset(reads_remaining, 0,
1642 grf_count * sizeof(*reads_remaining));
1643 memset(hw_reads_remaining, 0,
1644 hw_reg_count * sizeof(*hw_reads_remaining));
1645 memset(written, 0, grf_count * sizeof(*written));
1646
1647 foreach_inst_in_block(fs_inst, inst, block)
1648 count_reads_remaining(inst);
1649 }
1650
1651 add_insts_from_block(block);
1652
1653 calculate_deps();
1654
1655 foreach_in_list(schedule_node, n, &instructions) {
1656 compute_delay(n);
1657 }
1658
1659 schedule_instructions(block);
1660 }
1661
1662 if (debug && !post_reg_alloc) {
1663 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1664 post_reg_alloc);
1665 bs->dump_instructions();
1666 }
1667
1668 cfg->cycle_count = get_cycle_count(cfg);
1669 }
1670
1671 void
1672 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1673 {
1674 if (mode != SCHEDULE_POST)
1675 calculate_live_intervals();
1676
1677 int grf_count;
1678 if (mode == SCHEDULE_POST)
1679 grf_count = grf_used;
1680 else
1681 grf_count = alloc.count;
1682
1683 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1684 cfg->num_blocks, mode);
1685 sched.run(cfg);
1686
1687 if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
1688 fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
1689 stage_abbrev, dispatch_width, sched.time);
1690 }
1691
1692 invalidate_live_intervals();
1693 }
1694
1695 void
1696 vec4_visitor::opt_schedule_instructions()
1697 {
1698 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1699 sched.run(cfg);
1700
1701 if (unlikely(debug_enabled)) {
1702 fprintf(stderr, "%s estimated execution time: %d cycles\n",
1703 stage_abbrev, sched.time);
1704 }
1705
1706 invalidate_live_intervals();
1707 }