src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
36 /** @file brw_schedule_instructions.cpp
37  *
38  * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
47  * since actually computing an optimal schedule is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54  * immediately, and there is a range of heuristic options to choose
55  * from when picking among them.
56 */
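/* A rough sketch of the flow described above, for orientation only -- the
 * real logic lives in instruction_scheduler::run() and
 * schedule_instructions() below, and the names here are illustrative:
 *
 *    for each basic block:
 *       build the schedule_node DAG          (calculate_deps())
 *       compute each node's delay-to-end     (compute_delay())
 *       candidates = DAG heads (nodes with parent_count == 0)
 *       while (!candidates.is_empty()) {
 *          chosen = choose_instruction_to_schedule();
 *          emit chosen->inst;
 *          time = MAX2(time, chosen->unblocked_time) + issue_time(chosen->inst);
 *          for each child of chosen:
 *             child->unblocked_time = MAX2(child->unblocked_time,
 *                                          time + edge_latency);
 *             if (--child->parent_count == 0)
 *                candidates.push(child);
 *       }
 */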
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85 * This is the sum of the instruction's latency plus the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89 };
90
91 void
92 schedule_node::set_latency_gen4()
93 {
94 int chans = 8;
95 int math_latency = 22;
96
97 switch (inst->opcode) {
98 case SHADER_OPCODE_RCP:
99 this->latency = 1 * chans * math_latency;
100 break;
101 case SHADER_OPCODE_RSQ:
102 this->latency = 2 * chans * math_latency;
103 break;
104 case SHADER_OPCODE_INT_QUOTIENT:
105 case SHADER_OPCODE_SQRT:
106 case SHADER_OPCODE_LOG2:
107 /* full precision log. partial is 2. */
108 this->latency = 3 * chans * math_latency;
109 break;
110 case SHADER_OPCODE_INT_REMAINDER:
111 case SHADER_OPCODE_EXP2:
112 /* full precision. partial is 3, same throughput. */
113 this->latency = 4 * chans * math_latency;
114 break;
115 case SHADER_OPCODE_POW:
116 this->latency = 8 * chans * math_latency;
117 break;
118 case SHADER_OPCODE_SIN:
119 case SHADER_OPCODE_COS:
120 /* minimum latency, max is 12 rounds. */
121 this->latency = 5 * chans * math_latency;
122 break;
123 default:
124 this->latency = 2;
125 break;
126 }
127 }
128
129 void
130 schedule_node::set_latency_gen7(bool is_haswell)
131 {
132 switch (inst->opcode) {
133 case BRW_OPCODE_MAD:
134 /* 2 cycles
135 * (since the last two src operands are in different register banks):
136 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
137 *
138 * 3 cycles on IVB, 4 on HSW
139 * (since the last two src operands are in the same register bank):
140 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
141 *
142 * 18 cycles on IVB, 16 on HSW
143 * (since the last two src operands are in different register banks):
144 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
145 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
146 *
147 * 20 cycles on IVB, 18 on HSW
148 * (since the last two src operands are in the same register bank):
149 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
150 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
151 */
152
153 /* Our register allocator doesn't know about register banks, so use the
154 * higher latency.
155 */
156 latency = is_haswell ? 16 : 18;
157 break;
158
159 case BRW_OPCODE_LRP:
160 /* 2 cycles
161 * (since the last two src operands are in different register banks):
162 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
163 *
164 * 3 cycles on IVB, 4 on HSW
165 * (since the last two src operands are in the same register bank):
166 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
167 *
168 * 16 cycles on IVB, 14 on HSW
169 * (since the last two src operands are in different register banks):
170 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
171 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
172 *
173 * 16 cycles
174 * (since the last two src operands are in the same register bank):
175 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
176 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
177 */
178
179 /* Our register allocator doesn't know about register banks, so use the
180 * higher latency.
181 */
182 latency = 14;
183 break;
184
185 case SHADER_OPCODE_RCP:
186 case SHADER_OPCODE_RSQ:
187 case SHADER_OPCODE_SQRT:
188 case SHADER_OPCODE_LOG2:
189 case SHADER_OPCODE_EXP2:
190 case SHADER_OPCODE_SIN:
191 case SHADER_OPCODE_COS:
192 /* 2 cycles:
193 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
194 *
195 * 18 cycles:
196 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
197 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
198 *
199 * Same for exp2, log2, rsq, sqrt, sin, cos.
200 */
201 latency = is_haswell ? 14 : 16;
202 break;
203
204 case SHADER_OPCODE_POW:
205 /* 2 cycles:
206 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
207 *
208 * 26 cycles:
209 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
210 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
211 */
212 latency = is_haswell ? 22 : 24;
213 break;
214
215 case SHADER_OPCODE_TEX:
216 case SHADER_OPCODE_TXD:
217 case SHADER_OPCODE_TXF:
218 case SHADER_OPCODE_TXL:
219 /* 18 cycles:
220 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
221 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
222 * send(8) g4<1>UW g114<8,8,1>F
223 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
224 *
225 * 697 +/-49 cycles (min 610, n=26):
226 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
227 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
228 * send(8) g4<1>UW g114<8,8,1>F
229 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
230 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
231 *
232     * So the latency of our first texture load in the batchbuffer is
233     * ~700 cycles, since the caches are cold at that point.
234 *
235 * 840 +/- 92 cycles (min 720, n=25):
236 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
237 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
238 * send(8) g4<1>UW g114<8,8,1>F
239 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
240 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
241 * send(8) g4<1>UW g114<8,8,1>F
242 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
243 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
244 *
245 * On the second load, it takes just an extra ~140 cycles, and after
246 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
247 *
248 * 683 +/- 49 cycles (min = 602, n=47):
249 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
250 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
251 * send(8) g4<1>UW g114<8,8,1>F
252 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
253 * send(8) g50<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
256 *
257 * The unit appears to be pipelined, since this matches up with the
258 * cache-cold case, despite there being two loads here. If you replace
259 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
260 *
261 * So, take some number between the cache-hot 140 cycles and the
262 * cache-cold 700 cycles. No particular tuning was done on this.
263 *
264 * I haven't done significant testing of the non-TEX opcodes. TXL at
265 * least looked about the same as TEX.
266 */
267 latency = 200;
268 break;
269
270 case SHADER_OPCODE_TXS:
271 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
272 * cycles (n=15):
273 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
274 * send(8) g6<1>UW g114<8,8,1>F
275 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
276 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
277 *
278 *
279 * Two loads was 535 +/- 30 cycles (n=19):
280 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
281 * send(16) g6<1>UW g114<8,8,1>F
282 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
283 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
284 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
285 * send(16) g8<1>UW g114<8,8,1>F
286 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
287 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
288 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
289 *
290 * Since the only caches that should matter are just the
291 * instruction/state cache containing the surface state, assume that we
292 * always have hot caches.
293 */
294 latency = 100;
295 break;
296
297 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
298 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
299 case VS_OPCODE_PULL_CONSTANT_LOAD:
300 /* testing using varying-index pull constants:
301 *
302 * 16 cycles:
303 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
304 * send(8) g4<1>F g4<8,8,1>D
305 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
306 *
307 * ~480 cycles:
308 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
309 * send(8) g4<1>F g4<8,8,1>D
310 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
311 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
312 *
313 * ~620 cycles:
314 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
315 * send(8) g4<1>F g4<8,8,1>D
316 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
317 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
318 * send(8) g4<1>F g4<8,8,1>D
319 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
320 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
321 *
322 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
323 * about 460. We expect to mostly be cache hot, so pick something more
324 * in that direction.
325 */
326 latency = 200;
327 break;
328
329 case SHADER_OPCODE_GEN7_SCRATCH_READ:
330 /* Testing a load from offset 0, that had been previously written:
331 *
332 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
333 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
334 *
335 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
336 * then around 140. Presumably this is cache hit vs miss.
337 */
338 latency = 50;
339 break;
340
341 case SHADER_OPCODE_UNTYPED_ATOMIC:
342 case SHADER_OPCODE_TYPED_ATOMIC:
343 /* Test code:
344 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
345 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
346 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
347 * send(8) g4<1>ud g112<8,8,1>ud
348 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
349 *
350     * Running it 100 times as a fragment shader on a 128x128 quad
351     * gives an average latency of 13867 cycles per atomic op,
352     * standard deviation 3%.  Note that this is a rather
353     * pessimistic estimate; the actual latency in cases with few
354     * collisions between threads and favorable pipelining has been
355     * seen to be reduced by a factor of 100.
356 */
357 latency = 14000;
358 break;
359
360 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
361 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
362 case SHADER_OPCODE_TYPED_SURFACE_READ:
363 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
364 /* Test code:
365 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
366 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
367 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
368 * send(8) g4<1>UD g112<8,8,1>UD
369 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
370 * .
371 * . [repeats 8 times]
372 * .
373 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
374 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
375 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
376 * send(8) g4<1>UD g112<8,8,1>UD
377 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
378 *
379     * Running it 100 times as a fragment shader on a 128x128 quad
380 * gives an average latency of 583 cycles per surface read,
381 * standard deviation 0.9%.
382 */
383 latency = is_haswell ? 300 : 600;
384 break;
385
386 default:
387 /* 2 cycles:
388 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
389 *
390 * 16 cycles:
391 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
392 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
393 */
394 latency = 14;
395 break;
396 }
397 }
398
399 class instruction_scheduler {
400 public:
401 instruction_scheduler(backend_shader *s, int grf_count,
402 int hw_reg_count, int block_count,
403 instruction_scheduler_mode mode)
404 {
405 this->bs = s;
406 this->mem_ctx = ralloc_context(NULL);
407 this->grf_count = grf_count;
408 this->hw_reg_count = hw_reg_count;
409 this->instructions.make_empty();
410 this->instructions_to_schedule = 0;
411 this->post_reg_alloc = (mode == SCHEDULE_POST);
412 this->mode = mode;
413 this->time = 0;
414 if (!post_reg_alloc) {
415 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
416
417 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
418 for (int i = 0; i < block_count; i++)
419 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
420 BITSET_WORDS(grf_count));
421
422 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
423 for (int i = 0; i < block_count; i++)
424 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
425 BITSET_WORDS(grf_count));
426
427 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
428 for (int i = 0; i < block_count; i++)
429 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
430 BITSET_WORDS(hw_reg_count));
431
432 this->written = rzalloc_array(mem_ctx, bool, grf_count);
433
434 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
435
436 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
437 } else {
438 this->reg_pressure_in = NULL;
439 this->livein = NULL;
440 this->liveout = NULL;
441 this->hw_liveout = NULL;
442 this->written = NULL;
443 this->reads_remaining = NULL;
444 this->hw_reads_remaining = NULL;
445 }
446 }
447
448 ~instruction_scheduler()
449 {
450 ralloc_free(this->mem_ctx);
451 }
452 void add_barrier_deps(schedule_node *n);
453 void add_dep(schedule_node *before, schedule_node *after, int latency);
454 void add_dep(schedule_node *before, schedule_node *after);
455
456 void run(cfg_t *cfg);
457 void add_insts_from_block(bblock_t *block);
458 void compute_delay(schedule_node *node);
459 virtual void calculate_deps() = 0;
460 virtual schedule_node *choose_instruction_to_schedule() = 0;
461
462 /**
463 * Returns how many cycles it takes the instruction to issue.
464 *
465     * Instructions in Gen hardware are handled one SIMD4 vector at a time,
466 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
467 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
468 */
469 virtual int issue_time(backend_instruction *inst) = 0;
470
471 virtual void count_reads_remaining(backend_instruction *inst) = 0;
472 virtual void setup_liveness(cfg_t *cfg) = 0;
473 virtual void update_register_pressure(backend_instruction *inst) = 0;
474 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
475
476 void schedule_instructions(bblock_t *block);
477
478 void *mem_ctx;
479
480 bool post_reg_alloc;
481 int instructions_to_schedule;
482 int grf_count;
483 int hw_reg_count;
484 int time;
485 int reg_pressure;
486 int block_idx;
487 exec_list instructions;
488 backend_shader *bs;
489
490 instruction_scheduler_mode mode;
491
492 /*
493 * The register pressure at the beginning of each basic block.
494 */
495
496 int *reg_pressure_in;
497
498 /*
499     * The virtual GRFs whose live ranges overlap the beginning of each basic block.
500 */
501
502 BITSET_WORD **livein;
503
504 /*
505     * The virtual GRFs whose live ranges overlap the end of each basic block.
506 */
507
508 BITSET_WORD **liveout;
509
510 /*
511     * The hardware GRFs whose live ranges overlap the end of each basic block.
512 */
513
514 BITSET_WORD **hw_liveout;
515
516 /*
517 * Whether we've scheduled a write for this virtual GRF yet.
518 */
519
520 bool *written;
521
522 /*
523 * How many reads we haven't scheduled for this virtual GRF yet.
524 */
525
526 int *reads_remaining;
527
528 /*
529 * How many reads we haven't scheduled for this hardware GRF yet.
530 */
531
532 int *hw_reads_remaining;
533 };
534
535 class fs_instruction_scheduler : public instruction_scheduler
536 {
537 public:
538 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
539 int block_count,
540 instruction_scheduler_mode mode);
541 void calculate_deps();
542 bool is_compressed(fs_inst *inst);
543 schedule_node *choose_instruction_to_schedule();
544 int issue_time(backend_instruction *inst);
545 fs_visitor *v;
546
547 void count_reads_remaining(backend_instruction *inst);
548 void setup_liveness(cfg_t *cfg);
549 void update_register_pressure(backend_instruction *inst);
550 int get_register_pressure_benefit(backend_instruction *inst);
551 };
552
553 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
554 int grf_count, int hw_reg_count,
555 int block_count,
556 instruction_scheduler_mode mode)
557 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
558 v(v)
559 {
560 }
561
562 static bool
563 is_src_duplicate(fs_inst *inst, int src)
564 {
565 for (int i = 0; i < src; i++)
566 if (inst->src[i].equals(inst->src[src]))
567 return true;
568
569 return false;
570 }
571
572 void
573 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
574 {
575 fs_inst *inst = (fs_inst *)be;
576
577 if (!reads_remaining)
578 return;
579
580 for (int i = 0; i < inst->sources; i++) {
581 if (is_src_duplicate(inst, i))
582 continue;
583
584 if (inst->src[i].file == VGRF) {
585 reads_remaining[inst->src[i].nr]++;
586 } else if (inst->src[i].file == FIXED_GRF) {
587 if (inst->src[i].nr >= hw_reg_count)
588 continue;
589
590 for (int j = 0; j < inst->regs_read(i); j++)
591 hw_reads_remaining[inst->src[i].nr + j]++;
592 }
593 }
594 }
595
596 void
597 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
598 {
599 /* First, compute liveness on a per-GRF level using the in/out sets from
600 * liveness calculation.
601 */
602 for (int block = 0; block < cfg->num_blocks; block++) {
603 for (int i = 0; i < v->live_intervals->num_vars; i++) {
604 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
605 int vgrf = v->live_intervals->vgrf_from_var[i];
606 if (!BITSET_TEST(livein[block], vgrf)) {
607 reg_pressure_in[block] += v->alloc.sizes[vgrf];
608 BITSET_SET(livein[block], vgrf);
609 }
610 }
611
612 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
613 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
614 }
615 }
616
617 /* Now, extend the live in/live out sets for when a range crosses a block
618 * boundary, which matches what our register allocator/interference code
619 * does to account for force_writemask_all and incompatible exec_mask's.
620 */
621 for (int block = 0; block < cfg->num_blocks - 1; block++) {
622 for (int i = 0; i < grf_count; i++) {
623 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
624 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
625 if (!BITSET_TEST(livein[block + 1], i)) {
626 reg_pressure_in[block + 1] += v->alloc.sizes[i];
627 BITSET_SET(livein[block + 1], i);
628 }
629
630 BITSET_SET(liveout[block], i);
631 }
632 }
633 }
634
635 int payload_last_use_ip[hw_reg_count];
636 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
637
638 for (int i = 0; i < hw_reg_count; i++) {
639 if (payload_last_use_ip[i] == -1)
640 continue;
641
642 for (int block = 0; block < cfg->num_blocks; block++) {
643 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
644 reg_pressure_in[block]++;
645
646 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
647 BITSET_SET(hw_liveout[block], i);
648 }
649 }
650 }
651
652 void
653 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
654 {
655 fs_inst *inst = (fs_inst *)be;
656
657 if (!reads_remaining)
658 return;
659
660 if (inst->dst.file == VGRF) {
661 written[inst->dst.nr] = true;
662 }
663
664 for (int i = 0; i < inst->sources; i++) {
665 if (is_src_duplicate(inst, i))
666 continue;
667
668 if (inst->src[i].file == VGRF) {
669 reads_remaining[inst->src[i].nr]--;
670 } else if (inst->src[i].file == FIXED_GRF &&
671 inst->src[i].nr < hw_reg_count) {
672 for (int off = 0; off < inst->regs_read(i); off++)
673 hw_reads_remaining[inst->src[i].nr + off]--;
674 }
675 }
676 }
677
678 int
679 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
680 {
681 fs_inst *inst = (fs_inst *)be;
682 int benefit = 0;
683
684 if (inst->dst.file == VGRF) {
685 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
686 !written[inst->dst.nr])
687 benefit -= v->alloc.sizes[inst->dst.nr];
688 }
689
690 for (int i = 0; i < inst->sources; i++) {
691 if (is_src_duplicate(inst, i))
692 continue;
693
694 if (inst->src[i].file == VGRF &&
695 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
696 reads_remaining[inst->src[i].nr] == 1)
697 benefit += v->alloc.sizes[inst->src[i].nr];
698
699 if (inst->src[i].file == FIXED_GRF &&
700 inst->src[i].nr < hw_reg_count) {
701 for (int off = 0; off < inst->regs_read(i); off++) {
702 int reg = inst->src[i].nr + off;
703 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
704 hw_reads_remaining[reg] == 1) {
705 benefit++;
706 }
707 }
708 }
709 }
710
711 return benefit;
712 }
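/* A hypothetical example of the benefit computed above: an instruction
 * whose two-register VGRF destination is neither live-in to the block nor
 * written yet contributes -2 (scheduling it makes those registers live),
 * and if it also performs the last remaining read of a four-register VGRF
 * source that is not live-out, it contributes +4, for a net benefit of +2
 * registers freed.  The numbers are illustrative, not measured.
 */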
713
714 class vec4_instruction_scheduler : public instruction_scheduler
715 {
716 public:
717 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
718 void calculate_deps();
719 schedule_node *choose_instruction_to_schedule();
720 int issue_time(backend_instruction *inst);
721 vec4_visitor *v;
722
723 void count_reads_remaining(backend_instruction *inst);
724 void setup_liveness(cfg_t *cfg);
725 void update_register_pressure(backend_instruction *inst);
726 int get_register_pressure_benefit(backend_instruction *inst);
727 };
728
729 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
730 int grf_count)
731 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
732 v(v)
733 {
734 }
735
736 void
737 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
748 {
749 }
750
751 int
752 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
753 {
754 return 0;
755 }
756
757 schedule_node::schedule_node(backend_instruction *inst,
758 instruction_scheduler *sched)
759 {
760 const struct brw_device_info *devinfo = sched->bs->devinfo;
761
762 this->inst = inst;
763 this->child_array_size = 0;
764 this->children = NULL;
765 this->child_latency = NULL;
766 this->child_count = 0;
767 this->parent_count = 0;
768 this->unblocked_time = 0;
769 this->cand_generation = 0;
770 this->delay = 0;
771
772 /* We can't measure Gen6 timings directly but expect them to be much
773 * closer to Gen7 than Gen4.
774 */
775 if (!sched->post_reg_alloc)
776 this->latency = 1;
777 else if (devinfo->gen >= 6)
778 set_latency_gen7(devinfo->is_haswell);
779 else
780 set_latency_gen4();
781 }
782
783 void
784 instruction_scheduler::add_insts_from_block(bblock_t *block)
785 {
786 /* Removing the last instruction from a basic block removes the block as
787 * well, so put a NOP at the end to keep it alive.
788 */
789 if (!block->end()->is_control_flow()) {
790 backend_instruction *nop = new(mem_ctx) backend_instruction();
791 nop->opcode = BRW_OPCODE_NOP;
792 block->end()->insert_after(block, nop);
793 }
794
795 foreach_inst_in_block_safe(backend_instruction, inst, block) {
796 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
797 continue;
798
799 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
800
801 this->instructions_to_schedule++;
802
803 inst->remove(block);
804 instructions.push_tail(n);
805 }
806 }
807
808 /** Recursive computation of the delay member of a node. */
809 void
810 instruction_scheduler::compute_delay(schedule_node *n)
811 {
812 if (!n->child_count) {
813 n->delay = issue_time(n->inst);
814 } else {
815 for (int i = 0; i < n->child_count; i++) {
816 if (!n->children[i]->delay)
817 compute_delay(n->children[i]);
818 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
819 }
820 }
821 }
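/* An illustrative example of the recursion above: for a straight-line
 * chain a -> b -> c where c is a leaf, delay(c) = issue_time(c),
 * delay(b) = latency(b) + delay(c), and delay(a) = latency(a) + delay(b).
 * With several children, a node takes the maximum over them, so delay
 * estimates how long the remainder of the program is blocked behind the
 * node.
 */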
822
823 /**
824 * Add a dependency between two instruction nodes.
825 *
826 * The @after node will be scheduled after @before. We will try to
827 * schedule it @latency cycles after @before, but no guarantees there.
828 */
829 void
830 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
831 int latency)
832 {
833 if (!before || !after)
834 return;
835
836 assert(before != after);
837
838 for (int i = 0; i < before->child_count; i++) {
839 if (before->children[i] == after) {
840 before->child_latency[i] = MAX2(before->child_latency[i], latency);
841 return;
842 }
843 }
844
845 if (before->child_array_size <= before->child_count) {
846 if (before->child_array_size < 16)
847 before->child_array_size = 16;
848 else
849 before->child_array_size *= 2;
850
851 before->children = reralloc(mem_ctx, before->children,
852 schedule_node *,
853 before->child_array_size);
854 before->child_latency = reralloc(mem_ctx, before->child_latency,
855 int, before->child_array_size);
856 }
857
858 before->children[before->child_count] = after;
859 before->child_latency[before->child_count] = latency;
860 before->child_count++;
861 after->parent_count++;
862 }
863
864 void
865 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
866 {
867 if (!before)
868 return;
869
870 add_dep(before, after, before->latency);
871 }
872
873 /**
874 * Sometimes we really want this node to execute after everything that
875 * was before it and before everything that followed it. This adds
876 * the deps to do so.
877 */
878 void
879 instruction_scheduler::add_barrier_deps(schedule_node *n)
880 {
881 schedule_node *prev = (schedule_node *)n->prev;
882 schedule_node *next = (schedule_node *)n->next;
883
884 if (prev) {
885 while (!prev->is_head_sentinel()) {
886 add_dep(prev, n, 0);
887 prev = (schedule_node *)prev->prev;
888 }
889 }
890
891 if (next) {
892 while (!next->is_tail_sentinel()) {
893 add_dep(n, next, 0);
894 next = (schedule_node *)next->next;
895 }
896 }
897 }
898
899 /* Instruction scheduling needs to be aware of when an MRF write
900  * actually writes 2 MRFs (i.e. when the instruction is compressed).
901 */
902 bool
903 fs_instruction_scheduler::is_compressed(fs_inst *inst)
904 {
905 return inst->exec_size == 16;
906 }
907
908 void
909 fs_instruction_scheduler::calculate_deps()
910 {
911 /* Pre-register-allocation, this tracks the last write per VGRF offset.
912 * After register allocation, reg_offsets are gone and we track individual
913 * GRF registers.
914 */
915 schedule_node *last_grf_write[grf_count * 16];
916 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
917 schedule_node *last_conditional_mod[2] = { NULL, NULL };
918 schedule_node *last_accumulator_write = NULL;
919 /* Fixed HW registers are assumed to be separate from the virtual
920 * GRFs, so they can be tracked separately. We don't really write
921 * to fixed GRFs much, so don't bother tracking them on a more
922 * granular level.
923 */
924 schedule_node *last_fixed_grf_write = NULL;
925
926 /* The last instruction always needs to still be the last
927 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
928 * WHILE) and scheduling other things after it would disturb the
929 * basic block, or it's FB_WRITE and we should do a better job at
930 * dead code elimination anyway.
931 */
932 schedule_node *last = (schedule_node *)instructions.get_tail();
933 add_barrier_deps(last);
934
935 memset(last_grf_write, 0, sizeof(last_grf_write));
936 memset(last_mrf_write, 0, sizeof(last_mrf_write));
937
938 /* top-to-bottom dependencies: RAW and WAW. */
939 foreach_in_list(schedule_node, n, &instructions) {
940 fs_inst *inst = (fs_inst *)n->inst;
941
942 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
943 inst->has_side_effects())
944 add_barrier_deps(n);
945
946 /* read-after-write deps. */
947 for (int i = 0; i < inst->sources; i++) {
948 if (inst->src[i].file == VGRF) {
949 if (post_reg_alloc) {
950 for (int r = 0; r < inst->regs_read(i); r++)
951 add_dep(last_grf_write[inst->src[i].nr + r], n);
952 } else {
953 for (int r = 0; r < inst->regs_read(i); r++) {
954 add_dep(last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], n);
955 }
956 }
957 } else if (inst->src[i].file == FIXED_GRF) {
958 if (post_reg_alloc) {
959 for (int r = 0; r < inst->regs_read(i); r++)
960 add_dep(last_grf_write[inst->src[i].nr + r], n);
961 } else {
962 add_dep(last_fixed_grf_write, n);
963 }
964 } else if (inst->src[i].is_accumulator()) {
965 add_dep(last_accumulator_write, n);
966 } else if (inst->src[i].file != BAD_FILE &&
967 inst->src[i].file != IMM &&
968 inst->src[i].file != UNIFORM) {
969 assert(inst->src[i].file != MRF);
970 add_barrier_deps(n);
971 }
972 }
973
974 if (inst->base_mrf != -1) {
975 for (int i = 0; i < inst->mlen; i++) {
976 /* It looks like the MRF regs are released in the send
977 * instruction once it's sent, not when the result comes
978 * back.
979 */
980 add_dep(last_mrf_write[inst->base_mrf + i], n);
981 }
982 }
983
984 if (inst->reads_flag()) {
985 add_dep(last_conditional_mod[inst->flag_subreg], n);
986 }
987
988 if (inst->reads_accumulator_implicitly()) {
989 add_dep(last_accumulator_write, n);
990 }
991
992 /* write-after-write deps. */
993 if (inst->dst.file == VGRF) {
994 if (post_reg_alloc) {
995 for (int r = 0; r < inst->regs_written; r++) {
996 add_dep(last_grf_write[inst->dst.nr + r], n);
997 last_grf_write[inst->dst.nr + r] = n;
998 }
999 } else {
1000 for (int r = 0; r < inst->regs_written; r++) {
1001 add_dep(last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r], n);
1002 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1003 }
1004 }
1005 } else if (inst->dst.file == MRF) {
1006 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1007
1008 add_dep(last_mrf_write[reg], n);
1009 last_mrf_write[reg] = n;
1010 if (is_compressed(inst)) {
1011 if (inst->dst.nr & BRW_MRF_COMPR4)
1012 reg += 4;
1013 else
1014 reg++;
1015 add_dep(last_mrf_write[reg], n);
1016 last_mrf_write[reg] = n;
1017 }
1018 } else if (inst->dst.file == FIXED_GRF) {
1019 if (post_reg_alloc) {
1020 for (int r = 0; r < inst->regs_written; r++)
1021 last_grf_write[inst->dst.nr + r] = n;
1022 } else {
1023 last_fixed_grf_write = n;
1024 }
1025 } else if (inst->dst.is_accumulator()) {
1026 add_dep(last_accumulator_write, n);
1027 last_accumulator_write = n;
1028 } else if (inst->dst.file != BAD_FILE &&
1029 !inst->dst.is_null()) {
1030 add_barrier_deps(n);
1031 }
1032
1033 if (inst->mlen > 0 && inst->base_mrf != -1) {
1034 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1035 add_dep(last_mrf_write[inst->base_mrf + i], n);
1036 last_mrf_write[inst->base_mrf + i] = n;
1037 }
1038 }
1039
1040 if (inst->writes_flag()) {
1041 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1042 last_conditional_mod[inst->flag_subreg] = n;
1043 }
1044
1045 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1046 !inst->dst.is_accumulator()) {
1047 add_dep(last_accumulator_write, n);
1048 last_accumulator_write = n;
1049 }
1050 }
1051
1052 /* bottom-to-top dependencies: WAR */
1053 memset(last_grf_write, 0, sizeof(last_grf_write));
1054 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1055 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1056 last_accumulator_write = NULL;
1057 last_fixed_grf_write = NULL;
1058
1059 exec_node *node;
1060 exec_node *prev;
1061 for (node = instructions.get_tail(), prev = node->prev;
1062 !node->is_head_sentinel();
1063 node = prev, prev = node->prev) {
1064 schedule_node *n = (schedule_node *)node;
1065 fs_inst *inst = (fs_inst *)n->inst;
1066
1067 /* write-after-read deps. */
1068 for (int i = 0; i < inst->sources; i++) {
1069 if (inst->src[i].file == VGRF) {
1070 if (post_reg_alloc) {
1071 for (int r = 0; r < inst->regs_read(i); r++)
1072 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1073 } else {
1074 for (int r = 0; r < inst->regs_read(i); r++) {
1075 add_dep(n, last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], 0);
1076 }
1077 }
1078 } else if (inst->src[i].file == FIXED_GRF) {
1079 if (post_reg_alloc) {
1080 for (int r = 0; r < inst->regs_read(i); r++)
1081 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1082 } else {
1083 add_dep(n, last_fixed_grf_write, 0);
1084 }
1085 } else if (inst->src[i].is_accumulator()) {
1086 add_dep(n, last_accumulator_write, 0);
1087 } else if (inst->src[i].file != BAD_FILE &&
1088 inst->src[i].file != IMM &&
1089 inst->src[i].file != UNIFORM) {
1090 assert(inst->src[i].file != MRF);
1091 add_barrier_deps(n);
1092 }
1093 }
1094
1095 if (inst->base_mrf != -1) {
1096 for (int i = 0; i < inst->mlen; i++) {
1097 /* It looks like the MRF regs are released in the send
1098 * instruction once it's sent, not when the result comes
1099 * back.
1100 */
1101 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1102 }
1103 }
1104
1105 if (inst->reads_flag()) {
1106 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1107 }
1108
1109 if (inst->reads_accumulator_implicitly()) {
1110 add_dep(n, last_accumulator_write);
1111 }
1112
1113       /* Update the things this instruction wrote, so earlier reads
1114        * can mark this as a WAR dependency.
1115 */
1116 if (inst->dst.file == VGRF) {
1117 if (post_reg_alloc) {
1118 for (int r = 0; r < inst->regs_written; r++)
1119 last_grf_write[inst->dst.nr + r] = n;
1120 } else {
1121 for (int r = 0; r < inst->regs_written; r++) {
1122 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1123 }
1124 }
1125 } else if (inst->dst.file == MRF) {
1126 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1127
1128 last_mrf_write[reg] = n;
1129
1130 if (is_compressed(inst)) {
1131 if (inst->dst.nr & BRW_MRF_COMPR4)
1132 reg += 4;
1133 else
1134 reg++;
1135
1136 last_mrf_write[reg] = n;
1137 }
1138 } else if (inst->dst.file == FIXED_GRF) {
1139 if (post_reg_alloc) {
1140 for (int r = 0; r < inst->regs_written; r++)
1141 last_grf_write[inst->dst.nr + r] = n;
1142 } else {
1143 last_fixed_grf_write = n;
1144 }
1145 } else if (inst->dst.is_accumulator()) {
1146 last_accumulator_write = n;
1147 } else if (inst->dst.file != BAD_FILE &&
1148 !inst->dst.is_null()) {
1149 add_barrier_deps(n);
1150 }
1151
1152 if (inst->mlen > 0 && inst->base_mrf != -1) {
1153 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1154 last_mrf_write[inst->base_mrf + i] = n;
1155 }
1156 }
1157
1158 if (inst->writes_flag()) {
1159 last_conditional_mod[inst->flag_subreg] = n;
1160 }
1161
1162 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1163 last_accumulator_write = n;
1164 }
1165 }
1166 }
1167
1168 void
1169 vec4_instruction_scheduler::calculate_deps()
1170 {
1171 schedule_node *last_grf_write[grf_count];
1172 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1173 schedule_node *last_conditional_mod = NULL;
1174 schedule_node *last_accumulator_write = NULL;
1175 /* Fixed HW registers are assumed to be separate from the virtual
1176 * GRFs, so they can be tracked separately. We don't really write
1177 * to fixed GRFs much, so don't bother tracking them on a more
1178 * granular level.
1179 */
1180 schedule_node *last_fixed_grf_write = NULL;
1181
1182 /* The last instruction always needs to still be the last instruction.
1183 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1184 * other things after it would disturb the basic block, or it's the EOT
1185 * URB_WRITE and we should do a better job at dead code eliminating
1186 * anything that could have been scheduled after it.
1187 */
1188 schedule_node *last = (schedule_node *)instructions.get_tail();
1189 add_barrier_deps(last);
1190
1191 memset(last_grf_write, 0, sizeof(last_grf_write));
1192 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1193
1194 /* top-to-bottom dependencies: RAW and WAW. */
1195 foreach_in_list(schedule_node, n, &instructions) {
1196 vec4_instruction *inst = (vec4_instruction *)n->inst;
1197
1198 if (inst->has_side_effects())
1199 add_barrier_deps(n);
1200
1201 /* read-after-write deps. */
1202 for (int i = 0; i < 3; i++) {
1203 if (inst->src[i].file == VGRF) {
1204 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1205 add_dep(last_grf_write[inst->src[i].nr + j], n);
1206 } else if (inst->src[i].file == FIXED_GRF) {
1207 add_dep(last_fixed_grf_write, n);
1208 } else if (inst->src[i].is_accumulator()) {
1209 assert(last_accumulator_write);
1210 add_dep(last_accumulator_write, n);
1211 } else if (inst->src[i].file != BAD_FILE &&
1212 inst->src[i].file != IMM &&
1213 inst->src[i].file != UNIFORM) {
1214 /* No reads from MRF, and ATTR is already translated away */
1215 assert(inst->src[i].file != MRF &&
1216 inst->src[i].file != ATTR);
1217 add_barrier_deps(n);
1218 }
1219 }
1220
1221 if (!inst->is_send_from_grf()) {
1222 for (int i = 0; i < inst->mlen; i++) {
1223 /* It looks like the MRF regs are released in the send
1224 * instruction once it's sent, not when the result comes
1225 * back.
1226 */
1227 add_dep(last_mrf_write[inst->base_mrf + i], n);
1228 }
1229 }
1230
1231 if (inst->reads_flag()) {
1232 assert(last_conditional_mod);
1233 add_dep(last_conditional_mod, n);
1234 }
1235
1236 if (inst->reads_accumulator_implicitly()) {
1237 assert(last_accumulator_write);
1238 add_dep(last_accumulator_write, n);
1239 }
1240
1241 /* write-after-write deps. */
1242 if (inst->dst.file == VGRF) {
1243 for (unsigned j = 0; j < inst->regs_written; ++j) {
1244 add_dep(last_grf_write[inst->dst.nr + j], n);
1245 last_grf_write[inst->dst.nr + j] = n;
1246 }
1247 } else if (inst->dst.file == MRF) {
1248 add_dep(last_mrf_write[inst->dst.nr], n);
1249 last_mrf_write[inst->dst.nr] = n;
1250 } else if (inst->dst.file == FIXED_GRF) {
1251 last_fixed_grf_write = n;
1252 } else if (inst->dst.is_accumulator()) {
1253 add_dep(last_accumulator_write, n);
1254 last_accumulator_write = n;
1255 } else if (inst->dst.file != BAD_FILE &&
1256 !inst->dst.is_null()) {
1257 add_barrier_deps(n);
1258 }
1259
1260 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1261 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1262 add_dep(last_mrf_write[inst->base_mrf + i], n);
1263 last_mrf_write[inst->base_mrf + i] = n;
1264 }
1265 }
1266
1267 if (inst->writes_flag()) {
1268 add_dep(last_conditional_mod, n, 0);
1269 last_conditional_mod = n;
1270 }
1271
1272 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1273 !inst->dst.is_accumulator()) {
1274 add_dep(last_accumulator_write, n);
1275 last_accumulator_write = n;
1276 }
1277 }
1278
1279 /* bottom-to-top dependencies: WAR */
1280 memset(last_grf_write, 0, sizeof(last_grf_write));
1281 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1282 last_conditional_mod = NULL;
1283 last_accumulator_write = NULL;
1284 last_fixed_grf_write = NULL;
1285
1286 exec_node *node;
1287 exec_node *prev;
1288 for (node = instructions.get_tail(), prev = node->prev;
1289 !node->is_head_sentinel();
1290 node = prev, prev = node->prev) {
1291 schedule_node *n = (schedule_node *)node;
1292 vec4_instruction *inst = (vec4_instruction *)n->inst;
1293
1294 /* write-after-read deps. */
1295 for (int i = 0; i < 3; i++) {
1296 if (inst->src[i].file == VGRF) {
1297 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1298 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1299 } else if (inst->src[i].file == FIXED_GRF) {
1300 add_dep(n, last_fixed_grf_write);
1301 } else if (inst->src[i].is_accumulator()) {
1302 add_dep(n, last_accumulator_write);
1303 } else if (inst->src[i].file != BAD_FILE &&
1304 inst->src[i].file != IMM &&
1305 inst->src[i].file != UNIFORM) {
1306 assert(inst->src[i].file != MRF &&
1307 inst->src[i].file != ATTR);
1308 add_barrier_deps(n);
1309 }
1310 }
1311
1312 if (!inst->is_send_from_grf()) {
1313 for (int i = 0; i < inst->mlen; i++) {
1314 /* It looks like the MRF regs are released in the send
1315 * instruction once it's sent, not when the result comes
1316 * back.
1317 */
1318 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1319 }
1320 }
1321
1322 if (inst->reads_flag()) {
1323 add_dep(n, last_conditional_mod);
1324 }
1325
1326 if (inst->reads_accumulator_implicitly()) {
1327 add_dep(n, last_accumulator_write);
1328 }
1329
1330       /* Update the things this instruction wrote, so earlier reads
1331        * can mark this as a WAR dependency.
1332 */
1333 if (inst->dst.file == VGRF) {
1334 for (unsigned j = 0; j < inst->regs_written; ++j)
1335 last_grf_write[inst->dst.nr + j] = n;
1336 } else if (inst->dst.file == MRF) {
1337 last_mrf_write[inst->dst.nr] = n;
1338 } else if (inst->dst.file == FIXED_GRF) {
1339 last_fixed_grf_write = n;
1340 } else if (inst->dst.is_accumulator()) {
1341 last_accumulator_write = n;
1342 } else if (inst->dst.file != BAD_FILE &&
1343 !inst->dst.is_null()) {
1344 add_barrier_deps(n);
1345 }
1346
1347 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1348 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1349 last_mrf_write[inst->base_mrf + i] = n;
1350 }
1351 }
1352
1353 if (inst->writes_flag()) {
1354 last_conditional_mod = n;
1355 }
1356
1357 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1358 last_accumulator_write = n;
1359 }
1360 }
1361 }
1362
1363 schedule_node *
1364 fs_instruction_scheduler::choose_instruction_to_schedule()
1365 {
1366 schedule_node *chosen = NULL;
1367
1368 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1369 int chosen_time = 0;
1370
1371 /* Of the instructions ready to execute or the closest to
1372 * being ready, choose the oldest one.
1373 */
1374 foreach_in_list(schedule_node, n, &instructions) {
1375 if (!chosen || n->unblocked_time < chosen_time) {
1376 chosen = n;
1377 chosen_time = n->unblocked_time;
1378 }
1379 }
1380 } else {
1381 /* Before register allocation, we don't care about the latencies of
1382 * instructions. All we care about is reducing live intervals of
1383 * variables so that we can avoid register spilling, or get SIMD16
1384 * shaders which naturally do a better job of hiding instruction
1385 * latency.
1386 */
1387 foreach_in_list(schedule_node, n, &instructions) {
1388 fs_inst *inst = (fs_inst *)n->inst;
1389
1390 if (!chosen) {
1391 chosen = n;
1392 continue;
1393 }
1394
1395 /* Most important: If we can definitely reduce register pressure, do
1396 * so immediately.
1397 */
1398 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1399 int chosen_register_pressure_benefit =
1400 get_register_pressure_benefit(chosen->inst);
1401
1402 if (register_pressure_benefit > 0 &&
1403 register_pressure_benefit > chosen_register_pressure_benefit) {
1404 chosen = n;
1405 continue;
1406 } else if (chosen_register_pressure_benefit > 0 &&
1407 (register_pressure_benefit <
1408 chosen_register_pressure_benefit)) {
1409 continue;
1410 }
1411
1412 if (mode == SCHEDULE_PRE_LIFO) {
1413 /* Prefer instructions that recently became available for
1414 * scheduling. These are the things that are most likely to
1415 * (eventually) make a variable dead and reduce register pressure.
1416 * Typical register pressure estimates don't work for us because
1417 * most of our pressure comes from texturing, where no single
1418 * instruction to schedule will make a vec4 value dead.
1419 */
1420 if (n->cand_generation > chosen->cand_generation) {
1421 chosen = n;
1422 continue;
1423 } else if (n->cand_generation < chosen->cand_generation) {
1424 continue;
1425 }
1426
1427 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1428 * do this, then because we prefer instructions that just became
1429 * candidates, we'll end up in a pattern of scheduling a SEND,
1430 * then the MRFs for the next SEND, then the next SEND, then the
1431 * MRFs, etc., without ever consuming the results of a send.
1432 */
1433 if (v->devinfo->gen < 7) {
1434 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1435
1436             /* We use a regs_written larger than a single result's worth of
1437              * registers as our test for the kind of send instruction to
1438              * avoid -- only sends generate many regs, and a single-result
1439              * send is probably actually reducing register pressure.
1440 */
1441 if (inst->regs_written <= inst->exec_size / 8 &&
1442 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1443 chosen = n;
1444 continue;
1445 } else if (inst->regs_written > chosen_inst->regs_written) {
1446 continue;
1447 }
1448 }
1449 }
1450
1451 /* For instructions pushed on the cands list at the same time, prefer
1452          * the one with the highest delay to the end of the program.  This one
1453          * is most likely to have its values consumed first (such as
1454 * for a large tree of lowered ubo loads, which appear reversed in
1455 * the instruction stream with respect to when they can be consumed).
1456 */
1457 if (n->delay > chosen->delay) {
1458 chosen = n;
1459 continue;
1460 } else if (n->delay < chosen->delay) {
1461 continue;
1462 }
1463
1464 /* If all other metrics are equal, we prefer the first instruction in
1465 * the list (program execution).
1466 */
1467 }
1468 }
1469
1470 return chosen;
1471 }
1472
1473 schedule_node *
1474 vec4_instruction_scheduler::choose_instruction_to_schedule()
1475 {
1476 schedule_node *chosen = NULL;
1477 int chosen_time = 0;
1478
1479 /* Of the instructions ready to execute or the closest to being ready,
1480 * choose the oldest one.
1481 */
1482 foreach_in_list(schedule_node, n, &instructions) {
1483 if (!chosen || n->unblocked_time < chosen_time) {
1484 chosen = n;
1485 chosen_time = n->unblocked_time;
1486 }
1487 }
1488
1489 return chosen;
1490 }
1491
1492 int
1493 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1494 {
1495 if (is_compressed((fs_inst *)inst))
1496 return 4;
1497 else
1498 return 2;
1499 }
1500
1501 int
1502 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1503 {
1504 /* We always execute as two vec4s in parallel. */
1505 return 2;
1506 }
1507
1508 void
1509 instruction_scheduler::schedule_instructions(bblock_t *block)
1510 {
1511 const struct brw_device_info *devinfo = bs->devinfo;
1512 backend_instruction *inst = block->end();
1513 time = 0;
1514 if (!post_reg_alloc)
1515 reg_pressure = reg_pressure_in[block->num];
1516 block_idx = block->num;
1517
1518 /* Remove non-DAG heads from the list. */
1519 foreach_in_list_safe(schedule_node, n, &instructions) {
1520 if (n->parent_count != 0)
1521 n->remove();
1522 }
1523
1524 unsigned cand_generation = 1;
1525 while (!instructions.is_empty()) {
1526 schedule_node *chosen = choose_instruction_to_schedule();
1527
1528 /* Schedule this instruction. */
1529 assert(chosen);
1530 chosen->remove();
1531 inst->insert_before(block, chosen->inst);
1532 instructions_to_schedule--;
1533
1534 if (!post_reg_alloc) {
1535 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1536 update_register_pressure(chosen->inst);
1537 }
1538
1539 /* If we expected a delay for scheduling, then bump the clock to reflect
1540 * that. In reality, the hardware will switch to another hyperthread
1541 * and may not return to dispatching our thread for a while even after
1542 * we're unblocked. After this, we have the time when the chosen
1543 * instruction will start executing.
1544 */
1545 time = MAX2(time, chosen->unblocked_time);
1546
1547 /* Update the clock for how soon an instruction could start after the
1548 * chosen one.
1549 */
1550 time += issue_time(chosen->inst);
1551
1552 if (debug) {
1553 fprintf(stderr, "clock %4d, scheduled: ", time);
1554 bs->dump_instruction(chosen->inst);
1555 if (!post_reg_alloc)
1556 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1557 }
1558
1559 /* Now that we've scheduled a new instruction, some of its
1560 * children can be promoted to the list of instructions ready to
1561 * be scheduled. Update the children's unblocked time for this
1562 * DAG edge as we do so.
1563 */
1564 for (int i = chosen->child_count - 1; i >= 0; i--) {
1565 schedule_node *child = chosen->children[i];
1566
1567 child->unblocked_time = MAX2(child->unblocked_time,
1568 time + chosen->child_latency[i]);
1569
1570 if (debug) {
1571 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1572 bs->dump_instruction(child->inst);
1573 }
1574
1575 child->cand_generation = cand_generation;
1576 child->parent_count--;
1577 if (child->parent_count == 0) {
1578 if (debug) {
1579 fprintf(stderr, "\t\tnow available\n");
1580 }
1581 instructions.push_head(child);
1582 }
1583 }
1584 cand_generation++;
1585
1586 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1587       * but it's more limited pre-Gen6, so if we send something off to it then
1588 * the next math instruction isn't going to make progress until the first
1589 * is done.
1590 */
1591 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1592 foreach_in_list(schedule_node, n, &instructions) {
1593 if (n->inst->is_math())
1594 n->unblocked_time = MAX2(n->unblocked_time,
1595 time + chosen->latency);
1596 }
1597 }
1598 }
1599
1600 if (block->end()->opcode == BRW_OPCODE_NOP)
1601 block->end()->remove(block);
1602 assert(instructions_to_schedule == 0);
1603
1604 block->cycle_count = time;
1605 }
1606
1607 static unsigned get_cycle_count(cfg_t *cfg)
1608 {
1609 unsigned count = 0, multiplier = 1;
1610 foreach_block(block, cfg) {
1611 if (block->start()->opcode == BRW_OPCODE_DO)
1612 multiplier *= 10; /* assume that loops execute ~10 times */
1613
1614 count += block->cycle_count * multiplier;
1615
1616 if (block->end()->opcode == BRW_OPCODE_WHILE)
1617 multiplier /= 10;
1618 }
1619
1620 return count;
1621 }
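/* A small worked example of the weighting above (illustrative only): a
 * block outside any loop contributes cycle_count * 1, a block inside a
 * single DO/WHILE loop contributes cycle_count * 10, and a block inside a
 * doubly nested loop contributes cycle_count * 100, reflecting the
 * assumption that each loop body runs ~10 times.
 */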
1622
1623 void
1624 instruction_scheduler::run(cfg_t *cfg)
1625 {
1626 if (debug && !post_reg_alloc) {
1627 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1628 post_reg_alloc);
1629 bs->dump_instructions();
1630 }
1631
1632 if (!post_reg_alloc)
1633 setup_liveness(cfg);
1634
1635 foreach_block(block, cfg) {
1636 if (block->end_ip - block->start_ip <= 1)
1637 continue;
1638
1639 if (reads_remaining) {
1640 memset(reads_remaining, 0,
1641 grf_count * sizeof(*reads_remaining));
1642 memset(hw_reads_remaining, 0,
1643 hw_reg_count * sizeof(*hw_reads_remaining));
1644 memset(written, 0, grf_count * sizeof(*written));
1645
1646 foreach_inst_in_block(fs_inst, inst, block)
1647 count_reads_remaining(inst);
1648 }
1649
1650 add_insts_from_block(block);
1651
1652 calculate_deps();
1653
1654 foreach_in_list(schedule_node, n, &instructions) {
1655 compute_delay(n);
1656 }
1657
1658 schedule_instructions(block);
1659 }
1660
1661 if (debug && !post_reg_alloc) {
1662 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1663 post_reg_alloc);
1664 bs->dump_instructions();
1665 }
1666
1667 cfg->cycle_count = get_cycle_count(cfg);
1668 }
1669
1670 void
1671 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1672 {
1673 if (mode != SCHEDULE_POST)
1674 calculate_live_intervals();
1675
1676 int grf_count;
1677 if (mode == SCHEDULE_POST)
1678 grf_count = grf_used;
1679 else
1680 grf_count = alloc.count;
1681
1682 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1683 cfg->num_blocks, mode);
1684 sched.run(cfg);
1685
1686 if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
1687 fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
1688 stage_abbrev, dispatch_width, sched.time);
1689 }
1690
1691 invalidate_live_intervals();
1692 }
1693
1694 void
1695 vec4_visitor::opt_schedule_instructions()
1696 {
1697 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1698 sched.run(cfg);
1699
1700 if (unlikely(debug_enabled)) {
1701 fprintf(stderr, "%s estimated execution time: %d cycles\n",
1702 stage_abbrev, sched.time);
1703 }
1704
1705 invalidate_live_intervals();
1706 }