i965: Remove incorrect cycle estimates.
[mesa.git] / src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
36 /** @file brw_schedule_instructions.cpp
37 *
38 * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
47 * since actually computing an optimal schedule is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54 * immediately, and there is a range of heuristic options to choose
55 * from in picking among those.
56 */
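/* A small worked example of the clock heuristic above: with the clock at 0,
 * scheduling a node with issue_time 2 and latency 14 advances the clock to 2
 * and marks that node's children as unblocked no earlier than cycle
 * 2 + 14 = 16, while any remaining DAG head with no unscheduled parents is
 * still unblocked at cycle 0 and is therefore preferred next.
 */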
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85 * This is the instruction's latency plus the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89 };
90
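/* The Gen4 estimates below model a math-box operation as a number of rounds
 * through the shared math unit times 8 channels times ~22 cycles per round;
 * e.g. POW works out to 8 * 8 * 22 = 1408 cycles, while ordinary ALU
 * instructions get a nominal latency of 2.
 */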
91 void
92 schedule_node::set_latency_gen4()
93 {
94 int chans = 8;
95 int math_latency = 22;
96
97 switch (inst->opcode) {
98 case SHADER_OPCODE_RCP:
99 this->latency = 1 * chans * math_latency;
100 break;
101 case SHADER_OPCODE_RSQ:
102 this->latency = 2 * chans * math_latency;
103 break;
104 case SHADER_OPCODE_INT_QUOTIENT:
105 case SHADER_OPCODE_SQRT:
106 case SHADER_OPCODE_LOG2:
107 /* Full-precision log; the partial-precision version is 2 rounds. */
108 this->latency = 3 * chans * math_latency;
109 break;
110 case SHADER_OPCODE_INT_REMAINDER:
111 case SHADER_OPCODE_EXP2:
112 /* Full precision; the partial-precision version is 3 rounds, same throughput. */
113 this->latency = 4 * chans * math_latency;
114 break;
115 case SHADER_OPCODE_POW:
116 this->latency = 8 * chans * math_latency;
117 break;
118 case SHADER_OPCODE_SIN:
119 case SHADER_OPCODE_COS:
120 /* minimum latency, max is 12 rounds. */
121 this->latency = 5 * chans * math_latency;
122 break;
123 default:
124 this->latency = 2;
125 break;
126 }
127 }
128
129 void
130 schedule_node::set_latency_gen7(bool is_haswell)
131 {
132 switch (inst->opcode) {
133 case BRW_OPCODE_MAD:
134 /* 2 cycles
135 * (since the last two src operands are in different register banks):
136 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
137 *
138 * 3 cycles on IVB, 4 on HSW
139 * (since the last two src operands are in the same register bank):
140 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
141 *
142 * 18 cycles on IVB, 16 on HSW
143 * (since the last two src operands are in different register banks):
144 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
145 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
146 *
147 * 20 cycles on IVB, 18 on HSW
148 * (since the last two src operands are in the same register bank):
149 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
150 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
151 */
152
153 /* Our register allocator doesn't know about register banks, so use the
154 * higher latency.
155 */
156 latency = is_haswell ? 16 : 18;
157 break;
158
159 case BRW_OPCODE_LRP:
160 /* 2 cycles
161 * (since the last two src operands are in different register banks):
162 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
163 *
164 * 3 cycles on IVB, 4 on HSW
165 * (since the last two src operands are in the same register bank):
166 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
167 *
168 * 16 cycles on IVB, 14 on HSW
169 * (since the last two src operands are in different register banks):
170 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
171 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
172 *
173 * 16 cycles
174 * (since the last two src operands are in the same register bank):
175 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
176 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
177 */
178
179 /* Our register allocator doesn't know about register banks, so use the
180 * higher latency.
181 */
182 latency = 14;
183 break;
184
185 case SHADER_OPCODE_RCP:
186 case SHADER_OPCODE_RSQ:
187 case SHADER_OPCODE_SQRT:
188 case SHADER_OPCODE_LOG2:
189 case SHADER_OPCODE_EXP2:
190 case SHADER_OPCODE_SIN:
191 case SHADER_OPCODE_COS:
192 /* 2 cycles:
193 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
194 *
195 * 18 cycles:
196 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
197 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
198 *
199 * Same for exp2, log2, rsq, sqrt, sin, cos.
200 */
201 latency = is_haswell ? 14 : 16;
202 break;
203
204 case SHADER_OPCODE_POW:
205 /* 2 cycles:
206 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
207 *
208 * 26 cycles:
209 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
210 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
211 */
212 latency = is_haswell ? 22 : 24;
213 break;
214
215 case SHADER_OPCODE_TEX:
216 case SHADER_OPCODE_TXD:
217 case SHADER_OPCODE_TXF:
218 case SHADER_OPCODE_TXL:
219 /* 18 cycles:
220 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
221 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
222 * send(8) g4<1>UW g114<8,8,1>F
223 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
224 *
225 * 697 +/-49 cycles (min 610, n=26):
226 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
227 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
228 * send(8) g4<1>UW g114<8,8,1>F
229 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
230 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
231 *
232 * So our first texture load of the batchbuffer takes ~700 cycles,
233 * since the caches are cold at that point.
234 *
235 * 840 +/- 92 cycles (min 720, n=25):
236 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
237 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
238 * send(8) g4<1>UW g114<8,8,1>F
239 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
240 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
241 * send(8) g4<1>UW g114<8,8,1>F
242 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
243 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
244 *
245 * On the second load, it takes just an extra ~140 cycles, and after
246 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
247 *
248 * 683 +/- 49 cycles (min = 602, n=47):
249 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
250 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
251 * send(8) g4<1>UW g114<8,8,1>F
252 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
253 * send(8) g50<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
256 *
257 * The unit appears to be pipelined, since this matches up with the
258 * cache-cold case, despite there being two loads here. If you replace
259 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
260 *
261 * So, take some number between the cache-hot 140 cycles and the
262 * cache-cold 700 cycles. No particular tuning was done on this.
263 *
264 * I haven't done significant testing of the non-TEX opcodes. TXL at
265 * least looked about the same as TEX.
266 */
267 latency = 200;
268 break;
269
270 case SHADER_OPCODE_TXS:
271 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
272 * cycles (n=15):
273 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
274 * send(8) g6<1>UW g114<8,8,1>F
275 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
276 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
277 *
278 *
279 * Two loads was 535 +/- 30 cycles (n=19):
280 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
281 * send(16) g6<1>UW g114<8,8,1>F
282 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
283 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
284 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
285 * send(16) g8<1>UW g114<8,8,1>F
286 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
287 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
288 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
289 *
290 * Since the only cache that should matter is the instruction/state
291 * cache containing the surface state, assume that we always have hot
292 * caches.
293 */
294 latency = 100;
295 break;
296
297 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
298 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
299 case VS_OPCODE_PULL_CONSTANT_LOAD:
300 /* Testing varying-index pull constants:
301 *
302 * 16 cycles:
303 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
304 * send(8) g4<1>F g4<8,8,1>D
305 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
306 *
307 * ~480 cycles:
308 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
309 * send(8) g4<1>F g4<8,8,1>D
310 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
311 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
312 *
313 * ~620 cycles:
314 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
315 * send(8) g4<1>F g4<8,8,1>D
316 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
317 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
318 * send(8) g4<1>F g4<8,8,1>D
319 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
320 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
321 *
322 * So, if it's cache-hot, it's about 140. If it's cache-cold, it's
323 * about 460. We expect to mostly be cache hot, so pick something more
324 * in that direction.
325 */
326 latency = 200;
327 break;
328
329 case SHADER_OPCODE_GEN7_SCRATCH_READ:
330 /* Testing a load from offset 0 that had previously been written:
331 *
332 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
333 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
334 *
335 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
336 * then around 140. Presumably this is cache hit vs miss.
337 */
338 latency = 50;
339 break;
340
341 case SHADER_OPCODE_UNTYPED_ATOMIC:
342 case SHADER_OPCODE_TYPED_ATOMIC:
343 /* Test code:
344 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
345 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
346 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
347 * send(8) g4<1>ud g112<8,8,1>ud
348 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
349 *
350 * Running it 100 times as a fragment shader on a 128x128 quad
351 * gives an average latency of 13867 cycles per atomic op,
352 * standard deviation 3%. Note that this is a rather
353 * pessimistic estimate; the actual latency in cases with few
354 * collisions between threads and favorable pipelining has been
355 * seen to be reduced by a factor of 100.
356 */
357 latency = 14000;
358 break;
359
360 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
361 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
362 case SHADER_OPCODE_TYPED_SURFACE_READ:
363 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
364 /* Test code:
365 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
366 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
367 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
368 * send(8) g4<1>UD g112<8,8,1>UD
369 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
370 * .
371 * . [repeats 8 times]
372 * .
373 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
374 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
375 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
376 * send(8) g4<1>UD g112<8,8,1>UD
377 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
378 *
379 * Running it 100 times as a fragment shader on a 128x128 quad
380 * gives an average latency of 583 cycles per surface read,
381 * standard deviation 0.9%.
382 */
383 latency = is_haswell ? 300 : 600;
384 break;
385
386 default:
387 /* 2 cycles:
388 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
389 *
390 * 16 cycles:
391 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
392 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
393 */
394 latency = 14;
395 break;
396 }
397 }
398
399 class instruction_scheduler {
400 public:
401 instruction_scheduler(backend_shader *s, int grf_count,
402 int hw_reg_count, int block_count,
403 instruction_scheduler_mode mode)
404 {
405 this->bs = s;
406 this->mem_ctx = ralloc_context(NULL);
407 this->grf_count = grf_count;
408 this->hw_reg_count = hw_reg_count;
409 this->instructions.make_empty();
410 this->instructions_to_schedule = 0;
411 this->post_reg_alloc = (mode == SCHEDULE_POST);
412 this->mode = mode;
413 this->time = 0;
414 if (!post_reg_alloc) {
415 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
416
417 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
418 for (int i = 0; i < block_count; i++)
419 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
420 BITSET_WORDS(grf_count));
421
422 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
423 for (int i = 0; i < block_count; i++)
424 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
425 BITSET_WORDS(grf_count));
426
427 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
428 for (int i = 0; i < block_count; i++)
429 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
430 BITSET_WORDS(hw_reg_count));
431
432 this->written = rzalloc_array(mem_ctx, bool, grf_count);
433
434 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
435
436 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
437 } else {
438 this->reg_pressure_in = NULL;
439 this->livein = NULL;
440 this->liveout = NULL;
441 this->hw_liveout = NULL;
442 this->written = NULL;
443 this->reads_remaining = NULL;
444 this->hw_reads_remaining = NULL;
445 }
446 }
447
448 ~instruction_scheduler()
449 {
450 ralloc_free(this->mem_ctx);
451 }
452 void add_barrier_deps(schedule_node *n);
453 void add_dep(schedule_node *before, schedule_node *after, int latency);
454 void add_dep(schedule_node *before, schedule_node *after);
455
456 void run(cfg_t *cfg);
457 void add_insts_from_block(bblock_t *block);
458 void compute_delay(schedule_node *node);
459 virtual void calculate_deps() = 0;
460 virtual schedule_node *choose_instruction_to_schedule() = 0;
461
462 /**
463 * Returns how many cycles it takes the instruction to issue.
464 *
465 * Instructions in Gen hardware are handled one SIMD4 vector at a time,
466 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
467 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
468 */
469 virtual int issue_time(backend_instruction *inst) = 0;
470
471 virtual void count_reads_remaining(backend_instruction *inst) = 0;
472 virtual void setup_liveness(cfg_t *cfg) = 0;
473 virtual void update_register_pressure(backend_instruction *inst) = 0;
474 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
475
476 void schedule_instructions(bblock_t *block);
477
478 void *mem_ctx;
479
480 bool post_reg_alloc;
481 int instructions_to_schedule;
482 int grf_count;
483 int hw_reg_count;
484 int time;
485 int reg_pressure;
486 int block_idx;
487 exec_list instructions;
488 backend_shader *bs;
489
490 instruction_scheduler_mode mode;
491
492 /*
493 * The register pressure at the beginning of each basic block.
494 */
495
496 int *reg_pressure_in;
497
498 /*
499 * The virtual GRFs whose live ranges overlap the beginning of each basic block.
500 */
501
502 BITSET_WORD **livein;
503
504 /*
505 * The virtual GRFs whose live ranges overlap the end of each basic block.
506 */
507
508 BITSET_WORD **liveout;
509
510 /*
511 * The hardware GRFs whose live ranges overlap the end of each basic block.
512 */
513
514 BITSET_WORD **hw_liveout;
515
516 /*
517 * Whether we've scheduled a write for this virtual GRF yet.
518 */
519
520 bool *written;
521
522 /*
523 * How many reads we haven't scheduled for this virtual GRF yet.
524 */
525
526 int *reads_remaining;
527
528 /*
529 * How many reads we haven't scheduled for this hardware GRF yet.
530 */
531
532 int *hw_reads_remaining;
533 };
534
535 class fs_instruction_scheduler : public instruction_scheduler
536 {
537 public:
538 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
539 int block_count,
540 instruction_scheduler_mode mode);
541 void calculate_deps();
542 bool is_compressed(fs_inst *inst);
543 schedule_node *choose_instruction_to_schedule();
544 int issue_time(backend_instruction *inst);
545 fs_visitor *v;
546
547 void count_reads_remaining(backend_instruction *inst);
548 void setup_liveness(cfg_t *cfg);
549 void update_register_pressure(backend_instruction *inst);
550 int get_register_pressure_benefit(backend_instruction *inst);
551 };
552
553 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
554 int grf_count, int hw_reg_count,
555 int block_count,
556 instruction_scheduler_mode mode)
557 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
558 v(v)
559 {
560 }
561
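/* Returns true if source index 'src' reads the same register region as an
 * earlier source of the same instruction, so duplicate reads are only counted
 * once by the register-pressure bookkeeping below.
 */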
562 static bool
563 is_src_duplicate(fs_inst *inst, int src)
564 {
565 for (int i = 0; i < src; i++)
566 if (inst->src[i].equals(inst->src[src]))
567 return true;
568
569 return false;
570 }
571
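/* Add this instruction's source reads to the per-VGRF and per-fixed-GRF
 * counts of reads that still have to be scheduled in the current block;
 * get_register_pressure_benefit() uses these counts to spot the last
 * outstanding read of a register.
 */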
572 void
573 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
574 {
575 fs_inst *inst = (fs_inst *)be;
576
577 if (!reads_remaining)
578 return;
579
580 for (int i = 0; i < inst->sources; i++) {
581 if (is_src_duplicate(inst, i))
582 continue;
583
584 if (inst->src[i].file == VGRF) {
585 reads_remaining[inst->src[i].nr]++;
586 } else if (inst->src[i].file == FIXED_GRF) {
587 if (inst->src[i].nr >= hw_reg_count)
588 continue;
589
590 for (int j = 0; j < inst->regs_read(i); j++)
591 hw_reads_remaining[inst->src[i].nr + j]++;
592 }
593 }
594 }
595
596 void
597 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
598 {
599 /* First, compute liveness on a per-GRF level using the in/out sets from
600 * liveness calculation.
601 */
602 for (int block = 0; block < cfg->num_blocks; block++) {
603 for (int i = 0; i < v->live_intervals->num_vars; i++) {
604 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
605 int vgrf = v->live_intervals->vgrf_from_var[i];
606 if (!BITSET_TEST(livein[block], vgrf)) {
607 reg_pressure_in[block] += v->alloc.sizes[vgrf];
608 BITSET_SET(livein[block], vgrf);
609 }
610 }
611
612 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
613 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
614 }
615 }
616
617 /* Now, extend the live in/live out sets for when a range crosses a block
618 * boundary, which matches what our register allocator/interference code
619 * does to account for force_writemask_all and incompatible exec_masks.
620 */
621 for (int block = 0; block < cfg->num_blocks - 1; block++) {
622 for (int i = 0; i < grf_count; i++) {
623 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
624 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
625 if (!BITSET_TEST(livein[block + 1], i)) {
626 reg_pressure_in[block + 1] += v->alloc.sizes[i];
627 BITSET_SET(livein[block + 1], i);
628 }
629
630 BITSET_SET(liveout[block], i);
631 }
632 }
633 }
634
635 int payload_last_use_ip[hw_reg_count];
636 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
637
638 for (int i = 0; i < hw_reg_count; i++) {
639 if (payload_last_use_ip[i] == -1)
640 continue;
641
642 for (int block = 0; block < cfg->num_blocks; block++) {
643 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
644 reg_pressure_in[block]++;
645
646 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
647 BITSET_SET(hw_liveout[block], i);
648 }
649 }
650 }
651
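/* Bookkeeping run when an instruction is scheduled: mark its destination
 * VGRF as written and retire one outstanding read from each distinct source
 * register.
 */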
652 void
653 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
654 {
655 fs_inst *inst = (fs_inst *)be;
656
657 if (!reads_remaining)
658 return;
659
660 if (inst->dst.file == VGRF) {
661 written[inst->dst.nr] = true;
662 }
663
664 for (int i = 0; i < inst->sources; i++) {
665 if (is_src_duplicate(inst, i))
666 continue;
667
668 if (inst->src[i].file == VGRF) {
669 reads_remaining[inst->src[i].nr]--;
670 } else if (inst->src[i].file == FIXED_GRF &&
671 inst->src[i].nr < hw_reg_count) {
672 for (int off = 0; off < inst->regs_read(i); off++)
673 hw_reads_remaining[inst->src[i].nr + off]--;
674 }
675 }
676 }
677
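/* Estimate, in GRFs, how much scheduling this instruction right now would
 * reduce register pressure: the first write of a VGRF that is not live into
 * the block adds pressure (negative benefit), while the last remaining read
 * of a VGRF or fixed GRF that is not live out of the block releases it
 * (positive benefit).
 */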
678 int
679 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
680 {
681 fs_inst *inst = (fs_inst *)be;
682 int benefit = 0;
683
684 if (inst->dst.file == VGRF) {
685 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
686 !written[inst->dst.nr])
687 benefit -= v->alloc.sizes[inst->dst.nr];
688 }
689
690 for (int i = 0; i < inst->sources; i++) {
691 if (is_src_duplicate(inst, i))
692 continue;
693
694 if (inst->src[i].file == VGRF &&
695 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
696 reads_remaining[inst->src[i].nr] == 1)
697 benefit += v->alloc.sizes[inst->src[i].nr];
698
699 if (inst->src[i].file == FIXED_GRF &&
700 inst->src[i].nr < hw_reg_count) {
701 for (int off = 0; off < inst->regs_read(i); off++) {
702 int reg = inst->src[i].nr + off;
703 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
704 hw_reads_remaining[reg] == 1) {
705 benefit++;
706 }
707 }
708 }
709 }
710
711 return benefit;
712 }
713
714 class vec4_instruction_scheduler : public instruction_scheduler
715 {
716 public:
717 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
718 void calculate_deps();
719 schedule_node *choose_instruction_to_schedule();
720 int issue_time(backend_instruction *inst);
721 vec4_visitor *v;
722
723 void count_reads_remaining(backend_instruction *inst);
724 void setup_liveness(cfg_t *cfg);
725 void update_register_pressure(backend_instruction *inst);
726 int get_register_pressure_benefit(backend_instruction *inst);
727 };
728
729 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
730 int grf_count)
731 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
732 v(v)
733 {
734 }
735
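/* The vec4 scheduler only runs post-register-allocation (it is constructed
 * with SCHEDULE_POST above), so the register-pressure hooks below are
 * intentionally empty.
 */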
736 void
737 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
748 {
749 }
750
751 int
752 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
753 {
754 return 0;
755 }
756
757 schedule_node::schedule_node(backend_instruction *inst,
758 instruction_scheduler *sched)
759 {
760 const struct brw_device_info *devinfo = sched->bs->devinfo;
761
762 this->inst = inst;
763 this->child_array_size = 0;
764 this->children = NULL;
765 this->child_latency = NULL;
766 this->child_count = 0;
767 this->parent_count = 0;
768 this->unblocked_time = 0;
769 this->cand_generation = 0;
770 this->delay = 0;
771
772 /* We can't measure Gen6 timings directly but expect them to be much
773 * closer to Gen7 than to Gen4.
774 */
775 if (!sched->post_reg_alloc)
776 this->latency = 1;
777 else if (devinfo->gen >= 6)
778 set_latency_gen7(devinfo->is_haswell);
779 else
780 set_latency_gen4();
781 }
782
783 void
784 instruction_scheduler::add_insts_from_block(bblock_t *block)
785 {
786 /* Removing the last instruction from a basic block removes the block as
787 * well, so put a NOP at the end to keep it alive.
788 */
789 if (!block->end()->is_control_flow()) {
790 backend_instruction *nop = new(mem_ctx) backend_instruction();
791 nop->opcode = BRW_OPCODE_NOP;
792 block->end()->insert_after(block, nop);
793 }
794
795 foreach_inst_in_block_safe(backend_instruction, inst, block) {
796 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
797 continue;
798
799 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
800
801 this->instructions_to_schedule++;
802
803 inst->remove(block);
804 instructions.push_tail(n);
805 }
806 }
807
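/* The resulting delay approximates the critical-path length from a node to
 * the end of the block, and is used before register allocation as a
 * tie-breaker in choose_instruction_to_schedule().
 */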
808 /** Recursive computation of the delay member of a node. */
809 void
810 instruction_scheduler::compute_delay(schedule_node *n)
811 {
812 if (!n->child_count) {
813 n->delay = issue_time(n->inst);
814 } else {
815 for (int i = 0; i < n->child_count; i++) {
816 if (!n->children[i]->delay)
817 compute_delay(n->children[i]);
818 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
819 }
820 }
821 }
822
823 /**
824 * Add a dependency between two instruction nodes.
825 *
826 * The @after node will be scheduled after @before. We will try to
827 * schedule it @latency cycles after @before, but no guarantees there.
828 */
829 void
830 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
831 int latency)
832 {
833 if (!before || !after)
834 return;
835
836 assert(before != after);
837
838 for (int i = 0; i < before->child_count; i++) {
839 if (before->children[i] == after) {
840 before->child_latency[i] = MAX2(before->child_latency[i], latency);
841 return;
842 }
843 }
844
845 if (before->child_array_size <= before->child_count) {
846 if (before->child_array_size < 16)
847 before->child_array_size = 16;
848 else
849 before->child_array_size *= 2;
850
851 before->children = reralloc(mem_ctx, before->children,
852 schedule_node *,
853 before->child_array_size);
854 before->child_latency = reralloc(mem_ctx, before->child_latency,
855 int, before->child_array_size);
856 }
857
858 before->children[before->child_count] = after;
859 before->child_latency[before->child_count] = latency;
860 before->child_count++;
861 after->parent_count++;
862 }
863
864 void
865 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
866 {
867 if (!before)
868 return;
869
870 add_dep(before, after, before->latency);
871 }
872
873 /**
874 * Sometimes we really want this node to execute after everything that
875 * came before it and before everything that follows it. This adds
876 * the deps to do so.
877 */
878 void
879 instruction_scheduler::add_barrier_deps(schedule_node *n)
880 {
881 schedule_node *prev = (schedule_node *)n->prev;
882 schedule_node *next = (schedule_node *)n->next;
883
884 if (prev) {
885 while (!prev->is_head_sentinel()) {
886 add_dep(prev, n, 0);
887 prev = (schedule_node *)prev->prev;
888 }
889 }
890
891 if (next) {
892 while (!next->is_tail_sentinel()) {
893 add_dep(n, next, 0);
894 next = (schedule_node *)next->next;
895 }
896 }
897 }
898
899 /* Instruction scheduling needs to be aware of when an MRF write
900 * actually writes two MRFs.
901 */
902 bool
903 fs_instruction_scheduler::is_compressed(fs_inst *inst)
904 {
905 return inst->exec_size == 16;
906 }
907
908 void
909 fs_instruction_scheduler::calculate_deps()
910 {
911 /* Pre-register-allocation, this tracks the last write per VGRF offset.
912 * After register allocation, reg_offsets are gone and we track individual
913 * GRF registers.
914 */
915 schedule_node *last_grf_write[grf_count * 16];
916 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
917 schedule_node *last_conditional_mod[2] = { NULL, NULL };
918 schedule_node *last_accumulator_write = NULL;
919 /* Fixed HW registers are assumed to be separate from the virtual
920 * GRFs, so they can be tracked separately. We don't really write
921 * to fixed GRFs much, so don't bother tracking them on a more
922 * granular level.
923 */
924 schedule_node *last_fixed_grf_write = NULL;
925
926 /* The last instruction always needs to still be the last
927 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
928 * WHILE) and scheduling other things after it would disturb the
929 * basic block, or it's FB_WRITE and we should do a better job at
930 * dead code elimination anyway.
931 */
932 schedule_node *last = (schedule_node *)instructions.get_tail();
933 add_barrier_deps(last);
934
935 memset(last_grf_write, 0, sizeof(last_grf_write));
936 memset(last_mrf_write, 0, sizeof(last_mrf_write));
937
938 /* top-to-bottom dependencies: RAW and WAW. */
939 foreach_in_list(schedule_node, n, &instructions) {
940 fs_inst *inst = (fs_inst *)n->inst;
941
942 if ((inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
943 inst->has_side_effects()) &&
944 inst->opcode != FS_OPCODE_FB_WRITE)
945 add_barrier_deps(n);
946
947 /* read-after-write deps. */
948 for (int i = 0; i < inst->sources; i++) {
949 if (inst->src[i].file == VGRF) {
950 if (post_reg_alloc) {
951 for (int r = 0; r < inst->regs_read(i); r++)
952 add_dep(last_grf_write[inst->src[i].nr + r], n);
953 } else {
954 for (int r = 0; r < inst->regs_read(i); r++) {
955 add_dep(last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], n);
956 }
957 }
958 } else if (inst->src[i].file == FIXED_GRF) {
959 if (post_reg_alloc) {
960 for (int r = 0; r < inst->regs_read(i); r++)
961 add_dep(last_grf_write[inst->src[i].nr + r], n);
962 } else {
963 add_dep(last_fixed_grf_write, n);
964 }
965 } else if (inst->src[i].is_accumulator()) {
966 add_dep(last_accumulator_write, n);
967 } else if (inst->src[i].file != BAD_FILE &&
968 inst->src[i].file != IMM &&
969 inst->src[i].file != UNIFORM) {
970 assert(inst->src[i].file != MRF);
971 add_barrier_deps(n);
972 }
973 }
974
975 if (inst->base_mrf != -1) {
976 for (int i = 0; i < inst->mlen; i++) {
977 /* It looks like the MRF regs are released in the send
978 * instruction once it's sent, not when the result comes
979 * back.
980 */
981 add_dep(last_mrf_write[inst->base_mrf + i], n);
982 }
983 }
984
985 if (inst->reads_flag()) {
986 add_dep(last_conditional_mod[inst->flag_subreg], n);
987 }
988
989 if (inst->reads_accumulator_implicitly()) {
990 add_dep(last_accumulator_write, n);
991 }
992
993 /* write-after-write deps. */
994 if (inst->dst.file == VGRF) {
995 if (post_reg_alloc) {
996 for (int r = 0; r < inst->regs_written; r++) {
997 add_dep(last_grf_write[inst->dst.nr + r], n);
998 last_grf_write[inst->dst.nr + r] = n;
999 }
1000 } else {
1001 for (int r = 0; r < inst->regs_written; r++) {
1002 add_dep(last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r], n);
1003 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1004 }
1005 }
1006 } else if (inst->dst.file == MRF) {
1007 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1008
1009 add_dep(last_mrf_write[reg], n);
1010 last_mrf_write[reg] = n;
1011 if (is_compressed(inst)) {
1012 if (inst->dst.nr & BRW_MRF_COMPR4)
1013 reg += 4;
1014 else
1015 reg++;
1016 add_dep(last_mrf_write[reg], n);
1017 last_mrf_write[reg] = n;
1018 }
1019 } else if (inst->dst.file == FIXED_GRF) {
1020 if (post_reg_alloc) {
1021 for (int r = 0; r < inst->regs_written; r++)
1022 last_grf_write[inst->dst.nr + r] = n;
1023 } else {
1024 last_fixed_grf_write = n;
1025 }
1026 } else if (inst->dst.is_accumulator()) {
1027 add_dep(last_accumulator_write, n);
1028 last_accumulator_write = n;
1029 } else if (inst->dst.file != BAD_FILE &&
1030 !inst->dst.is_null()) {
1031 add_barrier_deps(n);
1032 }
1033
1034 if (inst->mlen > 0 && inst->base_mrf != -1) {
1035 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1036 add_dep(last_mrf_write[inst->base_mrf + i], n);
1037 last_mrf_write[inst->base_mrf + i] = n;
1038 }
1039 }
1040
1041 if (inst->writes_flag()) {
1042 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1043 last_conditional_mod[inst->flag_subreg] = n;
1044 }
1045
1046 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1047 !inst->dst.is_accumulator()) {
1048 add_dep(last_accumulator_write, n);
1049 last_accumulator_write = n;
1050 }
1051 }
1052
1053 /* bottom-to-top dependencies: WAR */
1054 memset(last_grf_write, 0, sizeof(last_grf_write));
1055 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1056 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1057 last_accumulator_write = NULL;
1058 last_fixed_grf_write = NULL;
1059
1060 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1061 fs_inst *inst = (fs_inst *)n->inst;
1062
1063 /* write-after-read deps. */
1064 for (int i = 0; i < inst->sources; i++) {
1065 if (inst->src[i].file == VGRF) {
1066 if (post_reg_alloc) {
1067 for (int r = 0; r < inst->regs_read(i); r++)
1068 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1069 } else {
1070 for (int r = 0; r < inst->regs_read(i); r++) {
1071 add_dep(n, last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], 0);
1072 }
1073 }
1074 } else if (inst->src[i].file == FIXED_GRF) {
1075 if (post_reg_alloc) {
1076 for (int r = 0; r < inst->regs_read(i); r++)
1077 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1078 } else {
1079 add_dep(n, last_fixed_grf_write, 0);
1080 }
1081 } else if (inst->src[i].is_accumulator()) {
1082 add_dep(n, last_accumulator_write, 0);
1083 } else if (inst->src[i].file != BAD_FILE &&
1084 inst->src[i].file != IMM &&
1085 inst->src[i].file != UNIFORM) {
1086 assert(inst->src[i].file != MRF);
1087 add_barrier_deps(n);
1088 }
1089 }
1090
1091 if (inst->base_mrf != -1) {
1092 for (int i = 0; i < inst->mlen; i++) {
1093 /* It looks like the MRF regs are released in the send
1094 * instruction once it's sent, not when the result comes
1095 * back.
1096 */
1097 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1098 }
1099 }
1100
1101 if (inst->reads_flag()) {
1102 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1103 }
1104
1105 if (inst->reads_accumulator_implicitly()) {
1106 add_dep(n, last_accumulator_write);
1107 }
1108
1109 /* Update the things this instruction wrote, so earlier reads
1110 * can mark this as a WAR dependency.
1111 */
1112 if (inst->dst.file == VGRF) {
1113 if (post_reg_alloc) {
1114 for (int r = 0; r < inst->regs_written; r++)
1115 last_grf_write[inst->dst.nr + r] = n;
1116 } else {
1117 for (int r = 0; r < inst->regs_written; r++) {
1118 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1119 }
1120 }
1121 } else if (inst->dst.file == MRF) {
1122 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1123
1124 last_mrf_write[reg] = n;
1125
1126 if (is_compressed(inst)) {
1127 if (inst->dst.nr & BRW_MRF_COMPR4)
1128 reg += 4;
1129 else
1130 reg++;
1131
1132 last_mrf_write[reg] = n;
1133 }
1134 } else if (inst->dst.file == FIXED_GRF) {
1135 if (post_reg_alloc) {
1136 for (int r = 0; r < inst->regs_written; r++)
1137 last_grf_write[inst->dst.nr + r] = n;
1138 } else {
1139 last_fixed_grf_write = n;
1140 }
1141 } else if (inst->dst.is_accumulator()) {
1142 last_accumulator_write = n;
1143 } else if (inst->dst.file != BAD_FILE &&
1144 !inst->dst.is_null()) {
1145 add_barrier_deps(n);
1146 }
1147
1148 if (inst->mlen > 0 && inst->base_mrf != -1) {
1149 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1150 last_mrf_write[inst->base_mrf + i] = n;
1151 }
1152 }
1153
1154 if (inst->writes_flag()) {
1155 last_conditional_mod[inst->flag_subreg] = n;
1156 }
1157
1158 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1159 last_accumulator_write = n;
1160 }
1161 }
1162 }
1163
1164 void
1165 vec4_instruction_scheduler::calculate_deps()
1166 {
1167 schedule_node *last_grf_write[grf_count];
1168 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1169 schedule_node *last_conditional_mod = NULL;
1170 schedule_node *last_accumulator_write = NULL;
1171 /* Fixed HW registers are assumed to be separate from the virtual
1172 * GRFs, so they can be tracked separately. We don't really write
1173 * to fixed GRFs much, so don't bother tracking them on a more
1174 * granular level.
1175 */
1176 schedule_node *last_fixed_grf_write = NULL;
1177
1178 /* The last instruction always needs to still be the last instruction.
1179 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1180 * other things after it would disturb the basic block, or it's the EOT
1181 * URB_WRITE and we should do a better job of dead-code eliminating
1182 * anything that could have been scheduled after it.
1183 */
1184 schedule_node *last = (schedule_node *)instructions.get_tail();
1185 add_barrier_deps(last);
1186
1187 memset(last_grf_write, 0, sizeof(last_grf_write));
1188 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1189
1190 /* top-to-bottom dependencies: RAW and WAW. */
1191 foreach_in_list(schedule_node, n, &instructions) {
1192 vec4_instruction *inst = (vec4_instruction *)n->inst;
1193
1194 if (inst->has_side_effects() && inst->opcode != FS_OPCODE_FB_WRITE)
1195 add_barrier_deps(n);
1196
1197 /* read-after-write deps. */
1198 for (int i = 0; i < 3; i++) {
1199 if (inst->src[i].file == VGRF) {
1200 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1201 add_dep(last_grf_write[inst->src[i].nr + j], n);
1202 } else if (inst->src[i].file == FIXED_GRF) {
1203 add_dep(last_fixed_grf_write, n);
1204 } else if (inst->src[i].is_accumulator()) {
1205 assert(last_accumulator_write);
1206 add_dep(last_accumulator_write, n);
1207 } else if (inst->src[i].file != BAD_FILE &&
1208 inst->src[i].file != IMM &&
1209 inst->src[i].file != UNIFORM) {
1210 /* No reads from MRF, and ATTR is already translated away */
1211 assert(inst->src[i].file != MRF &&
1212 inst->src[i].file != ATTR);
1213 add_barrier_deps(n);
1214 }
1215 }
1216
1217 if (!inst->is_send_from_grf()) {
1218 for (int i = 0; i < inst->mlen; i++) {
1219 /* It looks like the MRF regs are released in the send
1220 * instruction once it's sent, not when the result comes
1221 * back.
1222 */
1223 add_dep(last_mrf_write[inst->base_mrf + i], n);
1224 }
1225 }
1226
1227 if (inst->reads_flag()) {
1228 assert(last_conditional_mod);
1229 add_dep(last_conditional_mod, n);
1230 }
1231
1232 if (inst->reads_accumulator_implicitly()) {
1233 assert(last_accumulator_write);
1234 add_dep(last_accumulator_write, n);
1235 }
1236
1237 /* write-after-write deps. */
1238 if (inst->dst.file == VGRF) {
1239 for (unsigned j = 0; j < inst->regs_written; ++j) {
1240 add_dep(last_grf_write[inst->dst.nr + j], n);
1241 last_grf_write[inst->dst.nr + j] = n;
1242 }
1243 } else if (inst->dst.file == MRF) {
1244 add_dep(last_mrf_write[inst->dst.nr], n);
1245 last_mrf_write[inst->dst.nr] = n;
1246 } else if (inst->dst.file == FIXED_GRF) {
1247 last_fixed_grf_write = n;
1248 } else if (inst->dst.is_accumulator()) {
1249 add_dep(last_accumulator_write, n);
1250 last_accumulator_write = n;
1251 } else if (inst->dst.file != BAD_FILE &&
1252 !inst->dst.is_null()) {
1253 add_barrier_deps(n);
1254 }
1255
1256 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1257 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1258 add_dep(last_mrf_write[inst->base_mrf + i], n);
1259 last_mrf_write[inst->base_mrf + i] = n;
1260 }
1261 }
1262
1263 if (inst->writes_flag()) {
1264 add_dep(last_conditional_mod, n, 0);
1265 last_conditional_mod = n;
1266 }
1267
1268 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1269 !inst->dst.is_accumulator()) {
1270 add_dep(last_accumulator_write, n);
1271 last_accumulator_write = n;
1272 }
1273 }
1274
1275 /* bottom-to-top dependencies: WAR */
1276 memset(last_grf_write, 0, sizeof(last_grf_write));
1277 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1278 last_conditional_mod = NULL;
1279 last_accumulator_write = NULL;
1280 last_fixed_grf_write = NULL;
1281
1282 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1283 vec4_instruction *inst = (vec4_instruction *)n->inst;
1284
1285 /* write-after-read deps. */
1286 for (int i = 0; i < 3; i++) {
1287 if (inst->src[i].file == VGRF) {
1288 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1289 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1290 } else if (inst->src[i].file == FIXED_GRF) {
1291 add_dep(n, last_fixed_grf_write);
1292 } else if (inst->src[i].is_accumulator()) {
1293 add_dep(n, last_accumulator_write);
1294 } else if (inst->src[i].file != BAD_FILE &&
1295 inst->src[i].file != IMM &&
1296 inst->src[i].file != UNIFORM) {
1297 assert(inst->src[i].file != MRF &&
1298 inst->src[i].file != ATTR);
1299 add_barrier_deps(n);
1300 }
1301 }
1302
1303 if (!inst->is_send_from_grf()) {
1304 for (int i = 0; i < inst->mlen; i++) {
1305 /* It looks like the MRF regs are released in the send
1306 * instruction once it's sent, not when the result comes
1307 * back.
1308 */
1309 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1310 }
1311 }
1312
1313 if (inst->reads_flag()) {
1314 add_dep(n, last_conditional_mod);
1315 }
1316
1317 if (inst->reads_accumulator_implicitly()) {
1318 add_dep(n, last_accumulator_write);
1319 }
1320
1321 /* Update the things this instruction wrote, so earlier reads
1322 * can mark this as a WAR dependency.
1323 */
1324 if (inst->dst.file == VGRF) {
1325 for (unsigned j = 0; j < inst->regs_written; ++j)
1326 last_grf_write[inst->dst.nr + j] = n;
1327 } else if (inst->dst.file == MRF) {
1328 last_mrf_write[inst->dst.nr] = n;
1329 } else if (inst->dst.file == FIXED_GRF) {
1330 last_fixed_grf_write = n;
1331 } else if (inst->dst.is_accumulator()) {
1332 last_accumulator_write = n;
1333 } else if (inst->dst.file != BAD_FILE &&
1334 !inst->dst.is_null()) {
1335 add_barrier_deps(n);
1336 }
1337
1338 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1339 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1340 last_mrf_write[inst->base_mrf + i] = n;
1341 }
1342 }
1343
1344 if (inst->writes_flag()) {
1345 last_conditional_mod = n;
1346 }
1347
1348 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1349 last_accumulator_write = n;
1350 }
1351 }
1352 }
1353
1354 schedule_node *
1355 fs_instruction_scheduler::choose_instruction_to_schedule()
1356 {
1357 schedule_node *chosen = NULL;
1358
1359 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1360 int chosen_time = 0;
1361
1362 /* Of the instructions ready to execute or the closest to
1363 * being ready, choose the oldest one.
1364 */
1365 foreach_in_list(schedule_node, n, &instructions) {
1366 if (!chosen || n->unblocked_time < chosen_time) {
1367 chosen = n;
1368 chosen_time = n->unblocked_time;
1369 }
1370 }
1371 } else {
1372 /* Before register allocation, we don't care about the latencies of
1373 * instructions. All we care about is reducing live intervals of
1374 * variables so that we can avoid register spilling, or get SIMD16
1375 * shaders which naturally do a better job of hiding instruction
1376 * latency.
1377 */
1378 foreach_in_list(schedule_node, n, &instructions) {
1379 fs_inst *inst = (fs_inst *)n->inst;
1380
1381 if (!chosen) {
1382 chosen = n;
1383 continue;
1384 }
1385
1386 /* Most important: If we can definitely reduce register pressure, do
1387 * so immediately.
1388 */
1389 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1390 int chosen_register_pressure_benefit =
1391 get_register_pressure_benefit(chosen->inst);
1392
1393 if (register_pressure_benefit > 0 &&
1394 register_pressure_benefit > chosen_register_pressure_benefit) {
1395 chosen = n;
1396 continue;
1397 } else if (chosen_register_pressure_benefit > 0 &&
1398 (register_pressure_benefit <
1399 chosen_register_pressure_benefit)) {
1400 continue;
1401 }
1402
1403 if (mode == SCHEDULE_PRE_LIFO) {
1404 /* Prefer instructions that recently became available for
1405 * scheduling. These are the things that are most likely to
1406 * (eventually) make a variable dead and reduce register pressure.
1407 * Typical register pressure estimates don't work for us because
1408 * most of our pressure comes from texturing, where no single
1409 * instruction to schedule will make a vec4 value dead.
1410 */
1411 if (n->cand_generation > chosen->cand_generation) {
1412 chosen = n;
1413 continue;
1414 } else if (n->cand_generation < chosen->cand_generation) {
1415 continue;
1416 }
1417
1418 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1419 * do this, then because we prefer instructions that just became
1420 * candidates, we'll end up in a pattern of scheduling a SEND,
1421 * then the MRFs for the next SEND, then the next SEND, then the
1422 * MRFs, etc., without ever consuming the results of a send.
1423 */
1424 if (v->devinfo->gen < 7) {
1425 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1426
1427 /* We use regs_written > exec_size / 8 as our test for the kind of send
1428 * instruction to avoid -- only sends generate many regs, and a
1429 * single-result send is probably actually reducing register
1430 * pressure.
1431 */
1432 if (inst->regs_written <= inst->exec_size / 8 &&
1433 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1434 chosen = n;
1435 continue;
1436 } else if (inst->regs_written > chosen_inst->regs_written) {
1437 continue;
1438 }
1439 }
1440 }
1441
1442 /* For instructions pushed on the cands list at the same time, prefer
1443 * the one with the highest delay to the end of the program. This is
1444 * most likely to have its values able to be consumed first (such as
1445 * for a large tree of lowered ubo loads, which appear reversed in
1446 * the instruction stream with respect to when they can be consumed).
1447 */
1448 if (n->delay > chosen->delay) {
1449 chosen = n;
1450 continue;
1451 } else if (n->delay < chosen->delay) {
1452 continue;
1453 }
1454
1455 /* If all other metrics are equal, we prefer the first instruction in
1456 * the list (program execution order).
1457 */
1458 }
1459 }
1460
1461 return chosen;
1462 }
1463
1464 schedule_node *
1465 vec4_instruction_scheduler::choose_instruction_to_schedule()
1466 {
1467 schedule_node *chosen = NULL;
1468 int chosen_time = 0;
1469
1470 /* Of the instructions ready to execute or the closest to being ready,
1471 * choose the oldest one.
1472 */
1473 foreach_in_list(schedule_node, n, &instructions) {
1474 if (!chosen || n->unblocked_time < chosen_time) {
1475 chosen = n;
1476 chosen_time = n->unblocked_time;
1477 }
1478 }
1479
1480 return chosen;
1481 }
1482
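/* Dispatch cost in cycles: SIMD16 (compressed) instructions issue as four
 * SIMD4 groups, everything else in the FS as two, matching the issue_time()
 * description in the scheduler base class.
 */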
1483 int
1484 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1485 {
1486 if (is_compressed((fs_inst *)inst))
1487 return 4;
1488 else
1489 return 2;
1490 }
1491
1492 int
1493 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1494 {
1495 /* We always execute as two vec4s in parallel. */
1496 return 2;
1497 }
1498
1499 void
1500 instruction_scheduler::schedule_instructions(bblock_t *block)
1501 {
1502 const struct brw_device_info *devinfo = bs->devinfo;
1503 backend_instruction *inst = block->end();
1504 time = 0;
1505 if (!post_reg_alloc)
1506 reg_pressure = reg_pressure_in[block->num];
1507 block_idx = block->num;
1508
1509 /* Remove non-DAG heads from the list. */
1510 foreach_in_list_safe(schedule_node, n, &instructions) {
1511 if (n->parent_count != 0)
1512 n->remove();
1513 }
1514
1515 unsigned cand_generation = 1;
1516 while (!instructions.is_empty()) {
1517 schedule_node *chosen = choose_instruction_to_schedule();
1518
1519 /* Schedule this instruction. */
1520 assert(chosen);
1521 chosen->remove();
1522 inst->insert_before(block, chosen->inst);
1523 instructions_to_schedule--;
1524
1525 if (!post_reg_alloc) {
1526 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1527 update_register_pressure(chosen->inst);
1528 }
1529
1530 /* If we expected a delay for scheduling, then bump the clock to reflect
1531 * that. In reality, the hardware will switch to another hyperthread
1532 * and may not return to dispatching our thread for a while even after
1533 * we're unblocked. After this, we have the time when the chosen
1534 * instruction will start executing.
1535 */
1536 time = MAX2(time, chosen->unblocked_time);
1537
1538 /* Update the clock for how soon an instruction could start after the
1539 * chosen one.
1540 */
1541 time += issue_time(chosen->inst);
1542
1543 if (debug) {
1544 fprintf(stderr, "clock %4d, scheduled: ", time);
1545 bs->dump_instruction(chosen->inst);
1546 if (!post_reg_alloc)
1547 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1548 }
1549
1550 /* Now that we've scheduled a new instruction, some of its
1551 * children can be promoted to the list of instructions ready to
1552 * be scheduled. Update the children's unblocked time for this
1553 * DAG edge as we do so.
1554 */
1555 for (int i = chosen->child_count - 1; i >= 0; i--) {
1556 schedule_node *child = chosen->children[i];
1557
1558 child->unblocked_time = MAX2(child->unblocked_time,
1559 time + chosen->child_latency[i]);
1560
1561 if (debug) {
1562 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1563 bs->dump_instruction(child->inst);
1564 }
1565
1566 child->cand_generation = cand_generation;
1567 child->parent_count--;
1568 if (child->parent_count == 0) {
1569 if (debug) {
1570 fprintf(stderr, "\t\tnow available\n");
1571 }
1572 instructions.push_head(child);
1573 }
1574 }
1575 cand_generation++;
1576
1577 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1578 * but it's more limited pre-Gen6, so if we send something off to it then
1579 * the next math instruction isn't going to make progress until the first
1580 * is done.
1581 */
1582 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1583 foreach_in_list(schedule_node, n, &instructions) {
1584 if (n->inst->is_math())
1585 n->unblocked_time = MAX2(n->unblocked_time,
1586 time + chosen->latency);
1587 }
1588 }
1589 }
1590
1591 if (block->end()->opcode == BRW_OPCODE_NOP)
1592 block->end()->remove(block);
1593 assert(instructions_to_schedule == 0);
1594
1595 block->cycle_count = time;
1596 }
1597
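/* Estimate a whole-program cycle count by summing the per-block cycle counts,
 * weighting blocks by an extra factor of 10 for each level of DO...WHILE
 * nesting, under the assumption that loops run roughly 10 iterations.
 */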
1598 static unsigned get_cycle_count(cfg_t *cfg)
1599 {
1600 unsigned count = 0, multiplier = 1;
1601 foreach_block(block, cfg) {
1602 if (block->start()->opcode == BRW_OPCODE_DO)
1603 multiplier *= 10; /* assume that loops execute ~10 times */
1604
1605 count += block->cycle_count * multiplier;
1606
1607 if (block->end()->opcode == BRW_OPCODE_WHILE)
1608 multiplier /= 10;
1609 }
1610
1611 return count;
1612 }
1613
1614 void
1615 instruction_scheduler::run(cfg_t *cfg)
1616 {
1617 if (debug && !post_reg_alloc) {
1618 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1619 post_reg_alloc);
1620 bs->dump_instructions();
1621 }
1622
1623 if (!post_reg_alloc)
1624 setup_liveness(cfg);
1625
1626 foreach_block(block, cfg) {
1627 if (block->end_ip - block->start_ip <= 1)
1628 continue;
1629
1630 if (reads_remaining) {
1631 memset(reads_remaining, 0,
1632 grf_count * sizeof(*reads_remaining));
1633 memset(hw_reads_remaining, 0,
1634 hw_reg_count * sizeof(*hw_reads_remaining));
1635 memset(written, 0, grf_count * sizeof(*written));
1636
1637 foreach_inst_in_block(fs_inst, inst, block)
1638 count_reads_remaining(inst);
1639 }
1640
1641 add_insts_from_block(block);
1642
1643 calculate_deps();
1644
1645 foreach_in_list(schedule_node, n, &instructions) {
1646 compute_delay(n);
1647 }
1648
1649 schedule_instructions(block);
1650 }
1651
1652 if (debug && !post_reg_alloc) {
1653 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1654 post_reg_alloc);
1655 bs->dump_instructions();
1656 }
1657
1658 cfg->cycle_count = get_cycle_count(cfg);
1659 }
1660
1661 void
1662 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1663 {
1664 if (mode != SCHEDULE_POST)
1665 calculate_live_intervals();
1666
1667 int grf_count;
1668 if (mode == SCHEDULE_POST)
1669 grf_count = grf_used;
1670 else
1671 grf_count = alloc.count;
1672
1673 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1674 cfg->num_blocks, mode);
1675 sched.run(cfg);
1676
1677 invalidate_live_intervals();
1678 }
1679
1680 void
1681 vec4_visitor::opt_schedule_instructions()
1682 {
1683 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1684 sched.run(cfg);
1685
1686 invalidate_live_intervals();
1687 }