i965: Don't add barrier deps for FB write messages.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
36 /** @file brw_schedule_instructions.cpp
37  *
38  * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
47  * since actually computing the optimal scheduling is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54 * immediately, and there are a range of heuristic options to choose
55 * from in picking among those.
56 */
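/* A rough sketch of the loop described above (illustrative only -- see
 * instruction_scheduler::schedule_instructions() below for the real thing):
 *
 *    while (!ready_list_empty()) {
 *       n = pick_best_candidate();               // heuristic choice
 *       time = MAX2(time, n->unblocked_time);    // stall until it's ready
 *       emit(n->inst);
 *       time += issue_time(n->inst);
 *       for each child c of n {
 *          c->unblocked_time = MAX2(c->unblocked_time,
 *                                   time + edge_latency(n, c));
 *          if (--c->parent_count == 0)
 *             mark_ready(c);
 *       }
 *    }
 */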
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85 * This is the sum of the instruction's latency plus the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89 };
90
91 void
92 schedule_node::set_latency_gen4()
93 {
94 int chans = 8;
95 int math_latency = 22;
96
97 switch (inst->opcode) {
98 case SHADER_OPCODE_RCP:
99 this->latency = 1 * chans * math_latency;
100 break;
101 case SHADER_OPCODE_RSQ:
102 this->latency = 2 * chans * math_latency;
103 break;
104 case SHADER_OPCODE_INT_QUOTIENT:
105 case SHADER_OPCODE_SQRT:
106 case SHADER_OPCODE_LOG2:
107 /* full precision log. partial is 2. */
108 this->latency = 3 * chans * math_latency;
109 break;
110 case SHADER_OPCODE_INT_REMAINDER:
111 case SHADER_OPCODE_EXP2:
112 /* full precision. partial is 3, same throughput. */
113 this->latency = 4 * chans * math_latency;
114 break;
115 case SHADER_OPCODE_POW:
116 this->latency = 8 * chans * math_latency;
117 break;
118 case SHADER_OPCODE_SIN:
119 case SHADER_OPCODE_COS:
120 /* minimum latency, max is 12 rounds. */
121 this->latency = 5 * chans * math_latency;
122 break;
123 default:
124 this->latency = 2;
125 break;
126 }
127 }
128
129 void
130 schedule_node::set_latency_gen7(bool is_haswell)
131 {
132 switch (inst->opcode) {
133 case BRW_OPCODE_MAD:
134 /* 2 cycles
135 * (since the last two src operands are in different register banks):
136 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
137 *
138 * 3 cycles on IVB, 4 on HSW
139 * (since the last two src operands are in the same register bank):
140 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
141 *
142 * 18 cycles on IVB, 16 on HSW
143 * (since the last two src operands are in different register banks):
144 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
145 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
146 *
147 * 20 cycles on IVB, 18 on HSW
148 * (since the last two src operands are in the same register bank):
149 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
150 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
151 */
152
153 /* Our register allocator doesn't know about register banks, so use the
154 * higher latency.
155 */
156 latency = is_haswell ? 16 : 18;
157 break;
158
159 case BRW_OPCODE_LRP:
160 /* 2 cycles
161 * (since the last two src operands are in different register banks):
162 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
163 *
164 * 3 cycles on IVB, 4 on HSW
165 * (since the last two src operands are in the same register bank):
166 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
167 *
168 * 16 cycles on IVB, 14 on HSW
169 * (since the last two src operands are in different register banks):
170 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
171 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
172 *
173 * 16 cycles
174 * (since the last two src operands are in the same register bank):
175 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
176 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
177 */
178
179 /* Our register allocator doesn't know about register banks, so use the
180 * higher latency.
181 */
182 latency = 14;
183 break;
184
185 case SHADER_OPCODE_RCP:
186 case SHADER_OPCODE_RSQ:
187 case SHADER_OPCODE_SQRT:
188 case SHADER_OPCODE_LOG2:
189 case SHADER_OPCODE_EXP2:
190 case SHADER_OPCODE_SIN:
191 case SHADER_OPCODE_COS:
192 /* 2 cycles:
193 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
194 *
195 * 18 cycles:
196 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
197 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
198 *
199 * Same for exp2, log2, rsq, sqrt, sin, cos.
200 */
201 latency = is_haswell ? 14 : 16;
202 break;
203
204 case SHADER_OPCODE_POW:
205 /* 2 cycles:
206 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
207 *
208 * 26 cycles:
209 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
210 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
211 */
212 latency = is_haswell ? 22 : 24;
213 break;
214
215 case SHADER_OPCODE_TEX:
216 case SHADER_OPCODE_TXD:
217 case SHADER_OPCODE_TXF:
218 case SHADER_OPCODE_TXL:
219 /* 18 cycles:
220 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
221 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
222 * send(8) g4<1>UW g114<8,8,1>F
223 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
224 *
225 * 697 +/-49 cycles (min 610, n=26):
226 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
227 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
228 * send(8) g4<1>UW g114<8,8,1>F
229 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
230 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
231 *
232     *    So our first texture load of the batchbuffer takes ~700 cycles,
233     *    since the caches are cold at that point.
234 *
235 * 840 +/- 92 cycles (min 720, n=25):
236 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
237 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
238 * send(8) g4<1>UW g114<8,8,1>F
239 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
240 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
241 * send(8) g4<1>UW g114<8,8,1>F
242 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
243 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
244 *
245 * On the second load, it takes just an extra ~140 cycles, and after
246 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
247 *
248 * 683 +/- 49 cycles (min = 602, n=47):
249 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
250 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
251 * send(8) g4<1>UW g114<8,8,1>F
252 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
253 * send(8) g50<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
256 *
257 * The unit appears to be pipelined, since this matches up with the
258 * cache-cold case, despite there being two loads here. If you replace
259 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
260 *
261 * So, take some number between the cache-hot 140 cycles and the
262 * cache-cold 700 cycles. No particular tuning was done on this.
263 *
264 * I haven't done significant testing of the non-TEX opcodes. TXL at
265 * least looked about the same as TEX.
266 */
267 latency = 200;
268 break;
269
270 case SHADER_OPCODE_TXS:
271 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
272 * cycles (n=15):
273 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
274 * send(8) g6<1>UW g114<8,8,1>F
275 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
276 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
277 *
278 *
279 * Two loads was 535 +/- 30 cycles (n=19):
280 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
281 * send(16) g6<1>UW g114<8,8,1>F
282 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
283 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
284 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
285 * send(16) g8<1>UW g114<8,8,1>F
286 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
287 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
288 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
289 *
290     *    Since the only caches that should matter are the instruction/state
291     *    caches containing the surface state, assume that we always have hot
292     *    caches.
293 */
294 latency = 100;
295 break;
296
297 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
298 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
299 case VS_OPCODE_PULL_CONSTANT_LOAD:
300 /* testing using varying-index pull constants:
301 *
302 * 16 cycles:
303 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
304 * send(8) g4<1>F g4<8,8,1>D
305 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
306 *
307 * ~480 cycles:
308 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
309 * send(8) g4<1>F g4<8,8,1>D
310 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
311 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
312 *
313 * ~620 cycles:
314 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
315 * send(8) g4<1>F g4<8,8,1>D
316 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
317 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
318 * send(8) g4<1>F g4<8,8,1>D
319 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
320 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
321 *
322 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
323 * about 460. We expect to mostly be cache hot, so pick something more
324 * in that direction.
325 */
326 latency = 200;
327 break;
328
329 case SHADER_OPCODE_GEN7_SCRATCH_READ:
330 /* Testing a load from offset 0, that had been previously written:
331 *
332 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
333 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
334 *
335 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
336 * then around 140. Presumably this is cache hit vs miss.
337 */
338 latency = 50;
339 break;
340
341 case SHADER_OPCODE_UNTYPED_ATOMIC:
342 case SHADER_OPCODE_TYPED_ATOMIC:
343 /* Test code:
344 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
345 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
346 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
347 * send(8) g4<1>ud g112<8,8,1>ud
348 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
349 *
350 * Running it 100 times as fragment shader on a 128x128 quad
351 * gives an average latency of 13867 cycles per atomic op,
352 * standard deviation 3%. Note that this is a rather
353 * pessimistic estimate, the actual latency in cases with few
354 * collisions between threads and favorable pipelining has been
355 * seen to be reduced by a factor of 100.
356 */
357 latency = 14000;
358 break;
359
360 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
361 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
362 case SHADER_OPCODE_TYPED_SURFACE_READ:
363 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
364 /* Test code:
365 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
366 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
367 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
368 * send(8) g4<1>UD g112<8,8,1>UD
369 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
370 * .
371 * . [repeats 8 times]
372 * .
373 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
374 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
375 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
376 * send(8) g4<1>UD g112<8,8,1>UD
377 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
378 *
379 * Running it 100 times as fragment shader on a 128x128 quad
380 * gives an average latency of 583 cycles per surface read,
381 * standard deviation 0.9%.
382 */
383 latency = is_haswell ? 300 : 600;
384 break;
385
386 default:
387 /* 2 cycles:
388 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
389 *
390 * 16 cycles:
391 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
392 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
393 */
394 latency = 14;
395 break;
396 }
397 }
398
399 class instruction_scheduler {
400 public:
401 instruction_scheduler(backend_shader *s, int grf_count,
402 int hw_reg_count, int block_count,
403 instruction_scheduler_mode mode)
404 {
405 this->bs = s;
406 this->mem_ctx = ralloc_context(NULL);
407 this->grf_count = grf_count;
408 this->hw_reg_count = hw_reg_count;
409 this->instructions.make_empty();
410 this->instructions_to_schedule = 0;
411 this->post_reg_alloc = (mode == SCHEDULE_POST);
412 this->mode = mode;
413 this->time = 0;
414 if (!post_reg_alloc) {
415 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
416
417 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
418 for (int i = 0; i < block_count; i++)
419 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
420 BITSET_WORDS(grf_count));
421
422 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
423 for (int i = 0; i < block_count; i++)
424 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
425 BITSET_WORDS(grf_count));
426
427 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
428 for (int i = 0; i < block_count; i++)
429 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
430 BITSET_WORDS(hw_reg_count));
431
432 this->written = rzalloc_array(mem_ctx, bool, grf_count);
433
434 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
435
436 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
437 } else {
438 this->reg_pressure_in = NULL;
439 this->livein = NULL;
440 this->liveout = NULL;
441 this->hw_liveout = NULL;
442 this->written = NULL;
443 this->reads_remaining = NULL;
444 this->hw_reads_remaining = NULL;
445 }
446 }
447
448 ~instruction_scheduler()
449 {
450 ralloc_free(this->mem_ctx);
451 }
452 void add_barrier_deps(schedule_node *n);
453 void add_dep(schedule_node *before, schedule_node *after, int latency);
454 void add_dep(schedule_node *before, schedule_node *after);
455
456 void run(cfg_t *cfg);
457 void add_insts_from_block(bblock_t *block);
458 void compute_delay(schedule_node *node);
459 virtual void calculate_deps() = 0;
460 virtual schedule_node *choose_instruction_to_schedule() = 0;
461
462 /**
463 * Returns how many cycles it takes the instruction to issue.
464 *
465     * Instructions in Gen hardware are handled one SIMD4 vector at a time,
466 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
467 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
468 */
469 virtual int issue_time(backend_instruction *inst) = 0;
470
471 virtual void count_reads_remaining(backend_instruction *inst) = 0;
472 virtual void setup_liveness(cfg_t *cfg) = 0;
473 virtual void update_register_pressure(backend_instruction *inst) = 0;
474 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
475
476 void schedule_instructions(bblock_t *block);
477
478 void *mem_ctx;
479
480 bool post_reg_alloc;
481 int instructions_to_schedule;
482 int grf_count;
483 int hw_reg_count;
484 int time;
485 int reg_pressure;
486 int block_idx;
487 exec_list instructions;
488 backend_shader *bs;
489
490 instruction_scheduler_mode mode;
491
492 /*
493 * The register pressure at the beginning of each basic block.
494 */
495
496 int *reg_pressure_in;
497
498 /*
499     * The virtual GRFs whose live ranges overlap the beginning of each basic block.
500 */
501
502 BITSET_WORD **livein;
503
504 /*
505     * The virtual GRFs whose live ranges overlap the end of each basic block.
506 */
507
508 BITSET_WORD **liveout;
509
510 /*
511     * The hardware GRFs whose live ranges overlap the end of each basic block.
512 */
513
514 BITSET_WORD **hw_liveout;
515
516 /*
517 * Whether we've scheduled a write for this virtual GRF yet.
518 */
519
520 bool *written;
521
522 /*
523 * How many reads we haven't scheduled for this virtual GRF yet.
524 */
525
526 int *reads_remaining;
527
528 /*
529 * How many reads we haven't scheduled for this hardware GRF yet.
530 */
531
532 int *hw_reads_remaining;
533 };
534
535 class fs_instruction_scheduler : public instruction_scheduler
536 {
537 public:
538 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
539 int block_count,
540 instruction_scheduler_mode mode);
541 void calculate_deps();
542 bool is_compressed(fs_inst *inst);
543 schedule_node *choose_instruction_to_schedule();
544 int issue_time(backend_instruction *inst);
545 fs_visitor *v;
546
547 void count_reads_remaining(backend_instruction *inst);
548 void setup_liveness(cfg_t *cfg);
549 void update_register_pressure(backend_instruction *inst);
550 int get_register_pressure_benefit(backend_instruction *inst);
551 };
552
553 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
554 int grf_count, int hw_reg_count,
555 int block_count,
556 instruction_scheduler_mode mode)
557 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
558 v(v)
559 {
560 }
561
562 static bool
563 is_src_duplicate(fs_inst *inst, int src)
564 {
565 for (int i = 0; i < src; i++)
566 if (inst->src[i].equals(inst->src[src]))
567 return true;
568
569 return false;
570 }
571
572 void
573 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
574 {
575 fs_inst *inst = (fs_inst *)be;
576
577 if (!reads_remaining)
578 return;
579
580 for (int i = 0; i < inst->sources; i++) {
581 if (is_src_duplicate(inst, i))
582 continue;
583
584 if (inst->src[i].file == VGRF) {
585 reads_remaining[inst->src[i].nr]++;
586 } else if (inst->src[i].file == FIXED_GRF) {
587 if (inst->src[i].nr >= hw_reg_count)
588 continue;
589
590 for (int j = 0; j < inst->regs_read(i); j++)
591 hw_reads_remaining[inst->src[i].nr + j]++;
592 }
593 }
594 }
595
596 void
597 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
598 {
599 /* First, compute liveness on a per-GRF level using the in/out sets from
600 * liveness calculation.
601 */
602 for (int block = 0; block < cfg->num_blocks; block++) {
603 for (int i = 0; i < v->live_intervals->num_vars; i++) {
604 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
605 int vgrf = v->live_intervals->vgrf_from_var[i];
606 if (!BITSET_TEST(livein[block], vgrf)) {
607 reg_pressure_in[block] += v->alloc.sizes[vgrf];
608 BITSET_SET(livein[block], vgrf);
609 }
610 }
611
612 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
613 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
614 }
615 }
616
617 /* Now, extend the live in/live out sets for when a range crosses a block
618 * boundary, which matches what our register allocator/interference code
619     * does to account for force_writemask_all and incompatible exec_masks.
620 */
621 for (int block = 0; block < cfg->num_blocks - 1; block++) {
622 for (int i = 0; i < grf_count; i++) {
623 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
624 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
625 if (!BITSET_TEST(livein[block + 1], i)) {
626 reg_pressure_in[block + 1] += v->alloc.sizes[i];
627 BITSET_SET(livein[block + 1], i);
628 }
629
630 BITSET_SET(liveout[block], i);
631 }
632 }
633 }
634
635 int payload_last_use_ip[hw_reg_count];
636 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
637
638 for (int i = 0; i < hw_reg_count; i++) {
639 if (payload_last_use_ip[i] == -1)
640 continue;
641
642 for (int block = 0; block < cfg->num_blocks; block++) {
643 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
644 reg_pressure_in[block]++;
645
646 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
647 BITSET_SET(hw_liveout[block], i);
648 }
649 }
650 }
651
652 void
653 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
654 {
655 fs_inst *inst = (fs_inst *)be;
656
657 if (!reads_remaining)
658 return;
659
660 if (inst->dst.file == VGRF) {
661 written[inst->dst.nr] = true;
662 }
663
664 for (int i = 0; i < inst->sources; i++) {
665 if (is_src_duplicate(inst, i))
666 continue;
667
668 if (inst->src[i].file == VGRF) {
669 reads_remaining[inst->src[i].nr]--;
670 } else if (inst->src[i].file == FIXED_GRF &&
671 inst->src[i].nr < hw_reg_count) {
672 for (int off = 0; off < inst->regs_read(i); off++)
673 hw_reads_remaining[inst->src[i].nr + off]--;
674 }
675 }
676 }
677
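/* Estimate how much scheduling this instruction next would help register
 * pressure: a destination VGRF that isn't live into this block and hasn't
 * been written yet counts against the benefit (it starts a new live range),
 * while consuming the last remaining read of a VGRF or fixed GRF that isn't
 * live out of the block counts in its favor (it ends one).  A positive
 * return value means pressure is expected to drop.
 */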
678 int
679 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
680 {
681 fs_inst *inst = (fs_inst *)be;
682 int benefit = 0;
683
684 if (inst->dst.file == VGRF) {
685 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
686 !written[inst->dst.nr])
687 benefit -= v->alloc.sizes[inst->dst.nr];
688 }
689
690 for (int i = 0; i < inst->sources; i++) {
691 if (is_src_duplicate(inst, i))
692 continue;
693
694 if (inst->src[i].file == VGRF &&
695 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
696 reads_remaining[inst->src[i].nr] == 1)
697 benefit += v->alloc.sizes[inst->src[i].nr];
698
699 if (inst->src[i].file == FIXED_GRF &&
700 inst->src[i].nr < hw_reg_count) {
701 for (int off = 0; off < inst->regs_read(i); off++) {
702 int reg = inst->src[i].nr + off;
703 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
704 hw_reads_remaining[reg] == 1) {
705 benefit++;
706 }
707 }
708 }
709 }
710
711 return benefit;
712 }
713
714 class vec4_instruction_scheduler : public instruction_scheduler
715 {
716 public:
717 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
718 void calculate_deps();
719 schedule_node *choose_instruction_to_schedule();
720 int issue_time(backend_instruction *inst);
721 vec4_visitor *v;
722
723 void count_reads_remaining(backend_instruction *inst);
724 void setup_liveness(cfg_t *cfg);
725 void update_register_pressure(backend_instruction *inst);
726 int get_register_pressure_benefit(backend_instruction *inst);
727 };
728
729 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
730 int grf_count)
731 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
732 v(v)
733 {
734 }
735
736 void
737 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
748 {
749 }
750
751 int
752 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
753 {
754 return 0;
755 }
756
757 schedule_node::schedule_node(backend_instruction *inst,
758 instruction_scheduler *sched)
759 {
760 const struct brw_device_info *devinfo = sched->bs->devinfo;
761
762 this->inst = inst;
763 this->child_array_size = 0;
764 this->children = NULL;
765 this->child_latency = NULL;
766 this->child_count = 0;
767 this->parent_count = 0;
768 this->unblocked_time = 0;
769 this->cand_generation = 0;
770 this->delay = 0;
771
772 /* We can't measure Gen6 timings directly but expect them to be much
773 * closer to Gen7 than Gen4.
774 */
775 if (!sched->post_reg_alloc)
776 this->latency = 1;
777 else if (devinfo->gen >= 6)
778 set_latency_gen7(devinfo->is_haswell);
779 else
780 set_latency_gen4();
781 }
782
783 void
784 instruction_scheduler::add_insts_from_block(bblock_t *block)
785 {
786 foreach_inst_in_block(backend_instruction, inst, block) {
787 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
788
789 instructions.push_tail(n);
790 }
791
792 this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
793 }
794
795 /** Recursive computation of the delay member of a node. */
796 void
797 instruction_scheduler::compute_delay(schedule_node *n)
798 {
799 if (!n->child_count) {
800 n->delay = issue_time(n->inst);
801 } else {
802 for (int i = 0; i < n->child_count; i++) {
803 if (!n->children[i]->delay)
804 compute_delay(n->children[i]);
805 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
806 }
807 }
808 }
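/* For example, with made-up numbers: given a chain A -> B -> C where A and B
 * each have latency 14 and C is a leaf with an issue_time of 2, this yields
 * delay(C) = 2, delay(B) = 14 + 2 = 16 and delay(A) = 14 + 16 = 30, i.e. the
 * length of the longest latency path from each node to the end of the block.
 */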
809
810 /**
811 * Add a dependency between two instruction nodes.
812 *
813 * The @after node will be scheduled after @before. We will try to
814 * schedule it @latency cycles after @before, but no guarantees there.
815 */
816 void
817 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
818 int latency)
819 {
820 if (!before || !after)
821 return;
822
823 assert(before != after);
824
825 for (int i = 0; i < before->child_count; i++) {
826 if (before->children[i] == after) {
827 before->child_latency[i] = MAX2(before->child_latency[i], latency);
828 return;
829 }
830 }
831
832 if (before->child_array_size <= before->child_count) {
833 if (before->child_array_size < 16)
834 before->child_array_size = 16;
835 else
836 before->child_array_size *= 2;
837
838 before->children = reralloc(mem_ctx, before->children,
839 schedule_node *,
840 before->child_array_size);
841 before->child_latency = reralloc(mem_ctx, before->child_latency,
842 int, before->child_array_size);
843 }
844
845 before->children[before->child_count] = after;
846 before->child_latency[before->child_count] = latency;
847 before->child_count++;
848 after->parent_count++;
849 }
850
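/* Convenience overload: the edge latency defaults to the producing node's own
 * latency, which is what RAW dependencies want.  WAR and barrier dependencies
 * go through the three-argument version with a small explicit latency
 * (usually 0) instead.
 */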
851 void
852 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
853 {
854 if (!before)
855 return;
856
857 add_dep(before, after, before->latency);
858 }
859
860 /**
861 * Sometimes we really want this node to execute after everything that
862 * was before it and before everything that followed it. This adds
863 * the deps to do so.
864 */
865 void
866 instruction_scheduler::add_barrier_deps(schedule_node *n)
867 {
868 schedule_node *prev = (schedule_node *)n->prev;
869 schedule_node *next = (schedule_node *)n->next;
870
871 if (prev) {
872 while (!prev->is_head_sentinel()) {
873 add_dep(prev, n, 0);
874 prev = (schedule_node *)prev->prev;
875 }
876 }
877
878 if (next) {
879 while (!next->is_tail_sentinel()) {
880 add_dep(n, next, 0);
881 next = (schedule_node *)next->next;
882 }
883 }
884 }
885
886 /* Instruction scheduling needs to be aware of when a compressed (SIMD16)
887  * MRF write actually writes two MRFs.
888 */
889 bool
890 fs_instruction_scheduler::is_compressed(fs_inst *inst)
891 {
892 return inst->exec_size == 16;
893 }
894
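/* Instructions that must stay ordered with respect to everything before and
 * after them in the block.  Note that FB write messages are deliberately not
 * treated as barriers merely for having side effects (per the commit
 * "i965: Don't add barrier deps for FB write messages"); a final EOT write is
 * still caught by the eot check, and non-EOT FB writes are ordered only by
 * the register, MRF and flag dependencies computed below.
 */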
895 static bool
896 is_scheduling_barrier(const fs_inst *inst)
897 {
898 return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
899 inst->is_control_flow() ||
900 inst->eot ||
901 (inst->has_side_effects() && inst->opcode != FS_OPCODE_FB_WRITE);
902 }
903
904 void
905 fs_instruction_scheduler::calculate_deps()
906 {
907 /* Pre-register-allocation, this tracks the last write per VGRF offset.
908 * After register allocation, reg_offsets are gone and we track individual
909 * GRF registers.
910 */
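   /* The factor of 16 assumes that, pre-register-allocation, reg_offset plus
    * the number of registers read or written never exceeds 16 for any
    * virtual GRF.
    */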
911 schedule_node *last_grf_write[grf_count * 16];
912 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
913 schedule_node *last_conditional_mod[2] = { NULL, NULL };
914 schedule_node *last_accumulator_write = NULL;
915 /* Fixed HW registers are assumed to be separate from the virtual
916 * GRFs, so they can be tracked separately. We don't really write
917 * to fixed GRFs much, so don't bother tracking them on a more
918 * granular level.
919 */
920 schedule_node *last_fixed_grf_write = NULL;
921
922 memset(last_grf_write, 0, sizeof(last_grf_write));
923 memset(last_mrf_write, 0, sizeof(last_mrf_write));
924
925 /* top-to-bottom dependencies: RAW and WAW. */
926 foreach_in_list(schedule_node, n, &instructions) {
927 fs_inst *inst = (fs_inst *)n->inst;
928
929 if (is_scheduling_barrier(inst))
930 add_barrier_deps(n);
931
932 /* read-after-write deps. */
933 for (int i = 0; i < inst->sources; i++) {
934 if (inst->src[i].file == VGRF) {
935 if (post_reg_alloc) {
936 for (int r = 0; r < inst->regs_read(i); r++)
937 add_dep(last_grf_write[inst->src[i].nr + r], n);
938 } else {
939 for (int r = 0; r < inst->regs_read(i); r++) {
940 add_dep(last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], n);
941 }
942 }
943 } else if (inst->src[i].file == FIXED_GRF) {
944 if (post_reg_alloc) {
945 for (int r = 0; r < inst->regs_read(i); r++)
946 add_dep(last_grf_write[inst->src[i].nr + r], n);
947 } else {
948 add_dep(last_fixed_grf_write, n);
949 }
950 } else if (inst->src[i].is_accumulator()) {
951 add_dep(last_accumulator_write, n);
952 } else if (inst->src[i].file == ARF) {
953 add_barrier_deps(n);
954 }
955 }
956
957 if (inst->base_mrf != -1) {
958 for (int i = 0; i < inst->mlen; i++) {
959 /* It looks like the MRF regs are released in the send
960 * instruction once it's sent, not when the result comes
961 * back.
962 */
963 add_dep(last_mrf_write[inst->base_mrf + i], n);
964 }
965 }
966
967 if (inst->reads_flag()) {
968 add_dep(last_conditional_mod[inst->flag_subreg], n);
969 }
970
971 if (inst->reads_accumulator_implicitly()) {
972 add_dep(last_accumulator_write, n);
973 }
974
975 /* write-after-write deps. */
976 if (inst->dst.file == VGRF) {
977 if (post_reg_alloc) {
978 for (int r = 0; r < inst->regs_written; r++) {
979 add_dep(last_grf_write[inst->dst.nr + r], n);
980 last_grf_write[inst->dst.nr + r] = n;
981 }
982 } else {
983 for (int r = 0; r < inst->regs_written; r++) {
984 add_dep(last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r], n);
985 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
986 }
987 }
988 } else if (inst->dst.file == MRF) {
989 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
990
991 add_dep(last_mrf_write[reg], n);
992 last_mrf_write[reg] = n;
993 if (is_compressed(inst)) {
994 if (inst->dst.nr & BRW_MRF_COMPR4)
995 reg += 4;
996 else
997 reg++;
998 add_dep(last_mrf_write[reg], n);
999 last_mrf_write[reg] = n;
1000 }
1001 } else if (inst->dst.file == FIXED_GRF) {
1002 if (post_reg_alloc) {
1003 for (int r = 0; r < inst->regs_written; r++)
1004 last_grf_write[inst->dst.nr + r] = n;
1005 } else {
1006 last_fixed_grf_write = n;
1007 }
1008 } else if (inst->dst.is_accumulator()) {
1009 add_dep(last_accumulator_write, n);
1010 last_accumulator_write = n;
1011 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1012 add_barrier_deps(n);
1013 }
1014
1015 if (inst->mlen > 0 && inst->base_mrf != -1) {
1016 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1017 add_dep(last_mrf_write[inst->base_mrf + i], n);
1018 last_mrf_write[inst->base_mrf + i] = n;
1019 }
1020 }
1021
1022 if (inst->writes_flag()) {
1023 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1024 last_conditional_mod[inst->flag_subreg] = n;
1025 }
1026
1027 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1028 !inst->dst.is_accumulator()) {
1029 add_dep(last_accumulator_write, n);
1030 last_accumulator_write = n;
1031 }
1032 }
1033
1034 /* bottom-to-top dependencies: WAR */
1035 memset(last_grf_write, 0, sizeof(last_grf_write));
1036 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1037 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1038 last_accumulator_write = NULL;
1039 last_fixed_grf_write = NULL;
1040
1041 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1042 fs_inst *inst = (fs_inst *)n->inst;
1043
1044 /* write-after-read deps. */
1045 for (int i = 0; i < inst->sources; i++) {
1046 if (inst->src[i].file == VGRF) {
1047 if (post_reg_alloc) {
1048 for (int r = 0; r < inst->regs_read(i); r++)
1049 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1050 } else {
1051 for (int r = 0; r < inst->regs_read(i); r++) {
1052 add_dep(n, last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], 0);
1053 }
1054 }
1055 } else if (inst->src[i].file == FIXED_GRF) {
1056 if (post_reg_alloc) {
1057 for (int r = 0; r < inst->regs_read(i); r++)
1058 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1059 } else {
1060 add_dep(n, last_fixed_grf_write, 0);
1061 }
1062 } else if (inst->src[i].is_accumulator()) {
1063 add_dep(n, last_accumulator_write, 0);
1064 } else if (inst->src[i].file == ARF) {
1065 add_barrier_deps(n);
1066 }
1067 }
1068
1069 if (inst->base_mrf != -1) {
1070 for (int i = 0; i < inst->mlen; i++) {
1071 /* It looks like the MRF regs are released in the send
1072 * instruction once it's sent, not when the result comes
1073 * back.
1074 */
1075 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1076 }
1077 }
1078
1079 if (inst->reads_flag()) {
1080 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1081 }
1082
1083 if (inst->reads_accumulator_implicitly()) {
1084 add_dep(n, last_accumulator_write);
1085 }
1086
1087 /* Update the things this instruction wrote, so earlier reads
1088 * can mark this as WAR dependency.
1089 */
1090 if (inst->dst.file == VGRF) {
1091 if (post_reg_alloc) {
1092 for (int r = 0; r < inst->regs_written; r++)
1093 last_grf_write[inst->dst.nr + r] = n;
1094 } else {
1095 for (int r = 0; r < inst->regs_written; r++) {
1096 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1097 }
1098 }
1099 } else if (inst->dst.file == MRF) {
1100 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1101
1102 last_mrf_write[reg] = n;
1103
1104 if (is_compressed(inst)) {
1105 if (inst->dst.nr & BRW_MRF_COMPR4)
1106 reg += 4;
1107 else
1108 reg++;
1109
1110 last_mrf_write[reg] = n;
1111 }
1112 } else if (inst->dst.file == FIXED_GRF) {
1113 if (post_reg_alloc) {
1114 for (int r = 0; r < inst->regs_written; r++)
1115 last_grf_write[inst->dst.nr + r] = n;
1116 } else {
1117 last_fixed_grf_write = n;
1118 }
1119 } else if (inst->dst.is_accumulator()) {
1120 last_accumulator_write = n;
1121 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1122 add_barrier_deps(n);
1123 }
1124
1125 if (inst->mlen > 0 && inst->base_mrf != -1) {
1126 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1127 last_mrf_write[inst->base_mrf + i] = n;
1128 }
1129 }
1130
1131 if (inst->writes_flag()) {
1132 last_conditional_mod[inst->flag_subreg] = n;
1133 }
1134
1135 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1136 last_accumulator_write = n;
1137 }
1138 }
1139 }
1140
1141 static bool
1142 is_scheduling_barrier(const vec4_instruction *inst)
1143 {
1144 return inst->is_control_flow() ||
1145 inst->has_side_effects();
1146 }
1147
1148 void
1149 vec4_instruction_scheduler::calculate_deps()
1150 {
1151 schedule_node *last_grf_write[grf_count];
1152 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1153 schedule_node *last_conditional_mod = NULL;
1154 schedule_node *last_accumulator_write = NULL;
1155 /* Fixed HW registers are assumed to be separate from the virtual
1156 * GRFs, so they can be tracked separately. We don't really write
1157 * to fixed GRFs much, so don't bother tracking them on a more
1158 * granular level.
1159 */
1160 schedule_node *last_fixed_grf_write = NULL;
1161
1162 memset(last_grf_write, 0, sizeof(last_grf_write));
1163 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1164
1165 /* top-to-bottom dependencies: RAW and WAW. */
1166 foreach_in_list(schedule_node, n, &instructions) {
1167 vec4_instruction *inst = (vec4_instruction *)n->inst;
1168
1169 if (is_scheduling_barrier(inst))
1170 add_barrier_deps(n);
1171
1172 /* read-after-write deps. */
1173 for (int i = 0; i < 3; i++) {
1174 if (inst->src[i].file == VGRF) {
1175 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1176 add_dep(last_grf_write[inst->src[i].nr + j], n);
1177 } else if (inst->src[i].file == FIXED_GRF) {
1178 add_dep(last_fixed_grf_write, n);
1179 } else if (inst->src[i].is_accumulator()) {
1180 assert(last_accumulator_write);
1181 add_dep(last_accumulator_write, n);
1182 } else if (inst->src[i].file == ARF) {
1183 add_barrier_deps(n);
1184 }
1185 }
1186
1187 if (!inst->is_send_from_grf()) {
1188 for (int i = 0; i < inst->mlen; i++) {
1189 /* It looks like the MRF regs are released in the send
1190 * instruction once it's sent, not when the result comes
1191 * back.
1192 */
1193 add_dep(last_mrf_write[inst->base_mrf + i], n);
1194 }
1195 }
1196
1197 if (inst->reads_flag()) {
1198 assert(last_conditional_mod);
1199 add_dep(last_conditional_mod, n);
1200 }
1201
1202 if (inst->reads_accumulator_implicitly()) {
1203 assert(last_accumulator_write);
1204 add_dep(last_accumulator_write, n);
1205 }
1206
1207 /* write-after-write deps. */
1208 if (inst->dst.file == VGRF) {
1209 for (unsigned j = 0; j < inst->regs_written; ++j) {
1210 add_dep(last_grf_write[inst->dst.nr + j], n);
1211 last_grf_write[inst->dst.nr + j] = n;
1212 }
1213 } else if (inst->dst.file == MRF) {
1214 add_dep(last_mrf_write[inst->dst.nr], n);
1215 last_mrf_write[inst->dst.nr] = n;
1216 } else if (inst->dst.file == FIXED_GRF) {
1217 last_fixed_grf_write = n;
1218 } else if (inst->dst.is_accumulator()) {
1219 add_dep(last_accumulator_write, n);
1220 last_accumulator_write = n;
1221 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1222 add_barrier_deps(n);
1223 }
1224
1225 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1226 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1227 add_dep(last_mrf_write[inst->base_mrf + i], n);
1228 last_mrf_write[inst->base_mrf + i] = n;
1229 }
1230 }
1231
1232 if (inst->writes_flag()) {
1233 add_dep(last_conditional_mod, n, 0);
1234 last_conditional_mod = n;
1235 }
1236
1237 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1238 !inst->dst.is_accumulator()) {
1239 add_dep(last_accumulator_write, n);
1240 last_accumulator_write = n;
1241 }
1242 }
1243
1244 /* bottom-to-top dependencies: WAR */
1245 memset(last_grf_write, 0, sizeof(last_grf_write));
1246 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1247 last_conditional_mod = NULL;
1248 last_accumulator_write = NULL;
1249 last_fixed_grf_write = NULL;
1250
1251 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1252 vec4_instruction *inst = (vec4_instruction *)n->inst;
1253
1254 /* write-after-read deps. */
1255 for (int i = 0; i < 3; i++) {
1256 if (inst->src[i].file == VGRF) {
1257 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1258 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1259 } else if (inst->src[i].file == FIXED_GRF) {
1260 add_dep(n, last_fixed_grf_write);
1261 } else if (inst->src[i].is_accumulator()) {
1262 add_dep(n, last_accumulator_write);
1263 } else if (inst->src[i].file == ARF) {
1264 add_barrier_deps(n);
1265 }
1266 }
1267
1268 if (!inst->is_send_from_grf()) {
1269 for (int i = 0; i < inst->mlen; i++) {
1270 /* It looks like the MRF regs are released in the send
1271 * instruction once it's sent, not when the result comes
1272 * back.
1273 */
1274 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1275 }
1276 }
1277
1278 if (inst->reads_flag()) {
1279 add_dep(n, last_conditional_mod);
1280 }
1281
1282 if (inst->reads_accumulator_implicitly()) {
1283 add_dep(n, last_accumulator_write);
1284 }
1285
1286 /* Update the things this instruction wrote, so earlier reads
1287 * can mark this as WAR dependency.
1288 */
1289 if (inst->dst.file == VGRF) {
1290 for (unsigned j = 0; j < inst->regs_written; ++j)
1291 last_grf_write[inst->dst.nr + j] = n;
1292 } else if (inst->dst.file == MRF) {
1293 last_mrf_write[inst->dst.nr] = n;
1294 } else if (inst->dst.file == FIXED_GRF) {
1295 last_fixed_grf_write = n;
1296 } else if (inst->dst.is_accumulator()) {
1297 last_accumulator_write = n;
1298 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1299 add_barrier_deps(n);
1300 }
1301
1302 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1303 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1304 last_mrf_write[inst->base_mrf + i] = n;
1305 }
1306 }
1307
1308 if (inst->writes_flag()) {
1309 last_conditional_mod = n;
1310 }
1311
1312 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1313 last_accumulator_write = n;
1314 }
1315 }
1316 }
1317
1318 schedule_node *
1319 fs_instruction_scheduler::choose_instruction_to_schedule()
1320 {
1321 schedule_node *chosen = NULL;
1322
1323 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1324 int chosen_time = 0;
1325
1326 /* Of the instructions ready to execute or the closest to
1327 * being ready, choose the oldest one.
1328 */
1329 foreach_in_list(schedule_node, n, &instructions) {
1330 if (!chosen || n->unblocked_time < chosen_time) {
1331 chosen = n;
1332 chosen_time = n->unblocked_time;
1333 }
1334 }
1335 } else {
1336 /* Before register allocation, we don't care about the latencies of
1337 * instructions. All we care about is reducing live intervals of
1338 * variables so that we can avoid register spilling, or get SIMD16
1339 * shaders which naturally do a better job of hiding instruction
1340 * latency.
1341 */
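      /* The checks below are tried in priority order: (1) a definite register
       * pressure win, (2) in LIFO mode, the most recently unblocked candidate
       * and, pre-Gen7, non-SEND instructions, (3) the largest delay to the
       * end of the program, and finally (4) original program order.
       */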
1342 foreach_in_list(schedule_node, n, &instructions) {
1343 fs_inst *inst = (fs_inst *)n->inst;
1344
1345 if (!chosen) {
1346 chosen = n;
1347 continue;
1348 }
1349
1350 /* Most important: If we can definitely reduce register pressure, do
1351 * so immediately.
1352 */
1353 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1354 int chosen_register_pressure_benefit =
1355 get_register_pressure_benefit(chosen->inst);
1356
1357 if (register_pressure_benefit > 0 &&
1358 register_pressure_benefit > chosen_register_pressure_benefit) {
1359 chosen = n;
1360 continue;
1361 } else if (chosen_register_pressure_benefit > 0 &&
1362 (register_pressure_benefit <
1363 chosen_register_pressure_benefit)) {
1364 continue;
1365 }
1366
1367 if (mode == SCHEDULE_PRE_LIFO) {
1368 /* Prefer instructions that recently became available for
1369 * scheduling. These are the things that are most likely to
1370 * (eventually) make a variable dead and reduce register pressure.
1371 * Typical register pressure estimates don't work for us because
1372 * most of our pressure comes from texturing, where no single
1373 * instruction to schedule will make a vec4 value dead.
1374 */
1375 if (n->cand_generation > chosen->cand_generation) {
1376 chosen = n;
1377 continue;
1378 } else if (n->cand_generation < chosen->cand_generation) {
1379 continue;
1380 }
1381
1382 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1383 * do this, then because we prefer instructions that just became
1384 * candidates, we'll end up in a pattern of scheduling a SEND,
1385 * then the MRFs for the next SEND, then the next SEND, then the
1386 * MRFs, etc., without ever consuming the results of a send.
1387 */
1388 if (v->devinfo->gen < 7) {
1389 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1390
1391                /* We use regs_written > exec_size / 8 as our test for the kind of
1392                 * send instruction to avoid -- only sends generate many regs, and
1393                 * a single-result send is probably actually reducing register
1394                 * pressure.
1395 */
1396 if (inst->regs_written <= inst->exec_size / 8 &&
1397 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1398 chosen = n;
1399 continue;
1400 } else if (inst->regs_written > chosen_inst->regs_written) {
1401 continue;
1402 }
1403 }
1404 }
1405
1406 /* For instructions pushed on the cands list at the same time, prefer
1407              * the one with the highest delay to the end of the program.  Its
1408              * results are the most likely to be consumable first (such as
1409 * for a large tree of lowered ubo loads, which appear reversed in
1410 * the instruction stream with respect to when they can be consumed).
1411 */
1412 if (n->delay > chosen->delay) {
1413 chosen = n;
1414 continue;
1415 } else if (n->delay < chosen->delay) {
1416 continue;
1417 }
1418
1419 /* If all other metrics are equal, we prefer the first instruction in
1420 * the list (program execution).
1421 */
1422 }
1423 }
1424
1425 return chosen;
1426 }
1427
1428 schedule_node *
1429 vec4_instruction_scheduler::choose_instruction_to_schedule()
1430 {
1431 schedule_node *chosen = NULL;
1432 int chosen_time = 0;
1433
1434 /* Of the instructions ready to execute or the closest to being ready,
1435 * choose the oldest one.
1436 */
1437 foreach_in_list(schedule_node, n, &instructions) {
1438 if (!chosen || n->unblocked_time < chosen_time) {
1439 chosen = n;
1440 chosen_time = n->unblocked_time;
1441 }
1442 }
1443
1444 return chosen;
1445 }
1446
1447 int
1448 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1449 {
1450 if (is_compressed((fs_inst *)inst))
1451 return 4;
1452 else
1453 return 2;
1454 }
1455
1456 int
1457 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1458 {
1459 /* We always execute as two vec4s in parallel. */
1460 return 2;
1461 }
1462
1463 void
1464 instruction_scheduler::schedule_instructions(bblock_t *block)
1465 {
1466 const struct brw_device_info *devinfo = bs->devinfo;
1467 time = 0;
1468 if (!post_reg_alloc)
1469 reg_pressure = reg_pressure_in[block->num];
1470 block_idx = block->num;
1471
1472 /* Remove non-DAG heads from the list. */
1473 foreach_in_list_safe(schedule_node, n, &instructions) {
1474 if (n->parent_count != 0)
1475 n->remove();
1476 }
1477
1478 unsigned cand_generation = 1;
1479 while (!instructions.is_empty()) {
1480 schedule_node *chosen = choose_instruction_to_schedule();
1481
1482 /* Schedule this instruction. */
1483 assert(chosen);
1484 chosen->remove();
1485 chosen->inst->exec_node::remove();
1486 block->instructions.push_tail(chosen->inst);
1487 instructions_to_schedule--;
1488
1489 if (!post_reg_alloc) {
1490 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1491 update_register_pressure(chosen->inst);
1492 }
1493
1494 /* If we expected a delay for scheduling, then bump the clock to reflect
1495 * that. In reality, the hardware will switch to another hyperthread
1496 * and may not return to dispatching our thread for a while even after
1497 * we're unblocked. After this, we have the time when the chosen
1498 * instruction will start executing.
1499 */
1500 time = MAX2(time, chosen->unblocked_time);
1501
1502 /* Update the clock for how soon an instruction could start after the
1503 * chosen one.
1504 */
1505 time += issue_time(chosen->inst);
1506
1507 if (debug) {
1508 fprintf(stderr, "clock %4d, scheduled: ", time);
1509 bs->dump_instruction(chosen->inst);
1510 if (!post_reg_alloc)
1511 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1512 }
1513
1514 /* Now that we've scheduled a new instruction, some of its
1515 * children can be promoted to the list of instructions ready to
1516 * be scheduled. Update the children's unblocked time for this
1517 * DAG edge as we do so.
1518 */
1519 for (int i = chosen->child_count - 1; i >= 0; i--) {
1520 schedule_node *child = chosen->children[i];
1521
1522 child->unblocked_time = MAX2(child->unblocked_time,
1523 time + chosen->child_latency[i]);
1524
1525 if (debug) {
1526 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1527 bs->dump_instruction(child->inst);
1528 }
1529
1530 child->cand_generation = cand_generation;
1531 child->parent_count--;
1532 if (child->parent_count == 0) {
1533 if (debug) {
1534 fprintf(stderr, "\t\tnow available\n");
1535 }
1536 instructions.push_head(child);
1537 }
1538 }
1539 cand_generation++;
1540
1541 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1542 * but it's more limited pre-gen6, so if we send something off to it then
1543 * the next math instruction isn't going to make progress until the first
1544 * is done.
1545 */
1546 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1547 foreach_in_list(schedule_node, n, &instructions) {
1548 if (n->inst->is_math())
1549 n->unblocked_time = MAX2(n->unblocked_time,
1550 time + chosen->latency);
1551 }
1552 }
1553 }
1554
1555 assert(instructions_to_schedule == 0);
1556
1557 block->cycle_count = time;
1558 }
1559
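/* Estimate a whole-program cycle count from the per-block counts, weighting
 * each block by a factor of ten per level of loop nesting on the assumption
 * that each loop runs about ten times.  E.g. a block with a cycle_count of
 * 100 inside two nested loops contributes 100 * 10 * 10 = 10000 to the total.
 */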
1560 static unsigned get_cycle_count(cfg_t *cfg)
1561 {
1562 unsigned count = 0, multiplier = 1;
1563 foreach_block(block, cfg) {
1564 if (block->start()->opcode == BRW_OPCODE_DO)
1565 multiplier *= 10; /* assume that loops execute ~10 times */
1566
1567 count += block->cycle_count * multiplier;
1568
1569 if (block->end()->opcode == BRW_OPCODE_WHILE)
1570 multiplier /= 10;
1571 }
1572
1573 return count;
1574 }
1575
1576 void
1577 instruction_scheduler::run(cfg_t *cfg)
1578 {
1579 if (debug && !post_reg_alloc) {
1580 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1581 post_reg_alloc);
1582 bs->dump_instructions();
1583 }
1584
1585 if (!post_reg_alloc)
1586 setup_liveness(cfg);
1587
1588 foreach_block(block, cfg) {
1589 if (block->end_ip - block->start_ip <= 1)
1590 continue;
1591
1592 if (reads_remaining) {
1593 memset(reads_remaining, 0,
1594 grf_count * sizeof(*reads_remaining));
1595 memset(hw_reads_remaining, 0,
1596 hw_reg_count * sizeof(*hw_reads_remaining));
1597 memset(written, 0, grf_count * sizeof(*written));
1598
1599 foreach_inst_in_block(fs_inst, inst, block)
1600 count_reads_remaining(inst);
1601 }
1602
1603 add_insts_from_block(block);
1604
1605 calculate_deps();
1606
1607 foreach_in_list(schedule_node, n, &instructions) {
1608 compute_delay(n);
1609 }
1610
1611 schedule_instructions(block);
1612 }
1613
1614 if (debug && !post_reg_alloc) {
1615 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1616 post_reg_alloc);
1617 bs->dump_instructions();
1618 }
1619
1620 cfg->cycle_count = get_cycle_count(cfg);
1621 }
1622
1623 void
1624 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1625 {
1626 if (mode != SCHEDULE_POST)
1627 calculate_live_intervals();
1628
1629 int grf_count;
1630 if (mode == SCHEDULE_POST)
1631 grf_count = grf_used;
1632 else
1633 grf_count = alloc.count;
1634
1635 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1636 cfg->num_blocks, mode);
1637 sched.run(cfg);
1638
1639 invalidate_live_intervals();
1640 }
1641
1642 void
1643 vec4_visitor::opt_schedule_instructions()
1644 {
1645 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1646 sched.run(cfg);
1647
1648 invalidate_live_intervals();
1649 }