[mesa.git] src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
36 /** @file brw_schedule_instructions.cpp
37  *
38  * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
47  * since actually computing an optimal schedule is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54 * immediately, and there are a range of heuristic options to choose
55 * from in picking among those.
56 */
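/* A minimal sketch of the loop described above, using hypothetical names
 * rather than the scheduler's actual interfaces:
 *
 *    while (!ready_list_is_empty()) {
 *       schedule_node *n = pick_best_ready_node();   // the heuristic
 *       emit(n->inst);
 *       clock = MAX2(clock, n->unblocked_time) + issue_time(n->inst);
 *       for (each child c of n) {
 *          c->unblocked_time = MAX2(c->unblocked_time, clock + edge_latency);
 *          if (--c->parent_count == 0)
 *             push_onto_ready_list(c);
 *       }
 *    }
 *
 * schedule_instructions() below implements this loop for real.
 */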
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85  * This is the sum of the instruction's latency and the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89
90 /**
91 * Preferred exit node among the (direct or indirect) successors of this
92 * node. Among the scheduler nodes blocked by this node, this will be the
93 * one that may cause earliest program termination, or NULL if none of the
94 * successors is an exit node.
95 */
96 schedule_node *exit;
97
98 bool is_barrier;
99 };
100
101 /**
102 * Lower bound of the scheduling time after which one of the instructions
103 * blocked by this node may lead to program termination.
104 *
105 * exit_unblocked_time() determines a strict partial ordering relation '«' on
106 * the set of scheduler nodes as follows:
107 *
108 * n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
109 *
110 * which can be used to heuristically order nodes according to how early they
111 * can unblock an exit node and lead to program termination.
112 */
113 static inline int
114 exit_unblocked_time(const schedule_node *n)
115 {
116 return n->exit ? n->exit->unblocked_time : INT_MAX;
117 }
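/* Worked example (hypothetical numbers): if node a's preferred exit could be
 * unblocked at cycle 20, node b's at cycle 80, and node c blocks no exit at
 * all (c->exit == NULL, so INT_MAX), then a « b « c and the heuristics below
 * will prefer scheduling a first, since it may let the program terminate
 * earliest.
 */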
118
119 void
120 schedule_node::set_latency_gen4()
121 {
122 int chans = 8;
123 int math_latency = 22;
124
125 switch (inst->opcode) {
126 case SHADER_OPCODE_RCP:
127 this->latency = 1 * chans * math_latency;
128 break;
129 case SHADER_OPCODE_RSQ:
130 this->latency = 2 * chans * math_latency;
131 break;
132 case SHADER_OPCODE_INT_QUOTIENT:
133 case SHADER_OPCODE_SQRT:
134 case SHADER_OPCODE_LOG2:
135 /* full precision log. partial is 2. */
136 this->latency = 3 * chans * math_latency;
137 break;
138 case SHADER_OPCODE_INT_REMAINDER:
139 case SHADER_OPCODE_EXP2:
140 /* full precision. partial is 3, same throughput. */
141 this->latency = 4 * chans * math_latency;
142 break;
143 case SHADER_OPCODE_POW:
144 this->latency = 8 * chans * math_latency;
145 break;
146 case SHADER_OPCODE_SIN:
147 case SHADER_OPCODE_COS:
148 /* minimum latency, max is 12 rounds. */
149 this->latency = 5 * chans * math_latency;
150 break;
151 default:
152 this->latency = 2;
153 break;
154 }
155 }
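/* For example, with the model above a Gen4 POW is estimated at
 * 8 * 8 * 22 = 1408 cycles and a RCP at 1 * 8 * 22 = 176 cycles, i.e.
 * rounds * channels * math-unit latency. These are rough estimates rather
 * than measured values.
 */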
156
157 void
158 schedule_node::set_latency_gen7(bool is_haswell)
159 {
160 switch (inst->opcode) {
161 case BRW_OPCODE_MAD:
162 /* 2 cycles
163 * (since the last two src operands are in different register banks):
164 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
165 *
166 * 3 cycles on IVB, 4 on HSW
167 * (since the last two src operands are in the same register bank):
168 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
169 *
170 * 18 cycles on IVB, 16 on HSW
171 * (since the last two src operands are in different register banks):
172 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
173 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
174 *
175 * 20 cycles on IVB, 18 on HSW
176 * (since the last two src operands are in the same register bank):
177 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
178 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
179 */
180
181 /* Our register allocator doesn't know about register banks, so use the
182 * higher latency.
183 */
184 latency = is_haswell ? 16 : 18;
185 break;
186
187 case BRW_OPCODE_LRP:
188 /* 2 cycles
189 * (since the last two src operands are in different register banks):
190 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
191 *
192 * 3 cycles on IVB, 4 on HSW
193 * (since the last two src operands are in the same register bank):
194 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
195 *
196 * 16 cycles on IVB, 14 on HSW
197 * (since the last two src operands are in different register banks):
198 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
199 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
200 *
201 * 16 cycles
202 * (since the last two src operands are in the same register bank):
203 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
204 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
205 */
206
207 /* Our register allocator doesn't know about register banks, so use the
208 * higher latency.
209 */
210 latency = 14;
211 break;
212
213 case SHADER_OPCODE_RCP:
214 case SHADER_OPCODE_RSQ:
215 case SHADER_OPCODE_SQRT:
216 case SHADER_OPCODE_LOG2:
217 case SHADER_OPCODE_EXP2:
218 case SHADER_OPCODE_SIN:
219 case SHADER_OPCODE_COS:
220 /* 2 cycles:
221 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
222 *
223 * 18 cycles:
224 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
225 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
226 *
227 * Same for exp2, log2, rsq, sqrt, sin, cos.
228 */
229 latency = is_haswell ? 14 : 16;
230 break;
231
232 case SHADER_OPCODE_POW:
233 /* 2 cycles:
234 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
235 *
236 * 26 cycles:
237 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
238 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
239 */
240 latency = is_haswell ? 22 : 24;
241 break;
242
243 case SHADER_OPCODE_TEX:
244 case SHADER_OPCODE_TXD:
245 case SHADER_OPCODE_TXF:
246 case SHADER_OPCODE_TXF_LZ:
247 case SHADER_OPCODE_TXL:
248 case SHADER_OPCODE_TXL_LZ:
249 /* 18 cycles:
250 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
251 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
252 * send(8) g4<1>UW g114<8,8,1>F
253 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
254 *
255 * 697 +/-49 cycles (min 610, n=26):
256 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
257 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
258 * send(8) g4<1>UW g114<8,8,1>F
259 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
260 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
261 *
262  * So the latency of our first texture load of the batchbuffer is
263  * ~700 cycles, since the caches are cold at that point.
264 *
265 * 840 +/- 92 cycles (min 720, n=25):
266 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
267 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
268 * send(8) g4<1>UW g114<8,8,1>F
269 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
270 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
271 * send(8) g4<1>UW g114<8,8,1>F
272 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
273 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
274 *
275 * On the second load, it takes just an extra ~140 cycles, and after
276 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
277 *
278 * 683 +/- 49 cycles (min = 602, n=47):
279 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
280 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
281 * send(8) g4<1>UW g114<8,8,1>F
282 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
283 * send(8) g50<1>UW g114<8,8,1>F
284 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
285 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
286 *
287 * The unit appears to be pipelined, since this matches up with the
288 * cache-cold case, despite there being two loads here. If you replace
289 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
290 *
291 * So, take some number between the cache-hot 140 cycles and the
292 * cache-cold 700 cycles. No particular tuning was done on this.
293 *
294 * I haven't done significant testing of the non-TEX opcodes. TXL at
295 * least looked about the same as TEX.
296 */
297 latency = 200;
298 break;
299
300 case SHADER_OPCODE_TXS:
301 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
302 * cycles (n=15):
303 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
304 * send(8) g6<1>UW g114<8,8,1>F
305 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
306 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
307 *
308 *
309 * Two loads was 535 +/- 30 cycles (n=19):
310 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
311 * send(16) g6<1>UW g114<8,8,1>F
312 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
313 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
314 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
315 * send(16) g8<1>UW g114<8,8,1>F
316 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
317 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
318 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
319 *
320 * Since the only caches that should matter are just the
321 * instruction/state cache containing the surface state, assume that we
322 * always have hot caches.
323 */
324 latency = 100;
325 break;
326
327 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
328 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
329 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
330 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
331 case VS_OPCODE_PULL_CONSTANT_LOAD:
332 /* testing using varying-index pull constants:
333 *
334 * 16 cycles:
335 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
336 * send(8) g4<1>F g4<8,8,1>D
337 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
338 *
339 * ~480 cycles:
340 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
341 * send(8) g4<1>F g4<8,8,1>D
342 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
343 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
344 *
345 * ~620 cycles:
346 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
347 * send(8) g4<1>F g4<8,8,1>D
348 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
349 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
350 * send(8) g4<1>F g4<8,8,1>D
351 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
352 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
353 *
354 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
355 * about 460. We expect to mostly be cache hot, so pick something more
356 * in that direction.
357 */
358 latency = 200;
359 break;
360
361 case SHADER_OPCODE_GEN7_SCRATCH_READ:
362 /* Testing a load from offset 0, that had been previously written:
363 *
364 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
365 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
366 *
367 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
368 * then around 140. Presumably this is cache hit vs miss.
369 */
370 latency = 50;
371 break;
372
373 case SHADER_OPCODE_UNTYPED_ATOMIC:
374 case SHADER_OPCODE_TYPED_ATOMIC:
375 /* Test code:
376 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
377 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
378 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
379 * send(8) g4<1>ud g112<8,8,1>ud
380 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
381 *
382 * Running it 100 times as fragment shader on a 128x128 quad
383 * gives an average latency of 13867 cycles per atomic op,
384 * standard deviation 3%. Note that this is a rather
385 * pessimistic estimate, the actual latency in cases with few
386 * collisions between threads and favorable pipelining has been
387 * seen to be reduced by a factor of 100.
388 */
389 latency = 14000;
390 break;
391
392 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
393 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
394 case SHADER_OPCODE_TYPED_SURFACE_READ:
395 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
396 /* Test code:
397 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
398 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
399 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
400 * send(8) g4<1>UD g112<8,8,1>UD
401 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
402 * .
403 * . [repeats 8 times]
404 * .
405 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
406 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
407 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
408 * send(8) g4<1>UD g112<8,8,1>UD
409 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
410 *
411 * Running it 100 times as fragment shader on a 128x128 quad
412 * gives an average latency of 583 cycles per surface read,
413 * standard deviation 0.9%.
414 */
415 latency = is_haswell ? 300 : 600;
416 break;
417
418 default:
419 /* 2 cycles:
420 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
421 *
422 * 16 cycles:
423 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
424 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
425 */
426 latency = 14;
427 break;
428 }
429 }
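/* The latencies above appear to be derived by measuring a back-to-back
 * dependent pair (the instruction followed by a MOV reading its result) and
 * subtracting the ~2 cycle issue time of the producer: e.g. the 18-cycle
 * dependent math sequence on IVB corresponds to the latency of 16 used
 * above. Treat these as estimates, not architectural guarantees.
 */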
430
431 class instruction_scheduler {
432 public:
433 instruction_scheduler(backend_shader *s, int grf_count,
434 int hw_reg_count, int block_count,
435 instruction_scheduler_mode mode)
436 {
437 this->bs = s;
438 this->mem_ctx = ralloc_context(NULL);
439 this->grf_count = grf_count;
440 this->hw_reg_count = hw_reg_count;
441 this->instructions.make_empty();
442 this->instructions_to_schedule = 0;
443 this->post_reg_alloc = (mode == SCHEDULE_POST);
444 this->mode = mode;
445 this->time = 0;
446 if (!post_reg_alloc) {
447 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
448
449 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
450 for (int i = 0; i < block_count; i++)
451 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
452 BITSET_WORDS(grf_count));
453
454 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
455 for (int i = 0; i < block_count; i++)
456 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
457 BITSET_WORDS(grf_count));
458
459 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
460 for (int i = 0; i < block_count; i++)
461 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
462 BITSET_WORDS(hw_reg_count));
463
464 this->written = rzalloc_array(mem_ctx, bool, grf_count);
465
466 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
467
468 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
469 } else {
470 this->reg_pressure_in = NULL;
471 this->livein = NULL;
472 this->liveout = NULL;
473 this->hw_liveout = NULL;
474 this->written = NULL;
475 this->reads_remaining = NULL;
476 this->hw_reads_remaining = NULL;
477 }
478 }
479
480 ~instruction_scheduler()
481 {
482 ralloc_free(this->mem_ctx);
483 }
484 void add_barrier_deps(schedule_node *n);
485 void add_dep(schedule_node *before, schedule_node *after, int latency);
486 void add_dep(schedule_node *before, schedule_node *after);
487
488 void run(cfg_t *cfg);
489 void add_insts_from_block(bblock_t *block);
490 void compute_delays();
491 void compute_exits();
492 virtual void calculate_deps() = 0;
493 virtual schedule_node *choose_instruction_to_schedule() = 0;
494
495 /**
496 * Returns how many cycles it takes the instruction to issue.
497 *
498  * Instructions in gen hardware are handled one SIMD4 vector at a time,
499  * with 1 cycle per vector dispatched. Thus SIMD8 instructions take 2
500 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
501 */
502 virtual int issue_time(backend_instruction *inst) = 0;
503
504 virtual void count_reads_remaining(backend_instruction *inst) = 0;
505 virtual void setup_liveness(cfg_t *cfg) = 0;
506 virtual void update_register_pressure(backend_instruction *inst) = 0;
507 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
508
509 void schedule_instructions(bblock_t *block);
510
511 void *mem_ctx;
512
513 bool post_reg_alloc;
514 int instructions_to_schedule;
515 int grf_count;
516 int hw_reg_count;
517 int time;
518 int reg_pressure;
519 int block_idx;
520 exec_list instructions;
521 backend_shader *bs;
522
523 instruction_scheduler_mode mode;
524
525 /*
526 * The register pressure at the beginning of each basic block.
527 */
528
529 int *reg_pressure_in;
530
531 /*
532  * The virtual GRFs whose range overlaps the beginning of each basic block.
533 */
534
535 BITSET_WORD **livein;
536
537 /*
538  * The virtual GRFs whose range overlaps the end of each basic block.
539 */
540
541 BITSET_WORD **liveout;
542
543 /*
544  * The hardware GRFs whose range overlaps the end of each basic block.
545 */
546
547 BITSET_WORD **hw_liveout;
548
549 /*
550 * Whether we've scheduled a write for this virtual GRF yet.
551 */
552
553 bool *written;
554
555 /*
556 * How many reads we haven't scheduled for this virtual GRF yet.
557 */
558
559 int *reads_remaining;
560
561 /*
562 * How many reads we haven't scheduled for this hardware GRF yet.
563 */
564
565 int *hw_reads_remaining;
566 };
567
568 class fs_instruction_scheduler : public instruction_scheduler
569 {
570 public:
571 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
572 int block_count,
573 instruction_scheduler_mode mode);
574 void calculate_deps();
575 bool is_compressed(fs_inst *inst);
576 schedule_node *choose_instruction_to_schedule();
577 int issue_time(backend_instruction *inst);
578 fs_visitor *v;
579
580 void count_reads_remaining(backend_instruction *inst);
581 void setup_liveness(cfg_t *cfg);
582 void update_register_pressure(backend_instruction *inst);
583 int get_register_pressure_benefit(backend_instruction *inst);
584 };
585
586 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
587 int grf_count, int hw_reg_count,
588 int block_count,
589 instruction_scheduler_mode mode)
590 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
591 v(v)
592 {
593 }
594
595 static bool
596 is_src_duplicate(fs_inst *inst, int src)
597 {
598 for (int i = 0; i < src; i++)
599 if (inst->src[i].equals(inst->src[src]))
600 return true;
601
602 return false;
603 }
604
605 void
606 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
607 {
608 fs_inst *inst = (fs_inst *)be;
609
610 if (!reads_remaining)
611 return;
612
613 for (int i = 0; i < inst->sources; i++) {
614 if (is_src_duplicate(inst, i))
615 continue;
616
617 if (inst->src[i].file == VGRF) {
618 reads_remaining[inst->src[i].nr]++;
619 } else if (inst->src[i].file == FIXED_GRF) {
620 if (inst->src[i].nr >= hw_reg_count)
621 continue;
622
623 for (int j = 0; j < inst->regs_read(i); j++)
624 hw_reads_remaining[inst->src[i].nr + j]++;
625 }
626 }
627 }
628
629 void
630 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
631 {
632 /* First, compute liveness on a per-GRF level using the in/out sets from
633 * liveness calculation.
634 */
635 for (int block = 0; block < cfg->num_blocks; block++) {
636 for (int i = 0; i < v->live_intervals->num_vars; i++) {
637 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
638 int vgrf = v->live_intervals->vgrf_from_var[i];
639 if (!BITSET_TEST(livein[block], vgrf)) {
640 reg_pressure_in[block] += v->alloc.sizes[vgrf];
641 BITSET_SET(livein[block], vgrf);
642 }
643 }
644
645 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
646 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
647 }
648 }
649
650 /* Now, extend the live in/live out sets for when a range crosses a block
651 * boundary, which matches what our register allocator/interference code
652  * does to account for force_writemask_all and incompatible exec_masks.
653 */
654 for (int block = 0; block < cfg->num_blocks - 1; block++) {
655 for (int i = 0; i < grf_count; i++) {
656 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
657 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
658 if (!BITSET_TEST(livein[block + 1], i)) {
659 reg_pressure_in[block + 1] += v->alloc.sizes[i];
660 BITSET_SET(livein[block + 1], i);
661 }
662
663 BITSET_SET(liveout[block], i);
664 }
665 }
666 }
667
668 int payload_last_use_ip[hw_reg_count];
669 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
670
671 for (int i = 0; i < hw_reg_count; i++) {
672 if (payload_last_use_ip[i] == -1)
673 continue;
674
675 for (int block = 0; block < cfg->num_blocks; block++) {
676 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
677 reg_pressure_in[block]++;
678
679 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
680 BITSET_SET(hw_liveout[block], i);
681 }
682 }
683 }
684
685 void
686 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
687 {
688 fs_inst *inst = (fs_inst *)be;
689
690 if (!reads_remaining)
691 return;
692
693 if (inst->dst.file == VGRF) {
694 written[inst->dst.nr] = true;
695 }
696
697 for (int i = 0; i < inst->sources; i++) {
698 if (is_src_duplicate(inst, i))
699 continue;
700
701 if (inst->src[i].file == VGRF) {
702 reads_remaining[inst->src[i].nr]--;
703 } else if (inst->src[i].file == FIXED_GRF &&
704 inst->src[i].nr < hw_reg_count) {
705 for (int off = 0; off < inst->regs_read(i); off++)
706 hw_reads_remaining[inst->src[i].nr + off]--;
707 }
708 }
709 }
710
711 int
712 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
713 {
714 fs_inst *inst = (fs_inst *)be;
715 int benefit = 0;
716
717 if (inst->dst.file == VGRF) {
718 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
719 !written[inst->dst.nr])
720 benefit -= v->alloc.sizes[inst->dst.nr];
721 }
722
723 for (int i = 0; i < inst->sources; i++) {
724 if (is_src_duplicate(inst, i))
725 continue;
726
727 if (inst->src[i].file == VGRF &&
728 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
729 reads_remaining[inst->src[i].nr] == 1)
730 benefit += v->alloc.sizes[inst->src[i].nr];
731
732 if (inst->src[i].file == FIXED_GRF &&
733 inst->src[i].nr < hw_reg_count) {
734 for (int off = 0; off < inst->regs_read(i); off++) {
735 int reg = inst->src[i].nr + off;
736 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
737 hw_reads_remaining[reg] == 1) {
738 benefit++;
739 }
740 }
741 }
742 }
743
744 return benefit;
745 }
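/* Worked example (hypothetical registers and sizes): for
 *
 *    add(8) vgrf5 vgrf3 vgrf4
 *
 * where vgrf5 (1 reg) is not live into the block and has not been written
 * yet, the destination contributes -1; if this is the last remaining read
 * of vgrf3 and vgrf4 (1 reg each, neither live out of the block), the
 * sources contribute +2, for a net benefit of +1: scheduling this
 * instruction now is expected to shrink register pressure by one register.
 */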
746
747 class vec4_instruction_scheduler : public instruction_scheduler
748 {
749 public:
750 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
751 void calculate_deps();
752 schedule_node *choose_instruction_to_schedule();
753 int issue_time(backend_instruction *inst);
754 vec4_visitor *v;
755
756 void count_reads_remaining(backend_instruction *inst);
757 void setup_liveness(cfg_t *cfg);
758 void update_register_pressure(backend_instruction *inst);
759 int get_register_pressure_benefit(backend_instruction *inst);
760 };
761
762 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
763 int grf_count)
764 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
765 v(v)
766 {
767 }
768
769 void
770 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
771 {
772 }
773
774 void
775 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
776 {
777 }
778
779 void
780 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
781 {
782 }
783
784 int
785 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
786 {
787 return 0;
788 }
789
790 schedule_node::schedule_node(backend_instruction *inst,
791 instruction_scheduler *sched)
792 {
793 const struct gen_device_info *devinfo = sched->bs->devinfo;
794
795 this->inst = inst;
796 this->child_array_size = 0;
797 this->children = NULL;
798 this->child_latency = NULL;
799 this->child_count = 0;
800 this->parent_count = 0;
801 this->unblocked_time = 0;
802 this->cand_generation = 0;
803 this->delay = 0;
804 this->exit = NULL;
805 this->is_barrier = false;
806
807 /* We can't measure Gen6 timings directly but expect them to be much
808 * closer to Gen7 than Gen4.
809 */
810 if (!sched->post_reg_alloc)
811 this->latency = 1;
812 else if (devinfo->gen >= 6)
813 set_latency_gen7(devinfo->is_haswell);
814 else
815 set_latency_gen4();
816 }
817
818 void
819 instruction_scheduler::add_insts_from_block(bblock_t *block)
820 {
821 foreach_inst_in_block(backend_instruction, inst, block) {
822 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
823
824 instructions.push_tail(n);
825 }
826
827 this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
828 }
829
830 /** Computation of the delay member of each node. */
831 void
832 instruction_scheduler::compute_delays()
833 {
834 foreach_in_list_reverse(schedule_node, n, &instructions) {
835 if (!n->child_count) {
836 n->delay = issue_time(n->inst);
837 } else {
838 for (int i = 0; i < n->child_count; i++) {
839 assert(n->children[i]->delay);
840 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
841 }
842 }
843 }
844 }
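/* Worked example (hypothetical latencies): for a dependency chain
 * a -> b -> c where c is a leaf with issue_time 2, b has latency 14 and a
 * has latency 200, walking the list in reverse gives
 *
 *    delay(c) = 2
 *    delay(b) = 14 + delay(c) = 16
 *    delay(a) = 200 + delay(b) = 216
 *
 * so the pre-RA heuristics can see that a sits on the longest remaining
 * path to the end of the block.
 */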
845
846 void
847 instruction_scheduler::compute_exits()
848 {
849 /* Calculate a lower bound of the scheduling time of each node in the
850 * graph. This is analogous to the node's critical path but calculated
851 * from the top instead of from the bottom of the block.
852 */
853 foreach_in_list(schedule_node, n, &instructions) {
854 for (int i = 0; i < n->child_count; i++) {
855 n->children[i]->unblocked_time =
856 MAX2(n->children[i]->unblocked_time,
857 n->unblocked_time + issue_time(n->inst) + n->child_latency[i]);
858 }
859 }
860
861 /* Calculate the exit of each node by induction based on the exit nodes of
862 * its children. The preferred exit of a node is the one among the exit
863 * nodes of its children which can be unblocked first according to the
864 * optimistic unblocked time estimate calculated above.
865 */
866 foreach_in_list_reverse(schedule_node, n, &instructions) {
867 n->exit = (n->inst->opcode == FS_OPCODE_DISCARD_JUMP ? n : NULL);
868
869 for (int i = 0; i < n->child_count; i++) {
870 if (exit_unblocked_time(n->children[i]) < exit_unblocked_time(n))
871 n->exit = n->children[i]->exit;
872 }
873 }
874 }
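/* Worked example: if the block contains
 *
 *    a: cmp.nz.f0(8) ...
 *    b: (+f0) discard_jump(8) ...
 *
 * then b is its own exit node, and the bottom-up walk propagates b as the
 * preferred exit of a (and of anything a depends on), using the top-down
 * unblocked-time estimates to pick the earliest exit when a node blocks
 * several of them.
 */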
875
876 /**
877 * Add a dependency between two instruction nodes.
878 *
879 * The @after node will be scheduled after @before. We will try to
880 * schedule it @latency cycles after @before, but no guarantees there.
881 */
882 void
883 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
884 int latency)
885 {
886 if (!before || !after)
887 return;
888
889 assert(before != after);
890
891 for (int i = 0; i < before->child_count; i++) {
892 if (before->children[i] == after) {
893 before->child_latency[i] = MAX2(before->child_latency[i], latency);
894 return;
895 }
896 }
897
898 if (before->child_array_size <= before->child_count) {
899 if (before->child_array_size < 16)
900 before->child_array_size = 16;
901 else
902 before->child_array_size *= 2;
903
904 before->children = reralloc(mem_ctx, before->children,
905 schedule_node *,
906 before->child_array_size);
907 before->child_latency = reralloc(mem_ctx, before->child_latency,
908 int, before->child_array_size);
909 }
910
911 before->children[before->child_count] = after;
912 before->child_latency[before->child_count] = latency;
913 before->child_count++;
914 after->parent_count++;
915 }
916
917 void
918 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
919 {
920 if (!before)
921 return;
922
923 add_dep(before, after, before->latency);
924 }
925
926 /**
927 * Sometimes we really want this node to execute after everything that
928  * comes before it and before everything that follows it. This adds
929 * the deps to do so.
930 */
931 void
932 instruction_scheduler::add_barrier_deps(schedule_node *n)
933 {
934 schedule_node *prev = (schedule_node *)n->prev;
935 schedule_node *next = (schedule_node *)n->next;
936
937 n->is_barrier = true;
938
939 if (prev) {
940 while (!prev->is_head_sentinel()) {
941 add_dep(prev, n, 0);
942 if (prev->is_barrier)
943 break;
944 prev = (schedule_node *)prev->prev;
945 }
946 }
947
948 if (next) {
949 while (!next->is_tail_sentinel()) {
950 add_dep(n, next, 0);
951 if (next->is_barrier)
952 break;
953 next = (schedule_node *)next->next;
954 }
955 }
956 }
957
958 /* Instruction scheduling needs to be aware of when a compressed (SIMD16)
959  * MRF write actually writes 2 MRFs.
960 */
961 bool
962 fs_instruction_scheduler::is_compressed(fs_inst *inst)
963 {
964 return inst->exec_size == 16;
965 }
966
967 static bool
968 is_scheduling_barrier(const fs_inst *inst)
969 {
970 return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
971 inst->is_control_flow() ||
972 inst->has_side_effects();
973 }
974
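/* As a small illustration of the dependencies built below: given the
 * (hypothetical) sequence
 *
 *    a: mul(8) rA rX rY      // writes rA
 *    b: add(8) rB rA rZ      // reads rA
 *    c: mov(8) rA rW         // overwrites rA
 *
 * the first (top-to-bottom) pass adds a -> b (RAW, with a's latency) and
 * a -> c (WAW), and the second (bottom-to-top) pass adds b -> c (WAR, zero
 * latency), so c can never be scheduled ahead of the read in b.
 */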
975 void
976 fs_instruction_scheduler::calculate_deps()
977 {
978 /* Pre-register-allocation, this tracks the last write per VGRF offset.
979 * After register allocation, reg_offsets are gone and we track individual
980 * GRF registers.
981 */
982 schedule_node *last_grf_write[grf_count * 16];
983 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
984 schedule_node *last_conditional_mod[4] = {};
985 schedule_node *last_accumulator_write = NULL;
986 /* Fixed HW registers are assumed to be separate from the virtual
987 * GRFs, so they can be tracked separately. We don't really write
988 * to fixed GRFs much, so don't bother tracking them on a more
989 * granular level.
990 */
991 schedule_node *last_fixed_grf_write = NULL;
992
993 memset(last_grf_write, 0, sizeof(last_grf_write));
994 memset(last_mrf_write, 0, sizeof(last_mrf_write));
995
996 /* top-to-bottom dependencies: RAW and WAW. */
997 foreach_in_list(schedule_node, n, &instructions) {
998 fs_inst *inst = (fs_inst *)n->inst;
999
1000 if (is_scheduling_barrier(inst))
1001 add_barrier_deps(n);
1002
1003 /* read-after-write deps. */
1004 for (int i = 0; i < inst->sources; i++) {
1005 if (inst->src[i].file == VGRF) {
1006 if (post_reg_alloc) {
1007 for (int r = 0; r < inst->regs_read(i); r++)
1008 add_dep(last_grf_write[inst->src[i].nr + r], n);
1009 } else {
1010 for (int r = 0; r < inst->regs_read(i); r++) {
1011 add_dep(last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], n);
1012 }
1013 }
1014 } else if (inst->src[i].file == FIXED_GRF) {
1015 if (post_reg_alloc) {
1016 for (int r = 0; r < inst->regs_read(i); r++)
1017 add_dep(last_grf_write[inst->src[i].nr + r], n);
1018 } else {
1019 add_dep(last_fixed_grf_write, n);
1020 }
1021 } else if (inst->src[i].is_accumulator()) {
1022 add_dep(last_accumulator_write, n);
1023 } else if (inst->src[i].file == ARF) {
1024 add_barrier_deps(n);
1025 }
1026 }
1027
1028 if (inst->base_mrf != -1) {
1029 for (int i = 0; i < inst->mlen; i++) {
1030 /* It looks like the MRF regs are released in the send
1031 * instruction once it's sent, not when the result comes
1032 * back.
1033 */
1034 add_dep(last_mrf_write[inst->base_mrf + i], n);
1035 }
1036 }
1037
1038 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1039 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1040
1041 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1042 if (mask & (1 << i))
1043 add_dep(last_conditional_mod[i], n);
1044 }
1045 }
1046
1047 if (inst->reads_accumulator_implicitly()) {
1048 add_dep(last_accumulator_write, n);
1049 }
1050
1051 /* write-after-write deps. */
1052 if (inst->dst.file == VGRF) {
1053 if (post_reg_alloc) {
1054 for (int r = 0; r < inst->regs_written; r++) {
1055 add_dep(last_grf_write[inst->dst.nr + r], n);
1056 last_grf_write[inst->dst.nr + r] = n;
1057 }
1058 } else {
1059 for (int r = 0; r < inst->regs_written; r++) {
1060 add_dep(last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r], n);
1061 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1062 }
1063 }
1064 } else if (inst->dst.file == MRF) {
1065 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1066
1067 add_dep(last_mrf_write[reg], n);
1068 last_mrf_write[reg] = n;
1069 if (is_compressed(inst)) {
1070 if (inst->dst.nr & BRW_MRF_COMPR4)
1071 reg += 4;
1072 else
1073 reg++;
1074 add_dep(last_mrf_write[reg], n);
1075 last_mrf_write[reg] = n;
1076 }
1077 } else if (inst->dst.file == FIXED_GRF) {
1078 if (post_reg_alloc) {
1079 for (int r = 0; r < inst->regs_written; r++)
1080 last_grf_write[inst->dst.nr + r] = n;
1081 } else {
1082 last_fixed_grf_write = n;
1083 }
1084 } else if (inst->dst.is_accumulator()) {
1085 add_dep(last_accumulator_write, n);
1086 last_accumulator_write = n;
1087 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1088 add_barrier_deps(n);
1089 }
1090
1091 if (inst->mlen > 0 && inst->base_mrf != -1) {
1092 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1093 add_dep(last_mrf_write[inst->base_mrf + i], n);
1094 last_mrf_write[inst->base_mrf + i] = n;
1095 }
1096 }
1097
1098 if (const unsigned mask = inst->flags_written()) {
1099 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1100
1101 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1102 if (mask & (1 << i)) {
1103 add_dep(last_conditional_mod[i], n, 0);
1104 last_conditional_mod[i] = n;
1105 }
1106 }
1107 }
1108
1109 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1110 !inst->dst.is_accumulator()) {
1111 add_dep(last_accumulator_write, n);
1112 last_accumulator_write = n;
1113 }
1114 }
1115
1116 /* bottom-to-top dependencies: WAR */
1117 memset(last_grf_write, 0, sizeof(last_grf_write));
1118 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1119 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1120 last_accumulator_write = NULL;
1121 last_fixed_grf_write = NULL;
1122
1123 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1124 fs_inst *inst = (fs_inst *)n->inst;
1125
1126 /* write-after-read deps. */
1127 for (int i = 0; i < inst->sources; i++) {
1128 if (inst->src[i].file == VGRF) {
1129 if (post_reg_alloc) {
1130 for (int r = 0; r < inst->regs_read(i); r++)
1131 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1132 } else {
1133 for (int r = 0; r < inst->regs_read(i); r++) {
1134 add_dep(n, last_grf_write[inst->src[i].nr * 16 + inst->src[i].reg_offset + r], 0);
1135 }
1136 }
1137 } else if (inst->src[i].file == FIXED_GRF) {
1138 if (post_reg_alloc) {
1139 for (int r = 0; r < inst->regs_read(i); r++)
1140 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1141 } else {
1142 add_dep(n, last_fixed_grf_write, 0);
1143 }
1144 } else if (inst->src[i].is_accumulator()) {
1145 add_dep(n, last_accumulator_write, 0);
1146 } else if (inst->src[i].file == ARF) {
1147 add_barrier_deps(n);
1148 }
1149 }
1150
1151 if (inst->base_mrf != -1) {
1152 for (int i = 0; i < inst->mlen; i++) {
1153 /* It looks like the MRF regs are released in the send
1154 * instruction once it's sent, not when the result comes
1155 * back.
1156 */
1157 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1158 }
1159 }
1160
1161 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1162 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1163
1164 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1165 if (mask & (1 << i))
1166 add_dep(n, last_conditional_mod[i]);
1167 }
1168 }
1169
1170 if (inst->reads_accumulator_implicitly()) {
1171 add_dep(n, last_accumulator_write);
1172 }
1173
1174 /* Update the things this instruction wrote, so earlier reads
1175  * can mark this as a WAR dependency.
1176 */
1177 if (inst->dst.file == VGRF) {
1178 if (post_reg_alloc) {
1179 for (int r = 0; r < inst->regs_written; r++)
1180 last_grf_write[inst->dst.nr + r] = n;
1181 } else {
1182 for (int r = 0; r < inst->regs_written; r++) {
1183 last_grf_write[inst->dst.nr * 16 + inst->dst.reg_offset + r] = n;
1184 }
1185 }
1186 } else if (inst->dst.file == MRF) {
1187 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1188
1189 last_mrf_write[reg] = n;
1190
1191 if (is_compressed(inst)) {
1192 if (inst->dst.nr & BRW_MRF_COMPR4)
1193 reg += 4;
1194 else
1195 reg++;
1196
1197 last_mrf_write[reg] = n;
1198 }
1199 } else if (inst->dst.file == FIXED_GRF) {
1200 if (post_reg_alloc) {
1201 for (int r = 0; r < inst->regs_written; r++)
1202 last_grf_write[inst->dst.nr + r] = n;
1203 } else {
1204 last_fixed_grf_write = n;
1205 }
1206 } else if (inst->dst.is_accumulator()) {
1207 last_accumulator_write = n;
1208 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1209 add_barrier_deps(n);
1210 }
1211
1212 if (inst->mlen > 0 && inst->base_mrf != -1) {
1213 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1214 last_mrf_write[inst->base_mrf + i] = n;
1215 }
1216 }
1217
1218 if (const unsigned mask = inst->flags_written()) {
1219 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1220
1221 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1222 if (mask & (1 << i))
1223 last_conditional_mod[i] = n;
1224 }
1225 }
1226
1227 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1228 last_accumulator_write = n;
1229 }
1230 }
1231 }
1232
1233 static bool
1234 is_scheduling_barrier(const vec4_instruction *inst)
1235 {
1236 return inst->is_control_flow() ||
1237 inst->has_side_effects();
1238 }
1239
1240 void
1241 vec4_instruction_scheduler::calculate_deps()
1242 {
1243 schedule_node *last_grf_write[grf_count];
1244 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1245 schedule_node *last_conditional_mod = NULL;
1246 schedule_node *last_accumulator_write = NULL;
1247 /* Fixed HW registers are assumed to be separate from the virtual
1248 * GRFs, so they can be tracked separately. We don't really write
1249 * to fixed GRFs much, so don't bother tracking them on a more
1250 * granular level.
1251 */
1252 schedule_node *last_fixed_grf_write = NULL;
1253
1254 memset(last_grf_write, 0, sizeof(last_grf_write));
1255 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1256
1257 /* top-to-bottom dependencies: RAW and WAW. */
1258 foreach_in_list(schedule_node, n, &instructions) {
1259 vec4_instruction *inst = (vec4_instruction *)n->inst;
1260
1261 if (is_scheduling_barrier(inst))
1262 add_barrier_deps(n);
1263
1264 /* read-after-write deps. */
1265 for (int i = 0; i < 3; i++) {
1266 if (inst->src[i].file == VGRF) {
1267 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1268 add_dep(last_grf_write[inst->src[i].nr + j], n);
1269 } else if (inst->src[i].file == FIXED_GRF) {
1270 add_dep(last_fixed_grf_write, n);
1271 } else if (inst->src[i].is_accumulator()) {
1272 assert(last_accumulator_write);
1273 add_dep(last_accumulator_write, n);
1274 } else if (inst->src[i].file == ARF) {
1275 add_barrier_deps(n);
1276 }
1277 }
1278
1279 if (!inst->is_send_from_grf()) {
1280 for (int i = 0; i < inst->mlen; i++) {
1281 /* It looks like the MRF regs are released in the send
1282 * instruction once it's sent, not when the result comes
1283 * back.
1284 */
1285 add_dep(last_mrf_write[inst->base_mrf + i], n);
1286 }
1287 }
1288
1289 if (inst->reads_flag()) {
1290 assert(last_conditional_mod);
1291 add_dep(last_conditional_mod, n);
1292 }
1293
1294 if (inst->reads_accumulator_implicitly()) {
1295 assert(last_accumulator_write);
1296 add_dep(last_accumulator_write, n);
1297 }
1298
1299 /* write-after-write deps. */
1300 if (inst->dst.file == VGRF) {
1301 for (unsigned j = 0; j < inst->regs_written; ++j) {
1302 add_dep(last_grf_write[inst->dst.nr + j], n);
1303 last_grf_write[inst->dst.nr + j] = n;
1304 }
1305 } else if (inst->dst.file == MRF) {
1306 add_dep(last_mrf_write[inst->dst.nr], n);
1307 last_mrf_write[inst->dst.nr] = n;
1308 } else if (inst->dst.file == FIXED_GRF) {
1309 last_fixed_grf_write = n;
1310 } else if (inst->dst.is_accumulator()) {
1311 add_dep(last_accumulator_write, n);
1312 last_accumulator_write = n;
1313 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1314 add_barrier_deps(n);
1315 }
1316
1317 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1318 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1319 add_dep(last_mrf_write[inst->base_mrf + i], n);
1320 last_mrf_write[inst->base_mrf + i] = n;
1321 }
1322 }
1323
1324 if (inst->writes_flag()) {
1325 add_dep(last_conditional_mod, n, 0);
1326 last_conditional_mod = n;
1327 }
1328
1329 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1330 !inst->dst.is_accumulator()) {
1331 add_dep(last_accumulator_write, n);
1332 last_accumulator_write = n;
1333 }
1334 }
1335
1336 /* bottom-to-top dependencies: WAR */
1337 memset(last_grf_write, 0, sizeof(last_grf_write));
1338 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1339 last_conditional_mod = NULL;
1340 last_accumulator_write = NULL;
1341 last_fixed_grf_write = NULL;
1342
1343 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1344 vec4_instruction *inst = (vec4_instruction *)n->inst;
1345
1346 /* write-after-read deps. */
1347 for (int i = 0; i < 3; i++) {
1348 if (inst->src[i].file == VGRF) {
1349 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1350 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1351 } else if (inst->src[i].file == FIXED_GRF) {
1352 add_dep(n, last_fixed_grf_write);
1353 } else if (inst->src[i].is_accumulator()) {
1354 add_dep(n, last_accumulator_write);
1355 } else if (inst->src[i].file == ARF) {
1356 add_barrier_deps(n);
1357 }
1358 }
1359
1360 if (!inst->is_send_from_grf()) {
1361 for (int i = 0; i < inst->mlen; i++) {
1362 /* It looks like the MRF regs are released in the send
1363 * instruction once it's sent, not when the result comes
1364 * back.
1365 */
1366 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1367 }
1368 }
1369
1370 if (inst->reads_flag()) {
1371 add_dep(n, last_conditional_mod);
1372 }
1373
1374 if (inst->reads_accumulator_implicitly()) {
1375 add_dep(n, last_accumulator_write);
1376 }
1377
1378 /* Update the things this instruction wrote, so earlier reads
1379  * can mark this as a WAR dependency.
1380 */
1381 if (inst->dst.file == VGRF) {
1382 for (unsigned j = 0; j < inst->regs_written; ++j)
1383 last_grf_write[inst->dst.nr + j] = n;
1384 } else if (inst->dst.file == MRF) {
1385 last_mrf_write[inst->dst.nr] = n;
1386 } else if (inst->dst.file == FIXED_GRF) {
1387 last_fixed_grf_write = n;
1388 } else if (inst->dst.is_accumulator()) {
1389 last_accumulator_write = n;
1390 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1391 add_barrier_deps(n);
1392 }
1393
1394 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1395 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1396 last_mrf_write[inst->base_mrf + i] = n;
1397 }
1398 }
1399
1400 if (inst->writes_flag()) {
1401 last_conditional_mod = n;
1402 }
1403
1404 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1405 last_accumulator_write = n;
1406 }
1407 }
1408 }
1409
1410 schedule_node *
1411 fs_instruction_scheduler::choose_instruction_to_schedule()
1412 {
1413 schedule_node *chosen = NULL;
1414
1415 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1416 int chosen_time = 0;
1417
1418 /* Of the instructions ready to execute or the closest to being ready,
1419 * choose the one most likely to unblock an early program exit, or
1420 * otherwise the oldest one.
1421 */
1422 foreach_in_list(schedule_node, n, &instructions) {
1423 if (!chosen ||
1424 exit_unblocked_time(n) < exit_unblocked_time(chosen) ||
1425 (exit_unblocked_time(n) == exit_unblocked_time(chosen) &&
1426 n->unblocked_time < chosen_time)) {
1427 chosen = n;
1428 chosen_time = n->unblocked_time;
1429 }
1430 }
1431 } else {
1432 /* Before register allocation, we don't care about the latencies of
1433 * instructions. All we care about is reducing live intervals of
1434 * variables so that we can avoid register spilling, or get SIMD16
1435 * shaders which naturally do a better job of hiding instruction
1436 * latency.
1437 */
1438 foreach_in_list(schedule_node, n, &instructions) {
1439 fs_inst *inst = (fs_inst *)n->inst;
1440
1441 if (!chosen) {
1442 chosen = n;
1443 continue;
1444 }
1445
1446 /* Most important: If we can definitely reduce register pressure, do
1447 * so immediately.
1448 */
1449 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1450 int chosen_register_pressure_benefit =
1451 get_register_pressure_benefit(chosen->inst);
1452
1453 if (register_pressure_benefit > 0 &&
1454 register_pressure_benefit > chosen_register_pressure_benefit) {
1455 chosen = n;
1456 continue;
1457 } else if (chosen_register_pressure_benefit > 0 &&
1458 (register_pressure_benefit <
1459 chosen_register_pressure_benefit)) {
1460 continue;
1461 }
1462
1463 if (mode == SCHEDULE_PRE_LIFO) {
1464 /* Prefer instructions that recently became available for
1465 * scheduling. These are the things that are most likely to
1466 * (eventually) make a variable dead and reduce register pressure.
1467 * Typical register pressure estimates don't work for us because
1468 * most of our pressure comes from texturing, where no single
1469 * instruction to schedule will make a vec4 value dead.
1470 */
1471 if (n->cand_generation > chosen->cand_generation) {
1472 chosen = n;
1473 continue;
1474 } else if (n->cand_generation < chosen->cand_generation) {
1475 continue;
1476 }
1477
1478 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1479 * do this, then because we prefer instructions that just became
1480 * candidates, we'll end up in a pattern of scheduling a SEND,
1481 * then the MRFs for the next SEND, then the next SEND, then the
1482 * MRFs, etc., without ever consuming the results of a send.
1483 */
1484 if (v->devinfo->gen < 7) {
1485 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1486
1487 /* We use regs_written > 1 as our test for the kind of send
1488 * instruction to avoid -- only sends generate many regs, and a
1489 * single-result send is probably actually reducing register
1490 * pressure.
1491 */
1492 if (inst->regs_written <= inst->exec_size / 8 &&
1493 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1494 chosen = n;
1495 continue;
1496 } else if (inst->regs_written > chosen_inst->regs_written) {
1497 continue;
1498 }
1499 }
1500 }
1501
1502 /* For instructions pushed on the cands list at the same time, prefer
1503 * the one with the highest delay to the end of the program. This is
1504 * most likely to have its values able to be consumed first (such as
1505 * for a large tree of lowered ubo loads, which appear reversed in
1506 * the instruction stream with respect to when they can be consumed).
1507 */
1508 if (n->delay > chosen->delay) {
1509 chosen = n;
1510 continue;
1511 } else if (n->delay < chosen->delay) {
1512 continue;
1513 }
1514
1515 /* Prefer the node most likely to unblock an early program exit.
1516 */
1517 if (exit_unblocked_time(n) < exit_unblocked_time(chosen)) {
1518 chosen = n;
1519 continue;
1520 } else if (exit_unblocked_time(n) > exit_unblocked_time(chosen)) {
1521 continue;
1522 }
1523
1524 /* If all other metrics are equal, we prefer the first instruction in
1525  * the list (original program order).
1526 */
1527 }
1528 }
1529
1530 return chosen;
1531 }
1532
1533 schedule_node *
1534 vec4_instruction_scheduler::choose_instruction_to_schedule()
1535 {
1536 schedule_node *chosen = NULL;
1537 int chosen_time = 0;
1538
1539 /* Of the instructions ready to execute or the closest to being ready,
1540 * choose the oldest one.
1541 */
1542 foreach_in_list(schedule_node, n, &instructions) {
1543 if (!chosen || n->unblocked_time < chosen_time) {
1544 chosen = n;
1545 chosen_time = n->unblocked_time;
1546 }
1547 }
1548
1549 return chosen;
1550 }
1551
1552 int
1553 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1554 {
1555 if (is_compressed((fs_inst *)inst))
1556 return 4;
1557 else
1558 return 2;
1559 }
1560
1561 int
1562 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1563 {
1564 /* We always execute as two vec4s in parallel. */
1565 return 2;
1566 }
1567
1568 void
1569 instruction_scheduler::schedule_instructions(bblock_t *block)
1570 {
1571 const struct gen_device_info *devinfo = bs->devinfo;
1572 time = 0;
1573 if (!post_reg_alloc)
1574 reg_pressure = reg_pressure_in[block->num];
1575 block_idx = block->num;
1576
1577 /* Remove non-DAG heads from the list. */
1578 foreach_in_list_safe(schedule_node, n, &instructions) {
1579 if (n->parent_count != 0)
1580 n->remove();
1581 }
1582
1583 unsigned cand_generation = 1;
1584 while (!instructions.is_empty()) {
1585 schedule_node *chosen = choose_instruction_to_schedule();
1586
1587 /* Schedule this instruction. */
1588 assert(chosen);
1589 chosen->remove();
1590 chosen->inst->exec_node::remove();
1591 block->instructions.push_tail(chosen->inst);
1592 instructions_to_schedule--;
1593
1594 if (!post_reg_alloc) {
1595 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1596 update_register_pressure(chosen->inst);
1597 }
1598
1599 /* If we expected a delay for scheduling, then bump the clock to reflect
1600 * that. In reality, the hardware will switch to another hyperthread
1601 * and may not return to dispatching our thread for a while even after
1602 * we're unblocked. After this, we have the time when the chosen
1603 * instruction will start executing.
1604 */
1605 time = MAX2(time, chosen->unblocked_time);
1606
1607 /* Update the clock for how soon an instruction could start after the
1608 * chosen one.
1609 */
1610 time += issue_time(chosen->inst);
1611
1612 if (debug) {
1613 fprintf(stderr, "clock %4d, scheduled: ", time);
1614 bs->dump_instruction(chosen->inst);
1615 if (!post_reg_alloc)
1616 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1617 }
1618
1619 /* Now that we've scheduled a new instruction, some of its
1620 * children can be promoted to the list of instructions ready to
1621 * be scheduled. Update the children's unblocked time for this
1622 * DAG edge as we do so.
1623 */
1624 for (int i = chosen->child_count - 1; i >= 0; i--) {
1625 schedule_node *child = chosen->children[i];
1626
1627 child->unblocked_time = MAX2(child->unblocked_time,
1628 time + chosen->child_latency[i]);
1629
1630 if (debug) {
1631 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1632 bs->dump_instruction(child->inst);
1633 }
1634
1635 child->cand_generation = cand_generation;
1636 child->parent_count--;
1637 if (child->parent_count == 0) {
1638 if (debug) {
1639 fprintf(stderr, "\t\tnow available\n");
1640 }
1641 instructions.push_head(child);
1642 }
1643 }
1644 cand_generation++;
1645
1646 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1647  * but it's more limited pre-Gen6, so if we send something off to it then
1648 * the next math instruction isn't going to make progress until the first
1649 * is done.
1650 */
1651 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1652 foreach_in_list(schedule_node, n, &instructions) {
1653 if (n->inst->is_math())
1654 n->unblocked_time = MAX2(n->unblocked_time,
1655 time + chosen->latency);
1656 }
1657 }
1658 }
1659
1660 assert(instructions_to_schedule == 0);
1661
1662 block->cycle_count = time;
1663 }
1664
1665 static unsigned get_cycle_count(cfg_t *cfg)
1666 {
1667 unsigned count = 0, multiplier = 1;
1668 foreach_block(block, cfg) {
1669 if (block->start()->opcode == BRW_OPCODE_DO)
1670 multiplier *= 10; /* assume that loops execute ~10 times */
1671
1672 count += block->cycle_count * multiplier;
1673
1674 if (block->end()->opcode == BRW_OPCODE_WHILE)
1675 multiplier /= 10;
1676 }
1677
1678 return count;
1679 }
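/* For example, with hypothetical per-block cycle counts of 10 (before a
 * loop), 20 (the loop body) and 5 (after the loop), the estimate is
 * 10 + 20 * 10 + 5 = 215 cycles; a body nested inside two loops would be
 * weighted by 100.
 */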
1680
1681 void
1682 instruction_scheduler::run(cfg_t *cfg)
1683 {
1684 if (debug && !post_reg_alloc) {
1685 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1686 post_reg_alloc);
1687 bs->dump_instructions();
1688 }
1689
1690 if (!post_reg_alloc)
1691 setup_liveness(cfg);
1692
1693 foreach_block(block, cfg) {
1694 if (block->end_ip - block->start_ip <= 1)
1695 continue;
1696
1697 if (reads_remaining) {
1698 memset(reads_remaining, 0,
1699 grf_count * sizeof(*reads_remaining));
1700 memset(hw_reads_remaining, 0,
1701 hw_reg_count * sizeof(*hw_reads_remaining));
1702 memset(written, 0, grf_count * sizeof(*written));
1703
1704 foreach_inst_in_block(fs_inst, inst, block)
1705 count_reads_remaining(inst);
1706 }
1707
1708 add_insts_from_block(block);
1709
1710 calculate_deps();
1711
1712 compute_delays();
1713 compute_exits();
1714
1715 schedule_instructions(block);
1716 }
1717
1718 if (debug && !post_reg_alloc) {
1719 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1720 post_reg_alloc);
1721 bs->dump_instructions();
1722 }
1723
1724 cfg->cycle_count = get_cycle_count(cfg);
1725 }
1726
1727 void
1728 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1729 {
1730 if (mode != SCHEDULE_POST)
1731 calculate_live_intervals();
1732
1733 int grf_count;
1734 if (mode == SCHEDULE_POST)
1735 grf_count = grf_used;
1736 else
1737 grf_count = alloc.count;
1738
1739 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1740 cfg->num_blocks, mode);
1741 sched.run(cfg);
1742
1743 invalidate_live_intervals();
1744 }
1745
1746 void
1747 vec4_visitor::opt_schedule_instructions()
1748 {
1749 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1750 sched.run(cfg);
1751
1752 invalidate_live_intervals();
1753 }