1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33
34 using namespace brw;
35
36 /** @file brw_schedule_instructions.cpp
37 *
38 * List scheduling of FS and vec4 instructions.
39 *
40 * The basic model of the list scheduler is to take a basic block,
41 * compute a DAG of the dependencies (RAW ordering with latency, WAW
42 * ordering with latency, WAR ordering), and make a list of the DAG heads.
43 * Heuristically pick a DAG head, then put all the children that are
44 * now DAG heads into the list of things to schedule.
45 *
46 * The heuristic is the important part. We're trying to be cheap,
47 * since actually computing the optimal scheduling is NP-complete.
48 * What we do is track a "current clock". When we schedule a node, we
49 * update the earliest-unblocked clock time of its children, and
50 * increment the clock. Then, when trying to schedule, we just pick
51 * the earliest-unblocked instruction to schedule.
52 *
53 * Note that often there will be many things which could execute
54 * immediately, and there are a range of heuristic options to choose
55 * from in picking among those.
56 */
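/* A rough sketch of the scheduling process described above (illustrative
 * only, not part of the build; the actual implementation is
 * instruction_scheduler::schedule_instructions() further down, and
 * pick_best_candidate()/edge_latency() here stand in for
 * choose_instruction_to_schedule() and the per-edge child_latency[] array):
 *
 *    while (!candidates.is_empty()) {
 *       schedule_node *chosen = pick_best_candidate(candidates);
 *       emit(chosen->inst);
 *       time = MAX2(time, chosen->unblocked_time) + issue_time(chosen->inst);
 *
 *       for (int i = 0; i < chosen->child_count; i++) {
 *          schedule_node *child = chosen->children[i];
 *          child->unblocked_time = MAX2(child->unblocked_time,
 *                                       time + edge_latency(chosen, child));
 *          if (--child->parent_count == 0)
 *             candidates.push(child);      // child is now a DAG head
 *       }
 *    }
 */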
57
58 static bool debug = false;
59
60 class instruction_scheduler;
61
62 class schedule_node : public exec_node
63 {
64 public:
65 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
66 void set_latency_gen4();
67 void set_latency_gen7(bool is_haswell);
68
69 backend_instruction *inst;
70 schedule_node **children;
71 int *child_latency;
72 int child_count;
73 int parent_count;
74 int child_array_size;
75 int unblocked_time;
76 int latency;
77
78 /**
79 * Which iteration of pushing groups of children onto the candidates list
80 * this node was a part of.
81 */
82 unsigned cand_generation;
83
84 /**
85 * This is the sum of the instruction's latency plus the maximum delay of
86 * its children, or just the issue_time if it's a leaf node.
87 */
88 int delay;
89
90 /**
91 * Preferred exit node among the (direct or indirect) successors of this
92 * node. Among the scheduler nodes blocked by this node, this will be the
93 * one that may cause earliest program termination, or NULL if none of the
94 * successors is an exit node.
95 */
96 schedule_node *exit;
97
98 bool is_barrier;
99 };
100
101 /**
102 * Lower bound of the scheduling time after which one of the instructions
103 * blocked by this node may lead to program termination.
104 *
105 * exit_unblocked_time() determines a strict partial ordering relation '«' on
106 * the set of scheduler nodes as follows:
107 *
108 * n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
109 *
110 * which can be used to heuristically order nodes according to how early they
111 * can unblock an exit node and lead to program termination.
112 */
113 static inline int
114 exit_unblocked_time(const schedule_node *n)
115 {
116 return n->exit ? n->exit->unblocked_time : INT_MAX;
117 }
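/* For instance, fs_instruction_scheduler::choose_instruction_to_schedule()
 * below uses this ordering when comparing candidates (sketch only):
 *
 *    if (exit_unblocked_time(n) < exit_unblocked_time(chosen))
 *       chosen = n;    // n « chosen: n may unblock an exit sooner
 *
 * Nodes with no exit among their successors compare as INT_MAX and so never
 * win on this criterion alone.
 */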
118
119 void
120 schedule_node::set_latency_gen4()
121 {
122 int chans = 8;
123 int math_latency = 22;
124
125 switch (inst->opcode) {
126 case SHADER_OPCODE_RCP:
127 this->latency = 1 * chans * math_latency;
128 break;
129 case SHADER_OPCODE_RSQ:
130 this->latency = 2 * chans * math_latency;
131 break;
132 case SHADER_OPCODE_INT_QUOTIENT:
133 case SHADER_OPCODE_SQRT:
134 case SHADER_OPCODE_LOG2:
135 /* full precision log. partial is 2. */
136 this->latency = 3 * chans * math_latency;
137 break;
138 case SHADER_OPCODE_INT_REMAINDER:
139 case SHADER_OPCODE_EXP2:
140 /* full precision. partial is 3, same throughput. */
141 this->latency = 4 * chans * math_latency;
142 break;
143 case SHADER_OPCODE_POW:
144 this->latency = 8 * chans * math_latency;
145 break;
146 case SHADER_OPCODE_SIN:
147 case SHADER_OPCODE_COS:
148 /* minimum latency, max is 12 rounds. */
149 this->latency = 5 * chans * math_latency;
150 break;
151 default:
152 this->latency = 2;
153 break;
154 }
155 }
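/* For reference, the SIMD8 figures above multiply out as follows
 * (illustrative arithmetic only):
 *
 *    RCP:  1 * 8 * 22 =  176 cycles
 *    RSQ:  2 * 8 * 22 =  352 cycles
 *    POW:  8 * 8 * 22 = 1408 cycles
 */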
156
157 void
158 schedule_node::set_latency_gen7(bool is_haswell)
159 {
160 switch (inst->opcode) {
161 case BRW_OPCODE_MAD:
162 /* 2 cycles
163 * (since the last two src operands are in different register banks):
164 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
165 *
166 * 3 cycles on IVB, 4 on HSW
167 * (since the last two src operands are in the same register bank):
168 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
169 *
170 * 18 cycles on IVB, 16 on HSW
171 * (since the last two src operands are in different register banks):
172 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
173 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
174 *
175 * 20 cycles on IVB, 18 on HSW
176 * (since the last two src operands are in the same register bank):
177 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
178 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
179 */
180
181 /* Our register allocator doesn't know about register banks, so use the
182 * higher latency.
183 */
184 latency = is_haswell ? 16 : 18;
185 break;
186
187 case BRW_OPCODE_LRP:
188 /* 2 cycles
189 * (since the last two src operands are in different register banks):
190 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
191 *
192 * 3 cycles on IVB, 4 on HSW
193 * (since the last two src operands are in the same register bank):
194 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
195 *
196 * 16 cycles on IVB, 14 on HSW
197 * (since the last two src operands are in different register banks):
198 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
199 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
200 *
201 * 16 cycles
202 * (since the last two src operands are in the same register bank):
203 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
204 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
205 */
206
207 /* Our register allocator doesn't know about register banks, so use the
208 * higher latency.
209 */
210 latency = 14;
211 break;
212
213 case SHADER_OPCODE_RCP:
214 case SHADER_OPCODE_RSQ:
215 case SHADER_OPCODE_SQRT:
216 case SHADER_OPCODE_LOG2:
217 case SHADER_OPCODE_EXP2:
218 case SHADER_OPCODE_SIN:
219 case SHADER_OPCODE_COS:
220 /* 2 cycles:
221 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
222 *
223 * 18 cycles:
224 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
225 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
226 *
227 * Same for exp2, log2, rsq, sqrt, sin, cos.
228 */
229 latency = is_haswell ? 14 : 16;
230 break;
231
232 case SHADER_OPCODE_POW:
233 /* 2 cycles:
234 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
235 *
236 * 26 cycles:
237 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
238 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
239 */
240 latency = is_haswell ? 22 : 24;
241 break;
242
243 case SHADER_OPCODE_TEX:
244 case SHADER_OPCODE_TXD:
245 case SHADER_OPCODE_TXF:
246 case SHADER_OPCODE_TXF_LZ:
247 case SHADER_OPCODE_TXL:
248 case SHADER_OPCODE_TXL_LZ:
249 /* 18 cycles:
250 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
251 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
252 * send(8) g4<1>UW g114<8,8,1>F
253 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
254 *
255 * 697 +/-49 cycles (min 610, n=26):
256 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
257 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
258 * send(8) g4<1>UW g114<8,8,1>F
259 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
260 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
261 *
262 * So the latency on our first texture load of the batchbuffer takes
263 * ~700 cycles, since the caches are cold at that point.
264 *
265 * 840 +/- 92 cycles (min 720, n=25):
266 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
267 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
268 * send(8) g4<1>UW g114<8,8,1>F
269 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
270 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
271 * send(8) g4<1>UW g114<8,8,1>F
272 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
273 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
274 *
275 * On the second load, it takes just an extra ~140 cycles, and after
276 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
277 *
278 * 683 +/- 49 cycles (min = 602, n=47):
279 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
280 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
281 * send(8) g4<1>UW g114<8,8,1>F
282 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
283 * send(8) g50<1>UW g114<8,8,1>F
284 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
285 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
286 *
287 * The unit appears to be pipelined, since this matches up with the
288 * cache-cold case, despite there being two loads here. If you replace
289 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
290 *
291 * So, take some number between the cache-hot 140 cycles and the
292 * cache-cold 700 cycles. No particular tuning was done on this.
293 *
294 * I haven't done significant testing of the non-TEX opcodes. TXL at
295 * least looked about the same as TEX.
296 */
297 latency = 200;
298 break;
299
300 case SHADER_OPCODE_TXS:
301 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
302 * cycles (n=15):
303 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
304 * send(8) g6<1>UW g114<8,8,1>F
305 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
306 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
307 *
308 *
309 * Two loads was 535 +/- 30 cycles (n=19):
310 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
311 * send(16) g6<1>UW g114<8,8,1>F
312 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
313 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
314 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
315 * send(16) g8<1>UW g114<8,8,1>F
316 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
317 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
318 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
319 *
320 * Since the only caches that should matter are just the
321 * instruction/state cache containing the surface state, assume that we
322 * always have hot caches.
323 */
324 latency = 100;
325 break;
326
327 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
328 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
329 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
330 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
331 case VS_OPCODE_PULL_CONSTANT_LOAD:
332 /* testing using varying-index pull constants:
333 *
334 * 16 cycles:
335 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
336 * send(8) g4<1>F g4<8,8,1>D
337 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
338 *
339 * ~480 cycles:
340 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
341 * send(8) g4<1>F g4<8,8,1>D
342 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
343 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
344 *
345 * ~620 cycles:
346 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
347 * send(8) g4<1>F g4<8,8,1>D
348 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
349 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
350 * send(8) g4<1>F g4<8,8,1>D
351 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
352 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
353 *
354 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
355 * about 460. We expect to mostly be cache hot, so pick something more
356 * in that direction.
357 */
358 latency = 200;
359 break;
360
361 case SHADER_OPCODE_GEN7_SCRATCH_READ:
362 /* Testing a load from offset 0, that had been previously written:
363 *
364 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
365 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
366 *
367 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
368 * then around 140. Presumably this is cache hit vs miss.
369 */
370 latency = 50;
371 break;
372
373 case SHADER_OPCODE_UNTYPED_ATOMIC:
374 case SHADER_OPCODE_TYPED_ATOMIC:
375 /* Test code:
376 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
377 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
378 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
379 * send(8) g4<1>ud g112<8,8,1>ud
380 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
381 *
382 * Running it 100 times as fragment shader on a 128x128 quad
383 * gives an average latency of 13867 cycles per atomic op,
384 * standard deviation 3%. Note that this is a rather
385 * pessimistic estimate, the actual latency in cases with few
386 * collisions between threads and favorable pipelining has been
387 * seen to be reduced by a factor of 100.
388 */
389 latency = 14000;
390 break;
391
392 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
393 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
394 case SHADER_OPCODE_TYPED_SURFACE_READ:
395 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
396 /* Test code:
397 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
398 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
399 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
400 * send(8) g4<1>UD g112<8,8,1>UD
401 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
402 * .
403 * . [repeats 8 times]
404 * .
405 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
406 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
407 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
408 * send(8) g4<1>UD g112<8,8,1>UD
409 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
410 *
411 * Running it 100 times as fragment shader on a 128x128 quad
412 * gives an average latency of 583 cycles per surface read,
413 * standard deviation 0.9%.
414 */
415 latency = is_haswell ? 300 : 600;
416 break;
417
418 default:
419 /* 2 cycles:
420 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
421 *
422 * 16 cycles:
423 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
424 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
425 */
426 latency = 14;
427 break;
428 }
429 }
430
431 class instruction_scheduler {
432 public:
433 instruction_scheduler(backend_shader *s, int grf_count,
434 int hw_reg_count, int block_count,
435 instruction_scheduler_mode mode)
436 {
437 this->bs = s;
438 this->mem_ctx = ralloc_context(NULL);
439 this->grf_count = grf_count;
440 this->hw_reg_count = hw_reg_count;
441 this->instructions.make_empty();
442 this->instructions_to_schedule = 0;
443 this->post_reg_alloc = (mode == SCHEDULE_POST);
444 this->mode = mode;
445 if (!post_reg_alloc) {
446 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
447
448 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
449 for (int i = 0; i < block_count; i++)
450 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
451 BITSET_WORDS(grf_count));
452
453 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
454 for (int i = 0; i < block_count; i++)
455 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
456 BITSET_WORDS(grf_count));
457
458 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
459 for (int i = 0; i < block_count; i++)
460 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
461 BITSET_WORDS(hw_reg_count));
462
463 this->written = rzalloc_array(mem_ctx, bool, grf_count);
464
465 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
466
467 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
468 } else {
469 this->reg_pressure_in = NULL;
470 this->livein = NULL;
471 this->liveout = NULL;
472 this->hw_liveout = NULL;
473 this->written = NULL;
474 this->reads_remaining = NULL;
475 this->hw_reads_remaining = NULL;
476 }
477 }
478
479 ~instruction_scheduler()
480 {
481 ralloc_free(this->mem_ctx);
482 }
483 void add_barrier_deps(schedule_node *n);
484 void add_dep(schedule_node *before, schedule_node *after, int latency);
485 void add_dep(schedule_node *before, schedule_node *after);
486
487 void run(cfg_t *cfg);
488 void add_insts_from_block(bblock_t *block);
489 void compute_delays();
490 void compute_exits();
491 virtual void calculate_deps() = 0;
492 virtual schedule_node *choose_instruction_to_schedule() = 0;
493
494 /**
495 * Returns how many cycles it takes the instruction to issue.
496 *
497 * Instructions on Gen hardware are handled one SIMD4 vector at a time,
498 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
499 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
500 */
501 virtual int issue_time(backend_instruction *inst) = 0;
502
503 virtual void count_reads_remaining(backend_instruction *inst) = 0;
504 virtual void setup_liveness(cfg_t *cfg) = 0;
505 virtual void update_register_pressure(backend_instruction *inst) = 0;
506 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
507
508 void schedule_instructions(bblock_t *block);
509
510 void *mem_ctx;
511
512 bool post_reg_alloc;
513 int instructions_to_schedule;
514 int grf_count;
515 int hw_reg_count;
516 int reg_pressure;
517 int block_idx;
518 exec_list instructions;
519 backend_shader *bs;
520
521 instruction_scheduler_mode mode;
522
523 /*
524 * The register pressure at the beginning of each basic block.
525 */
526
527 int *reg_pressure_in;
528
529 /*
530 * The virtual GRFs whose range overlaps the beginning of each basic block.
531 */
532
533 BITSET_WORD **livein;
534
535 /*
536 * The virtual GRFs whose range overlaps the end of each basic block.
537 */
538
539 BITSET_WORD **liveout;
540
541 /*
542 * The hardware GRFs whose range overlaps the end of each basic block.
543 */
544
545 BITSET_WORD **hw_liveout;
546
547 /*
548 * Whether we've scheduled a write for this virtual GRF yet.
549 */
550
551 bool *written;
552
553 /*
554 * How many reads we haven't scheduled for this virtual GRF yet.
555 */
556
557 int *reads_remaining;
558
559 /*
560 * How many reads we haven't scheduled for this hardware GRF yet.
561 */
562
563 int *hw_reads_remaining;
564 };
565
566 class fs_instruction_scheduler : public instruction_scheduler
567 {
568 public:
569 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
570 int block_count,
571 instruction_scheduler_mode mode);
572 void calculate_deps();
573 bool is_compressed(fs_inst *inst);
574 schedule_node *choose_instruction_to_schedule();
575 int issue_time(backend_instruction *inst);
576 fs_visitor *v;
577
578 void count_reads_remaining(backend_instruction *inst);
579 void setup_liveness(cfg_t *cfg);
580 void update_register_pressure(backend_instruction *inst);
581 int get_register_pressure_benefit(backend_instruction *inst);
582 };
583
584 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
585 int grf_count, int hw_reg_count,
586 int block_count,
587 instruction_scheduler_mode mode)
588 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
589 v(v)
590 {
591 }
592
593 static bool
594 is_src_duplicate(fs_inst *inst, int src)
595 {
596 for (int i = 0; i < src; i++)
597 if (inst->src[i].equals(inst->src[src]))
598 return true;
599
600 return false;
601 }
602
603 void
604 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
605 {
606 fs_inst *inst = (fs_inst *)be;
607
608 if (!reads_remaining)
609 return;
610
611 for (int i = 0; i < inst->sources; i++) {
612 if (is_src_duplicate(inst, i))
613 continue;
614
615 if (inst->src[i].file == VGRF) {
616 reads_remaining[inst->src[i].nr]++;
617 } else if (inst->src[i].file == FIXED_GRF) {
618 if (inst->src[i].nr >= hw_reg_count)
619 continue;
620
621 for (unsigned j = 0; j < regs_read(inst, i); j++)
622 hw_reads_remaining[inst->src[i].nr + j]++;
623 }
624 }
625 }
626
627 void
628 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
629 {
630 /* First, compute liveness on a per-GRF level using the in/out sets from
631 * liveness calculation.
632 */
633 for (int block = 0; block < cfg->num_blocks; block++) {
634 for (int i = 0; i < v->live_intervals->num_vars; i++) {
635 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
636 int vgrf = v->live_intervals->vgrf_from_var[i];
637 if (!BITSET_TEST(livein[block], vgrf)) {
638 reg_pressure_in[block] += v->alloc.sizes[vgrf];
639 BITSET_SET(livein[block], vgrf);
640 }
641 }
642
643 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
644 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
645 }
646 }
647
648 /* Now, extend the live in/live out sets for when a range crosses a block
649 * boundary, which matches what our register allocator/interference code
650 * does to account for force_writemask_all and incompatible exec_masks.
651 */
652 for (int block = 0; block < cfg->num_blocks - 1; block++) {
653 for (int i = 0; i < grf_count; i++) {
654 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
655 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
656 if (!BITSET_TEST(livein[block + 1], i)) {
657 reg_pressure_in[block + 1] += v->alloc.sizes[i];
658 BITSET_SET(livein[block + 1], i);
659 }
660
661 BITSET_SET(liveout[block], i);
662 }
663 }
664 }
665
666 int payload_last_use_ip[hw_reg_count];
667 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
668
669 for (int i = 0; i < hw_reg_count; i++) {
670 if (payload_last_use_ip[i] == -1)
671 continue;
672
673 for (int block = 0; block < cfg->num_blocks; block++) {
674 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
675 reg_pressure_in[block]++;
676
677 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
678 BITSET_SET(hw_liveout[block], i);
679 }
680 }
681 }
682
683 void
684 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
685 {
686 fs_inst *inst = (fs_inst *)be;
687
688 if (!reads_remaining)
689 return;
690
691 if (inst->dst.file == VGRF) {
692 written[inst->dst.nr] = true;
693 }
694
695 for (int i = 0; i < inst->sources; i++) {
696 if (is_src_duplicate(inst, i))
697 continue;
698
699 if (inst->src[i].file == VGRF) {
700 reads_remaining[inst->src[i].nr]--;
701 } else if (inst->src[i].file == FIXED_GRF &&
702 inst->src[i].nr < hw_reg_count) {
703 for (unsigned off = 0; off < regs_read(inst, i); off++)
704 hw_reads_remaining[inst->src[i].nr + off]--;
705 }
706 }
707 }
708
709 int
710 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
711 {
712 fs_inst *inst = (fs_inst *)be;
713 int benefit = 0;
714
715 if (inst->dst.file == VGRF) {
716 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
717 !written[inst->dst.nr])
718 benefit -= v->alloc.sizes[inst->dst.nr];
719 }
720
721 for (int i = 0; i < inst->sources; i++) {
722 if (is_src_duplicate(inst, i))
723 continue;
724
725 if (inst->src[i].file == VGRF &&
726 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
727 reads_remaining[inst->src[i].nr] == 1)
728 benefit += v->alloc.sizes[inst->src[i].nr];
729
730 if (inst->src[i].file == FIXED_GRF &&
731 inst->src[i].nr < hw_reg_count) {
732 for (unsigned off = 0; off < regs_read(inst, i); off++) {
733 int reg = inst->src[i].nr + off;
734 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
735 hw_reads_remaining[reg] == 1) {
736 benefit++;
737 }
738 }
739 }
740 }
741
742 return benefit;
743 }
744
745 class vec4_instruction_scheduler : public instruction_scheduler
746 {
747 public:
748 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
749 void calculate_deps();
750 schedule_node *choose_instruction_to_schedule();
751 int issue_time(backend_instruction *inst);
752 vec4_visitor *v;
753
754 void count_reads_remaining(backend_instruction *inst);
755 void setup_liveness(cfg_t *cfg);
756 void update_register_pressure(backend_instruction *inst);
757 int get_register_pressure_benefit(backend_instruction *inst);
758 };
759
760 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
761 int grf_count)
762 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
763 v(v)
764 {
765 }
766
767 void
768 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
769 {
770 }
771
772 void
773 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
774 {
775 }
776
777 void
778 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
779 {
780 }
781
782 int
783 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
784 {
785 return 0;
786 }
787
788 schedule_node::schedule_node(backend_instruction *inst,
789 instruction_scheduler *sched)
790 {
791 const struct gen_device_info *devinfo = sched->bs->devinfo;
792
793 this->inst = inst;
794 this->child_array_size = 0;
795 this->children = NULL;
796 this->child_latency = NULL;
797 this->child_count = 0;
798 this->parent_count = 0;
799 this->unblocked_time = 0;
800 this->cand_generation = 0;
801 this->delay = 0;
802 this->exit = NULL;
803 this->is_barrier = false;
804
805 /* We can't measure Gen6 timings directly but expect them to be much
806 * closer to Gen7 than Gen4.
807 */
808 if (!sched->post_reg_alloc)
809 this->latency = 1;
810 else if (devinfo->gen >= 6)
811 set_latency_gen7(devinfo->is_haswell);
812 else
813 set_latency_gen4();
814 }
815
816 void
817 instruction_scheduler::add_insts_from_block(bblock_t *block)
818 {
819 foreach_inst_in_block(backend_instruction, inst, block) {
820 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
821
822 instructions.push_tail(n);
823 }
824
825 this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
826 }
827
828 /** Computation of the delay member of each node. */
829 void
830 instruction_scheduler::compute_delays()
831 {
832 foreach_in_list_reverse(schedule_node, n, &instructions) {
833 if (!n->child_count) {
834 n->delay = issue_time(n->inst);
835 } else {
836 for (int i = 0; i < n->child_count; i++) {
837 assert(n->children[i]->delay);
838 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
839 }
840 }
841 }
842 }
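/* Illustrative example of the computation above: for a dependency chain
 * a -> b -> c where c is a leaf with issue_time(c) == 2 and a and b each
 * have a latency of 14,
 *
 *    c->delay = 2                      (leaf: just its issue time)
 *    b->delay = 14 + c->delay = 16
 *    a->delay = 14 + b->delay = 30
 *
 * so delay approximates the critical-path length from a node to the end of
 * the block.
 */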
843
844 void
845 instruction_scheduler::compute_exits()
846 {
847 /* Calculate a lower bound of the scheduling time of each node in the
848 * graph. This is analogous to the node's critical path but calculated
849 * from the top instead of from the bottom of the block.
850 */
851 foreach_in_list(schedule_node, n, &instructions) {
852 for (int i = 0; i < n->child_count; i++) {
853 n->children[i]->unblocked_time =
854 MAX2(n->children[i]->unblocked_time,
855 n->unblocked_time + issue_time(n->inst) + n->child_latency[i]);
856 }
857 }
858
859 /* Calculate the exit of each node by induction based on the exit nodes of
860 * its children. The preferred exit of a node is the one among the exit
861 * nodes of its children which can be unblocked first according to the
862 * optimistic unblocked time estimate calculated above.
863 */
864 foreach_in_list_reverse(schedule_node, n, &instructions) {
865 n->exit = (n->inst->opcode == FS_OPCODE_DISCARD_JUMP ? n : NULL);
866
867 for (int i = 0; i < n->child_count; i++) {
868 if (exit_unblocked_time(n->children[i]) < exit_unblocked_time(n))
869 n->exit = n->children[i]->exit;
870 }
871 }
872 }
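/* Illustrative example of the two passes above: for a chain a -> b -> d where
 * d is a FS_OPCODE_DISCARD_JUMP, all issue times are 2 and the edge latencies
 * are 0,
 *
 *    forward pass:   a->unblocked_time = 0, b->unblocked_time = 2,
 *                    d->unblocked_time = 4
 *    backward pass:  d->exit = d, b->exit = d, a->exit = d
 *
 * so exit_unblocked_time(a) == 4: scheduling a cannot lead to program
 * termination before cycle 4.
 */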
873
874 /**
875 * Add a dependency between two instruction nodes.
876 *
877 * The @after node will be scheduled after @before. We will try to
878 * schedule it @latency cycles after @before, but no guarantees there.
879 */
880 void
881 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
882 int latency)
883 {
884 if (!before || !after)
885 return;
886
887 assert(before != after);
888
889 for (int i = 0; i < before->child_count; i++) {
890 if (before->children[i] == after) {
891 before->child_latency[i] = MAX2(before->child_latency[i], latency);
892 return;
893 }
894 }
895
896 if (before->child_array_size <= before->child_count) {
897 if (before->child_array_size < 16)
898 before->child_array_size = 16;
899 else
900 before->child_array_size *= 2;
901
902 before->children = reralloc(mem_ctx, before->children,
903 schedule_node *,
904 before->child_array_size);
905 before->child_latency = reralloc(mem_ctx, before->child_latency,
906 int, before->child_array_size);
907 }
908
909 before->children[before->child_count] = after;
910 before->child_latency[before->child_count] = latency;
911 before->child_count++;
912 after->parent_count++;
913 }
914
915 void
916 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
917 {
918 if (!before)
919 return;
920
921 add_dep(before, after, before->latency);
922 }
923
924 /**
925 * Sometimes we really want this node to execute after everything that
926 * was before it and before everything that followed it. This adds
927 * the deps to do so.
928 */
929 void
930 instruction_scheduler::add_barrier_deps(schedule_node *n)
931 {
932 schedule_node *prev = (schedule_node *)n->prev;
933 schedule_node *next = (schedule_node *)n->next;
934
935 n->is_barrier = true;
936
937 if (prev) {
938 while (!prev->is_head_sentinel()) {
939 add_dep(prev, n, 0);
940 if (prev->is_barrier)
941 break;
942 prev = (schedule_node *)prev->prev;
943 }
944 }
945
946 if (next) {
947 while (!next->is_tail_sentinel()) {
948 add_dep(n, next, 0);
949 if (next->is_barrier)
950 break;
951 next = (schedule_node *)next->next;
952 }
953 }
954 }
955
956 /* Instruction scheduling needs to be aware of when an MRF write
957 * actually writes 2 MRFs.
958 */
959 bool
960 fs_instruction_scheduler::is_compressed(fs_inst *inst)
961 {
962 return inst->exec_size == 16;
963 }
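/* Illustrative example: a compressed (SIMD16) write to m2 also clobbers m3,
 * so calculate_deps() below records the write against both MRFs; with the
 * BRW_MRF_COMPR4 addressing mode the second half lands four registers up
 * instead, so a compressed write to m2 covers m2 and m6.
 */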
964
965 static bool
966 is_scheduling_barrier(const fs_inst *inst)
967 {
968 return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
969 inst->is_control_flow() ||
970 inst->has_side_effects();
971 }
972
973 void
974 fs_instruction_scheduler::calculate_deps()
975 {
976 /* Pre-register-allocation, this tracks the last write per VGRF offset.
977 * After register allocation, reg_offsets are gone and we track individual
978 * GRF registers.
979 */
980 schedule_node *last_grf_write[grf_count * 16];
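   /* Illustrative indexing example: before register allocation the array is
    * addressed as nr * 16 + offset / REG_SIZE (assuming no VGRF covers more
    * than 16 registers), so a write to VGRF 5 at byte offset 64 lands in
    * slot 5 * 16 + 64 / 32 = 82.  After register allocation it is indexed
    * directly by the hardware GRF number.
    */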
981 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
982 schedule_node *last_conditional_mod[4] = {};
983 schedule_node *last_accumulator_write = NULL;
984 /* Fixed HW registers are assumed to be separate from the virtual
985 * GRFs, so they can be tracked separately. We don't really write
986 * to fixed GRFs much, so don't bother tracking them on a more
987 * granular level.
988 */
989 schedule_node *last_fixed_grf_write = NULL;
990
991 memset(last_grf_write, 0, sizeof(last_grf_write));
992 memset(last_mrf_write, 0, sizeof(last_mrf_write));
993
994 /* top-to-bottom dependencies: RAW and WAW. */
995 foreach_in_list(schedule_node, n, &instructions) {
996 fs_inst *inst = (fs_inst *)n->inst;
997
998 if (is_scheduling_barrier(inst))
999 add_barrier_deps(n);
1000
1001 /* read-after-write deps. */
1002 for (int i = 0; i < inst->sources; i++) {
1003 if (inst->src[i].file == VGRF) {
1004 if (post_reg_alloc) {
1005 for (unsigned r = 0; r < regs_read(inst, i); r++)
1006 add_dep(last_grf_write[inst->src[i].nr + r], n);
1007 } else {
1008 for (unsigned r = 0; r < regs_read(inst, i); r++) {
1009 add_dep(last_grf_write[inst->src[i].nr * 16 +
1010 inst->src[i].offset / REG_SIZE + r], n);
1011 }
1012 }
1013 } else if (inst->src[i].file == FIXED_GRF) {
1014 if (post_reg_alloc) {
1015 for (unsigned r = 0; r < regs_read(inst, i); r++)
1016 add_dep(last_grf_write[inst->src[i].nr + r], n);
1017 } else {
1018 add_dep(last_fixed_grf_write, n);
1019 }
1020 } else if (inst->src[i].is_accumulator()) {
1021 add_dep(last_accumulator_write, n);
1022 } else if (inst->src[i].file == ARF) {
1023 add_barrier_deps(n);
1024 }
1025 }
1026
1027 if (inst->base_mrf != -1) {
1028 for (int i = 0; i < inst->mlen; i++) {
1029 /* It looks like the MRF regs are released in the send
1030 * instruction once it's sent, not when the result comes
1031 * back.
1032 */
1033 add_dep(last_mrf_write[inst->base_mrf + i], n);
1034 }
1035 }
1036
1037 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1038 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1039
1040 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1041 if (mask & (1 << i))
1042 add_dep(last_conditional_mod[i], n);
1043 }
1044 }
1045
1046 if (inst->reads_accumulator_implicitly()) {
1047 add_dep(last_accumulator_write, n);
1048 }
1049
1050 /* write-after-write deps. */
1051 if (inst->dst.file == VGRF) {
1052 if (post_reg_alloc) {
1053 for (unsigned r = 0; r < regs_written(inst); r++) {
1054 add_dep(last_grf_write[inst->dst.nr + r], n);
1055 last_grf_write[inst->dst.nr + r] = n;
1056 }
1057 } else {
1058 for (unsigned r = 0; r < regs_written(inst); r++) {
1059 add_dep(last_grf_write[inst->dst.nr * 16 +
1060 inst->dst.offset / REG_SIZE + r], n);
1061 last_grf_write[inst->dst.nr * 16 +
1062 inst->dst.offset / REG_SIZE + r] = n;
1063 }
1064 }
1065 } else if (inst->dst.file == MRF) {
1066 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1067
1068 add_dep(last_mrf_write[reg], n);
1069 last_mrf_write[reg] = n;
1070 if (is_compressed(inst)) {
1071 if (inst->dst.nr & BRW_MRF_COMPR4)
1072 reg += 4;
1073 else
1074 reg++;
1075 add_dep(last_mrf_write[reg], n);
1076 last_mrf_write[reg] = n;
1077 }
1078 } else if (inst->dst.file == FIXED_GRF) {
1079 if (post_reg_alloc) {
1080 for (unsigned r = 0; r < regs_written(inst); r++)
1081 last_grf_write[inst->dst.nr + r] = n;
1082 } else {
1083 last_fixed_grf_write = n;
1084 }
1085 } else if (inst->dst.is_accumulator()) {
1086 add_dep(last_accumulator_write, n);
1087 last_accumulator_write = n;
1088 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1089 add_barrier_deps(n);
1090 }
1091
1092 if (inst->mlen > 0 && inst->base_mrf != -1) {
1093 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1094 add_dep(last_mrf_write[inst->base_mrf + i], n);
1095 last_mrf_write[inst->base_mrf + i] = n;
1096 }
1097 }
1098
1099 if (const unsigned mask = inst->flags_written()) {
1100 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1101
1102 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1103 if (mask & (1 << i)) {
1104 add_dep(last_conditional_mod[i], n, 0);
1105 last_conditional_mod[i] = n;
1106 }
1107 }
1108 }
1109
1110 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1111 !inst->dst.is_accumulator()) {
1112 add_dep(last_accumulator_write, n);
1113 last_accumulator_write = n;
1114 }
1115 }
1116
1117 /* bottom-to-top dependencies: WAR */
1118 memset(last_grf_write, 0, sizeof(last_grf_write));
1119 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1120 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1121 last_accumulator_write = NULL;
1122 last_fixed_grf_write = NULL;
1123
1124 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1125 fs_inst *inst = (fs_inst *)n->inst;
1126
1127 /* write-after-read deps. */
1128 for (int i = 0; i < inst->sources; i++) {
1129 if (inst->src[i].file == VGRF) {
1130 if (post_reg_alloc) {
1131 for (unsigned r = 0; r < regs_read(inst, i); r++)
1132 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1133 } else {
1134 for (unsigned r = 0; r < regs_read(inst, i); r++) {
1135 add_dep(n, last_grf_write[inst->src[i].nr * 16 +
1136 inst->src[i].offset / REG_SIZE + r], 0);
1137 }
1138 }
1139 } else if (inst->src[i].file == FIXED_GRF) {
1140 if (post_reg_alloc) {
1141 for (unsigned r = 0; r < regs_read(inst, i); r++)
1142 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1143 } else {
1144 add_dep(n, last_fixed_grf_write, 0);
1145 }
1146 } else if (inst->src[i].is_accumulator()) {
1147 add_dep(n, last_accumulator_write, 0);
1148 } else if (inst->src[i].file == ARF) {
1149 add_barrier_deps(n);
1150 }
1151 }
1152
1153 if (inst->base_mrf != -1) {
1154 for (int i = 0; i < inst->mlen; i++) {
1155 /* It looks like the MRF regs are released in the send
1156 * instruction once it's sent, not when the result comes
1157 * back.
1158 */
1159 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1160 }
1161 }
1162
1163 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1164 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1165
1166 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1167 if (mask & (1 << i))
1168 add_dep(n, last_conditional_mod[i]);
1169 }
1170 }
1171
1172 if (inst->reads_accumulator_implicitly()) {
1173 add_dep(n, last_accumulator_write);
1174 }
1175
1176 /* Update the things this instruction wrote, so earlier reads
1177 * can mark this as a WAR dependency.
1178 */
1179 if (inst->dst.file == VGRF) {
1180 if (post_reg_alloc) {
1181 for (unsigned r = 0; r < regs_written(inst); r++)
1182 last_grf_write[inst->dst.nr + r] = n;
1183 } else {
1184 for (unsigned r = 0; r < regs_written(inst); r++) {
1185 last_grf_write[inst->dst.nr * 16 +
1186 inst->dst.offset / REG_SIZE + r] = n;
1187 }
1188 }
1189 } else if (inst->dst.file == MRF) {
1190 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
1191
1192 last_mrf_write[reg] = n;
1193
1194 if (is_compressed(inst)) {
1195 if (inst->dst.nr & BRW_MRF_COMPR4)
1196 reg += 4;
1197 else
1198 reg++;
1199
1200 last_mrf_write[reg] = n;
1201 }
1202 } else if (inst->dst.file == FIXED_GRF) {
1203 if (post_reg_alloc) {
1204 for (unsigned r = 0; r < regs_written(inst); r++)
1205 last_grf_write[inst->dst.nr + r] = n;
1206 } else {
1207 last_fixed_grf_write = n;
1208 }
1209 } else if (inst->dst.is_accumulator()) {
1210 last_accumulator_write = n;
1211 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1212 add_barrier_deps(n);
1213 }
1214
1215 if (inst->mlen > 0 && inst->base_mrf != -1) {
1216 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1217 last_mrf_write[inst->base_mrf + i] = n;
1218 }
1219 }
1220
1221 if (const unsigned mask = inst->flags_written()) {
1222 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1223
1224 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1225 if (mask & (1 << i))
1226 last_conditional_mod[i] = n;
1227 }
1228 }
1229
1230 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1231 last_accumulator_write = n;
1232 }
1233 }
1234 }
1235
1236 static bool
1237 is_scheduling_barrier(const vec4_instruction *inst)
1238 {
1239 return inst->is_control_flow() ||
1240 inst->has_side_effects();
1241 }
1242
1243 void
1244 vec4_instruction_scheduler::calculate_deps()
1245 {
1246 schedule_node *last_grf_write[grf_count];
1247 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1248 schedule_node *last_conditional_mod = NULL;
1249 schedule_node *last_accumulator_write = NULL;
1250 /* Fixed HW registers are assumed to be separate from the virtual
1251 * GRFs, so they can be tracked separately. We don't really write
1252 * to fixed GRFs much, so don't bother tracking them on a more
1253 * granular level.
1254 */
1255 schedule_node *last_fixed_grf_write = NULL;
1256
1257 memset(last_grf_write, 0, sizeof(last_grf_write));
1258 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1259
1260 /* top-to-bottom dependencies: RAW and WAW. */
1261 foreach_in_list(schedule_node, n, &instructions) {
1262 vec4_instruction *inst = (vec4_instruction *)n->inst;
1263
1264 if (is_scheduling_barrier(inst))
1265 add_barrier_deps(n);
1266
1267 /* read-after-write deps. */
1268 for (int i = 0; i < 3; i++) {
1269 if (inst->src[i].file == VGRF) {
1270 for (unsigned j = 0; j < regs_read(inst, i); ++j)
1271 add_dep(last_grf_write[inst->src[i].nr + j], n);
1272 } else if (inst->src[i].file == FIXED_GRF) {
1273 add_dep(last_fixed_grf_write, n);
1274 } else if (inst->src[i].is_accumulator()) {
1275 assert(last_accumulator_write);
1276 add_dep(last_accumulator_write, n);
1277 } else if (inst->src[i].file == ARF) {
1278 add_barrier_deps(n);
1279 }
1280 }
1281
1282 if (!inst->is_send_from_grf()) {
1283 for (int i = 0; i < inst->mlen; i++) {
1284 /* It looks like the MRF regs are released in the send
1285 * instruction once it's sent, not when the result comes
1286 * back.
1287 */
1288 add_dep(last_mrf_write[inst->base_mrf + i], n);
1289 }
1290 }
1291
1292 if (inst->reads_flag()) {
1293 assert(last_conditional_mod);
1294 add_dep(last_conditional_mod, n);
1295 }
1296
1297 if (inst->reads_accumulator_implicitly()) {
1298 assert(last_accumulator_write);
1299 add_dep(last_accumulator_write, n);
1300 }
1301
1302 /* write-after-write deps. */
1303 if (inst->dst.file == VGRF) {
1304 for (unsigned j = 0; j < regs_written(inst); ++j) {
1305 add_dep(last_grf_write[inst->dst.nr + j], n);
1306 last_grf_write[inst->dst.nr + j] = n;
1307 }
1308 } else if (inst->dst.file == MRF) {
1309 add_dep(last_mrf_write[inst->dst.nr], n);
1310 last_mrf_write[inst->dst.nr] = n;
1311 } else if (inst->dst.file == FIXED_GRF) {
1312 last_fixed_grf_write = n;
1313 } else if (inst->dst.is_accumulator()) {
1314 add_dep(last_accumulator_write, n);
1315 last_accumulator_write = n;
1316 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1317 add_barrier_deps(n);
1318 }
1319
1320 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1321 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1322 add_dep(last_mrf_write[inst->base_mrf + i], n);
1323 last_mrf_write[inst->base_mrf + i] = n;
1324 }
1325 }
1326
1327 if (inst->writes_flag()) {
1328 add_dep(last_conditional_mod, n, 0);
1329 last_conditional_mod = n;
1330 }
1331
1332 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1333 !inst->dst.is_accumulator()) {
1334 add_dep(last_accumulator_write, n);
1335 last_accumulator_write = n;
1336 }
1337 }
1338
1339 /* bottom-to-top dependencies: WAR */
1340 memset(last_grf_write, 0, sizeof(last_grf_write));
1341 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1342 last_conditional_mod = NULL;
1343 last_accumulator_write = NULL;
1344 last_fixed_grf_write = NULL;
1345
1346 foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1347 vec4_instruction *inst = (vec4_instruction *)n->inst;
1348
1349 /* write-after-read deps. */
1350 for (int i = 0; i < 3; i++) {
1351 if (inst->src[i].file == VGRF) {
1352 for (unsigned j = 0; j < regs_read(inst, i); ++j)
1353 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1354 } else if (inst->src[i].file == FIXED_GRF) {
1355 add_dep(n, last_fixed_grf_write);
1356 } else if (inst->src[i].is_accumulator()) {
1357 add_dep(n, last_accumulator_write);
1358 } else if (inst->src[i].file == ARF) {
1359 add_barrier_deps(n);
1360 }
1361 }
1362
1363 if (!inst->is_send_from_grf()) {
1364 for (int i = 0; i < inst->mlen; i++) {
1365 /* It looks like the MRF regs are released in the send
1366 * instruction once it's sent, not when the result comes
1367 * back.
1368 */
1369 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1370 }
1371 }
1372
1373 if (inst->reads_flag()) {
1374 add_dep(n, last_conditional_mod);
1375 }
1376
1377 if (inst->reads_accumulator_implicitly()) {
1378 add_dep(n, last_accumulator_write);
1379 }
1380
1381 /* Update the things this instruction wrote, so earlier reads
1382 * can mark this as a WAR dependency.
1383 */
1384 if (inst->dst.file == VGRF) {
1385 for (unsigned j = 0; j < regs_written(inst); ++j)
1386 last_grf_write[inst->dst.nr + j] = n;
1387 } else if (inst->dst.file == MRF) {
1388 last_mrf_write[inst->dst.nr] = n;
1389 } else if (inst->dst.file == FIXED_GRF) {
1390 last_fixed_grf_write = n;
1391 } else if (inst->dst.is_accumulator()) {
1392 last_accumulator_write = n;
1393 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1394 add_barrier_deps(n);
1395 }
1396
1397 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1398 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1399 last_mrf_write[inst->base_mrf + i] = n;
1400 }
1401 }
1402
1403 if (inst->writes_flag()) {
1404 last_conditional_mod = n;
1405 }
1406
1407 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1408 last_accumulator_write = n;
1409 }
1410 }
1411 }
1412
1413 schedule_node *
1414 fs_instruction_scheduler::choose_instruction_to_schedule()
1415 {
1416 schedule_node *chosen = NULL;
1417
1418 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1419 int chosen_time = 0;
1420
1421 /* Of the instructions ready to execute or the closest to being ready,
1422 * choose the one most likely to unblock an early program exit, or
1423 * otherwise the oldest one.
1424 */
1425 foreach_in_list(schedule_node, n, &instructions) {
1426 if (!chosen ||
1427 exit_unblocked_time(n) < exit_unblocked_time(chosen) ||
1428 (exit_unblocked_time(n) == exit_unblocked_time(chosen) &&
1429 n->unblocked_time < chosen_time)) {
1430 chosen = n;
1431 chosen_time = n->unblocked_time;
1432 }
1433 }
1434 } else {
1435 /* Before register allocation, we don't care about the latencies of
1436 * instructions. All we care about is reducing live intervals of
1437 * variables so that we can avoid register spilling, or get SIMD16
1438 * shaders which naturally do a better job of hiding instruction
1439 * latency.
1440 */
1441 foreach_in_list(schedule_node, n, &instructions) {
1442 fs_inst *inst = (fs_inst *)n->inst;
1443
1444 if (!chosen) {
1445 chosen = n;
1446 continue;
1447 }
1448
1449 /* Most important: If we can definitely reduce register pressure, do
1450 * so immediately.
1451 */
1452 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1453 int chosen_register_pressure_benefit =
1454 get_register_pressure_benefit(chosen->inst);
1455
1456 if (register_pressure_benefit > 0 &&
1457 register_pressure_benefit > chosen_register_pressure_benefit) {
1458 chosen = n;
1459 continue;
1460 } else if (chosen_register_pressure_benefit > 0 &&
1461 (register_pressure_benefit <
1462 chosen_register_pressure_benefit)) {
1463 continue;
1464 }
1465
1466 if (mode == SCHEDULE_PRE_LIFO) {
1467 /* Prefer instructions that recently became available for
1468 * scheduling. These are the things that are most likely to
1469 * (eventually) make a variable dead and reduce register pressure.
1470 * Typical register pressure estimates don't work for us because
1471 * most of our pressure comes from texturing, where no single
1472 * instruction to schedule will make a vec4 value dead.
1473 */
1474 if (n->cand_generation > chosen->cand_generation) {
1475 chosen = n;
1476 continue;
1477 } else if (n->cand_generation < chosen->cand_generation) {
1478 continue;
1479 }
1480
1481 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1482 * do this, then because we prefer instructions that just became
1483 * candidates, we'll end up in a pattern of scheduling a SEND,
1484 * then the MRFs for the next SEND, then the next SEND, then the
1485 * MRFs, etc., without ever consuming the results of a send.
1486 */
1487 if (v->devinfo->gen < 7) {
1488 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1489
1490 /* We use size_written > 4 * exec_size as our test for the kind
1491 * of send instruction to avoid -- only sends generate many
1492 * regs, and a single-result send is probably actually reducing
1493 * register pressure.
1494 */
1495 if (inst->size_written <= 4 * inst->exec_size &&
1496 chosen_inst->size_written > 4 * chosen_inst->exec_size) {
1497 chosen = n;
1498 continue;
1499 } else if (inst->size_written > chosen_inst->size_written) {
1500 continue;
1501 }
1502 }
1503 }
1504
1505 /* For instructions pushed on the cands list at the same time, prefer
1506 * the one with the highest delay to the end of the program. This is
1507 * most likely to have its values able to be consumed first (such as
1508 * for a large tree of lowered ubo loads, which appear reversed in
1509 * the instruction stream with respect to when they can be consumed).
1510 */
1511 if (n->delay > chosen->delay) {
1512 chosen = n;
1513 continue;
1514 } else if (n->delay < chosen->delay) {
1515 continue;
1516 }
1517
1518 /* Prefer the node most likely to unblock an early program exit.
1519 */
1520 if (exit_unblocked_time(n) < exit_unblocked_time(chosen)) {
1521 chosen = n;
1522 continue;
1523 } else if (exit_unblocked_time(n) > exit_unblocked_time(chosen)) {
1524 continue;
1525 }
1526
1527 /* If all other metrics are equal, we prefer the first instruction in
1528 * the list (program execution).
1529 */
1530 }
1531 }
1532
1533 return chosen;
1534 }
1535
1536 schedule_node *
1537 vec4_instruction_scheduler::choose_instruction_to_schedule()
1538 {
1539 schedule_node *chosen = NULL;
1540 int chosen_time = 0;
1541
1542 /* Of the instructions ready to execute or the closest to being ready,
1543 * choose the oldest one.
1544 */
1545 foreach_in_list(schedule_node, n, &instructions) {
1546 if (!chosen || n->unblocked_time < chosen_time) {
1547 chosen = n;
1548 chosen_time = n->unblocked_time;
1549 }
1550 }
1551
1552 return chosen;
1553 }
1554
1555 int
1556 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1557 {
1558 if (is_compressed((fs_inst *)inst))
1559 return 4;
1560 else
1561 return 2;
1562 }
1563
1564 int
1565 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1566 {
1567 /* We always execute as two vec4s in parallel. */
1568 return 2;
1569 }
1570
1571 void
1572 instruction_scheduler::schedule_instructions(bblock_t *block)
1573 {
1574 const struct gen_device_info *devinfo = bs->devinfo;
1575 int time = 0;
1576 if (!post_reg_alloc)
1577 reg_pressure = reg_pressure_in[block->num];
1578 block_idx = block->num;
1579
1580 /* Remove non-DAG heads from the list. */
1581 foreach_in_list_safe(schedule_node, n, &instructions) {
1582 if (n->parent_count != 0)
1583 n->remove();
1584 }
1585
1586 unsigned cand_generation = 1;
1587 while (!instructions.is_empty()) {
1588 schedule_node *chosen = choose_instruction_to_schedule();
1589
1590 /* Schedule this instruction. */
1591 assert(chosen);
1592 chosen->remove();
1593 chosen->inst->exec_node::remove();
1594 block->instructions.push_tail(chosen->inst);
1595 instructions_to_schedule--;
1596
1597 if (!post_reg_alloc) {
1598 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1599 update_register_pressure(chosen->inst);
1600 }
1601
1602 /* If we expected a delay for scheduling, then bump the clock to reflect
1603 * that. In reality, the hardware will switch to another hyperthread
1604 * and may not return to dispatching our thread for a while even after
1605 * we're unblocked. After this, we have the time when the chosen
1606 * instruction will start executing.
1607 */
1608 time = MAX2(time, chosen->unblocked_time);
1609
1610 /* Update the clock for how soon an instruction could start after the
1611 * chosen one.
1612 */
1613 time += issue_time(chosen->inst);
1614
1615 if (debug) {
1616 fprintf(stderr, "clock %4d, scheduled: ", time);
1617 bs->dump_instruction(chosen->inst);
1618 if (!post_reg_alloc)
1619 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1620 }
1621
1622 /* Now that we've scheduled a new instruction, some of its
1623 * children can be promoted to the list of instructions ready to
1624 * be scheduled. Update the children's unblocked time for this
1625 * DAG edge as we do so.
1626 */
1627 for (int i = chosen->child_count - 1; i >= 0; i--) {
1628 schedule_node *child = chosen->children[i];
1629
1630 child->unblocked_time = MAX2(child->unblocked_time,
1631 time + chosen->child_latency[i]);
1632
1633 if (debug) {
1634 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1635 bs->dump_instruction(child->inst);
1636 }
1637
1638 child->cand_generation = cand_generation;
1639 child->parent_count--;
1640 if (child->parent_count == 0) {
1641 if (debug) {
1642 fprintf(stderr, "\t\tnow available\n");
1643 }
1644 instructions.push_head(child);
1645 }
1646 }
1647 cand_generation++;
1648
1649 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1650 * but it's more limited pre-gen6, so if we send something off to it then
1651 * the next math instruction isn't going to make progress until the first
1652 * is done.
1653 */
1654 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1655 foreach_in_list(schedule_node, n, &instructions) {
1656 if (n->inst->is_math())
1657 n->unblocked_time = MAX2(n->unblocked_time,
1658 time + chosen->latency);
1659 }
1660 }
1661 }
1662
1663 assert(instructions_to_schedule == 0);
1664
1665 block->cycle_count = time;
1666 }
1667
1668 static unsigned get_cycle_count(cfg_t *cfg)
1669 {
1670 unsigned count = 0, multiplier = 1;
1671 foreach_block(block, cfg) {
1672 if (block->start()->opcode == BRW_OPCODE_DO)
1673 multiplier *= 10; /* assume that loops execute ~10 times */
1674
1675 count += block->cycle_count * multiplier;
1676
1677 if (block->end()->opcode == BRW_OPCODE_WHILE)
1678 multiplier /= 10;
1679 }
1680
1681 return count;
1682 }
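/* Rough illustration of the weighting above: a block outside any loop
 * contributes cycle_count once, a block inside a single DO/WHILE loop
 * contributes 10 * cycle_count, and a block nested two loops deep contributes
 * 100 * cycle_count, matching the assumption that each loop runs ~10 times.
 */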
1683
1684 void
1685 instruction_scheduler::run(cfg_t *cfg)
1686 {
1687 if (debug && !post_reg_alloc) {
1688 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1689 post_reg_alloc);
1690 bs->dump_instructions();
1691 }
1692
1693 if (!post_reg_alloc)
1694 setup_liveness(cfg);
1695
1696 foreach_block(block, cfg) {
1697 if (reads_remaining) {
1698 memset(reads_remaining, 0,
1699 grf_count * sizeof(*reads_remaining));
1700 memset(hw_reads_remaining, 0,
1701 hw_reg_count * sizeof(*hw_reads_remaining));
1702 memset(written, 0, grf_count * sizeof(*written));
1703
1704 foreach_inst_in_block(fs_inst, inst, block)
1705 count_reads_remaining(inst);
1706 }
1707
1708 add_insts_from_block(block);
1709
1710 calculate_deps();
1711
1712 compute_delays();
1713 compute_exits();
1714
1715 schedule_instructions(block);
1716 }
1717
1718 if (debug && !post_reg_alloc) {
1719 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1720 post_reg_alloc);
1721 bs->dump_instructions();
1722 }
1723
1724 cfg->cycle_count = get_cycle_count(cfg);
1725 }
1726
1727 void
1728 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1729 {
1730 if (mode != SCHEDULE_POST)
1731 calculate_live_intervals();
1732
1733 int grf_count;
1734 if (mode == SCHEDULE_POST)
1735 grf_count = grf_used;
1736 else
1737 grf_count = alloc.count;
1738
1739 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1740 cfg->num_blocks, mode);
1741 sched.run(cfg);
1742
1743 invalidate_live_intervals();
1744 }
1745
1746 void
1747 vec4_visitor::opt_schedule_instructions()
1748 {
1749 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1750 sched.run(cfg);
1751
1752 invalidate_live_intervals();
1753 }