/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Creates pipeline registers. This is a prepass run before the main register
 * allocator but after scheduling, once bundles are created. It works by
 * iterating the scheduled IR, checking if a value is ever used after the end
 * of the current bundle. If it is not, it is promoted to a bundle-specific
 * pipeline register.
 *
 * Pipeline registers are only written from the first two stages of the
 * pipeline (vmul/sadd) lasting the duration of the bundle only. There are two
 * 128-bit pipeline registers available (r24/r25). The upshot is that no actual
 * register allocation is needed; we can _always_ promote a value to a pipeline
 * register, liveness permitting. This greatly simplifies the logic of this
 * pass, negating the need for a proper RA like work registers.
 */
43 compiler_context
*ctx
,
45 midgard_bundle
*bundle
, unsigned i
,
46 unsigned pipeline_count
)
48 midgard_instruction
*ins
= bundle
->instructions
[i
];
49 unsigned dest
= ins
->ssa_args
.dest
;
51 /* We could be pipelining a register, so we need to make sure that all
52 * of the components read in this bundle are written in this bundle,
53 * and that no components are written before this bundle */
55 unsigned node
= ins
->ssa_args
.dest
;
56 unsigned read_mask
= 0;
58 /* Analyze the bundle for a read mask */
60 for (unsigned i
= 0; i
< bundle
->instruction_count
; ++i
) {
61 midgard_instruction
*q
= bundle
->instructions
[i
];
62 read_mask
|= mir_mask_of_read_components(q
, node
);
65 /* Now analyze for a write mask */
66 for (unsigned i
= 0; i
< bundle
->instruction_count
; ++i
) {
67 midgard_instruction
*q
= bundle
->instructions
[i
];
68 if (q
->ssa_args
.dest
!= node
) continue;
70 /* Remove the written mask from the read requirements */
71 read_mask
&= ~q
->mask
;
74 /* Check for leftovers */
78 /* Now, check outside the bundle */
79 midgard_instruction
*start
= bundle
->instructions
[0];
81 if (mir_is_written_before(ctx
, start
, node
))
84 /* We want to know if we live after this bundle, so check if
85 * we're live after the last instruction of the bundle */
87 midgard_instruction
*end
= bundle
->instructions
[
88 bundle
->instruction_count
- 1];
90 if (mir_is_live_after(ctx
, block
, end
, ins
->ssa_args
.dest
))
93 /* We're only live in this bundle -- pipeline! */
95 mir_rewrite_index(ctx
, dest
, SSA_FIXED_REGISTER(24 + pipeline_count
));
101 mir_create_pipeline_registers(compiler_context
*ctx
)
103 mir_foreach_block(ctx
, block
) {
104 mir_foreach_bundle_in_block(block
, bundle
) {
105 if (!mir_is_alu_bundle(bundle
)) continue;
106 if (bundle
->instruction_count
< 2) continue;
108 /* Only first 2 instructions could pipeline */
109 bool succ
= mir_pipeline_ins(ctx
, block
, bundle
, 0, 0);
110 mir_pipeline_ins(ctx
, block
, bundle
, 1, succ
);