/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Creates pipeline registers. This is a prepass run before the main register
 * allocator but after scheduling, once bundles are created. It works by
 * iterating the scheduled IR, checking if a value is ever used after the end
 * of the current bundle. If it is not, it is promoted to a bundle-specific
 * pipeline register.
 *
 * Pipeline registers are only written from the first two stages of the
 * pipeline (vmul/sadd) lasting the duration of the bundle only. There are two
 * 128-bit pipeline registers available (r24/r25). The upshot is that no actual
 * register allocation is needed; we can _always_ promote a value to a pipeline
 * register, liveness permitting. This greatly simplifies the logic of this
 * pass, negating the need for a proper RA like work registers. */
43 compiler_context
*ctx
,
45 midgard_bundle
*bundle
, unsigned i
,
46 unsigned pipeline_count
)
48 midgard_instruction
*ins
= bundle
->instructions
[i
];
50 /* Our goal is to create a pipeline register. Pipeline registers are
51 * created at the start of the bundle and are destroyed at the end. So
52 * we conservatively require:
54 * 1. Each component read in the second stage is written in the first stage.
55 * 2. The index is not live after the bundle.
56 * 3. We're not a special index (writeout, conditionals, ..)
58 * Rationale: #1 ensures that there is no need to go before the
59 * creation of the bundle, so the pipeline register can exist. #2 is
60 * since the pipeline register will be destroyed at the end. This
61 * ensures that nothing will try to read/write the pipeline register
62 * once it is not live, and that there's no need to go earlier. */
64 unsigned node
= ins
->dest
;
65 unsigned read_mask
= 0;
67 if (node
>= SSA_FIXED_MINIMUM
)
70 /* Analyze the bundle for a per-byte read mask */
72 for (unsigned j
= 0; j
< bundle
->instruction_count
; ++j
) {
73 midgard_instruction
*q
= bundle
->instructions
[j
];
75 /* The fragment colour can't be pipelined (well, it is
76 * pipelined in r0, but this is a delicate dance with
77 * scheduling and RA, not for us to worry about) */
79 if (q
->compact_branch
&& q
->writeout
&& mir_has_arg(q
, node
))
82 if (q
->unit
< UNIT_VADD
) continue;
83 read_mask
|= mir_bytemask_of_read_components(q
, node
);
86 /* Now check what's written in the beginning stage */
87 for (unsigned j
= 0; j
< bundle
->instruction_count
; ++j
) {
88 midgard_instruction
*q
= bundle
->instructions
[j
];
89 if (q
->unit
>= UNIT_VADD
) break;
90 if (q
->dest
!= node
) continue;
92 /* Remove the written mask from the read requirements */
93 read_mask
&= ~mir_bytemask(q
);
96 /* Check for leftovers */
100 /* We want to know if we live after this bundle, so check if
101 * we're live after the last instruction of the bundle */
103 midgard_instruction
*end
= bundle
->instructions
[
104 bundle
->instruction_count
- 1];
106 if (mir_is_live_after(ctx
, block
, end
, ins
->dest
))
109 /* We're only live in this bundle -- pipeline! */
110 unsigned preg
= SSA_FIXED_REGISTER(24 + pipeline_count
);
112 for (unsigned j
= 0; j
< bundle
->instruction_count
; ++j
) {
113 midgard_instruction
*q
= bundle
->instructions
[j
];
115 if (q
->unit
>= UNIT_VADD
)
116 mir_rewrite_index_src_single(q
, node
, preg
);
118 mir_rewrite_index_dst_single(q
, node
, preg
);
125 mir_create_pipeline_registers(compiler_context
*ctx
)
127 mir_invalidate_liveness(ctx
);
129 mir_foreach_block(ctx
, _block
) {
130 midgard_block
*block
= (midgard_block
*) _block
;
132 mir_foreach_bundle_in_block(block
, bundle
) {
133 if (!mir_is_alu_bundle(bundle
)) continue;
134 if (bundle
->instruction_count
< 2) continue;
136 /* Only first 2 instructions could pipeline */
137 bool succ
= mir_pipeline_ins(ctx
, block
, bundle
, 0, 0);
138 mir_pipeline_ins(ctx
, block
, bundle
, 1, succ
);