panfrost: Stub out lowering boilerplate
[mesa.git] / src / panfrost / util / pan_lower_framebuffer.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors (Collabora):
24 * Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
25 */
26
27 /**
28 * Implements framebuffer format conversions in software for Midgard/Bifrost
29 * blend shaders. This pass is designed for a single render target; Midgard
30 * duplicates blend shaders for MRT to simplify everything. A particular
31 * framebuffer format may be categorized as 1) typed load available, 2) typed
32 * unpack available, or 3) software unpack only, and likewise for stores. The
33 * first two types are handled in the compiler backend directly, so this module
34 * is responsible for identifying type 3 formats (hardware dependent) and
35 * inserting appropriate ALU code to perform the conversion from the packed
36 * type to a designated unpacked type, and vice versa.
37 *
38 * The unpacked type depends on the format:
39 *
40 * - For 32-bit float formats, 32-bit floats.
41 * - For other floats, 16-bit floats.
42 * - For 32-bit ints, 32-bit ints.
43 * - For 8-bit ints, 8-bit ints.
44 * - For other ints, 16-bit ints.
45 *
46 * The rationale is to optimize blending and logic op instructions by using the
47 * smallest precision necessary to store the pixel losslessly.
48 */
49
50 #include "compiler/nir/nir.h"
51 #include "compiler/nir/nir_builder.h"
52 #include "compiler/nir/nir_format_convert.h"
53 #include "util/format/u_format.h"
54 #include "pan_lower_framebuffer.h"
55 #include "panfrost-quirks.h"
56
57 /* Determines the unpacked type best suiting a given format, so the rest of the
58 * pipeline may be adjusted accordingly */
59
60 nir_alu_type
61 pan_unpacked_type_for_format(const struct util_format_description *desc)
62 {
63 int c = util_format_get_first_non_void_channel(desc->format);
64
65 if (c == -1)
66 unreachable("Void format not renderable");
67
68 bool large = (desc->channel[c].size > 16);
69 bool bit8 = (desc->channel[c].size == 8);
70 assert(desc->channel[c].size <= 32);
71
72 if (desc->channel[c].normalized)
73 return large ? nir_type_float32 : nir_type_float16;
74
75 switch (desc->channel[c].type) {
76 case UTIL_FORMAT_TYPE_UNSIGNED:
77 return bit8 ? nir_type_uint8 :
78 large ? nir_type_uint32 : nir_type_uint16;
79 case UTIL_FORMAT_TYPE_SIGNED:
80 return bit8 ? nir_type_int8 :
81 large ? nir_type_int32 : nir_type_int16;
82 case UTIL_FORMAT_TYPE_FLOAT:
83 return large ? nir_type_float32 : nir_type_float16;
84 default:
85 unreachable("Format not renderable");
86 }
87 }
88
89 enum pan_format_class
90 pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
91 {
92 /* Check if we can do anything better than software architecturally */
93 if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
94 return (quirks & NO_BLEND_PACKS)
95 ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
96 }
97
98 /* Some formats are missing as typed on some GPUs but have unpacks */
99 if (quirks & MIDGARD_MISSING_LOADS) {
100 switch (desc->format) {
101 case PIPE_FORMAT_R11G11B10_FLOAT:
102 case PIPE_FORMAT_R10G10B10A2_UNORM:
103 case PIPE_FORMAT_B10G10R10A2_UNORM:
104 case PIPE_FORMAT_R10G10B10X2_UNORM:
105 case PIPE_FORMAT_B10G10R10X2_UNORM:
106 case PIPE_FORMAT_R10G10B10A2_UINT:
107 return PAN_FORMAT_PACK;
108 default:
109 return PAN_FORMAT_NATIVE;
110 }
111 }
112
113 /* Otherwise, we can do native */
114 return PAN_FORMAT_NATIVE;
115 }
116
117 enum pan_format_class
118 pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
119 {
120 /* Check if we can do anything better than software architecturally */
121 if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
122 return (quirks & NO_BLEND_PACKS)
123 ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
124 }
125
126 return PAN_FORMAT_NATIVE;
127 }
128
129 /* Generic dispatches for un/pack regardless of format */
130
/* Unpacks a raw framebuffer value into the designated unpacked type for the
 * format. Stub: currently passes the packed value through unchanged; the
 * per-format software unpack code will be inserted here. */

static nir_ssa_def *
pan_unpack(nir_builder *b,
           const struct util_format_description *desc,
           nir_ssa_def *packed)
{
        /* Stub */
        return packed;
}
139
/* Packs an unpacked value back to the raw framebuffer representation for the
 * format. Stub: currently passes the unpacked value through unchanged; the
 * per-format software pack code will be inserted here. */

static nir_ssa_def *
pan_pack(nir_builder *b,
         const struct util_format_description *desc,
         nir_ssa_def *unpacked)
{
        /* Stub */
        return unpacked;
}
148
/* Lowers a store_deref of the color output: pack the stored value (src[1] of
 * the deref store) to the framebuffer format, then emit a raw Panfrost store
 * of the packed result at the builder's cursor. The caller removes the
 * original store_deref afterwards. */

static void
pan_lower_fb_store(nir_shader *shader,
                   nir_builder *b,
                   nir_intrinsic_instr *intr,
                   const struct util_format_description *desc,
                   unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        /* NOTE(review): only src[0] and num_components are set on the new
         * intrinsic; quirks is unused while pan_pack is a stub -- confirm no
         * further intrinsic indices are required once packing is real */
        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}
166
/* Lowers a load_deref of the color output: emit a raw Panfrost load at the
 * builder's cursor, unpack it to the format's unpacked type, and rewrite all
 * uses of the original load's result (after the original instruction) to use
 * the unpacked value. The caller removes the original load_deref. */

static void
pan_lower_fb_load(nir_shader *shader,
                  nir_builder *b,
                  nir_intrinsic_instr *intr,
                  const struct util_format_description *desc,
                  unsigned quirks)
{
        /* NOTE(review): quirks is unused while pan_unpack is a stub */
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                        nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        /* Raw loads are always 4x32-bit at this stage */
        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        /* Rewrite only uses after the original load, so the unpack code we
         * just emitted (which reads the raw load) is left intact */
        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}
188
189 void
190 pan_lower_framebuffer(nir_shader *shader,
191 const struct util_format_description *desc,
192 unsigned quirks)
193 {
194 /* Blend shaders are represented as special fragment shaders */
195 assert(shader->info.stage == MESA_SHADER_FRAGMENT);
196
197 nir_foreach_function(func, shader) {
198 nir_foreach_block(block, func->impl) {
199 nir_foreach_instr_safe(instr, block) {
200 if (instr->type != nir_instr_type_intrinsic)
201 continue;
202
203 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
204
205 bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
206 bool is_store = intr->intrinsic == nir_intrinsic_store_deref;
207
208 if (!(is_load || is_store))
209 continue;
210
211 /* Don't worry about MRT */
212 nir_variable *var = nir_intrinsic_get_var(intr, 0);
213
214 if (var->data.location != FRAG_RESULT_COLOR)
215 continue;
216
217 nir_builder b;
218 nir_builder_init(&b, func->impl);
219
220 if (is_store) {
221 b.cursor = nir_before_instr(instr);
222 pan_lower_fb_store(shader, &b, intr, desc, quirks);
223 } else {
224 b.cursor = nir_after_instr(instr);
225 pan_lower_fb_load(shader, &b, intr, desc, quirks);
226 }
227
228 nir_instr_remove(instr);
229 }
230 }
231
232 nir_metadata_preserve(func->impl, nir_metadata_block_index |
233 nir_metadata_dominance);
234 }
235 }