v3d: Add support for shader_image_load_store.
[mesa.git] / src / broadcom / compiler / v3d_nir_lower_image_load_store.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 * Copyright © 2018 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "v3d_compiler.h"
26 #include "compiler/nir/nir_builder.h"
27 #include "compiler/nir/nir_format_convert.h"
28
29 /** @file v3d_nir_lower_image_load_store.c
30 *
31 * Performs any necessary lowering of GL_ARB_shader_image_load_store
32 * operations.
33 *
34 * On V3D 4.x, we just need to do format conversion for stores such that the
35 * GPU can effectively memcpy the arguments (in increments of 32-bit words)
36 * into the texel. Loads are the same as texturing, where we may need to
37 * unpack from 16-bit ints or floats.
38 *
39 * On V3D 3.x, to implement image load store we would need to do manual tiling
40 * calculations and load/store using the TMU general memory access path.
41 */
42
43 bool
44 v3d_gl_format_is_return_32(GLenum format)
45 {
46 switch (format) {
47 case GL_R8:
48 case GL_R8_SNORM:
49 case GL_R8UI:
50 case GL_R8I:
51 case GL_RG8:
52 case GL_RG8_SNORM:
53 case GL_RG8UI:
54 case GL_RG8I:
55 case GL_RGBA8:
56 case GL_RGBA8_SNORM:
57 case GL_RGBA8UI:
58 case GL_RGBA8I:
59 case GL_R11F_G11F_B10F:
60 case GL_RGB10_A2:
61 case GL_RGB10_A2UI:
62 case GL_R16F:
63 case GL_R16UI:
64 case GL_R16I:
65 case GL_RG16F:
66 case GL_RG16UI:
67 case GL_RG16I:
68 case GL_RGBA16F:
69 case GL_RGBA16UI:
70 case GL_RGBA16I:
71 return false;
72 case GL_R16:
73 case GL_R16_SNORM:
74 case GL_RG16:
75 case GL_RG16_SNORM:
76 case GL_RGBA16:
77 case GL_RGBA16_SNORM:
78 case GL_R32F:
79 case GL_R32UI:
80 case GL_R32I:
81 case GL_RG32F:
82 case GL_RG32UI:
83 case GL_RG32I:
84 case GL_RGBA32F:
85 case GL_RGBA32UI:
86 case GL_RGBA32I:
87 return true;
88 default:
89 unreachable("Invalid image format");
90 }
91 }
92
93 /* Packs a 32-bit vector of colors in the range [0, (1 << bits[i]) - 1] to a
94 * 32-bit SSA value, with as many channels as necessary to store all the bits
95 */
96 static nir_ssa_def *
97 pack_bits(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
98 int num_components, bool mask)
99 {
100 nir_ssa_def *results[4];
101 int offset = 0;
102 for (int i = 0; i < num_components; i++) {
103 nir_ssa_def *chan = nir_channel(b, color, i);
104
105 /* Channels being stored shouldn't cross a 32-bit boundary. */
106 assert((offset & ~31) == ((offset + bits[i] - 1) & ~31));
107
108 if (mask) {
109 chan = nir_iand(b, chan,
110 nir_imm_int(b, (1 << bits[i]) - 1));
111 }
112
113 if (offset % 32 == 0) {
114 results[offset / 32] = chan;
115 } else {
116 results[offset / 32] =
117 nir_ior(b, results[offset / 32],
118 nir_ishl(b, chan,
119 nir_imm_int(b, offset % 32)));
120 }
121 offset += bits[i];
122 }
123
124 return nir_vec(b, results, DIV_ROUND_UP(offset, 32));
125 }
126
127 static nir_ssa_def *
128 pack_unorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
129 int num_components)
130 {
131 color = nir_channels(b, color, (1 << num_components) - 1);
132 color = nir_format_float_to_unorm(b, color, bits);
133 return pack_bits(b, color, bits, color->num_components, false);
134 }
135
136 static nir_ssa_def *
137 pack_snorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
138 int num_components)
139 {
140 color = nir_channels(b, color, (1 << num_components) - 1);
141 color = nir_format_float_to_snorm(b, color, bits);
142 return pack_bits(b, color, bits, color->num_components, true);
143 }
144
145 static nir_ssa_def *
146 pack_uint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
147 int num_components)
148 {
149 color = nir_channels(b, color, (1 << num_components) - 1);
150 color = nir_format_clamp_uint(b, color, bits);
151 return pack_bits(b, color, bits, num_components, false);
152 }
153
154 static nir_ssa_def *
155 pack_sint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
156 int num_components)
157 {
158 color = nir_channels(b, color, (1 << num_components) - 1);
159 color = nir_format_clamp_uint(b, color, bits);
160 return pack_bits(b, color, bits, num_components, true);
161 }
162
163 static nir_ssa_def *
164 pack_half(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
165 int num_components)
166 {
167 color = nir_channels(b, color, (1 << num_components) - 1);
168 color = nir_format_float_to_half(b, color);
169 return pack_bits(b, color, bits, color->num_components, false);
170 }
171
/* Lowers an image store so that the data in src[3] is pre-packed into the
 * texel's memory layout for the image's declared format, letting the GPU
 * memcpy the 32-bit words into the texel.  Formats whose channels are
 * already full 32-bit words need no packing at all.
 */
static void
v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        GLenum format = var->data.image.format;
        /* Per-channel bit widths for the packed format families. */
        static const unsigned bits_8[4] = {8, 8, 8, 8};
        static const unsigned bits_16[4] = {16, 16, 16, 16};
        static const unsigned bits_1010102[4] = {10, 10, 10, 2};

        /* The packing code is emitted right before the store it feeds. */
        b->cursor = nir_before_instr(&instr->instr);

        /* src[3] is the color value being stored, as a 4-component vector. */
        nir_ssa_def *unformatted = nir_ssa_for_src(b, instr->src[3], 4);
        nir_ssa_def *formatted = NULL;
        switch (format) {
        case GL_RGBA32F:
        case GL_RGBA32UI:
        case GL_RGBA32I:
                /* For 4-component 32-bit components, there's no packing to be
                 * done.
                 */
                return;

        case GL_R32F:
        case GL_R32UI:
        case GL_R32I:
                /* For other 32-bit components, just reduce the size of
                 * the input vector.
                 */
                formatted = nir_channels(b, unformatted, 1);
                break;
        case GL_RG32F:
        case GL_RG32UI:
        case GL_RG32I:
                formatted = nir_channels(b, unformatted, 2);
                break;

        /* 8-bit unorm formats. */
        case GL_R8:
                formatted = pack_unorm(b, unformatted, bits_8, 1);
                break;
        case GL_RG8:
                formatted = pack_unorm(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8:
                formatted = pack_unorm(b, unformatted, bits_8, 4);
                break;

        /* 8-bit snorm formats. */
        case GL_R8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 1);
                break;
        case GL_RG8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 4);
                break;

        /* 16-bit unorm formats. */
        case GL_R16:
                formatted = pack_unorm(b, unformatted, bits_16, 1);
                break;
        case GL_RG16:
                formatted = pack_unorm(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16:
                formatted = pack_unorm(b, unformatted, bits_16, 4);
                break;

        /* 16-bit snorm formats. */
        case GL_R16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 1);
                break;
        case GL_RG16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 4);
                break;

        /* Half-float formats: two f16 channels per 32-bit word. */
        case GL_R16F:
                formatted = pack_half(b, unformatted, bits_16, 1);
                break;
        case GL_RG16F:
                formatted = pack_half(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16F:
                formatted = pack_half(b, unformatted, bits_16, 4);
                break;

        /* 8-bit integer formats (clamped to the channel's range). */
        case GL_R8UI:
                formatted = pack_uint(b, unformatted, bits_8, 1);
                break;
        case GL_R8I:
                formatted = pack_sint(b, unformatted, bits_8, 1);
                break;
        case GL_RG8UI:
                formatted = pack_uint(b, unformatted, bits_8, 2);
                break;
        case GL_RG8I:
                formatted = pack_sint(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8UI:
                formatted = pack_uint(b, unformatted, bits_8, 4);
                break;
        case GL_RGBA8I:
                formatted = pack_sint(b, unformatted, bits_8, 4);
                break;

        /* 16-bit integer formats. */
        case GL_R16UI:
                formatted = pack_uint(b, unformatted, bits_16, 1);
                break;
        case GL_R16I:
                formatted = pack_sint(b, unformatted, bits_16, 1);
                break;
        case GL_RG16UI:
                formatted = pack_uint(b, unformatted, bits_16, 2);
                break;
        case GL_RG16I:
                formatted = pack_sint(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16UI:
                formatted = pack_uint(b, unformatted, bits_16, 4);
                break;
        case GL_RGBA16I:
                formatted = pack_sint(b, unformatted, bits_16, 4);
                break;

        /* Shared/packed-float formats use dedicated NIR pack helpers. */
        case GL_R11F_G11F_B10F:
                formatted = nir_format_pack_11f11f10f(b, unformatted);
                break;
        case GL_RGB9_E5:
                formatted = nir_format_pack_r9g9b9e5(b, unformatted);
                break;

        case GL_RGB10_A2:
                formatted = pack_unorm(b, unformatted, bits_1010102, 4);
                break;

        case GL_RGB10_A2UI:
                formatted = pack_uint(b, unformatted, bits_1010102, 4);
                break;

        default:
                unreachable("bad format");
        }

        /* Replace the store's data source with the packed value and shrink
         * the store to just the words actually written.
         */
        nir_instr_rewrite_src(&instr->instr, &instr->src[3],
                              nir_src_for_ssa(formatted));
        instr->num_components = formatted->num_components;
}
319
/* Lowers an image load by unpacking the raw TMU return value into the
 * shader's expected 4-component result.  Formats that return full 32-bit
 * channels (per v3d_gl_format_is_return_32) need no lowering; the rest
 * return four 16-bit channels packed into two 32-bit words.
 */
static void
v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
{
        static const unsigned bits16[] = {16, 16, 16, 16};
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        enum glsl_base_type base_type =
                glsl_get_sampler_result_type(sampler_type);

        if (v3d_gl_format_is_return_32(var->data.image.format))
                return;

        /* Emit the unpacking code right after the load whose result it
         * consumes.
         */
        b->cursor = nir_after_instr(&instr->instr);

        assert(instr->dest.is_ssa);
        nir_ssa_def *result = &instr->dest.ssa;
        if (base_type == GLSL_TYPE_FLOAT) {
                /* Channel 0 holds packed r/g halves, channel 1 holds b/a. */
                nir_ssa_def *rg = nir_channel(b, result, 0);
                nir_ssa_def *ba = nir_channel(b, result, 1);
                result = nir_vec4(b,
                                  nir_unpack_half_2x16_split_x(b, rg),
                                  nir_unpack_half_2x16_split_y(b, rg),
                                  nir_unpack_half_2x16_split_x(b, ba),
                                  nir_unpack_half_2x16_split_y(b, ba));
        } else if (base_type == GLSL_TYPE_INT) {
                /* Sign-extend four packed 16-bit ints to 32-bit. */
                result = nir_format_unpack_sint(b, result, bits16, 4);
        } else {
                assert(base_type == GLSL_TYPE_UINT);
                result = nir_format_unpack_uint(b, result, bits16, 4);
        }

        /* Point all users of the load at the unpacked value, except the
         * unpacking instructions themselves (which must keep reading the
         * raw load result).
         */
        nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, nir_src_for_ssa(result),
                                       result->parent_instr);
}
354
355 void
356 v3d_nir_lower_image_load_store(nir_shader *s)
357 {
358 nir_foreach_function(function, s) {
359 if (!function->impl)
360 continue;
361
362 nir_builder b;
363 nir_builder_init(&b, function->impl);
364
365 nir_foreach_block(block, function->impl) {
366 nir_foreach_instr_safe(instr, block) {
367 if (instr->type != nir_instr_type_intrinsic)
368 continue;
369
370 nir_intrinsic_instr *intr =
371 nir_instr_as_intrinsic(instr);
372
373 switch (intr->intrinsic) {
374 case nir_intrinsic_image_deref_load:
375 v3d_nir_lower_image_load(&b, intr);
376 break;
377 case nir_intrinsic_image_deref_store:
378 v3d_nir_lower_image_store(&b, intr);
379 break;
380 default:
381 break;
382 }
383 }
384 }
385
386 nir_metadata_preserve(function->impl,
387 nir_metadata_block_index |
388 nir_metadata_dominance);
389 }
390 }