amd/common: Fix various non-critical integer overflows
[mesa.git] / src / amd / common / ac_shader_util.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdlib.h>
26 #include <string.h>
27
28 #include "ac_shader_util.h"
29 #include "sid.h"
30
31 unsigned
32 ac_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
33 bool writes_samplemask)
34 {
35 if (writes_z) {
36 /* Z needs 32 bits. */
37 if (writes_samplemask)
38 return V_028710_SPI_SHADER_32_ABGR;
39 else if (writes_stencil)
40 return V_028710_SPI_SHADER_32_GR;
41 else
42 return V_028710_SPI_SHADER_32_R;
43 } else if (writes_stencil || writes_samplemask) {
44 /* Both stencil and sample mask need only 16 bits. */
45 return V_028710_SPI_SHADER_UINT16_ABGR;
46 } else {
47 return V_028710_SPI_SHADER_ZERO;
48 }
49 }
50
51 unsigned
52 ac_get_cb_shader_mask(unsigned spi_shader_col_format)
53 {
54 unsigned i, cb_shader_mask = 0;
55
56 for (i = 0; i < 8; i++) {
57 switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
58 case V_028714_SPI_SHADER_ZERO:
59 break;
60 case V_028714_SPI_SHADER_32_R:
61 cb_shader_mask |= 0x1 << (i * 4);
62 break;
63 case V_028714_SPI_SHADER_32_GR:
64 cb_shader_mask |= 0x3 << (i * 4);
65 break;
66 case V_028714_SPI_SHADER_32_AR:
67 cb_shader_mask |= 0x9u << (i * 4);
68 break;
69 case V_028714_SPI_SHADER_FP16_ABGR:
70 case V_028714_SPI_SHADER_UNORM16_ABGR:
71 case V_028714_SPI_SHADER_SNORM16_ABGR:
72 case V_028714_SPI_SHADER_UINT16_ABGR:
73 case V_028714_SPI_SHADER_SINT16_ABGR:
74 case V_028714_SPI_SHADER_32_ABGR:
75 cb_shader_mask |= 0xfu << (i * 4);
76 break;
77 default:
78 assert(0);
79 }
80 }
81 return cb_shader_mask;
82 }
83
84 /**
85 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
86 * geometry shader.
87 */
88 uint32_t
89 ac_vgt_gs_mode(unsigned gs_max_vert_out, enum chip_class chip_class)
90 {
91 unsigned cut_mode;
92
93 if (gs_max_vert_out <= 128) {
94 cut_mode = V_028A40_GS_CUT_128;
95 } else if (gs_max_vert_out <= 256) {
96 cut_mode = V_028A40_GS_CUT_256;
97 } else if (gs_max_vert_out <= 512) {
98 cut_mode = V_028A40_GS_CUT_512;
99 } else {
100 assert(gs_max_vert_out <= 1024);
101 cut_mode = V_028A40_GS_CUT_1024;
102 }
103
104 return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
105 S_028A40_CUT_MODE(cut_mode)|
106 S_028A40_ES_WRITE_OPTIMIZE(chip_class <= GFX8) |
107 S_028A40_GS_WRITE_OPTIMIZE(1) |
108 S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
109 }
110
111 /// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format
112 /// value for LLVM8+ tbuffer intrinsics.
113 unsigned
114 ac_get_tbuffer_format(enum chip_class chip_class,
115 unsigned dfmt, unsigned nfmt)
116 {
117 // Some games try to access vertex buffers without a valid format.
118 // This is a game bug, but we should still handle it gracefully.
119 if (dfmt == V_008F0C_IMG_FORMAT_INVALID)
120 return V_008F0C_IMG_FORMAT_INVALID;
121
122 if (chip_class >= GFX10) {
123 unsigned format;
124 switch (dfmt) {
125 default: unreachable("bad dfmt");
126 case V_008F0C_BUF_DATA_FORMAT_INVALID: format = V_008F0C_IMG_FORMAT_INVALID; break;
127 case V_008F0C_BUF_DATA_FORMAT_8: format = V_008F0C_IMG_FORMAT_8_UINT; break;
128 case V_008F0C_BUF_DATA_FORMAT_8_8: format = V_008F0C_IMG_FORMAT_8_8_UINT; break;
129 case V_008F0C_BUF_DATA_FORMAT_8_8_8_8: format = V_008F0C_IMG_FORMAT_8_8_8_8_UINT; break;
130 case V_008F0C_BUF_DATA_FORMAT_16: format = V_008F0C_IMG_FORMAT_16_UINT; break;
131 case V_008F0C_BUF_DATA_FORMAT_16_16: format = V_008F0C_IMG_FORMAT_16_16_UINT; break;
132 case V_008F0C_BUF_DATA_FORMAT_16_16_16_16: format = V_008F0C_IMG_FORMAT_16_16_16_16_UINT; break;
133 case V_008F0C_BUF_DATA_FORMAT_32: format = V_008F0C_IMG_FORMAT_32_UINT; break;
134 case V_008F0C_BUF_DATA_FORMAT_32_32: format = V_008F0C_IMG_FORMAT_32_32_UINT; break;
135 case V_008F0C_BUF_DATA_FORMAT_32_32_32: format = V_008F0C_IMG_FORMAT_32_32_32_UINT; break;
136 case V_008F0C_BUF_DATA_FORMAT_32_32_32_32: format = V_008F0C_IMG_FORMAT_32_32_32_32_UINT; break;
137 case V_008F0C_BUF_DATA_FORMAT_2_10_10_10: format = V_008F0C_IMG_FORMAT_2_10_10_10_UINT; break;
138 }
139
140 // Use the regularity properties of the combined format enum.
141 //
142 // Note: float is incompatible with 8-bit data formats,
143 // [us]{norm,scaled} are incomparible with 32-bit data formats.
144 // [us]scaled are not writable.
145 switch (nfmt) {
146 case V_008F0C_BUF_NUM_FORMAT_UNORM: format -= 4; break;
147 case V_008F0C_BUF_NUM_FORMAT_SNORM: format -= 3; break;
148 case V_008F0C_BUF_NUM_FORMAT_USCALED: format -= 2; break;
149 case V_008F0C_BUF_NUM_FORMAT_SSCALED: format -= 1; break;
150 default: unreachable("bad nfmt");
151 case V_008F0C_BUF_NUM_FORMAT_UINT: break;
152 case V_008F0C_BUF_NUM_FORMAT_SINT: format += 1; break;
153 case V_008F0C_BUF_NUM_FORMAT_FLOAT: format += 2; break;
154 }
155
156 return format;
157 } else {
158 return dfmt | (nfmt << 4);
159 }
160 }
161
/* Per-format properties of the buffer data formats, indexed by
 * V_008F0C_BUF_DATA_FORMAT_*. Looked up via ac_get_data_format_info().
 *
 * NOTE(review): the initializer order follows struct ac_data_format_info;
 * from the values it presumably is { element_size, num_channels,
 * chan_byte_size, chan_format } -- confirm against the declaration in
 * ac_shader_util.h. Packed formats (10_11_11 etc.) use chan_byte_size 0
 * and keep their own dfmt as chan_format.
 */
static const struct ac_data_format_info data_format_table[] = {
	[V_008F0C_BUF_DATA_FORMAT_INVALID] = { 0, 4, 0, V_008F0C_BUF_DATA_FORMAT_INVALID },
	[V_008F0C_BUF_DATA_FORMAT_8] = { 1, 1, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_16] = { 2, 1, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_8_8] = { 2, 2, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_32] = { 4, 1, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_16_16] = { 4, 2, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_10_11_11] = { 4, 3, 0, V_008F0C_BUF_DATA_FORMAT_10_11_11 },
	[V_008F0C_BUF_DATA_FORMAT_11_11_10] = { 4, 3, 0, V_008F0C_BUF_DATA_FORMAT_11_11_10 },
	[V_008F0C_BUF_DATA_FORMAT_10_10_10_2] = { 4, 4, 0, V_008F0C_BUF_DATA_FORMAT_10_10_10_2 },
	[V_008F0C_BUF_DATA_FORMAT_2_10_10_10] = { 4, 4, 0, V_008F0C_BUF_DATA_FORMAT_2_10_10_10 },
	[V_008F0C_BUF_DATA_FORMAT_8_8_8_8] = { 4, 4, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_32_32] = { 8, 2, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_16_16_16_16] = { 8, 4, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_32_32_32] = { 12, 3, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_32_32_32_32] = { 16, 4, 4, V_008F0C_BUF_DATA_FORMAT_32 },
};
179
/* Look up the ac_data_format_info entry for a buffer data format.
 *
 * \p dfmt must be a valid V_008F0C_BUF_DATA_FORMAT_* value; this is only
 * checked in debug builds (assert). Returns a pointer into static storage,
 * valid for the lifetime of the program.
 */
const struct ac_data_format_info *
ac_get_data_format_info(unsigned dfmt)
{
	assert(dfmt < ARRAY_SIZE(data_format_table));
	return &data_format_table[dfmt];
}
186
187 enum ac_image_dim
188 ac_get_sampler_dim(enum chip_class chip_class, enum glsl_sampler_dim dim,
189 bool is_array)
190 {
191 switch (dim) {
192 case GLSL_SAMPLER_DIM_1D:
193 if (chip_class == GFX9)
194 return is_array ? ac_image_2darray : ac_image_2d;
195 return is_array ? ac_image_1darray : ac_image_1d;
196 case GLSL_SAMPLER_DIM_2D:
197 case GLSL_SAMPLER_DIM_RECT:
198 case GLSL_SAMPLER_DIM_EXTERNAL:
199 return is_array ? ac_image_2darray : ac_image_2d;
200 case GLSL_SAMPLER_DIM_3D:
201 return ac_image_3d;
202 case GLSL_SAMPLER_DIM_CUBE:
203 return ac_image_cube;
204 case GLSL_SAMPLER_DIM_MS:
205 return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
206 case GLSL_SAMPLER_DIM_SUBPASS:
207 return ac_image_2darray;
208 case GLSL_SAMPLER_DIM_SUBPASS_MS:
209 return ac_image_2darraymsaa;
210 default:
211 unreachable("bad sampler dim");
212 }
213 }
214
215 enum ac_image_dim
216 ac_get_image_dim(enum chip_class chip_class, enum glsl_sampler_dim sdim,
217 bool is_array)
218 {
219 enum ac_image_dim dim = ac_get_sampler_dim(chip_class, sdim, is_array);
220
221 /* Match the resource type set in the descriptor. */
222 if (dim == ac_image_cube ||
223 (chip_class <= GFX8 && dim == ac_image_3d))
224 dim = ac_image_2darray;
225 else if (sdim == GLSL_SAMPLER_DIM_2D && !is_array && chip_class == GFX9) {
226 /* When a single layer of a 3D texture is bound, the shader
227 * will refer to a 2D target, but the descriptor has a 3D type.
228 * Since the HW ignores BASE_ARRAY in this case, we need to
229 * send 3 coordinates. This doesn't hurt when the underlying
230 * texture is non-3D.
231 */
232 dim = ac_image_3d;
233 }
234
235 return dim;
236 }
237
238 unsigned
239 ac_get_fs_input_vgpr_cnt(const struct ac_shader_config *config,
240 signed char *face_vgpr_index_ptr,
241 signed char *ancillary_vgpr_index_ptr)
242 {
243 unsigned num_input_vgprs = 0;
244 signed char face_vgpr_index = -1;
245 signed char ancillary_vgpr_index = -1;
246
247 if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
248 num_input_vgprs += 2;
249 if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
250 num_input_vgprs += 2;
251 if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
252 num_input_vgprs += 2;
253 if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
254 num_input_vgprs += 3;
255 if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
256 num_input_vgprs += 2;
257 if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
258 num_input_vgprs += 2;
259 if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
260 num_input_vgprs += 2;
261 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
262 num_input_vgprs += 1;
263 if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
264 num_input_vgprs += 1;
265 if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
266 num_input_vgprs += 1;
267 if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
268 num_input_vgprs += 1;
269 if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
270 num_input_vgprs += 1;
271 if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr)) {
272 face_vgpr_index = num_input_vgprs;
273 num_input_vgprs += 1;
274 }
275 if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr)) {
276 ancillary_vgpr_index = num_input_vgprs;
277 num_input_vgprs += 1;
278 }
279 if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
280 num_input_vgprs += 1;
281 if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
282 num_input_vgprs += 1;
283
284 if (face_vgpr_index_ptr)
285 *face_vgpr_index_ptr = face_vgpr_index;
286 if (ancillary_vgpr_index_ptr)
287 *ancillary_vgpr_index_ptr = ancillary_vgpr_index;
288
289 return num_input_vgprs;
290 }
291
/* Choose the SPI color export formats for a color buffer with the given
 * hardware color \p format, component \p swap and number type \p ntype.
 *
 * Four alternatives are written to \p formats, trading off export cost
 * against alpha availability (needed for alpha-to-coverage) and blending
 * support; callers pick whichever variant their draw state requires.
 * On unknown formats the function asserts in debug builds and leaves
 * *formats unwritten in release builds.
 */
void ac_choose_spi_color_formats(unsigned format, unsigned swap,
				 unsigned ntype, bool is_depth,
				 struct ac_spi_color_formats *formats)
{
	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	/* Choose the SPI color formats. These are required values for RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	/* Formats with at most 16 bits per channel: all four variants can use
	 * a single 16-bit-per-channel export. */
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_5_9_9_9:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	/* 32-bit-per-channel formats: the export must be 32 bits wide too;
	 * the swap determines which channels are present. */
	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		assert(0);
		return;
	}

	/* The DB->CB copy needs 32_ABGR. */
	if (is_depth)
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;

	formats->normal = normal;
	formats->alpha = alpha;
	formats->blend = blend;
	formats->blend_alpha = blend_alpha;
}