v3d: Add support for shader_image_load_store.
src/broadcom/compiler/v3d40_tex.c
/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "nir_deref.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

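/* Emits a WRTMUC signal on a NOP, with a uniform as its implicit source: the
 * uniform's value (filled in at uniform upload time) is what ends up in the
 * TMU configuration FIFO.
 */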
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->has_implicit_uniform = true;
        inst->src[0] = vir_uniform(c, contents, data);
}

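/* Default P1/P2 contents, used to skip emitting a parameter when the value we
 * would write matches what the hardware assumes by default.
 */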
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

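                        /* The array index is the last coordinate component,
                         * and goes in the TMU's index ("I") register.
                         */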
                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                nir_const_value *offset =
                                        nir_src_as_const_value(instr->src[i].src);

                                p2_unpacked.offset_s = offset->i32[0];
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t = offset->i32[1];
                                if (instr->coord_components >= 3)
                                        p2_unpacked.offset_r = offset->i32[2];
                        } else {
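                                /* Non-constant offsets: mask each component
                                 * to 4 bits and pack S into the low nibble
                                 * and T into the next one, e.g. an offset of
                                 * (1, -2) packs to 0x1 | (0xe << 4) = 0xe1.
                                 */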
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        /* Word enables can't ask for more channels than the output type could
         * provide (2 for f16, 4 for 32-bit).
         */
        assert(!p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 4));
        assert(p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 2));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* XXX perf: Can we skip p1 setup for txf ops? */
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
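        /* P2 defaults to a regular texture op, so it only needs to be emitted
         * when something (gather, txd, offsets, autolod) changed it.
         */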
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

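        /* Read back one LDTMU result for each word of texture data that was
         * requested.
         */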
        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

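/* Counts every member of a type as a single slot, so that
 * nir_deref_instr_get_const_offset() resolves a constant deref chain to a
 * flat image unit index.
 */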
static void
type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
{
        *size = 1;
        *align = 1;
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        unsigned unit = (var->data.driver_location +
                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
                                                          type_size_align_1));
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* XXX perf: We should turn add/sub of 1 to inc/dec. Perhaps NIR
         * wants to have support for inc/dec?
         */
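        /* Map the NIR image intrinsic to the corresponding TMU write op. */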
        switch (instr->intrinsic) {
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
                p2_unpacked.op = V3D_TMU_OP_REGULAR;
                break;
        case nir_intrinsic_image_deref_atomic_add:
                p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
                break;
        case nir_intrinsic_image_deref_atomic_min:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
                break;

        case nir_intrinsic_image_deref_atomic_max:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
                break;
        case nir_intrinsic_image_deref_atomic_and:
                p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
                break;
        case nir_intrinsic_image_deref_atomic_or:
                p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
                break;
        case nir_intrinsic_image_deref_atomic_xor:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
                break;
        case nir_intrinsic_image_deref_atomic_exchange:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
                break;
        case nir_intrinsic_image_deref_atomic_comp_swap:
                p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
                break;
        default:
                unreachable("unknown image intrinsic");
        }

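        /* Write the extra coordinate components for the image's
         * dimensionality.  The X component is written last, below, since
         * that's the write that fires off the TMU operation.
         */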
        bool is_1d = false;
        switch (glsl_get_sampler_dim(sampler_type)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (glsl_sampler_type_is_array(sampler_type)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
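        /* For a 16-bit output type each return word packs two components, so
         * half as many words (rounded up) are needed.
         */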
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_deref_load) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_deref_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

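        /* The X coordinate write is what kicks off the TMU operation, so it
         * goes last.
         */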
        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

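        /* Read back the return words for image loads and atomics. */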
        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

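        /* If no return words were requested (e.g. a plain image store), emit
         * a TMUWT so the TMU write still gets waited on.
         */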
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);
}