/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

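/* Writes a value to one of the TMU's magic register destinations, counting
 * the write against the lookup's input-FIFO budget in *tmu_writes.
 */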
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

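/* Emits a NOP carrying the WRTMUC signal, which feeds the next 32 bits of
 * TMU configuration to the TMU from the instruction's associated uniform.
 */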
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

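/* Default parameter values, compared against below to detect when the
 * explicit WRTMUC for P1/P2 can be skipped.
 */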
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

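/* Emits a texture lookup as a sequence of WRTMUC configuration writes and TMU
 * register writes for the coordinates and parameters.  The write of the S
 * coordinate fires the lookup, and the LDTMUs afterwards collect the words of
 * returned texture data.
 */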
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4,
        };

        int non_array_components =
                instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src, 1),
                                              &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src, 2),
                                              &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s =
                                        nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
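                                /* Non-constant offsets are packed into a
                                 * single TMUOFF write: 4 bits of S offset in
                                 * the low nibble, 4 bits of T offset above
                                 * it.
                                 */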
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src, 0),
                                            mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src, 1),
                                            mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y, vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        assert(p0_unpacked.return_words_of_texture_data != 0);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2), as right now it is the only
         * V42-specific feature over V41 that we use.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load unit number into the high bits of the texture address field,
         * which will be used by the driver to decide which texture to put
         * in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by itself, we
         * still need to add the sampler configuration parameter if the
         * output is 32-bit.
         */
        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                   !instr->is_shadow);

        /* p1 is optional, but we can skip it only if p2 can be skipped too. */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0);

        if (needs_p2_config || output_type_32_bit ||
            nir_tex_instr_need_sampler(instr)) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                /* Load unit number into the high bits of the sampler
                 * address field, which will be used by the driver
                 * to decide which sampler to put in the actual
                 * address field.
                 */
                p1_packed |= unit << 24;

                vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
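        /* For example, a 4-thread shader gets 16 / 4 = 4 slots per lookup, so
         * a lookup that performed 5 TMU writes has to drop to 2 threads (8
         * slots per lookup).
         */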
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

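/* Maps a NIR image intrinsic to the TMU operation to request.  Loads and
 * stores use the regular op, atomics map onto the TMU's write ops, and
 * atomic adds of a constant +/-1 may be lowered to the cheaper inc/dec ops
 * by v3d_get_op_for_atomic_add().
 */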
static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}

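/* Emits an image load, store, or atomic.  This follows the same
 * WRTMUC-then-TMU-register-write sequence as a texture lookup, except that
 * the data for stores and atomics is written to TMUD before the S coordinate
 * write that fires the operation.
 */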
void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace the atomic_add with an inc/dec, then we
         * need to do things slightly differently, like not loading the
         * amount to add/sub, since that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;
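        /* With a 16-bit return, each word of texture data packs two channels,
         * so e.g. a 3-channel load still only needs two return words.
         */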

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

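        /* In nonuniform control flow, predicate the S coordinate write that
         * fires the TMU operation so inactive channels don't perform the
         * side effect: c->execute is zero for channels active in the current
         * block, so PUSHZ sets the A flag for exactly those channels and the
         * write below runs under an IFA condition.
         */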
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

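        /* If the operation returned no data (e.g. a plain store), emit a
         * TMUWT to wait on the TMU operation, since there is no LDTMU
         * consuming its output.
         */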
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);

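        /* Record that this shader writes memory through the TMU, so the
         * driver can account for that when building the RCL.
         */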
        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}