v3d/tex: Configuration Parameter 1 can only be skipped if P2 can be skipped too
[mesa.git] src/broadcom/compiler/v3d40_tex.c
1 /*
2 * Copyright © 2016-2018 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "v3d_compiler.h"
25
26 /* We don't do any address packing. */
27 #define __gen_user_data void
28 #define __gen_address_type uint32_t
29 #define __gen_address_offset(reloc) (*reloc)
30 #define __gen_emit_reloc(cl, reloc)
31 #include "cle/v3d_packet_v41_pack.h"
32
33 static void
34 vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
35 int *tmu_writes)
36 {
37 /* XXX perf: We should figure out how to merge ALU operations
38 * producing the val with this MOV, when possible.
39 */
40 vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);
41
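/* Count the write so callers can compare the total against the size of
 * the shared TMU input FIFO and drop the thread count if needed.
 */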
42 (*tmu_writes)++;
43 }
44
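/* Emit a NOP with the WRTMUC signal set: the uniform attached to the
 * instruction is what gets written to the TMU configuration.
 */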
45 static void
46 vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
47 {
48 struct qinst *inst = vir_NOP(c);
49 inst->qpu.sig.wrtmuc = true;
50 inst->uniform = vir_get_uniform_index(c, contents, data);
51 }
52
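/* Default values for the P1/P2 configuration parameters: when the packed
 * parameter would match these defaults, its WRTMUC write can be skipped.
 */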
53 static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
54 .per_pixel_mask_enable = true,
55 };
56
57 static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
58 .op = V3D_TMU_OP_REGULAR,
59 };
60
61 /*
62 * Returns whether the texture operation requires a sampler as a
63 * general rule; see the documentation of
64 * nir_tex_instr::sampler_index. Note that the specific hardware
65 * might still require a sampler in some cases, for other reasons.
66 */
67 static bool
68 texture_instr_need_sampler(nir_tex_instr *instr)
69 {
70 switch(instr->op) {
71 case nir_texop_txf:
72 case nir_texop_txf_ms:
73 case nir_texop_txs:
74 case nir_texop_lod:
75 case nir_texop_query_levels:
76 case nir_texop_texture_samples:
77 case nir_texop_samples_identical:
78 return false;
79 default:
80 return true;
81 }
82 }
83
84 void
85 v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
86 {
87 unsigned unit = instr->texture_index;
88 int tmu_writes = 0;
89
90 struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
91 };
92
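/* P2 selects the TMU operation along with per-instruction modifiers:
 * gather mode/component, txd coefficient mode, autolod disable, and
 * the texel offsets filled in below.
 */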
93 struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
94 .op = V3D_TMU_OP_REGULAR,
95
96 .gather_mode = instr->op == nir_texop_tg4,
97 .gather_component = instr->component,
98
99 .coefficient_mode = instr->op == nir_texop_txd,
100
101 .disable_autolod = instr->op == nir_texop_tg4
102 };
103
104 int non_array_components = instr->coord_components - instr->is_array;
105 struct qreg s;
106
107 for (unsigned i = 0; i < instr->num_srcs; i++) {
108 switch (instr->src[i].src_type) {
109 case nir_tex_src_coord:
110 /* S triggers the lookup, so save it for the end. */
111 s = ntq_get_src(c, instr->src[i].src, 0);
112
113 if (non_array_components > 1) {
114 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
115 ntq_get_src(c, instr->src[i].src,
116 1), &tmu_writes);
117 }
118 if (non_array_components > 2) {
119 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
120 ntq_get_src(c, instr->src[i].src,
121 2), &tmu_writes);
122 }
123
124 if (instr->is_array) {
125 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
126 ntq_get_src(c, instr->src[i].src,
127 instr->coord_components - 1),
128 &tmu_writes);
129 }
130 break;
131
132 case nir_tex_src_bias:
133 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
134 ntq_get_src(c, instr->src[i].src, 0),
135 &tmu_writes);
136 break;
137
138 case nir_tex_src_lod:
139 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
140 ntq_get_src(c, instr->src[i].src, 0),
141 &tmu_writes);
142
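/* An explicit LOD overrides the automatically computed one, so
 * disable autolod. txf is excluded because texel fetches never use
 * automatic LOD in the first place.
 */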
143 if (instr->op != nir_texop_txf)
144 p2_unpacked.disable_autolod = true;
145 break;
146
147 case nir_tex_src_comparator:
148 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
149 ntq_get_src(c, instr->src[i].src, 0),
150 &tmu_writes);
151 break;
152
153 case nir_tex_src_offset: {
154 if (nir_src_is_const(instr->src[i].src)) {
155 p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
156 if (instr->coord_components >= 2)
157 p2_unpacked.offset_t =
158 nir_src_comp_as_int(instr->src[i].src, 1);
159 if (non_array_components >= 3)
160 p2_unpacked.offset_r =
161 nir_src_comp_as_int(instr->src[i].src, 2);
162 } else {
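/* Non-constant offsets: pack the s and t offsets into 4-bit
 * fields (s in bits 0-3, t in bits 4-7) and write the result to
 * TMUOFF.
 */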
163 struct qreg mask = vir_uniform_ui(c, 0xf);
164 struct qreg x, y, offset;
165
166 x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
167 0), mask);
168 y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
169 1), mask);
170 offset = vir_OR(c, x,
171 vir_SHL(c, y,
172 vir_uniform_ui(c, 4)));
173
174 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
175 offset, &tmu_writes);
176 }
177 break;
178 }
179
180 default:
181 unreachable("unknown texture source");
182 }
183 }
184
185 /* Limit the number of channels returned to both how many the NIR
186 * instruction writes and how many the instruction could produce.
187 */
188 assert(instr->dest.is_ssa);
189 p0_unpacked.return_words_of_texture_data =
190 nir_ssa_def_components_read(&instr->dest.ssa);
191
192 assert(p0_unpacked.return_words_of_texture_data != 0);
193
194 uint32_t p0_packed;
195 V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
196 (uint8_t *)&p0_packed,
197 &p0_unpacked);
198
199 uint32_t p2_packed;
200 V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
201 (uint8_t *)&p2_packed,
202 &p2_unpacked);
203
204 /* Load unit number into the high bits of the texture address field,
205 * which will be used by the driver to decide which texture to put
206 * in the actual address field.
207 */
208 p0_packed |= unit << 24;
209
210 vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
211
212 /* Even if the texture operation doesn't need a sampler by
213 * itself, we still need to add the sampler configuration
214 * parameter if the output is 32-bit.
215 */
216 bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
217 !instr->is_shadow);
218
219 /*
220 * P1 is optional, but the config parameters are consumed in order, so we can only skip P1 if P2 can be skipped too.
221 */
222 bool needs_p2_config =
223 memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0;
224
225 if (needs_p2_config || output_type_32_bit ||
226 texture_instr_need_sampler(instr)) {
227 struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
228 .output_type_32_bit = output_type_32_bit,
229
230 .unnormalized_coordinates = (instr->sampler_dim ==
231 GLSL_SAMPLER_DIM_RECT),
232 };
233
234 /* Word enables can't ask for more channels than the
235 * output type could provide (2 for f16, 4 for
236 * 32-bit).
237 */
238 assert(!p1_unpacked.output_type_32_bit ||
239 p0_unpacked.return_words_of_texture_data < (1 << 4));
240 assert(p1_unpacked.output_type_32_bit ||
241 p0_unpacked.return_words_of_texture_data < (1 << 2));
242
243 uint32_t p1_packed;
244 V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
245 (uint8_t *)&p1_packed,
246 &p1_unpacked);
247
248 /* Load unit number into the high bits of the sampler
249 * address field, which will be used by the driver
250 * to decide which sampler to put in the actual
251 * address field.
252 */
253 p1_packed |= unit << 24;
254
255 vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
256 }
257
258 if (needs_p2_config)
259 vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
260
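/* Writing the S coordinate triggers the lookup: use the texel-fetch
 * (TMUSF), cube-map (TMUSCM) or regular (TMUS) variant of the register
 * as appropriate.
 */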
261 if (instr->op == nir_texop_txf) {
262 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
263 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
264 } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
265 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
266 } else {
267 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
268 }
269
270 vir_emit_thrsw(c);
271
272 /* The input FIFO has 16 slots across all threads, so make sure we
273 * don't overfill our allocation.
274 */
275 while (tmu_writes > 16 / c->threads)
276 c->threads /= 2;
277
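/* Read back one LDTMU result for each return word enabled in P0. */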
278 for (int i = 0; i < 4; i++) {
279 if (p0_unpacked.return_words_of_texture_data & (1 << i))
280 ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
281 }
282 }
283
284 static uint32_t
285 v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
286 {
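/* Map the NIR image intrinsic to a TMU general operation. Several
 * V3D_TMU_OP names combine a write-mode and a read-mode operation
 * (e.g. V3D_TMU_OP_WRITE_AND_READ_INC); the write-mode half is the
 * one that applies to these atomics. atomic_add may be turned into
 * an inc/dec op by v3d_get_op_for_atomic_add() when its operand
 * allows it.
 */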
287 switch (instr->intrinsic) {
288 case nir_intrinsic_image_load:
289 case nir_intrinsic_image_store:
290 return V3D_TMU_OP_REGULAR;
291 case nir_intrinsic_image_atomic_add:
292 return v3d_get_op_for_atomic_add(instr, 3);
293 case nir_intrinsic_image_atomic_imin:
294 return V3D_TMU_OP_WRITE_SMIN;
295 case nir_intrinsic_image_atomic_umin:
296 return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
297 case nir_intrinsic_image_atomic_imax:
298 return V3D_TMU_OP_WRITE_SMAX;
299 case nir_intrinsic_image_atomic_umax:
300 return V3D_TMU_OP_WRITE_UMAX;
301 case nir_intrinsic_image_atomic_and:
302 return V3D_TMU_OP_WRITE_AND_READ_INC;
303 case nir_intrinsic_image_atomic_or:
304 return V3D_TMU_OP_WRITE_OR_READ_DEC;
305 case nir_intrinsic_image_atomic_xor:
306 return V3D_TMU_OP_WRITE_XOR_READ_NOT;
307 case nir_intrinsic_image_atomic_exchange:
308 return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
309 case nir_intrinsic_image_atomic_comp_swap:
310 return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
311 default:
312 unreachable("unknown image intrinsic");
313 }
314 }
315
316 void
317 v3d40_vir_emit_image_load_store(struct v3d_compile *c,
318 nir_intrinsic_instr *instr)
319 {
320 unsigned format = nir_intrinsic_format(instr);
321 unsigned unit = nir_src_as_uint(instr->src[0]);
322 int tmu_writes = 0;
323
324 struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
325 };
326
327 struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
328 .per_pixel_mask_enable = true,
329 .output_type_32_bit = v3d_gl_format_is_return_32(format),
330 };
331
332 struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
333
334 p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);
335
336 /* If we were able to replace an atomic_add with an inc/dec, then we
337 * need to do things slightly differently, like not loading the
338 * amount to add/sub, as that is implicit.
339 */
340 bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_atomic_add &&
341 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
342 p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
343
344 bool is_1d = false;
345 switch (nir_intrinsic_image_dim(instr)) {
346 case GLSL_SAMPLER_DIM_1D:
347 is_1d = true;
348 break;
349 case GLSL_SAMPLER_DIM_BUF:
350 break;
351 case GLSL_SAMPLER_DIM_2D:
352 case GLSL_SAMPLER_DIM_RECT:
353 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
354 ntq_get_src(c, instr->src[1], 1), &tmu_writes);
355 break;
356 case GLSL_SAMPLER_DIM_3D:
357 case GLSL_SAMPLER_DIM_CUBE:
358 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
359 ntq_get_src(c, instr->src[1], 1), &tmu_writes);
360 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
361 ntq_get_src(c, instr->src[1], 2), &tmu_writes);
362 break;
363 default:
364 unreachable("bad image sampler dim");
365 }
366
367 if (nir_intrinsic_image_array(instr)) {
368 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
369 ntq_get_src(c, instr->src[1],
370 is_1d ? 1 : 2), &tmu_writes);
371 }
372
373 /* Limit the number of channels returned to both how many the NIR
374 * instruction writes and how many the instruction could produce.
375 */
376 uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
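/* With 16-bit returns each TMU word holds two channels, so only half
 * as many return words are needed.
 */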
377 if (!p1_unpacked.output_type_32_bit)
378 instr_return_channels = (instr_return_channels + 1) / 2;
379
380 p0_unpacked.return_words_of_texture_data =
381 (1 << instr_return_channels) - 1;
382
383 uint32_t p0_packed;
384 V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
385 (uint8_t *)&p0_packed,
386 &p0_unpacked);
387
388 uint32_t p1_packed;
389 V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
390 (uint8_t *)&p1_packed,
391 &p1_unpacked);
392
393 uint32_t p2_packed;
394 V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
395 (uint8_t *)&p2_packed,
396 &p2_unpacked);
397
398 /* Load unit number into the high bits of the texture or sampler
399 * address field, which will be used by the driver to decide which
400 * texture to put in the actual address field.
401 */
402 p0_packed |= unit << 24;
403
404 vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
405 if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
406 vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
407 if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
408 vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
409
410 /* Emit the data writes for atomics or image store. */
411 if (instr->intrinsic != nir_intrinsic_image_load &&
412 !atomic_add_replaced) {
413 /* Vector for stores, or first atomic argument */
414 struct qreg src[4];
415 for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
416 src[i] = ntq_get_src(c, instr->src[3], i);
417 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
418 &tmu_writes);
419 }
420
421 /* Second atomic argument */
422 if (instr->intrinsic ==
423 nir_intrinsic_image_atomic_comp_swap) {
424 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
425 ntq_get_src(c, instr->src[4], 0),
426 &tmu_writes);
427 }
428 }
429
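/* In non-uniform control flow the TMUSF write that fires off the
 * operation has to be predicated on the active channels
 * (c->execute == 0), so that disabled channels don't perform stores
 * or atomics.
 */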
430 if (vir_in_nonuniform_control_flow(c) &&
431 instr->intrinsic != nir_intrinsic_image_load) {
432 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
433 V3D_QPU_PF_PUSHZ);
434 }
435
436 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
437 &tmu_writes);
438
439 if (vir_in_nonuniform_control_flow(c) &&
440 instr->intrinsic != nir_intrinsic_image_load) {
441 struct qinst *last_inst = (struct qinst *)c->cur_block->instructions.prev;
442 vir_set_cond(last_inst, V3D_QPU_COND_IFA);
443 }
444
445 vir_emit_thrsw(c);
446
447 /* The input FIFO has 16 slots across all threads, so make sure we
448 * don't overfill our allocation.
449 */
450 while (tmu_writes > 16 / c->threads)
451 c->threads /= 2;
452
453 for (int i = 0; i < 4; i++) {
454 if (p0_unpacked.return_words_of_texture_data & (1 << i))
455 ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
456 }
457
458 if (nir_intrinsic_dest_components(instr) == 0)
459 vir_TMUWT(c);
460
461 if (instr->intrinsic != nir_intrinsic_image_load)
462 c->tmu_dirty_rcl = true;
463 }