[mesa.git] / src / broadcom / compiler / v3d40_tex.c
/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

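/* Queues up a write of "val" to one of the TMU's magic register addresses
 * (S/T/R coordinates, LOD bias, etc.) and counts it towards the caller's
 * running total of TMU input FIFO entries.
 */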
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

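/* Emits a NOP with the WRTMUC signal set, so that the next TMU configuration
 * word is loaded from the given uniform.
 */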
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

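/* Default values for the P1/P2 configuration words.  When the shader's
 * configuration matches these, the corresponding WRTMUC write is skipped
 * below.
 */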
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

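/* Emits the VIR for a NIR texture instruction: coordinates and other
 * parameters are written to the TMU's magic registers, the packed P0/P1/P2
 * configuration words are supplied through WRTMUC uniforms, and the results
 * are read back with LDTMU after the thread switch.
 */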
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

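                        /* An explicit LOD for anything but txf replaces the
                         * TMU's automatic LOD selection.
                         */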
                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
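                                /* Non-constant offsets: pack the low 4 bits
                                 * of the S and T offsets into a single
                                 * TMUOFF write.
                                 */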
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        /* Word enables can't ask for more channels than the output type could
         * provide (2 for f16, 4 for 32-bit).
         */
        assert(!p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 4));
        assert(p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 2));

        assert(p0_unpacked.return_words_of_texture_data != 0);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* XXX perf: Can we skip p1 setup for txf ops? */
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

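        /* Writing the S coordinate last triggers the lookup; the destination
         * register selects the flavor: TMUSF for fetches, TMUSCM for cube-map
         * lookups, TMUS otherwise.
         */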
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
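        /* Halving the thread count doubles each thread's share of the FIFO. */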
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

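/* Maps a NIR image intrinsic to the TMU operation field of P2.  Note that
 * each atomic op encoding is shared between a write-side and a read-side
 * operation (e.g. V3D_TMU_OP_WRITE_AND_READ_INC), which is what lets an
 * atomic_add of +/-1 be turned into the read-side inc/dec below.
 */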
static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        };
}

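/* Emits the VIR for a NIR image load, store or atomic: coordinates and data
 * are written to the TMU's magic registers, the P0/P1/P2 configuration is
 * supplied through WRTMUC uniforms, and any returned values are read back
 * with LDTMU.
 */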
void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace an atomic_add with an inc/dec, we need
         * to do things slightly differently, such as not loading the amount
         * to add/sub, as that is implicit.
         */
        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                                    (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                                     p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
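        /* With f16 returns, each 32-bit result word holds two components. */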
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

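        /* In non-uniform control flow, predicate the TMUSF write that kicks
         * off the operation on the channel being active (c->execute == 0), so
         * that inactive channels don't perform stores or atomics.
         */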
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst = (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}