v3d: Only look up the 3rd texture gather offset for non-arrays.
[mesa.git] src/broadcom/compiler/v3d40_tex.c
/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_deref.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
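/* The pack header provides the V3D41_TMU_CONFIG_PARAMETER_*_pack() helpers
 * used below to turn the unpacked parameter structs into 32-bit TMU
 * configuration words.
 */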

static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

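/* Emits a NOP carrying the WRTMUC signal, which writes one 32-bit TMU
 * configuration word sourced from the uniform stream.
 */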
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

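/* Default parameter values, compared against below so that WRTMUCs with no
 * effect can be skipped.
 */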
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

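/* Emits the TMU sequence for a NIR texture instruction: configuration
 * WRTMUCs, the operand writes (with the S coordinate last, since writing S
 * triggers the lookup), a thread switch, and finally LDTMUs to collect the
 * results.
 */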
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
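                                /* The last coordinate component of an array
                                 * texture is the layer index, which takes no
                                 * offset, so only read a third offset
                                 * component for non-array coordinates.
                                 */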
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
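                                /* Non-constant offsets: pack the low 4 bits
                                 * (two's complement) of each component into
                                 * one value, S in bits 0-3 and T in bits
                                 * 4-7, and write it to TMUOFF.
                                 */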
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        /* Word enables can't ask for more channels than the output type
         * could provide: an f16 return word packs two half-float channels,
         * so two words cover four channels, while 32-bit returns use up to
         * four words of one channel each.
         */
        assert(!p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 4));
        assert(p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 2));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* XXX perf: Can we skip p1 setup for txf ops? */
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
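        /* P2 carries the non-default state (gather, txd coefficients,
         * offsets, autolod), so it only needs to be written when something
         * above changed it from the default.
         */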
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

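        /* Writing the S coordinate fires the lookup: TMUSF for unfiltered
         * texel fetches (txf), TMUSCM for cube maps, and TMUS otherwise.
         */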
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
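        /* For example, at four threads each instance gets 16 / 4 = 4 slots,
         * so a lookup with five TMU writes drops the shader to two threads.
         */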
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

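        /* Pop one 32-bit word from the TMU output FIFO per enabled return
         * word and store it to the NIR destination.
         */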
        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

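/* Trivial size/align callback: counts every element as one unit, so the
 * deref offset computed below is in whole-image elements that can be added
 * directly to driver_location.
 */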
static void
type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
{
        *size = 1;
        *align = 1;
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        unsigned unit = (var->data.driver_location +
                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
                                                          type_size_align_1));
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* XXX perf: We should turn add/sub of 1 into inc/dec.  Perhaps NIR
         * wants to have support for inc/dec?
         */
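        /* Map the NIR intrinsic to a TMU op.  The combined enum names exist
         * because (presumably) the same op code selects one behavior for
         * writes and another for reads, e.g. ADD on a write and PREFETCH on
         * a read.
         */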
        switch (instr->intrinsic) {
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
                p2_unpacked.op = V3D_TMU_OP_REGULAR;
                break;
        case nir_intrinsic_image_deref_atomic_add:
                p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
                break;
        case nir_intrinsic_image_deref_atomic_min:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
                break;
        case nir_intrinsic_image_deref_atomic_max:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
                break;
        case nir_intrinsic_image_deref_atomic_and:
                p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
                break;
        case nir_intrinsic_image_deref_atomic_or:
                p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
                break;
        case nir_intrinsic_image_deref_atomic_xor:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
                break;
        case nir_intrinsic_image_deref_atomic_exchange:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
                break;
        case nir_intrinsic_image_deref_atomic_comp_swap:
                p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
                break;
        default:
                unreachable("unknown image intrinsic");
        }

        bool is_1d = false;
        switch (glsl_get_sampler_dim(sampler_type)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (glsl_sampler_type_is_array(sampler_type)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
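        /* An f16 return word holds two channels, so round the channel count
         * up to a word count: e.g. a 4-channel f16 load needs (4 + 1) / 2 =
         * 2 return words (mask 0b11), while a 3-channel 32-bit load needs
         * three (mask 0b111).
         */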
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_deref_load) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_deref_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

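        /* With no return words enabled there is no LDTMU to end the TMU
         * sequence, so emit a TMUWT to wait for the TMU write to land.
         */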
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);
}