mesa.git: src/broadcom/compiler/v3d40_tex.c @ c41cd33505a68e5d9be6efd13ee148d9e8912048

/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
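
/* The generated pack header provides the V3D41_TMU_CONFIG_PARAMETER_{0,1,2}
 * structs and their _pack() helpers used below to build the TMU
 * configuration words.
 */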

static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

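/* Emits a NOP carrying the WRTMUC signal, which writes the next TMU
 * configuration word; the word itself travels as the implicit uniform
 * in src[0].
 */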
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->has_implicit_uniform = true;
        inst->src[0] = vir_uniform(c, contents, data);
}

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;
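
        /* The lookup is configured by up to three parameter words.
         * Loosely (a sketch of our usage, not the full hardware fields):
         * P0 carries the texture state address and the return-word
         * enables, P1 the sampler state address and output type, and P2
         * the per-lookup controls (op, offsets, gather and LOD behavior).
         */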
        static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
                .op = V3D_TMU_OP_REGULAR,
        };

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                nir_const_value *offset =
                                        nir_src_as_const_value(instr->src[i].src);

                                p2_unpacked.offset_s = offset->i32[0];
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t = offset->i32[1];
                                if (instr->coord_components >= 3)
                                        p2_unpacked.offset_r = offset->i32[2];
                        } else {
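                                /* Non-constant offsets are packed for the
                                 * TMUOFF write as 4-bit fields: s in bits
                                 * 0..3 and t in bits 4..7.
                                 */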
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         *
         * XXX perf: Can we also limit to the number of channels that are
         * actually read by the users of this NIR dest, so that we don't need
         * to emit unused LDTMUs?
         */
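        /* With an f16 return each 32-bit word carries two half-float
         * channels, so e.g. a vec4 result needs only two words of texture
         * data.
         */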
        uint32_t instr_return_channels = nir_tex_instr_dest_size(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << MIN2(instr_return_channels,
                           c->key->tex[unit].return_channels)) - 1;

        /* Word enables can't ask for more channels than the output type could
         * provide (2 for f16, 4 for 32-bit).
         */
        assert(!p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 4));
        assert(p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 2));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* XXX perf: Can we skip p1 setup for txf ops? */
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
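        /* The default P2 describes a regular texture op, so skip the write
         * when nothing diverges from the default.
         */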
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

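        /* Writing the S coordinate triggers the lookup: TMUSF for texel
         * fetches, TMUSCM for cube maps, and plain TMUS otherwise.
         */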
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
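        /* For example, at 4 threads each thread may use at most 4 of the
         * 16 slots, so a lookup with 5 TMU writes drops us to 2 threads.
         */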
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        struct qreg return_values[4];
        for (int i = 0; i < 4; i++) {
                /* Swizzling .zw of an RG texture should give undefined
                 * results, not crash the compiler.
                 */
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        return_values[i] = vir_LDTMU(c);
                else
                        return_values[i] = c->undef;
        }

        for (int i = 0; i < nir_tex_instr_dest_size(instr); i++) {
                struct qreg chan;

                if (!p1_unpacked.output_type_32_bit) {
                        STATIC_ASSERT(PIPE_SWIZZLE_X == 0);
                        chan = return_values[i / 2];

                        /* XXX perf: We should move this unpacking into NIR.
                         * That would give us exposure of these types to NIR
                         * optimization, so that (for example) a repacking of
                         * half-float samples to the half-float render target
                         * could be eliminated.
                         */
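                        /* A .l/.h input unpack on the FMOV selects the low
                         * or high f16 half and converts it to f32; integer
                         * formats are instead shifted manually below.
                         */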
                        if (nir_alu_type_get_base_type(instr->dest_type) ==
                            nir_type_float) {
                                enum v3d_qpu_input_unpack unpack;
                                if (i & 1)
                                        unpack = V3D_QPU_UNPACK_H;
                                else
                                        unpack = V3D_QPU_UNPACK_L;

                                chan = vir_FMOV(c, chan);
                                vir_set_unpack(c->defs[chan.index], 0, unpack);
                        } else {
                                /* If we're unpacking the low field, shift it
                                 * up to the top first.
                                 */
                                if ((i & 1) == 0) {
                                        chan = vir_SHL(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }

                                /* Do proper sign extension to a 32-bit int. */
                                if (nir_alu_type_get_base_type(instr->dest_type) ==
                                    nir_type_int) {
                                        chan = vir_ASR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                } else {
                                        chan = vir_SHR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }
                        }
                } else {
                        chan = vir_MOV(c, return_values[i]);
                }
                ntq_store_dest(c, &instr->dest, i, chan);
        }
}