v3d: Limit shader threading according to our maximum TMU fifo usage.
[mesa.git] src/broadcom/compiler/v3d40_tex.c
/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

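/* Emits a MOV of val into one of the TMU's magic write addresses and counts
 * it, so the caller knows how many words this lookup pushes into the TMU
 * input FIFO.
 */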
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

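/* Emits a NOP carrying the WRTMUC signal, which writes its implicit uniform
 * (one of the packed TMU_CONFIG_PARAMETER words) to the TMU configuration.
 */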
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->has_implicit_uniform = true;
        inst->src[0] = vir_uniform(c, contents, data);
}

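/* Emits the VIR for a NIR texture instruction: first the packed TMU
 * configuration parameters via WRTMUC, then the per-lookup operand writes
 * (with the lookup-triggering S coordinate last), and finally the LDTMUs
 * that read back the texture data.
 */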
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf &&
                            instr->op != nir_texop_tg4) {
                                p2_unpacked.disable_autolod = true;
                        }
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        nir_const_value *offset =
                                nir_src_as_const_value(instr->src[i].src);

                        p2_unpacked.offset_s = offset->i32[0];
                        if (instr->coord_components >= 2)
                                p2_unpacked.offset_t = offset->i32[1];
                        if (instr->coord_components >= 3)
                                p2_unpacked.offset_r = offset->i32[2];
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_tex_instr_dest_size(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << MIN2(instr_return_channels,
                           c->key->tex[unit].return_channels)) - 1;
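
        /* For example, a vec4 destination with 16-bit returns needs only two
         * words of texture data, so with a sampler that returns at least two
         * channels the resulting mask is 0x3.
         */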

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;
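
        /* P0 and P1 are emitted through dedicated uniform contents below so
         * the driver can patch the real texture and sampler state addresses
         * in at draw time; P2 has no address field and goes out as a plain
         * constant.
         */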
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

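        /* Writing the S coordinate is what finally triggers the lookup;
         * which magic register we use selects texel-fetch (TMUSF), cube-map
         * (TMUSCM), or regular (TMUS) handling.
         */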
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;
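
        /* For example, at four threads each lookup may occupy 16 / 4 = 4
         * FIFO slots, so a 2D array lookup with bias and depth comparison
         * (s, t, array index, bias, and reference value: five writes) drops
         * the shader to two-way threading.
         */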

        struct qreg return_values[4];
        for (int i = 0; i < 4; i++) {
                /* Swizzling .zw of an RG texture should give undefined
                 * results, not crash the compiler.
                 */
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        return_values[i] = vir_LDTMU(c);
                else
                        return_values[i] = c->undef;
        }

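        /* With 16-bit returns, each 32-bit word packs two channels: channel
         * 2n in the low half and channel 2n + 1 in the high half.  Float
         * results get unpacked as f16 below; integer results are shifted and
         * then sign- or zero-extended.
         */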
        for (int i = 0; i < nir_tex_instr_dest_size(instr); i++) {
                struct qreg chan;

                if (!p1_unpacked.output_type_32_bit) {
                        STATIC_ASSERT(PIPE_SWIZZLE_X == 0);
                        chan = return_values[i / 2];

                        if (nir_alu_type_get_base_type(instr->dest_type) ==
                            nir_type_float) {
                                enum v3d_qpu_input_unpack unpack;
                                if (i & 1)
                                        unpack = V3D_QPU_UNPACK_H;
                                else
                                        unpack = V3D_QPU_UNPACK_L;

                                chan = vir_FMOV(c, chan);
                                vir_set_unpack(c->defs[chan.index], 0, unpack);
                        } else {
                                /* If we're unpacking the low field, shift it
                                 * up to the top first.
                                 */
                                if ((i & 1) == 0) {
                                        chan = vir_SHL(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }

                                /* Do proper sign extension to a 32-bit int. */
                                if (nir_alu_type_get_base_type(instr->dest_type) ==
                                    nir_type_int) {
                                        chan = vir_ASR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                } else {
                                        chan = vir_SHR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }
                        }
                } else {
                        chan = vir_MOV(c, return_values[i]);
                }
                ntq_store_dest(c, &instr->dest, i, chan);
        }
}