/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
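
/* The generated header above provides the V3D41_TMU_CONFIG_PARAMETER_*
 * structs and their _pack() helpers used below to build the 32-bit TMU
 * configuration words.
 */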
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}
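
/* Emits a NOP carrying the WRTMUC signal, which feeds the next uniform
 * stream value to the TMU as a configuration word.
 */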
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}
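
/* Hardware default values for the P1/P2 configuration words: when the
 * parameters we build match these, the WRTMUC for that word can be skipped.
 */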
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};
/**
 * Returns whether the texture operation requires a sampler as a general
 * rule; see the documentation of nir_tex_instr::sampler_index. Note that
 * the specific hardware may still require a sampler for some of these
 * operations, for other reasons.
 */
static bool
texture_instr_need_sampler(nir_tex_instr *instr)
{
        switch (instr->op) {
        case nir_texop_txf:
        case nir_texop_txf_ms:
        case nir_texop_txs:
        case nir_texop_lod:
        case nir_texop_query_levels:
        case nir_texop_texture_samples:
        case nir_texop_samples_identical:
                return false;
        default:
                return true;
        }
}
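
/* Emits the VIR for a texture lookup: one TMU register write per source,
 * the WRTMUC configuration words, and finally the S coordinate write that
 * triggers the lookup, followed by LDTMUs to collect the result.
 */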
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;
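
        /* Each texture source is streamed to its own TMU register: T/R for
         * the extra coordinate components, I for the array index, B for
         * bias/LOD, DREF for the comparator. S is held back until the end,
         * because writing it is what triggers the lookup.
         */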
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;
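
                /* Texel offsets: constant offsets ride along in the P2
                 * configuration word, while dynamic ones are packed at run
                 * time as 4-bit fields (T in bits 7:4, S in bits 3:0) and
                 * written to TMUOFF.
                 */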
                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s =
                                        nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        assert(p0_unpacked.return_words_of_texture_data != 0);
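
        /* return_words_of_texture_data is a per-word enable mask: the TMU
         * returns one 32-bit word per set bit, each of which is consumed by
         * a matching LDTMU at the end of this function.
         */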
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture address field,
         * which will be used by the driver to decide which texture to put
         * in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by
         * itself, we still need to add the sampler configuration
         * parameter if the output is 32-bit.
         */
        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                   !instr->is_shadow);

        if (output_type_32_bit || texture_instr_need_sampler(instr)) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                /* Load unit number into the high bits of the sampler
                 * address field, which will be used by the driver
                 * to decide which sampler to put in the actual
                 * address field.
                 */
                p1_packed |= unit << 24;

                vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        }

        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;
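
        /* Read back one response word per bit set in
         * return_words_of_texture_data.
         */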
        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}
static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}
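
/* Emits an image load/store/atomic. The structure mirrors a texture lookup:
 * TMU register writes for the coordinates (and TMUD writes for any data
 * operands), WRTMUC configuration words with the op selected in P2, and a
 * final TMUSF write that triggers the access.
 */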
void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace atomic_add with an inc/dec op, things
         * work slightly differently: the amount to add/subtract is implicit,
         * so we must not load it as a data argument.
         */
        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                                    (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                                     p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }
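
        /* For a side-effecting access inside nonuniform control flow, only
         * active channels may trigger it: PUSHZ sets the A flag where
         * c->execute is zero (the active channels), and the triggering TMUSF
         * write below is then made conditional on A.
         */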
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
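
        /* Presumably the TMU still pushes one response word even when no
         * return words were requested (e.g. a plain image store), so consume
         * it with a dummy read to keep the output FIFO balanced.
         */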
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_LDTMU(c);
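
        /* Flag TMU-side memory writes so the driver can flush TMU-written
         * data when building the render control list.
         */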
        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}