/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
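
/* Note: the V3D41_TMU_CONFIG_PARAMETER_*_pack() helpers and the unpacked
 * struct definitions used below come from the generated packet header
 * included above.
 */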
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}
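
/* Emits a NOP carrying the WRTMUC signal, which writes the given uniform
 * into the TMU configuration stream.
 */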
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}
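
/* Default parameter values: when the unpacked P1/P2 state matches these
 * defaults, the corresponding WRTMUC writes can be skipped (see the
 * memcmp() checks below).
 */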
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

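        /* P2 carries the per-operation state: texture-gather mode and
         * component, txd coefficient mode, constant texel offsets, and
         * auto-LOD control.
         */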
        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components =
                instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        struct qreg s;

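        /* Note that the array index is not a regular coordinate component:
         * it is written separately to TMUI below. nir_texop_lod is the
         * exception and keeps all components.
         */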
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        /* With texel fetch, automatic LOD is already
                         * disabled and disable_autolod must not be enabled.
                         * For non-cubes we can use the TMUSLOD register,
                         * which implicitly sets disable_autolod.
                         */
                        if (instr->op != nir_texop_txf &&
                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                p2_unpacked.disable_autolod = true;
                        }
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s =
                                        nir_src_comp_as_int(instr->src[i].src, 0);
                                if (non_array_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

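                                /* Non-constant offsets: each component is a
                                 * 4-bit field of the TMUOFF word, so mask s
                                 * and t to 4 bits and pack t above s.
                                 */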
                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        assert(p0_unpacked.return_words_of_texture_data != 0);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2), as right now it is the only
         * V42-specific feature over V41 that we are using.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load the unit number into the high bits of the texture address
         * field, which will be used by the driver to decide which texture
         * to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by itself,
         * we still need to add the sampler configuration parameter if the
         * output is 32-bit.
         */
        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                   !instr->is_shadow);

        /*
         * P1 is optional, but we can skip it only if P2 can be skipped too.
         */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default,
                        sizeof(p2_unpacked)) != 0);

        if (output_type_32_bit ||
            nir_tex_instr_need_sampler(instr)) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the output
                 * type could provide (2 for f16, 4 for 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                /* Load the unit number into the high bits of the sampler
                 * address field, which will be used by the driver to decide
                 * which sampler to put in the actual address field.
                 */
                p1_packed |= unit << 24;

                vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        } else if (needs_p2_config) {
                /* Configuration parameters need to be set up in order, and
                 * if P2 is needed, P1 must be set up too, even when the
                 * texture operation doesn't need any sampler info. In that
                 * case we can emit the default info and avoid asking the
                 * driver for the sampler state address.
                 */
                uint32_t p1_packed_default;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed_default,
                                                  &p1_unpacked_default);
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

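        /* Writing the S coordinate is what actually triggers the lookup,
         * and the destination register selects the variant: TMUSF for texel
         * fetch, TMUSCM for cube maps, TMUSLOD for explicit-LOD lookups,
         * and plain TMUS otherwise.
         */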
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else if (instr->op == nir_texop_txl) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
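        /* For example, at four threads each thread may use at most four of
         * the sixteen slots; a lookup that queued five TMU writes would
         * force the shader down to two threads.
         */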
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
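        /* When the add amount allows it (e.g. a constant 1 or -1), this can
         * return the dedicated increment/decrement ops instead of a regular
         * add; the caller detects that case as atomic_add_replaced.
         */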
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace atomic_add with an inc/dec, then we
         * need to do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        /* In order to fetch on a cube map, we need to interpret it as a 2D
         * array, where the third coordinate is the face index.
         */
        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
            nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

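        /* E.g. a four-channel f16 load packs two channels into each 32-bit
         * return word, so it only needs two words enabled, while a 32-bit
         * load needs all four.
         */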
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load the unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default,
                   sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default,
                   sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or the first atomic argument. */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3);
                     i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

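        /* Stores and atomics have side effects, so inside non-uniform
         * control flow the TMUSF write that triggers the operation must be
         * predicated on the execute mask: PUSHZ sets the A flag for active
         * channels (execute == 0), and the IFA condition below restricts
         * the trigger to them. Plain loads have no side effects and can run
         * unconditionally.
         */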
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

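        /* Note: even when the NIR instruction has no destination (e.g. a
         * plain image store), the TMU still returns a response word that
         * must be consumed to keep the output FIFO balanced; that is what
         * the extra LDTMU below is for.
         */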
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_LDTMU(c);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}