/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
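
/* Writes one TMU operand by MOVing the value into the given magic TMU
 * register address.  *tmu_writes counts how many words have been pushed
 * into the TMU input FIFO, so we can check against the FIFO budget before
 * triggering the lookup.
 */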
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}
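
/* Emits a NOP carrying the WRTMUC signal, which streams the next TMU
 * configuration word to the hardware from the uniform stream.
 */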
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}
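
/* Default values for TMU_CONFIG_PARAMETER_1/2: when the parameters we are
 * about to emit match these, the corresponding WRTMUC can be skipped
 * entirely (see the memcmp checks below).
 */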
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4,
        };

        int non_array_components =
                instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        struct qreg s;
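
        /* Write every texture source to its TMU register.  The S coordinate
         * is deferred to the end of the function, since writing S is what
         * fires off the lookup.
         */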
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
                                /* Dynamic offsets: mask the S and T offsets
                                 * to 4 bits each and pack them into a single
                                 * value for the TMUOFF register.
                                 */
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }
        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        assert(p0_unpacked.return_words_of_texture_data != 0);
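
        /* Pack the parameter structs into their 32-bit hardware encodings.
         * The NULL first argument is the unused __gen_user_data defined at
         * the top of this file.
         */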
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);
        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2), as it is currently the only
         * V42-specific feature over V41 that we use.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load unit number into the high bits of the texture address field,
         * which will be used by the driver to decide which texture to put
         * in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* Even if the texture operation doesn't need a sampler by
         * itself, we still need to add the sampler configuration
         * parameter if the output is 32 bit.
         */
        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                   !instr->is_shadow);

        /*
         * p1 is optional, but we can skip it only if p2 can be skipped too.
         */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0);
        if (needs_p2_config || output_type_32_bit ||
            nir_tex_instr_need_sampler(instr)) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                /* Load unit number into the high bits of the sampler
                 * address field, which will be used by the driver
                 * to decide which sampler to put in the actual
                 * address field.
                 */
                p1_packed |= unit << 24;

                vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }
        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}
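
/* Maps a NIR image intrinsic to the TMU operation implementing it.  Atomic
 * add is special-cased: v3d_get_op_for_atomic_add() can turn an add of a
 * constant 1/-1 into the cheaper inc/dec TMU ops (see atomic_add_replaced
 * below).
 */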
static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}
void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace atomic_add with an inc/dec, then we
         * need to do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }
        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;
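
        /* Each bit of return_words_of_texture_data requests one 32-bit word
         * from the TMU output FIFO; an f16 output packs two channels per
         * word, which is why the channel count was halved above.
         */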
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);
        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }
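
        /* In nonuniform control flow, the TMU write that triggers the
         * operation must be predicated: PUSHZ sets the A flag for channels
         * whose execute value is zero (the active channels), and the
         * triggering SF write below is then conditioned with IFA so inactive
         * channels don't perform the operation.
         */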
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }
        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

        /* If nothing reads back a result, wait for the TMU write to land. */
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}