/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_deref.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
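
/*
 * With the stubs above, the V3D41_*_pack() helpers generated from the
 * hardware XML serialize bitfields into bare uint32_t words, which is all
 * the TMU config parameters below require (no command-list relocations).
 */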

static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}
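
/* Emits a NOP carrying the WRTMUC signal, which feeds the attached uniform
 * stream value into the TMU's configuration FIFO.
 */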
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};
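
/* These defaults let us skip emitting a P1 or P2 config word when an
 * instruction doesn't deviate from it; see the memcmp()s ahead of the
 * vir_WRTMUC() calls below.
 */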

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        if (instr->op != nir_texop_txf)
                                p2_unpacked.disable_autolod = true;
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
                                if (instr->coord_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));
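
                                /* e.g. a dynamic offset of (-2, 1) packs to
                                 * (-2 & 0xf) | ((1 & 0xf) << 4) = 0x1e.
                                 */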

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        assert(instr->dest.is_ssa);
        p0_unpacked.return_words_of_texture_data =
                nir_ssa_def_components_read(&instr->dest.ssa);

        /* Word enables can't ask for more channels than the output type could
         * provide (2 for f16, 4 for 32-bit).
         */
        assert(!p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 4));
        assert(p1_unpacked.output_type_32_bit ||
               p0_unpacked.return_words_of_texture_data < (1 << 2));

        assert(p0_unpacked.return_words_of_texture_data != 0);
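
        /* e.g. a sample whose SSA destination only has .xy read will set
         * return_words_of_texture_data to 0x3, making the readback loop at
         * the end do just two LDTMUs.
         */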

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;
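
        /* e.g. unit 2 ORs 0x02000000 into each packed word here. */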

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        /* XXX perf: Can we skip p1 setup for txf ops? */
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;
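
        /* e.g. 5 TMU writes at 4 threads exceeds 16 / 4 = 4 slots, so we
         * drop to 2 threads, where 16 / 2 = 8 slots fit.
         */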

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

static void
type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
{
        *size = 1;
        *align = 1;
}
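
/* Returning size/align of 1 per element above means the constant deref
 * offset computed in v3d40_vir_emit_image_load_store() counts array
 * elements, i.e. image units relative to driver_location.
 */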

static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_deref_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_deref_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_deref_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_deref_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_deref_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_deref_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_deref_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_deref_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_deref_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_deref_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}
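
/* The TMU op field is interpreted according to the kind of access performed,
 * as the enum names above suggest: V3D_TMU_OP_WRITE_AND_READ_INC, for
 * example, is an atomic AND when data is written and an increment when it is
 * not, which is what lets v3d_get_op_for_atomic_add() turn an atomic add of
 * +/-1 into an implicit inc/dec.
 */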

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        unsigned unit = (var->data.driver_location +
                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
                                                          type_size_align_1));
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace atomic_add with an inc/dec op, then we
         * need to do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_deref_atomic_add &&
                                    (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                                     p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
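
        /* e.g. an imageAtomicAdd() of constant 1 comes back from
         * v3d_get_op_for_atomic_add() as the INC encoding, in which case
         * src[3] must not be written to TMUD below.
         */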

        bool is_1d = false;
        switch (glsl_get_sampler_dim(sampler_type)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (glsl_sampler_type_is_array(sampler_type)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;
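
        /* e.g. a 4-channel f16 load needs (4 + 1) / 2 = 2 return words, so
         * return_words_of_texture_data becomes (1 << 2) - 1 = 0x3.
         */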

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_deref_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_deref_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

        /* Drain the TMU response even when the instruction has no
         * destination (e.g. a plain image store).
         */
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_LDTMU(c);
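
        /* Let the driver know this shader writes memory through the TMU, so
         * the job's render control list can synchronize with those writes.
         */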
        if (instr->intrinsic != nir_intrinsic_image_deref_load)
                c->tmu_dirty_rcl = true;
}