/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Shader validator for VC4.
 *
 * The VC4 has no IOMMU between it and system memory.  So, a user with access
 * to execute shaders could escalate privilege by overwriting system memory
 * (using the VPM write address register in the general-purpose DMA mode) or
 * reading system memory it shouldn't (reading it as a texture, or uniform
 * data, or vertex data).
 *
 * This walks over a shader starting from some offset within a BO, ensuring
 * that its accesses are appropriately bounded, and recording how many texture
 * accesses are made and where so that we can do relocations for them in the
 * uniform stream.
 *
 * The kernel API has shaders stored in user-mapped BOs.  The BOs will be
 * forcibly unmapped from the process before validation, and any cache of
 * validated state will be flushed if the mapping is faulted back in.
 *
 * Storing the shaders in BOs means that the validation process will be slow
 * due to uncached reads, but since shaders are long-lived and shader BOs are
 * never actually modified, this shouldn't be a problem.
 */

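/* As a rough usage sketch (illustrative only -- the BO field names and the
 * error handling here are assumptions, not part of this file's API), a
 * caller creating a shader BO validates it once up front and keeps the
 * returned metadata around for later uniform-stream relocation:
 *
 *	struct vc4_validated_shader_info *info;
 *
 *	info = vc4_validate_shader(&shader_bo->base);
 *	if (!info)
 *		return -EINVAL;
 *	shader_bo->validated_shader = info;
 */
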
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"

struct vc4_shader_validation_state {
	struct vc4_texture_sample_info tmu_setup[2];
	int tmu_write_count[2];

	/* For registers that were last written to by a MIN instruction with
	 * one argument being a uniform, the address of the uniform.
	 * Otherwise, ~0.
	 *
	 * This is used for the validation of direct address memory reads.
	 */
	uint32_t live_clamp_offsets[32 + 32 + 4];
};

static uint32_t
waddr_to_live_reg_index(uint32_t waddr, bool is_b)
{
	if (waddr < 32) {
		if (is_b)
			return 32 + waddr;
		else
			return waddr;
	} else if (waddr <= QPU_W_ACC3) {
		return 64 + waddr - QPU_W_ACC0;
	} else {
		return ~0;
	}
}

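/* For example (illustrative): waddr 5 with is_b false maps to index 5 (ra5),
 * waddr 5 with is_b true maps to 32 + 5 = 37 (rb5), and QPU_W_ACC2 maps to
 * 64 + 2 = 66 (accumulator r2), matching the 32 + 32 + 4 layout of
 * live_clamp_offsets[].  Any other write address returns ~0 (untracked).
 */
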
static bool
is_tmu_submit(uint32_t waddr)
{
	return (waddr == QPU_W_TMU0_S ||
		waddr == QPU_W_TMU1_S);
}

static bool
is_tmu_write(uint32_t waddr)
{
	return (waddr >= QPU_W_TMU0_S &&
		waddr <= QPU_W_TMU1_B);
}

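/* Writing the S coordinate register is what actually submits a texture
 * lookup on this hardware; the T/R/B registers only queue up parameters.
 * That is why is_tmu_submit() singles out the S registers while
 * is_tmu_write() covers the whole per-TMU register range.
 */
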
static bool
record_validated_texture_sample(struct vc4_validated_shader_info *validated_shader,
				struct vc4_shader_validation_state *validation_state,
				int tmu)
{
	uint32_t s = validated_shader->num_texture_samples;
	int i;
	struct vc4_texture_sample_info *temp_samples;

	temp_samples = krealloc(validated_shader->texture_samples,
				(s + 1) * sizeof(*temp_samples),
				GFP_KERNEL);
	if (!temp_samples)
		return false;

	memcpy(&temp_samples[s],
	       &validation_state->tmu_setup[tmu],
	       sizeof(*temp_samples));

	validated_shader->num_texture_samples = s + 1;
	validated_shader->texture_samples = temp_samples;

	for (i = 0; i < 4; i++)
		validation_state->tmu_setup[tmu].p_offset[i] = ~0;

	return true;
}

static bool
check_tmu_write(uint64_t inst,
		struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	int tmu = waddr > QPU_W_TMU0_B;
	bool submit = is_tmu_submit(waddr);
	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (is_direct) {
		uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
		uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
		uint32_t clamp_offset = ~0;

		if (sig == QPU_SIG_SMALL_IMM) {
			DRM_ERROR("direct TMU read used small immediate\n");
			return false;
		}

		/* Make sure that this texture load is an add of the base
		 * address of the UBO to a clamped offset within the UBO.
		 */
		if (is_mul ||
		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
			DRM_ERROR("direct TMU load wasn't an add\n");
			return false;
		}

		/* We assert that the clamped address is the first
		 * argument, and the UBO base address is the second argument.
		 * This is arbitrary, but simpler than supporting flipping the
		 * two either way.
		 */
		if (add_a == QPU_MUX_A) {
			clamp_offset = validation_state->live_clamp_offsets[raddr_a];
		} else if (add_a == QPU_MUX_B) {
			clamp_offset = validation_state->live_clamp_offsets[32 + raddr_b];
		} else if (add_a <= QPU_MUX_R4) {
			clamp_offset = validation_state->live_clamp_offsets[64 + add_a];
		}

		if (clamp_offset == ~0) {
			DRM_ERROR("direct TMU load wasn't clamped\n");
			return false;
		}

		/* Store the clamp value's offset in p1 (see reloc_tex() in
		 * vc4_validate.c).
		 */
		validation_state->tmu_setup[tmu].p_offset[1] =
			clamp_offset;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("direct TMU load didn't add to a uniform\n");
			return false;
		}

		validation_state->tmu_setup[tmu].is_direct = true;
	} else {
		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
					      raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("uniform read in the same instruction as "
				  "texture setup.\n");
			return false;
		}
	}

	if (validation_state->tmu_write_count[tmu] >= 4) {
		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
			  tmu);
		return false;
	}
	validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
		validated_shader->uniforms_size;
	validation_state->tmu_write_count[tmu]++;
	/* Since direct uses a RADDR uniform reference, it will get counted in
	 * check_instruction_reads().
	 */
	if (!is_direct)
		validated_shader->uniforms_size += 4;

	if (submit) {
		if (!record_validated_texture_sample(validated_shader,
						     validation_state, tmu)) {
			return false;
		}

		validation_state->tmu_write_count[tmu] = 0;
	}

	return true;
}

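/* For reference, the direct-addressing pattern that check_tmu_write()
 * accepts looks roughly like this in QPU assembly (an illustrative sketch,
 * not output generated by this driver):
 *
 *	min r0, ra0, unif	; clamp the offset to the UBO size
 *				; (recorded by track_live_clamps() below)
 *	add tmu0_s, r0, unif	; UBO base address from a uniform, plus the
 *				; clamped offset, submitted to the TMU
 */
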
static bool
check_register_write(uint64_t inst,
		     struct vc4_validated_shader_info *validated_shader,
		     struct vc4_shader_validation_state *validation_state,
		     bool is_mul)
{
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	bool is_b = is_mul != ((inst & QPU_WS) != 0);
	uint32_t live_reg_index;

	switch (waddr) {
	case QPU_W_UNIFORMS_ADDRESS:
		/* XXX: We'll probably need to support this for reladdr, but
		 * it's definitely a security-related one.
		 */
		DRM_ERROR("uniforms address load unsupported\n");
		return false;

	case QPU_W_TLB_COLOR_MS:
	case QPU_W_TLB_COLOR_ALL:
	case QPU_W_TLB_Z:
		/* These only interact with the tile buffer, not main memory,
		 * so they're safe.
		 */
		return true;

	case QPU_W_TMU0_S:
	case QPU_W_TMU0_T:
	case QPU_W_TMU0_R:
	case QPU_W_TMU0_B:
	case QPU_W_TMU1_S:
	case QPU_W_TMU1_T:
	case QPU_W_TMU1_R:
	case QPU_W_TMU1_B:
		return check_tmu_write(inst, validated_shader, validation_state,
				       is_mul);

	case QPU_W_HOST_INT:
	case QPU_W_TMU_NOSWAP:
	case QPU_W_TLB_ALPHA_MASK:
	case QPU_W_MUTEX_RELEASE:
		/* XXX: I haven't thought about these, so don't support them
		 * for now.
		 */
		DRM_ERROR("Unsupported waddr %d\n", waddr);
		return false;

	case QPU_W_VPM_ADDR:
		DRM_ERROR("General VPM DMA unsupported\n");
		return false;

	case QPU_W_VPM:
	case QPU_W_VPMVCD_SETUP:
		/* We allow VPM setup in general, even including VPM DMA
		 * configuration setup, because the (unsafe) DMA can only be
		 * triggered by QPU_W_VPM_ADDR writes.
		 */
		return true;

	case QPU_W_TLB_STENCIL_SETUP:
		return true;
	}

	/* Clear out the live offset clamp tracking for the written register.
	 * If this particular instruction is setting up an offset clamp, it'll
	 * get tracked immediately after we return.
	 */
	live_reg_index = waddr_to_live_reg_index(waddr, is_b);
	if (live_reg_index != ~0)
		validation_state->live_clamp_offsets[live_reg_index] = ~0;

	return true;
}

static void
track_live_clamps(uint64_t inst,
		  struct vc4_validated_shader_info *validated_shader,
		  struct vc4_shader_validation_state *validation_state)
{
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	bool is_b = inst & QPU_WS;
	uint32_t live_reg_index;

	if (QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_MIN)
		return;

	if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
	    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
	      sig != QPU_SIG_SMALL_IMM)) {
		return;
	}

	live_reg_index = waddr_to_live_reg_index(waddr_add, is_b);
	if (live_reg_index != ~0) {
		validation_state->live_clamp_offsets[live_reg_index] =
			validated_shader->uniforms_size;
	}
}

static bool
check_instruction_writes(uint64_t inst,
			 struct vc4_validated_shader_info *validated_shader,
			 struct vc4_shader_validation_state *validation_state)
{
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	bool ok;

	if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
		DRM_ERROR("ADD and MUL both set up textures\n");
		return false;
	}

	ok = (check_register_write(inst, validated_shader, validation_state, false) &&
	      check_register_write(inst, validated_shader, validation_state, true));

	track_live_clamps(inst, validated_shader, validation_state);

	return ok;
}

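/* Note that the ordering above matters: check_register_write() first
 * invalidates any stale clamp tracked for the written register, and only
 * then does track_live_clamps() record this instruction's MIN-with-uniform
 * result, so a clamp can never survive being overwritten.
 */
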
static bool
check_instruction_reads(uint64_t inst,
			struct vc4_validated_shader_info *validated_shader)
{
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (raddr_a == QPU_R_UNIF ||
	    (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
		/* This can't overflow the uint32_t, because we're reading 8
		 * bytes of instruction to increment by 4 here, so we'd
		 * already be out of instructions in the shader.
		 */
		validated_shader->uniforms_size += 4;
	}

	return true;
}

struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
	bool found_shader_end = false;
	int shader_end_ip = 0;
	uint32_t ip, max_ip;
	uint64_t *shader;
	struct vc4_validated_shader_info *validated_shader;
	struct vc4_shader_validation_state validation_state;
	int i;

	memset(&validation_state, 0, sizeof(validation_state));

	for (i = 0; i < 8; i++)
		validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
	for (i = 0; i < ARRAY_SIZE(validation_state.live_clamp_offsets); i++)
		validation_state.live_clamp_offsets[i] = ~0;

	shader = shader_obj->vaddr;
	max_ip = shader_obj->base.size / sizeof(uint64_t);

	validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
	if (!validated_shader)
		return NULL;

	for (ip = 0; ip < max_ip; ip++) {
		uint64_t inst = shader[ip];
		uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

		switch (sig) {
		case QPU_SIG_NONE:
		case QPU_SIG_WAIT_FOR_SCOREBOARD:
		case QPU_SIG_SCOREBOARD_UNLOCK:
		case QPU_SIG_COLOR_LOAD:
		case QPU_SIG_LOAD_TMU0:
		case QPU_SIG_LOAD_TMU1:
		case QPU_SIG_PROG_END:
		case QPU_SIG_SMALL_IMM:
			if (!check_instruction_writes(inst, validated_shader,
						      &validation_state)) {
				DRM_ERROR("Bad write at ip %d\n", ip);
				goto fail;
			}

			if (!check_instruction_reads(inst, validated_shader))
				goto fail;

			if (sig == QPU_SIG_PROG_END) {
				found_shader_end = true;
				shader_end_ip = ip;
			}

			break;

		case QPU_SIG_LOAD_IMM:
			if (!check_instruction_writes(inst, validated_shader,
						      &validation_state)) {
				DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
				goto fail;
			}
			break;

		default:
			DRM_ERROR("Unsupported QPU signal %d at "
				  "instruction %d\n", sig, ip);
			goto fail;
		}

		/* There are two delay slots after program end is signaled
		 * that are still executed, then we're finished.
		 */
		if (found_shader_end && ip == shader_end_ip + 2)
			break;
	}

	if (ip == max_ip && !found_shader_end) {
		DRM_ERROR("shader failed to terminate before "
			  "shader BO end at %zd\n",
			  shader_obj->base.size);
		goto fail;
	}

	/* Again, no chance of integer overflow here because the worst case
	 * scenario is 8 bytes of uniforms plus handles per 8-byte
	 * instruction.
	 */
	validated_shader->uniforms_src_size =
		(validated_shader->uniforms_size +
		 4 * validated_shader->num_texture_samples);

	return validated_shader;

fail:
	kfree(validated_shader);
	return NULL;
}