/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Shader validator for VC4.
 *
 * The VC4 has no IOMMU between it and system memory, so a user with
 * access to execute shaders could escalate privilege by overwriting
 * system memory (using the VPM write address register in the
 * general-purpose DMA mode) or reading system memory it shouldn't
 * (reading it as a texture, or uniform data, or vertex data).
 *
 * This walks over a shader BO, ensuring that its accesses are
 * appropriately bounded, and recording how many texture accesses are
 * made and where so that we can do relocations for them in the
 * uniform stream.
 */

#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
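
/* Validation happens once, at shader BO creation time.  A minimal sketch of
 * the expected call site (the surrounding names are illustrative, not quoted
 * from the rest of the driver):
 *
 *	bo->validated_shader = vc4_validate_shader(&bo->base);
 *	if (!bo->validated_shader)
 *		return -EINVAL;
 *
 * On success the returned vc4_validated_shader_info records how many bytes
 * of uniforms the shader reads and where its texture samples are, which is
 * what the uniform-stream relocation code consumes.
 */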

struct vc4_shader_validation_state {
	/* Current IP being validated. */
	uint32_t ip;

	/* IP at the end of the BO, do not read shader[max_ip] */
	uint32_t max_ip;

	uint64_t *shader;

	struct vc4_texture_sample_info tmu_setup[2];
	int tmu_write_count[2];

	/* For registers that were last written to by a MIN instruction with
	 * one argument being a uniform, the address of the uniform.
	 * Otherwise, ~0.
	 *
	 * This is used for the validation of direct address memory reads.
	 */
	uint32_t live_min_clamp_offsets[32 + 32 + 4];
	bool live_max_clamp_regs[32 + 32 + 4];

	/* Bitfield of which IPs are used as branch targets.
	 *
	 * Used for validation that the uniform stream is updated at the right
	 * points and clearing the texturing/clamping state.
	 */
	unsigned long *branch_targets;
};
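
/* A note on the index space shared by live_min_clamp_offsets[] and
 * live_max_clamp_regs[], matching waddr_to_live_reg_index() below:
 * indices 0-31 are regfile A, 32-63 are regfile B, and 64-67 are the
 * accumulators r0-r3, hence the 32 + 32 + 4 sizing.
 */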

static uint32_t
waddr_to_live_reg_index(uint32_t waddr, bool is_b)
{
	if (waddr < 32) {
		if (is_b)
			return 32 + waddr;
		else
			return waddr;
	} else if (waddr <= QPU_W_ACC3) {
		return 64 + waddr - QPU_W_ACC0;
	} else {
		return ~0;
	}
}

static uint32_t
raddr_add_a_to_live_reg_index(uint64_t inst)
{
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);

	if (add_a == QPU_MUX_A)
		return raddr_a;
	else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
		return 32 + raddr_b;
	else if (add_a <= QPU_MUX_R3)
		return 64 + add_a;
	else
		return ~0;
}

static bool
is_tmu_submit(uint32_t waddr)
{
	return (waddr == QPU_W_TMU0_S ||
		waddr == QPU_W_TMU1_S);
}
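
/* Only writes to the S (coordinate) registers actually fire off a texture
 * lookup; writes to the other TMU registers just latch parameters for the
 * next submit.  That is why only the two _S addresses count as a dispatch.
 */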

static bool
is_tmu_write(uint32_t waddr)
{
	return (waddr >= QPU_W_TMU0_S &&
		waddr <= QPU_W_TMU1_B);
}

static bool
record_texture_sample(struct vc4_validated_shader_info *validated_shader,
		      struct vc4_shader_validation_state *validation_state,
		      int tmu)
{
	uint32_t s = validated_shader->num_texture_samples;
	int i;
	struct vc4_texture_sample_info *temp_samples;

	temp_samples = krealloc(validated_shader->texture_samples,
				(s + 1) * sizeof(*temp_samples),
				GFP_KERNEL);
	if (!temp_samples)
		return false;

	memcpy(&temp_samples[s],
	       &validation_state->tmu_setup[tmu],
	       sizeof(*temp_samples));

	validated_shader->num_texture_samples = s + 1;
	validated_shader->texture_samples = temp_samples;

	for (i = 0; i < 4; i++)
		validation_state->tmu_setup[tmu].p_offset[i] = ~0;

	return true;
}
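
/* Resetting p_offset[] to ~0 after recording a sample matters: ~0 is the
 * "this TMU parameter was never written" marker for the relocation code,
 * so stale offsets from one texture setup must not leak into the next.
 */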

static bool
check_tmu_write(struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	int tmu = waddr > QPU_W_TMU0_B;
	bool submit = is_tmu_submit(waddr);
	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (is_direct) {
		uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
		uint32_t clamp_reg, clamp_offset;

		if (sig == QPU_SIG_SMALL_IMM) {
			DRM_ERROR("direct TMU read used small immediate\n");
			return false;
		}

		/* Make sure that this texture load is an add of the base
		 * address of the UBO to a clamped offset within the UBO.
		 */
		if (is_mul ||
		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
			DRM_ERROR("direct TMU load wasn't an add\n");
			return false;
		}

		/* We assert that the clamped address is the first
		 * argument, and the UBO base address is the second argument.
		 * This is arbitrary, but simpler than supporting flipping the
		 * two either way.
		 */
		clamp_reg = raddr_add_a_to_live_reg_index(inst);
		if (clamp_reg == ~0) {
			DRM_ERROR("direct TMU load wasn't clamped\n");
			return false;
		}

		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
		if (clamp_offset == ~0) {
			DRM_ERROR("direct TMU load wasn't clamped\n");
			return false;
		}

		/* Store the clamp value's offset in p1 (see reloc_tex() in
		 * vc4_validate.c).
		 */
		validation_state->tmu_setup[tmu].p_offset[1] =
			clamp_offset;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("direct TMU load didn't add to a uniform\n");
			return false;
		}

		validation_state->tmu_setup[tmu].is_direct = true;
	} else {
		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
					      raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("uniform read in the same instruction as "
				  "texture setup.\n");
			return false;
		}
	}

	if (validation_state->tmu_write_count[tmu] >= 4) {
		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
			  tmu);
		return false;
	}
	validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
		validated_shader->uniforms_size;
	validation_state->tmu_write_count[tmu]++;
	/* Since direct uses a RADDR uniform reference, it will get counted in
	 * check_instruction_reads()
	 */
	if (!is_direct)
		validated_shader->uniforms_size += 4;

	if (submit) {
		if (!record_texture_sample(validated_shader,
					   validation_state, tmu)) {
			return false;
		}

		validation_state->tmu_write_count[tmu] = 0;
	}

	return true;
}
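
/* For reference, the direct-TMU pattern validated above looks roughly like
 * this in QPU assembly (illustrative, not taken from a real shader):
 *
 *	max	ra1, rb1, 0		; offset clamped to >= 0
 *	min	ra1, ra1, unif		; ... and to <= the UBO size uniform
 *	add	t0s, ra1, unif		; UBO base address + clamped offset
 *
 * The MAX/MIN pair is what track_live_clamps() records, and the final ADD
 * is what check_tmu_write() verifies for the first (submitting) TMU write.
 */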

static bool
check_reg_write(struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));

	switch (waddr) {
	case QPU_W_UNIFORMS_ADDRESS:
		/* XXX: We'll probably need to support this for reladdr, but
		 * it's definitely a security-related one.
		 */
		DRM_ERROR("uniforms address load unsupported\n");
		return false;

	case QPU_W_TLB_COLOR_MS:
	case QPU_W_TLB_COLOR_ALL:
	case QPU_W_TLB_Z:
		/* These only interact with the tile buffer, not main memory,
		 * so they're safe.
		 */
		return true;

	case QPU_W_TMU0_S:
	case QPU_W_TMU0_T:
	case QPU_W_TMU0_R:
	case QPU_W_TMU0_B:
	case QPU_W_TMU1_S:
	case QPU_W_TMU1_T:
	case QPU_W_TMU1_R:
	case QPU_W_TMU1_B:
		return check_tmu_write(validated_shader, validation_state,
				       is_mul);

	case QPU_W_HOST_INT:
	case QPU_W_TMU_NOSWAP:
	case QPU_W_TLB_ALPHA_MASK:
	case QPU_W_MUTEX_RELEASE:
		/* XXX: I haven't thought about these, so don't support them
		 * for now.
		 */
		DRM_ERROR("Unsupported waddr %d\n", waddr);
		return false;

	case QPU_W_VPM_ADDR:
		DRM_ERROR("General VPM DMA unsupported\n");
		return false;

	case QPU_W_VPM:
	case QPU_W_VPMVCD_SETUP:
		/* We allow VPM setup in general, even including VPM DMA
		 * configuration setup, because the (unsafe) DMA can only be
		 * triggered by QPU_W_VPM_ADDR writes.
		 */
		return true;

	case QPU_W_TLB_STENCIL_SETUP:
		return true;
	}

	return true;
}

static void
track_live_clamps(struct vc4_validated_shader_info *validated_shader,
		  struct vc4_shader_validation_state *validation_state)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
	uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	bool ws = inst & QPU_WS;
	uint32_t lri_add_a, lri_add, lri_mul;
	bool add_a_is_min_0;

	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
	 * before we clear previous live state.
	 */
	lri_add_a = raddr_add_a_to_live_reg_index(inst);
	add_a_is_min_0 = (lri_add_a != ~0 &&
			  validation_state->live_max_clamp_regs[lri_add_a]);

	/* Clear live state for registers written by our instruction. */
	lri_add = waddr_to_live_reg_index(waddr_add, ws);
	lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
	if (lri_mul != ~0) {
		validation_state->live_max_clamp_regs[lri_mul] = false;
		validation_state->live_min_clamp_offsets[lri_mul] = ~0;
	}
	if (lri_add != ~0) {
		validation_state->live_max_clamp_regs[lri_add] = false;
		validation_state->live_min_clamp_offsets[lri_add] = ~0;
	} else {
		/* Nothing further to do for live tracking, since only ADDs
		 * generate new live clamp registers.
		 */
		return;
	}

	/* Now, handle remaining live clamp tracking for the ADD operation. */

	if (cond_add != QPU_COND_ALWAYS)
		return;

	if (op_add == QPU_A_MAX) {
		/* Track live clamps of a value to a minimum of 0 (in either
		 * arg).
		 */
		if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
		    (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
			return;
		}

		validation_state->live_max_clamp_regs[lri_add] = true;
	} else if (op_add == QPU_A_MIN) {
		/* Track live clamps of a value clamped to a minimum of 0 and
		 * a maximum of some uniform's offset.
		 */
		if (!add_a_is_min_0)
			return;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
		      sig != QPU_SIG_SMALL_IMM)) {
			return;
		}

		validation_state->live_min_clamp_offsets[lri_add] =
			validated_shader->uniforms_size;
	}
}
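
/* The ordering in track_live_clamps() is deliberate: an instruction that
 * both consumes and overwrites a clamped register (e.g. "min ra2, ra2,
 * unif") must have its A argument sampled before the written registers'
 * live state is cleared.  Conditional ADDs never establish new live state,
 * since a skipped clamp would leave an unclamped value live.
 */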

static bool
check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
			 struct vc4_shader_validation_state *validation_state)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	bool ok;

	if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
		DRM_ERROR("ADD and MUL both set up textures\n");
		return false;
	}

	ok = (check_reg_write(validated_shader, validation_state, false) &&
	      check_reg_write(validated_shader, validation_state, true));

	track_live_clamps(validated_shader, validation_state);

	return ok;
}

static bool
check_instruction_reads(uint64_t inst,
			struct vc4_validated_shader_info *validated_shader)
{
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (raddr_a == QPU_R_UNIF ||
	    (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
		/* This can't overflow the uint32_t, because we're reading 8
		 * bytes of instruction to increment by 4 here, so we'd
		 * already be out of memory.
		 */
		validated_shader->uniforms_size += 4;
	}

	return true;
}
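
/* Each uniform read consumes one 32-bit slot of the uniform stream, so
 * uniforms_size accumulates the total bytes of uniforms the shader will
 * read; vc4_validate_shader() later adds 4 bytes per recorded texture
 * sample on top of this to size the incoming stream (uniforms_src_size).
 */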

/* Make sure that all branches are absolute and point within the shader, and
 * note their targets for later.
 */
static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
	uint32_t max_branch_target = 0;
	bool found_shader_end = false;
	int ip;
	int shader_end_ip = 0;
	int last_branch = -2;

	for (ip = 0; ip < validation_state->max_ip; ip++) {
		uint64_t inst = validation_state->shader[ip];
		int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
		uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
		uint32_t after_delay_ip = ip + 4;
		uint32_t branch_target_ip;

		if (sig == QPU_SIG_PROG_END) {
			shader_end_ip = ip;
			found_shader_end = true;
			continue;
		}

		if (sig != QPU_SIG_BRANCH)
			continue;

		if (ip - last_branch < 4) {
			DRM_ERROR("Branch at %d during delay slots\n", ip);
			return false;
		}
		last_branch = ip;

		if (inst & QPU_BRANCH_REG) {
			DRM_ERROR("branching from register relative "
				  "not supported\n");
			return false;
		}

		if (!(inst & QPU_BRANCH_REL)) {
			DRM_ERROR("relative branching required\n");
			return false;
		}

		/* The actual branch target is the instruction after the delay
		 * slots, plus whatever byte offset is in the low 32 bits of
		 * the instruction. Make sure we're not branching beyond the
		 * end of the shader object.
		 */
		if (branch_imm % sizeof(inst) != 0) {
			DRM_ERROR("branch target not aligned\n");
			return false;
		}

		branch_target_ip = after_delay_ip + (branch_imm >> 3);
		if (branch_target_ip >= validation_state->max_ip) {
			DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
				  ip, branch_target_ip,
				  validation_state->max_ip);
			return false;
		}
		set_bit(branch_target_ip, validation_state->branch_targets);

		/* Make sure that the non-branching path is also not outside
		 * of the shader.
		 */
		if (after_delay_ip >= validation_state->max_ip) {
			DRM_ERROR("Branch at %d continues past shader end "
				  "(%d/%d)\n",
				  ip, after_delay_ip, validation_state->max_ip);
			return false;
		}
		set_bit(after_delay_ip, validation_state->branch_targets);
		max_branch_target = max(max_branch_target, after_delay_ip);

		/* There are two delay slots after program end is signaled
		 * that are still executed, then we're finished.
		 */
		if (found_shader_end && ip == shader_end_ip + 2)
			break;
	}

	if (max_branch_target > shader_end_ip) {
		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
		return false;
	}

	return true;
}
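
/* A worked example of the branch arithmetic above: QPU instructions are
 * 8 bytes, so a branch at ip 10 with branch_imm 16 lands at after_delay_ip
 * (10 + 4 = 14) plus 16 >> 3 = 2, i.e. instruction 16.  The "+ 4" covers
 * the three delay slots that still execute before the jump takes effect.
 */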

struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
	bool found_shader_end = false;
	int shader_end_ip = 0;
	uint32_t ip;
	struct vc4_validated_shader_info *validated_shader = NULL;
	struct vc4_shader_validation_state validation_state;
	int i;

	memset(&validation_state, 0, sizeof(validation_state));
	validation_state.shader = shader_obj->vaddr;
	validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);

	for (i = 0; i < 8; i++)
		validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
	for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
		validation_state.live_min_clamp_offsets[i] = ~0;

	validation_state.branch_targets =
		kcalloc(BITS_TO_LONGS(validation_state.max_ip),
			sizeof(unsigned long), GFP_KERNEL);
	if (!validation_state.branch_targets)
		goto fail;

	validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
	if (!validated_shader)
		goto fail;

	if (!vc4_validate_branches(&validation_state))
		goto fail;

	for (ip = 0; ip < validation_state.max_ip; ip++) {
		uint64_t inst = validation_state.shader[ip];
		uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

		validation_state.ip = ip;

		switch (sig) {
		case QPU_SIG_NONE:
		case QPU_SIG_WAIT_FOR_SCOREBOARD:
		case QPU_SIG_SCOREBOARD_UNLOCK:
		case QPU_SIG_COLOR_LOAD:
		case QPU_SIG_LOAD_TMU0:
		case QPU_SIG_LOAD_TMU1:
		case QPU_SIG_PROG_END:
		case QPU_SIG_SMALL_IMM:
			if (!check_instruction_writes(validated_shader,
						      &validation_state)) {
				DRM_ERROR("Bad write at ip %d\n", ip);
				goto fail;
			}

			if (!check_instruction_reads(inst, validated_shader))
				goto fail;

			if (sig == QPU_SIG_PROG_END) {
				found_shader_end = true;
				shader_end_ip = ip;
			}

			break;

		case QPU_SIG_LOAD_IMM:
			if (!check_instruction_writes(validated_shader,
						      &validation_state)) {
				DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
				goto fail;
			}
			break;

		default:
			DRM_ERROR("Unsupported QPU signal %d at "
				  "instruction %d\n", sig, ip);
			goto fail;
		}

		/* There are two delay slots after program end is signaled
		 * that are still executed, then we're finished.
		 */
		if (found_shader_end && ip == shader_end_ip + 2)
			break;
	}

	if (ip == validation_state.max_ip) {
		DRM_ERROR("shader failed to terminate before "
			  "shader BO end at %zd\n",
			  shader_obj->base.size);
		goto fail;
	}

	/* Again, no chance of integer overflow here because the worst case
	 * scenario is 8 bytes of uniforms plus handles per 8-byte
	 * instruction.
	 */
	validated_shader->uniforms_src_size =
		(validated_shader->uniforms_size +
		 4 * validated_shader->num_texture_samples);

	kfree(validation_state.branch_targets);

	return validated_shader;

fail:
	kfree(validation_state.branch_targets);
	if (validated_shader) {
		kfree(validated_shader->texture_samples);
		kfree(validated_shader);
	}

	return NULL;
}
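
/* On success the caller owns the returned structure; validated_shader->
 * texture_samples and the struct itself are expected to be kfree()d when
 * the shader BO is destroyed, mirroring the failure path above.
 */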