[mesa.git] src/gallium/drivers/vc4/kernel/vc4_validate_shaders.c
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /**
25 * DOC: Shader validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory, so a user with
28 * access to execute shaders could escalate privilege by overwriting
29 * system memory (using the VPM write address register in the
30 * general-purpose DMA mode) or reading system memory it shouldn't
31 * (reading it as a texture, or uniform data, or vertex data).
32 *
33 * This walks over a shader BO, ensuring that its accesses are
34 * appropriately bounded, and recording how many texture accesses are
35 * made and where so that we can do relocations for them in the
36 * uniform stream.
37 */
38
39 #include "vc4_drv.h"
40 #include "vc4_qpu.h"
41 #include "vc4_qpu_defines.h"
42
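/* Number of registers we track live values for: regfile A (waddr 0-31),
 * regfile B (waddr 32-63), and the accumulators r0-r3.
 */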
43 #define LIVE_REG_COUNT (32 + 32 + 4)
44
45 struct vc4_shader_validation_state {
46 /* Current IP being validated. */
47 uint32_t ip;
48
49 /* IP at the end of the BO, do not read shader[max_ip] */
50 uint32_t max_ip;
51
52 uint64_t *shader;
53
54 struct vc4_texture_sample_info tmu_setup[2];
55 int tmu_write_count[2];
56
57 /* For registers that were last written to by a MIN instruction with
58 * one argument being a uniform, the address of the uniform.
59 * Otherwise, ~0.
60 *
61 * This is used for the validation of direct address memory reads.
62 */
63 uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
64 bool live_max_clamp_regs[LIVE_REG_COUNT];
65 uint32_t live_immediates[LIVE_REG_COUNT];
66
67 /* Bitfield of which IPs are used as branch targets.
68 *
69 * Used to validate that the uniform stream is updated at the right
70 * points and to clear the texturing/clamping state.
71 */
72 unsigned long *branch_targets;
73
74 /* Set when entering a basic block, and cleared when the uniform
75 * address update is found. This is used to make sure that we don't
76 * read uniforms when the address is undefined.
77 */
78 bool needs_uniform_address_update;
79
80 /* Set when we find a backwards branch. If the branch is backwards,
81 * the target is probably doing an address reset to read uniforms,
82 * and so we need to be sure that a uniforms address is present in the
83 * stream, even if the shader didn't need to read uniforms in later
84 * basic blocks.
85 */
86 bool needs_uniform_address_for_loop;
87 };
88
89 static uint32_t
90 waddr_to_live_reg_index(uint32_t waddr, bool is_b)
91 {
92 if (waddr < 32) {
93 if (is_b)
94 return 32 + waddr;
95 else
96 return waddr;
97 } else if (waddr <= QPU_W_ACC3) {
98 return 64 + waddr - QPU_W_ACC0;
99 } else {
100 return ~0;
101 }
102 }
103
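/* Returns the live register index that the ADD op's first argument is read
 * from, or ~0 if it doesn't come from a tracked register (for example when
 * the B mux carries a small immediate).
 */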
104 static uint32_t
105 raddr_add_a_to_live_reg_index(uint64_t inst)
106 {
107 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
108 uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
109 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
110 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
111
112 if (add_a == QPU_MUX_A)
113 return raddr_a;
114 else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
115 return 32 + raddr_b;
116 else if (add_a <= QPU_MUX_R3)
117 return 64 + add_a;
118 else
119 return ~0;
120 }
121
122 static bool
123 is_tmu_submit(uint32_t waddr)
124 {
125 return (waddr == QPU_W_TMU0_S ||
126 waddr == QPU_W_TMU1_S);
127 }
128
129 static bool
130 is_tmu_write(uint32_t waddr)
131 {
132 return (waddr >= QPU_W_TMU0_S &&
133 waddr <= QPU_W_TMU1_B);
134 }
135
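/* Appends the current TMU setup (the uniform stream offsets of its parameter
 * writes) to the shader's texture sample list, then resets the per-TMU
 * p_offset state for the next sample.
 */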
136 static bool
137 record_texture_sample(struct vc4_validated_shader_info *validated_shader,
138 struct vc4_shader_validation_state *validation_state,
139 int tmu)
140 {
141 uint32_t s = validated_shader->num_texture_samples;
142 int i;
143 struct vc4_texture_sample_info *temp_samples;
144
145 temp_samples = krealloc(validated_shader->texture_samples,
146 (s + 1) * sizeof(*temp_samples),
147 GFP_KERNEL);
148 if (!temp_samples)
149 return false;
150
151 memcpy(&temp_samples[s],
152 &validation_state->tmu_setup[tmu],
153 sizeof(*temp_samples));
154
155 validated_shader->num_texture_samples = s + 1;
156 validated_shader->texture_samples = temp_samples;
157
158 for (i = 0; i < 4; i++)
159 validation_state->tmu_setup[tmu].p_offset[i] = ~0;
160
161 return true;
162 }
163
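/* Validates a write to one of the TMU registers (S/T/R/B).
 *
 * For a direct-addressed read (a submit with no prior parameter writes), the
 * written value must be the sum of a uniform (the base address) and a
 * register holding a clamped offset, so the access stays within the bound
 * buffer.  Otherwise the parameter comes from the uniform stream and its
 * offset is recorded for relocation.
 */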
164 static bool
165 check_tmu_write(struct vc4_validated_shader_info *validated_shader,
166 struct vc4_shader_validation_state *validation_state,
167 bool is_mul)
168 {
169 uint64_t inst = validation_state->shader[validation_state->ip];
170 uint32_t waddr = (is_mul ?
171 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
172 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
173 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
174 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
175 int tmu = waddr > QPU_W_TMU0_B;
176 bool submit = is_tmu_submit(waddr);
177 bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
178 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
179
180 if (is_direct) {
181 uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
182 uint32_t clamp_reg, clamp_offset;
183
184 if (sig == QPU_SIG_SMALL_IMM) {
185 DRM_ERROR("direct TMU read used small immediate\n");
186 return false;
187 }
188
189 /* Make sure that this texture load is an add of the base
190 * address of the UBO to a clamped offset within the UBO.
191 */
192 if (is_mul ||
193 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
194 DRM_ERROR("direct TMU load wasn't an add\n");
195 return false;
196 }
197
198 /* We assert that the clamped address is the first
199 * argument, and the UBO base address is the second argument.
200 * This is arbitrary, but simpler than supporting flipping the
201 * two either way.
202 */
203 clamp_reg = raddr_add_a_to_live_reg_index(inst);
204 if (clamp_reg == ~0) {
205 DRM_ERROR("direct TMU load wasn't clamped\n");
206 return false;
207 }
208
209 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
210 if (clamp_offset == ~0) {
211 DRM_ERROR("direct TMU load wasn't clamped\n");
212 return false;
213 }
214
215 /* Store the clamp value's offset in p1 (see reloc_tex() in
216 * vc4_validate.c).
217 */
218 validation_state->tmu_setup[tmu].p_offset[1] =
219 clamp_offset;
220
221 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
222 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
223 DRM_ERROR("direct TMU load didn't add to a uniform\n");
224 return false;
225 }
226
227 validation_state->tmu_setup[tmu].is_direct = true;
228 } else {
229 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
230 raddr_b == QPU_R_UNIF)) {
231 DRM_ERROR("uniform read in the same instruction as "
232 "texture setup.\n");
233 return false;
234 }
235 }
236
237 if (validation_state->tmu_write_count[tmu] >= 4) {
238 DRM_ERROR("TMU%d got too many parameters before dispatch\n",
239 tmu);
240 return false;
241 }
242 validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
243 validated_shader->uniforms_size;
244 validation_state->tmu_write_count[tmu]++;
245 /* Since direct uses a RADDR uniform reference, it will get counted in
246 * check_instruction_reads()
247 */
248 if (!is_direct) {
249 if (validation_state->needs_uniform_address_update) {
250 DRM_ERROR("Texturing with undefined uniform address\n");
251 return false;
252 }
253
254 validated_shader->uniforms_size += 4;
255 }
256
257 if (submit) {
258 if (!record_texture_sample(validated_shader,
259 validation_state, tmu)) {
260 return false;
261 }
262
263 validation_state->tmu_write_count[tmu] = 0;
264 }
265
266 return true;
267 }
268
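/* Records the current position in the uniform stream as one where a uniforms
 * base address must be supplied (the relocation code in vc4_validate.c is
 * expected to patch the real uniforms address in at that slot).
 */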
269 static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
270 {
271 uint32_t o = validated_shader->num_uniform_addr_offsets;
272 uint32_t num_uniforms = validated_shader->uniforms_size / 4;
273
274 validated_shader->uniform_addr_offsets =
275 krealloc(validated_shader->uniform_addr_offsets,
276 (o + 1) *
277 sizeof(*validated_shader->uniform_addr_offsets),
278 GFP_KERNEL);
279 if (!validated_shader->uniform_addr_offsets)
280 return false;
281
282 validated_shader->uniform_addr_offsets[o] = num_uniforms;
283 validated_shader->num_uniform_addr_offsets++;
284
285 return true;
286 }
287
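/* Validates a write to QPU_W_UNIFORMS_ADDRESS.  The write must be an
 * unconditional ADD of a known immediate (the byte offset of the uniform
 * that follows the address) and a uniform (the stream's base address), so
 * the reset lands right after the address uniform in the stream.
 */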
288 static bool
289 validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
290 struct vc4_shader_validation_state *validation_state,
291 bool is_mul)
292 {
293 uint64_t inst = validation_state->shader[validation_state->ip];
294 u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
295 u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
296 u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
297 u32 add_lri = raddr_add_a_to_live_reg_index(inst);
298 /* We want our reset to be pointing at whatever uniform follows the
299 * uniforms base address.
300 */
301 u32 expected_offset = validated_shader->uniforms_size + 4;
302
303 /* We only support absolute uniform address changes, and we
304 * require that they be in the current basic block before any
305 * of its uniform reads.
306 *
307 * One could potentially emit more efficient QPU code, by
308 * noticing that (say) an if statement does uniform control
309 * flow for all threads and that the if reads the same number
310 * of uniforms on each side. However, this scheme is easy to
311 * validate so it's all we allow for now.
312 */
313
314 if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
315 DRM_ERROR("uniforms address change must be "
316 "normal math\n");
317 return false;
318 }
319
320 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
321 DRM_ERROR("Uniform address reset must be an ADD.\n");
322 return false;
323 }
324
325 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
326 DRM_ERROR("Uniform address reset must be unconditional.\n");
327 return false;
328 }
329
330 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
331 !(inst & QPU_PM)) {
332 DRM_ERROR("No packing allowed on uniforms reset\n");
333 return false;
334 }
335
336 if (add_lri == -1) {
337 DRM_ERROR("First argument of uniform address write must be "
338 "an immediate value.\n");
339 return false;
340 }
341
342 if (validation_state->live_immediates[add_lri] != expected_offset) {
343 DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
344 validation_state->live_immediates[add_lri],
345 expected_offset);
346 return false;
347 }
348
349 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
350 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
351 DRM_ERROR("Second argument of uniform address write must be "
352 "a uniform.\n");
353 return false;
354 }
355
356 validation_state->needs_uniform_address_update = false;
357 validation_state->needs_uniform_address_for_loop = false;
358 return require_uniform_address_uniform(validated_shader);
359 }
360
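/* Validates the write done by either the ADD or the MUL half of an
 * instruction, tracking values loaded by LOAD_IMM and rejecting writes to
 * addresses (the VPM DMA address, for example) that could reach memory
 * outside the shader's buffers.
 */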
361 static bool
362 check_reg_write(struct vc4_validated_shader_info *validated_shader,
363 struct vc4_shader_validation_state *validation_state,
364 bool is_mul)
365 {
366 uint64_t inst = validation_state->shader[validation_state->ip];
367 uint32_t waddr = (is_mul ?
368 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
369 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
370 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
371 bool ws = inst & QPU_WS;
372 bool is_b = is_mul ^ ws;
373 u32 lri = waddr_to_live_reg_index(waddr, is_b);
374
375 if (lri != -1) {
376 uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
377 uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
378
379 if (sig == QPU_SIG_LOAD_IMM &&
380 QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
381 ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
382 (!is_mul && cond_add == QPU_COND_ALWAYS))) {
383 validation_state->live_immediates[lri] =
384 QPU_GET_FIELD(inst, QPU_LOAD_IMM);
385 } else {
386 validation_state->live_immediates[lri] = ~0;
387 }
388 }
389
390 switch (waddr) {
391 case QPU_W_UNIFORMS_ADDRESS:
392 if (is_b) {
393 DRM_ERROR("relative uniforms address change "
394 "unsupported\n");
395 return false;
396 }
397
398 return validate_uniform_address_write(validated_shader,
399 validation_state,
400 is_mul);
401
402 case QPU_W_TLB_COLOR_MS:
403 case QPU_W_TLB_COLOR_ALL:
404 case QPU_W_TLB_Z:
405 /* These only interact with the tile buffer, not main memory,
406 * so they're safe.
407 */
408 return true;
409
410 case QPU_W_TMU0_S:
411 case QPU_W_TMU0_T:
412 case QPU_W_TMU0_R:
413 case QPU_W_TMU0_B:
414 case QPU_W_TMU1_S:
415 case QPU_W_TMU1_T:
416 case QPU_W_TMU1_R:
417 case QPU_W_TMU1_B:
418 return check_tmu_write(validated_shader, validation_state,
419 is_mul);
420
421 case QPU_W_HOST_INT:
422 case QPU_W_TMU_NOSWAP:
423 case QPU_W_TLB_ALPHA_MASK:
424 case QPU_W_MUTEX_RELEASE:
425 /* XXX: I haven't thought about these, so don't support them
426 * for now.
427 */
428 DRM_ERROR("Unsupported waddr %d\n", waddr);
429 return false;
430
431 case QPU_W_VPM_ADDR:
432 DRM_ERROR("General VPM DMA unsupported\n");
433 return false;
434
435 case QPU_W_VPM:
436 case QPU_W_VPMVCD_SETUP:
437 /* We allow VPM setup in general, even including VPM DMA
438 * configuration setup, because the (unsafe) DMA can only be
439 * triggered by QPU_W_VPM_ADDR writes.
440 */
441 return true;
442
443 case QPU_W_TLB_STENCIL_SETUP:
444 return true;
445 }
446
447 return true;
448 }
449
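/* Tracks which registers currently hold a value clamped to [0, uniform]: a
 * MAX(x, 0) marks a register in live_max_clamp_regs, and a following
 * MIN(that, uniform) records the uniform's offset in live_min_clamp_offsets.
 * check_tmu_write() relies on this to validate direct-addressed texture
 * reads.
 */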
450 static void
451 track_live_clamps(struct vc4_validated_shader_info *validated_shader,
452 struct vc4_shader_validation_state *validation_state)
453 {
454 uint64_t inst = validation_state->shader[validation_state->ip];
455 uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
456 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
457 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
458 uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
459 uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
460 uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
461 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
462 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
463 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
464 bool ws = inst & QPU_WS;
465 uint32_t lri_add_a, lri_add, lri_mul;
466 bool add_a_is_min_0;
467
468 /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
469 * before we clear previous live state.
470 */
471 lri_add_a = raddr_add_a_to_live_reg_index(inst);
472 add_a_is_min_0 = (lri_add_a != ~0 &&
473 validation_state->live_max_clamp_regs[lri_add_a]);
474
475 /* Clear live state for registers written by our instruction. */
476 lri_add = waddr_to_live_reg_index(waddr_add, ws);
477 lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
478 if (lri_mul != ~0) {
479 validation_state->live_max_clamp_regs[lri_mul] = false;
480 validation_state->live_min_clamp_offsets[lri_mul] = ~0;
481 }
482 if (lri_add != ~0) {
483 validation_state->live_max_clamp_regs[lri_add] = false;
484 validation_state->live_min_clamp_offsets[lri_add] = ~0;
485 } else {
486 /* Nothing further to do for live tracking, since only ADDs
487 * generate new live clamp registers.
488 */
489 return;
490 }
491
492 /* Now, handle remaining live clamp tracking for the ADD operation. */
493
494 if (cond_add != QPU_COND_ALWAYS)
495 return;
496
497 if (op_add == QPU_A_MAX) {
498 /* Track live clamps of a value to a minimum of 0 (in either
499 * arg).
500 */
501 if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
502 (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
503 return;
504 }
505
506 validation_state->live_max_clamp_regs[lri_add] = true;
507 } else if (op_add == QPU_A_MIN) {
508 /* Track live clamps of a value clamped to a minimum of 0 and
509 * a maximum of some uniform's offset.
510 */
511 if (!add_a_is_min_0)
512 return;
513
514 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
515 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
516 sig != QPU_SIG_SMALL_IMM)) {
517 return;
518 }
519
520 validation_state->live_min_clamp_offsets[lri_add] =
521 validated_shader->uniforms_size;
522 }
523 }
524
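/* Validates both register writes of an ALU instruction and then updates the
 * live clamp tracking.  Only one of the ADD and MUL pipelines may write TMU
 * setup registers in a single instruction.
 */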
525 static bool
526 check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
527 struct vc4_shader_validation_state *validation_state)
528 {
529 uint64_t inst = validation_state->shader[validation_state->ip];
530 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
531 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
532 bool ok;
533
534 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
535 DRM_ERROR("ADD and MUL both set up textures\n");
536 return false;
537 }
538
539 ok = (check_reg_write(validated_shader, validation_state, false) &&
540 check_reg_write(validated_shader, validation_state, true));
541
542 track_live_clamps(validated_shader, validation_state);
543
544 return ok;
545 }
546
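/* Validates a branch instruction: it must not also write a register, and a
 * backwards branch (a probable loop) means a uniforms address will have to
 * be present in the stream (see needs_uniform_address_for_loop).
 */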
547 static bool
548 check_branch(uint64_t inst,
549 struct vc4_validated_shader_info *validated_shader,
550 struct vc4_shader_validation_state *validation_state,
551 int ip)
552 {
553 int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
554 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
555 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
556
557 if ((int)branch_imm < 0)
558 validation_state->needs_uniform_address_for_loop = true;
559
560 /* We don't want to have to worry about validation of this, and
561 * there's no need for it.
562 */
563 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
564 DRM_ERROR("branch instruction at %d wrote a register.\n",
565 validation_state->ip);
566 return false;
567 }
568
569 return true;
570 }
571
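/* Accounts for uniform reads (each consumes 4 bytes of the uniform stream)
 * and rejects them while the uniforms address is still undefined for this
 * basic block.
 */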
572 static bool
573 check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
574 struct vc4_shader_validation_state *validation_state)
575 {
576 uint64_t inst = validation_state->shader[validation_state->ip];
577 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
578 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
579 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
580
581 if (raddr_a == QPU_R_UNIF ||
582 (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
583 /* This can't overflow the uint32_t, because we're reading 8
584 * bytes of instruction to increment by 4 here, so we'd
585 * already be OOM.
586 */
587 validated_shader->uniforms_size += 4;
588
589 if (validation_state->needs_uniform_address_update) {
590 DRM_ERROR("Uniform read with undefined uniform "
591 "address\n");
592 return false;
593 }
594 }
595
596 return true;
597 }
598
599 /* Make sure that all branches are absolute and point within the shader, and
600 * note their targets for later.
601 */
602 static bool
603 vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
604 {
605 uint32_t max_branch_target = 0;
606 bool found_shader_end = false;
607 int ip;
608 int shader_end_ip = 0;
609 int last_branch = -2;
610
611 for (ip = 0; ip < validation_state->max_ip; ip++) {
612 uint64_t inst = validation_state->shader[ip];
613 int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
614 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
615 uint32_t after_delay_ip = ip + 4;
616 uint32_t branch_target_ip;
617
618 if (sig == QPU_SIG_PROG_END) {
619 shader_end_ip = ip;
620 found_shader_end = true;
621 continue;
622 }
623
624 if (sig != QPU_SIG_BRANCH)
625 continue;
626
627 if (ip - last_branch < 4) {
628 DRM_ERROR("Branch at %d during delay slots\n", ip);
629 return false;
630 }
631 last_branch = ip;
632
633 if (inst & QPU_BRANCH_REG) {
634 DRM_ERROR("branching from register relative "
635 "not supported\n");
636 return false;
637 }
638
639 if (!(inst & QPU_BRANCH_REL)) {
640 DRM_ERROR("relative branching required\n");
641 return false;
642 }
643
644 /* The actual branch target is the instruction after the delay
645 * slots, plus whatever byte offset is in the low 32 bits of
646 * the instruction. Make sure we're not branching beyond the
647 * end of the shader object.
648 */
649 if (branch_imm % sizeof(inst) != 0) {
650 DRM_ERROR("branch target not aligned\n");
651 return false;
652 }
653
654 branch_target_ip = after_delay_ip + (branch_imm >> 3);
655 if (branch_target_ip >= validation_state->max_ip) {
656 DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
657 ip, branch_target_ip,
658 validation_state->max_ip);
659 return false;
660 }
661 set_bit(branch_target_ip, validation_state->branch_targets);
662
663 /* Make sure that the non-branching path is also not outside
664 * the shader.
665 */
666 if (after_delay_ip >= validation_state->max_ip) {
667 DRM_ERROR("Branch at %d continues past shader end "
668 "(%d/%d)\n",
669 ip, after_delay_ip, validation_state->max_ip);
670 return false;
671 }
672 set_bit(after_delay_ip, validation_state->branch_targets);
673 max_branch_target = max(max_branch_target, after_delay_ip);
674
675 /* There are two delay slots after program end is signaled
676 * that are still executed, then we're finished.
677 */
678 if (found_shader_end && ip == shader_end_ip + 2)
679 break;
680 }
681
682 if (max_branch_target > shader_end_ip) {
683 DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
684 return false;
685 }
686
687 return true;
688 }
689
690 /* Resets any known state for the shader, used when we may be branched to from
691 * multiple locations in the program (or at shader start).
692 */
693 static void
694 reset_validation_state(struct vc4_shader_validation_state *validation_state)
695 {
696 int i;
697
698 for (i = 0; i < 8; i++)
699 validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
700
701 for (i = 0; i < LIVE_REG_COUNT; i++) {
702 validation_state->live_min_clamp_offsets[i] = ~0;
703 validation_state->live_max_clamp_regs[i] = false;
704 validation_state->live_immediates[i] = ~0;
705 }
706 }
707
708 static bool
709 texturing_in_progress(struct vc4_shader_validation_state *validation_state)
710 {
711 return (validation_state->tmu_write_count[0] != 0 ||
712 validation_state->tmu_write_count[1] != 0);
713 }
714
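/* Called for each instruction: if this IP is a branch target, make sure no
 * texture setup is in flight, throw away the live value tracking, and
 * require a uniforms address update before any further uniform reads.
 */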
715 static bool
716 vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
717 {
718 uint32_t ip = validation_state->ip;
719
720 if (!test_bit(ip, validation_state->branch_targets))
721 return true;
722
723 if (texturing_in_progress(validation_state)) {
724 DRM_ERROR("Branch target landed during TMU setup\n");
725 return false;
726 }
727
728 /* Reset our live values tracking, since this instruction may have
729 * multiple predecessors.
730 *
731 * One could potentially do analysis to determine that, for
732 * example, all predecessors have a live max clamp in the same
733 * register, but we don't bother with that.
734 */
735 reset_validation_state(validation_state);
736
737 /* Since we've entered a basic block from potentially multiple
738 * predecessors, we need the uniforms address to be updated before any
739 * uniforms are read. We require that after any branch point, the next
740 * uniform to be loaded is a uniform address offset. That uniform's
741 * offset will be marked by the uniform address register write
742 * validation, or by the one-off end-of-program check.
743 */
744 validation_state->needs_uniform_address_update = true;
745
746 return true;
747 }
748
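/* Main entry point: validates every instruction of the shader BO (after a
 * first pass over its branches) and returns the metadata needed to build its
 * uniform stream, or NULL if the shader is rejected.
 *
 * A minimal sketch of how the shader BO creation path is expected to use
 * this (exact field and label names may differ):
 *
 *	bo->validated_shader = vc4_validate_shader(&bo->base);
 *	if (!bo->validated_shader) {
 *		ret = -EINVAL;
 *		goto fail;
 *	}
 */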
749 struct vc4_validated_shader_info *
750 vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
751 {
752 bool found_shader_end = false;
753 int shader_end_ip = 0;
754 uint32_t ip;
755 struct vc4_validated_shader_info *validated_shader = NULL;
756 struct vc4_shader_validation_state validation_state;
757
758 memset(&validation_state, 0, sizeof(validation_state));
759 validation_state.shader = shader_obj->vaddr;
760 validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
761
762 reset_validation_state(&validation_state);
763
764 validation_state.branch_targets =
765 kcalloc(BITS_TO_LONGS(validation_state.max_ip),
766 sizeof(unsigned long), GFP_KERNEL);
767 if (!validation_state.branch_targets)
768 goto fail;
769
770 validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
771 if (!validated_shader)
772 goto fail;
773
774 if (!vc4_validate_branches(&validation_state))
775 goto fail;
776
777 for (ip = 0; ip < validation_state.max_ip; ip++) {
778 uint64_t inst = validation_state.shader[ip];
779 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
780
781 validation_state.ip = ip;
782
783 if (!vc4_handle_branch_target(&validation_state))
784 goto fail;
785
786 switch (sig) {
787 case QPU_SIG_NONE:
788 case QPU_SIG_WAIT_FOR_SCOREBOARD:
789 case QPU_SIG_SCOREBOARD_UNLOCK:
790 case QPU_SIG_COLOR_LOAD:
791 case QPU_SIG_LOAD_TMU0:
792 case QPU_SIG_LOAD_TMU1:
793 case QPU_SIG_PROG_END:
794 case QPU_SIG_SMALL_IMM:
795 if (!check_instruction_writes(validated_shader,
796 &validation_state)) {
797 DRM_ERROR("Bad write at ip %d\n", ip);
798 goto fail;
799 }
800
801 if (!check_instruction_reads(validated_shader,
802 &validation_state))
803 goto fail;
804
805 if (sig == QPU_SIG_PROG_END) {
806 found_shader_end = true;
807 shader_end_ip = ip;
808 }
809
810 break;
811
812 case QPU_SIG_LOAD_IMM:
813 if (!check_instruction_writes(validated_shader,
814 &validation_state)) {
815 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
816 goto fail;
817 }
818 break;
819
820 case QPU_SIG_BRANCH:
821 if (!check_branch(inst, validated_shader,
822 &validation_state, ip))
823 goto fail;
824 break;
825 default:
826 DRM_ERROR("Unsupported QPU signal %d at "
827 "instruction %d\n", sig, ip);
828 goto fail;
829 }
830
831 /* There are two delay slots after program end is signaled
832 * that are still executed, then we're finished.
833 */
834 if (found_shader_end && ip == shader_end_ip + 2)
835 break;
836 }
837
838 if (ip == validation_state.max_ip) {
839 DRM_ERROR("shader failed to terminate before "
840 "shader BO end at %zd\n",
841 shader_obj->base.size);
842 goto fail;
843 }
844
845 /* If we did a backwards branch and we haven't emitted a uniforms
846 * reset since then, we still need the uniforms stream to have the
847 * uniforms address available so that the backwards branch can do its
848 * uniforms reset.
849 *
850 * We could potentially prove that the backwards branch doesn't
851 * contain any uses of uniforms until program exit, but that doesn't
852 * seem to be worth the trouble.
853 */
854 if (validation_state.needs_uniform_address_for_loop) {
855 if (!require_uniform_address_uniform(validated_shader))
856 goto fail;
857 validated_shader->uniforms_size += 4;
858 }
859
860 /* Again, no chance of integer overflow here because the worst case
861 * scenario is 8 bytes of uniforms plus handles per 8-byte
862 * instruction.
863 */
864 validated_shader->uniforms_src_size =
865 (validated_shader->uniforms_size +
866 4 * validated_shader->num_texture_samples);
867
868 kfree(validation_state.branch_targets);
869
870 return validated_shader;
871
872 fail:
873 kfree(validation_state.branch_targets);
874 if (validated_shader) {
875 kfree(validated_shader->texture_samples);
876 kfree(validated_shader);
877 }
878 return NULL;
879 }