vc4: Add a bunch of validation of the binning mode config.
[mesa.git] / src / gallium / drivers / vc4 / vc4_simulator_validate.c
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /**
25 * Command list validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory. So, a user with
28 * access to execute command lists could escalate privilege by
29 * overwriting system memory (drawing to it as a framebuffer) or
30 * reading system memory it shouldn't (reading it as a texture, or
31 * uniform data, or vertex data).
32 *
33 * This validates command lists to ensure that all accesses are within
34 * the bounds of the GEM objects referenced. It explicitly whitelists
35 * packets, and looks at the offsets in any address fields to make
36 * sure they're constrained within the BOs they reference.
37 *
38 * Note that because of the validation that's happening anyway, this
39 * is where GEM relocation processing happens.
40 */
41
42 #include "vc4_simulator_validate.h"
43 #include "vc4_packet.h"
44
/* Common parameter list for all per-packet validation functions:
 * the exec state, a pointer just past the packet opcode in the
 * validated (GPU-visible) copy, and the matching pointer into the
 * untrusted user-supplied copy.
 */
#define VALIDATE_ARGS \
	struct exec_info *exec, \
	void *validated, \
	void *untrusted
49
50 static bool
51 vc4_use_bo(struct exec_info *exec,
52 uint32_t hindex,
53 enum vc4_bo_mode mode,
54 struct drm_gem_cma_object **obj)
55 {
56 *obj = NULL;
57
58 if (hindex >= exec->bo_count) {
59 DRM_ERROR("BO index %d greater than BO count %d\n",
60 hindex, exec->bo_count);
61 return false;
62 }
63
64 if (exec->bo[hindex].mode != mode) {
65 if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
66 exec->bo[hindex].mode = mode;
67 } else {
68 DRM_ERROR("BO index %d reused with mode %d vs %d\n",
69 hindex, exec->bo[hindex].mode, mode);
70 return false;
71 }
72 }
73
74 *obj = exec->bo[hindex].bo;
75 return true;
76 }
77
78 static bool
79 vc4_use_handle(struct exec_info *exec,
80 uint32_t gem_handles_packet_index,
81 enum vc4_bo_mode mode,
82 struct drm_gem_cma_object **obj)
83 {
84 return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
85 mode, obj);
86 }
87
/**
 * Returns the size in bytes of a GL shader record, decoded from the
 * low bits of the shader state pointer.
 *
 * Bits 0-2 are the attribute count (0 encodes 8) and bit 3 selects the
 * extended record format (12 bytes per attribute rather than 8, on top
 * of the 36-byte fixed header).
 */
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
	uint32_t attr = pointer_bits & 7;
	uint32_t attr_size = (pointer_bits & 8) ? 12 : 8;

	if (attr == 0)
		attr = 8;

	return 36 + attr * attr_size;
}
99
100 static int
101 validate_start_tile_binning(VALIDATE_ARGS)
102 {
103 if (exec->found_start_tile_binning_packet) {
104 DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
105 return -EINVAL;
106 }
107 exec->found_start_tile_binning_packet = true;
108
109 if (!exec->found_tile_binning_mode_config_packet) {
110 DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
111 return -EINVAL;
112 }
113
114 return 0;
115 }
116
117 static int
118 validate_branch_to_sublist(VALIDATE_ARGS)
119 {
120 struct drm_gem_cma_object *target;
121 uint32_t offset;
122
123 if (!vc4_use_handle(exec, 0, VC4_MODE_TILE_ALLOC, &target))
124 return -EINVAL;
125
126 if (target != exec->tile_alloc_bo) {
127 DRM_ERROR("Juimping to BOs other than tile alloc unsupported\n");
128 return -EINVAL;
129 }
130
131 offset = *(uint32_t *)(untrusted + 0);
132 if (offset % exec->tile_alloc_init_block_size ||
133 offset / exec->tile_alloc_init_block_size >
134 exec->bin_tiles_x * exec->bin_tiles_y) {
135 DRM_ERROR("VC4_PACKET_BRANCH_TO_SUB_LIST must jump to initial "
136 "tile allocation space.\n");
137 return -EINVAL;
138 }
139
140 *(uint32_t *)(validated + 0) = target->paddr + offset;
141
142 return 0;
143 }
144
145 static int
146 validate_loadstore_tile_buffer_general(VALIDATE_ARGS)
147 {
148 uint32_t packet_b0 = *(uint8_t *)(untrusted + 0);
149 struct drm_gem_cma_object *fbo;
150
151 if ((packet_b0 & 0xf) == VC4_LOADSTORE_TILE_BUFFER_NONE)
152 return 0;
153
154 if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &fbo))
155 return -EINVAL;
156
157 /* XXX: Validate address offset */
158 *(uint32_t *)(validated + 2) =
159 *(uint32_t *)(untrusted + 2) + fbo->paddr;
160
161 return 0;
162 }
163
164 static int
165 validate_indexed_prim_list(VALIDATE_ARGS)
166 {
167 struct drm_gem_cma_object *ib;
168 uint32_t max_index = *(uint32_t *)(untrusted + 9);
169 uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
170 uint32_t ib_access_end = (max_index + 1) * index_size;
171
172 /* Check overflow condition */
173 if (max_index == ~0) {
174 DRM_ERROR("unlimited max index\n");
175 return -EINVAL;
176 }
177
178 if (ib_access_end < max_index) {
179 DRM_ERROR("IB access overflow\n");
180 return -EINVAL;
181 }
182
183
184 if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
185 return -EINVAL;
186 if (ib_access_end > ib->base.size) {
187 DRM_ERROR("IB access out of bounds (%d/%d)\n",
188 ib_access_end, ib->base.size);
189 return -EINVAL;
190 }
191
192 *(uint32_t *)(validated + 5) =
193 *(uint32_t *)(untrusted + 5) + ib->paddr;
194
195 return 0;
196 }
197
198 static int
199 validate_gl_shader_state(VALIDATE_ARGS)
200 {
201 uint32_t i = exec->shader_state_count++;
202
203 if (i >= exec->shader_state_size) { /* XXX? */
204 DRM_ERROR("More requests for shader states than declared\n");
205 return -EINVAL;
206 }
207
208 exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
209 exec->shader_state[i].addr = *(uint32_t *)untrusted;
210
211 if (exec->shader_state[i].addr & ~0xf) {
212 DRM_ERROR("high bits set in GL shader rec reference\n");
213 return -EINVAL;
214 }
215
216 *(uint32_t *)validated = (exec->shader_rec_p +
217 exec->shader_state[i].addr);
218
219 exec->shader_rec_p +=
220 roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
221
222 return 0;
223 }
224
225 static int
226 validate_nv_shader_state(VALIDATE_ARGS)
227 {
228 uint32_t i = exec->shader_state_count++;
229
230 if (i >= exec->shader_state_size) {
231 DRM_ERROR("More requests for shader states than declared\n");
232 return -EINVAL;
233 }
234
235 exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
236 exec->shader_state[i].addr = *(uint32_t *)untrusted;
237
238 if (exec->shader_state[i].addr & 15) {
239 DRM_ERROR("NV shader state address 0x%08x misaligned\n",
240 exec->shader_state[i].addr);
241 return -EINVAL;
242 }
243
244 *(uint32_t *)validated = (exec->shader_state[i].addr +
245 exec->shader_rec_p);
246
247 return 0;
248 }
249
250 static int
251 validate_tile_binning_config(VALIDATE_ARGS)
252 {
253 struct drm_gem_cma_object *tile_allocation;
254 struct drm_gem_cma_object *tile_state_data_array;
255 uint8_t flags;
256 uint32_t tile_allocation_size;
257
258 if (!vc4_use_handle(exec, 0, VC4_MODE_TILE_ALLOC, &tile_allocation) ||
259 !vc4_use_handle(exec, 1, VC4_MODE_TSDA, &tile_state_data_array))
260 return -EINVAL;
261
262 if (exec->found_tile_binning_mode_config_packet) {
263 DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
264 return -EINVAL;
265 }
266 exec->found_tile_binning_mode_config_packet = true;
267
268 exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
269 exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
270 flags = *(uint8_t *)(untrusted + 14);
271
272 if (exec->bin_tiles_x == 0 ||
273 exec->bin_tiles_y == 0) {
274 DRM_ERROR("Tile binning config of %dx%d too small\n",
275 exec->bin_tiles_x, exec->bin_tiles_y);
276 return -EINVAL;
277 }
278
279 /* Our validation relies on the user not getting to set up their own
280 * tile state/tile allocation BO contents.
281 */
282 if (!(flags & VC4_BIN_CONFIG_AUTO_INIT_TSDA)) {
283 DRM_ERROR("binning config missing "
284 "VC4_BIN_CONFIG_AUTO_INIT_TSDA\n");
285 return -EINVAL;
286 }
287
288 if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
289 VC4_BIN_CONFIG_TILE_BUFFER_64BIT |
290 VC4_BIN_CONFIG_MS_MODE_4X)) {
291 DRM_ERROR("unsupported bining config flags 0x%02x\n", flags);
292 return -EINVAL;
293 }
294
295 if (*(uint32_t *)(untrusted + 0) != 0) {
296 DRM_ERROR("tile allocation offset != 0 unsupported\n");
297 return -EINVAL;
298 }
299 tile_allocation_size = *(uint32_t *)(untrusted + 4);
300 if (tile_allocation_size > tile_allocation->base.size) {
301 DRM_ERROR("tile allocation size %d > BO size %d",
302 tile_allocation_size, tile_allocation->base.size);
303 return -EINVAL;
304 }
305 *(uint32_t *)validated = tile_allocation->paddr;
306 exec->tile_alloc_bo = tile_allocation;
307
308 exec->tile_alloc_init_block_size = 1 << (5 + ((flags >> 5) & 3));
309 if (exec->bin_tiles_x * exec->bin_tiles_y *
310 exec->tile_alloc_init_block_size > tile_allocation_size) {
311 DRM_ERROR("tile init exceeds tile alloc size (%d vs %d)\n",
312 exec->bin_tiles_x * exec->bin_tiles_y *
313 exec->tile_alloc_init_block_size,
314 tile_allocation_size);
315 return -EINVAL;
316 }
317 if (*(uint32_t *)(untrusted + 8) != 0) {
318 DRM_ERROR("TSDA offset != 0 unsupported\n");
319 return -EINVAL;
320 }
321 if (exec->bin_tiles_x * exec->bin_tiles_y * 48 >
322 tile_state_data_array->base.size) {
323 DRM_ERROR("TSDA of %db too small for %dx%d bin config\n",
324 tile_state_data_array->base.size,
325 exec->bin_tiles_x, exec->bin_tiles_y);
326 }
327 *(uint32_t *)(validated + 8) = tile_state_data_array->paddr;
328
329 return 0;
330 }
331
332 static int
333 validate_tile_rendering_mode_config(VALIDATE_ARGS)
334 {
335 struct drm_gem_cma_object *fbo;
336
337 if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &fbo))
338 return -EINVAL;
339
340 /* XXX: Validate offsets */
341 *(uint32_t *)validated =
342 *(uint32_t *)untrusted + fbo->paddr;
343
344 return 0;
345 }
346
347 static int
348 validate_tile_coordinates(VALIDATE_ARGS)
349 {
350 uint8_t tile_x = *(uint8_t *)(untrusted + 0);
351 uint8_t tile_y = *(uint8_t *)(untrusted + 1);
352
353 if (tile_x >= exec->bin_tiles_x ||
354 tile_y >= exec->bin_tiles_y) {
355 DRM_ERROR("Tile coordinates %d,%d > bin config %d,%d\n",
356 tile_x,
357 tile_y,
358 exec->bin_tiles_x,
359 exec->bin_tiles_y);
360 return -EINVAL;
361 }
362
363 return 0;
364 }
365
/* Loads the BO index table used by vc4_use_handle() for subsequent
 * packets.  The handles themselves were already resolved to BOs at
 * submit time; this just copies the per-packet index table, so no
 * relocation output is produced for this packet.
 */
static int
validate_gem_handles(VALIDATE_ARGS)
{
	memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
	return 0;
}
372
/* Whitelist of packets the validator accepts.
 *
 * bin/render say which command list type may contain the packet, len
 * is the full packet length including the opcode byte, and func (if
 * non-NULL) performs per-packet validation and relocation.  Packets
 * absent from this table (no name) are rejected.
 */
static const struct cmd_info {
	bool bin;
	bool render;
	uint16_t len;
	const char *name;
	int (*func)(struct exec_info *exec, void *validated, void *untrusted);
} cmd_info[] = {
	[VC4_PACKET_HALT] = { 1, 1, 1, "halt", NULL },
	[VC4_PACKET_NOP] = { 1, 1, 1, "nop", NULL },
	[VC4_PACKET_FLUSH] = { 1, 1, 1, "flush", NULL },
	[VC4_PACKET_FLUSH_ALL] = { 1, 0, 1, "flush all state", NULL },
	[VC4_PACKET_START_TILE_BINNING] = { 1, 0, 1, "start tile binning", validate_start_tile_binning },
	[VC4_PACKET_INCREMENT_SEMAPHORE] = { 1, 0, 1, "increment semaphore", NULL },
	[VC4_PACKET_WAIT_ON_SEMAPHORE] = { 1, 1, 1, "wait on semaphore", NULL },
	/* BRANCH_TO_SUB_LIST is actually supported in the binner as well, but
	 * we only use it from the render CL in order to jump into the tile
	 * allocation BO.
	 */
	[VC4_PACKET_BRANCH_TO_SUB_LIST] = { 0, 1, 5, "branch to sublist", validate_branch_to_sublist },
	[VC4_PACKET_STORE_MS_TILE_BUFFER] = { 0, 1, 1, "store MS resolved tile color buffer", NULL },
	[VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF] = { 0, 1, 1, "store MS resolved tile color buffer and EOF", NULL },

	[VC4_PACKET_STORE_TILE_BUFFER_GENERAL] = { 0, 1, 7, "Store Tile Buffer General", validate_loadstore_tile_buffer_general },
	[VC4_PACKET_LOAD_TILE_BUFFER_GENERAL] = { 0, 1, 7, "Load Tile Buffer General", validate_loadstore_tile_buffer_general },

	[VC4_PACKET_GL_INDEXED_PRIMITIVE] = { 1, 1, 14, "Indexed Primitive List", validate_indexed_prim_list },

	/* XXX: bounds check verts? */
	[VC4_PACKET_GL_ARRAY_PRIMITIVE] = { 1, 1, 10, "Vertex Array Primitives", NULL },

	[VC4_PACKET_PRIMITIVE_LIST_FORMAT] = { 1, 1, 2, "primitive list format", NULL }, /* XXX: bin valid? */

	[VC4_PACKET_GL_SHADER_STATE] = { 1, 1, 5, "GL Shader State", validate_gl_shader_state },
	[VC4_PACKET_NV_SHADER_STATE] = { 1, 1, 5, "NV Shader State", validate_nv_shader_state },

	[VC4_PACKET_CONFIGURATION_BITS] = { 1, 1, 4, "configuration bits", NULL },
	[VC4_PACKET_FLAT_SHADE_FLAGS] = { 1, 1, 5, "flat shade flags", NULL },
	[VC4_PACKET_POINT_SIZE] = { 1, 1, 5, "point size", NULL },
	[VC4_PACKET_LINE_WIDTH] = { 1, 1, 5, "line width", NULL },
	[VC4_PACKET_RHT_X_BOUNDARY] = { 1, 1, 3, "RHT X boundary", NULL },
	[VC4_PACKET_DEPTH_OFFSET] = { 1, 1, 5, "Depth Offset", NULL },
	[VC4_PACKET_CLIP_WINDOW] = { 1, 1, 9, "Clip Window", NULL },
	[VC4_PACKET_VIEWPORT_OFFSET] = { 1, 1, 5, "Viewport Offset", NULL },
	[VC4_PACKET_CLIPPER_XY_SCALING] = { 1, 1, 9, "Clipper XY Scaling", NULL },
	/* Note: The docs say this was also 105, but it was 106 in the
	 * initial userland code drop.
	 */
	[VC4_PACKET_CLIPPER_Z_SCALING] = { 1, 1, 9, "Clipper Z Scale and Offset", NULL },

	[VC4_PACKET_TILE_BINNING_MODE_CONFIG] = { 1, 0, 16, "tile binning configuration", validate_tile_binning_config },

	/* XXX: Do we need to validate this one? It's got width/height in it.
	 */
	[VC4_PACKET_TILE_RENDERING_MODE_CONFIG] = { 0, 1, 11, "tile rendering mode configuration", validate_tile_rendering_mode_config},

	[VC4_PACKET_CLEAR_COLORS] = { 0, 1, 14, "Clear Colors", NULL },

	[VC4_PACKET_TILE_COORDINATES] = { 0, 1, 3, "Tile Coordinates", validate_tile_coordinates },

	[VC4_PACKET_GEM_HANDLES] = { 1, 1, 9, "GEM handles", validate_gem_handles },
};
434
435 int
436 vc4_validate_cl(struct drm_device *dev,
437 void *validated,
438 void *unvalidated,
439 uint32_t len,
440 bool is_bin,
441 struct exec_info *exec)
442 {
443 uint32_t dst_offset = 0;
444 uint32_t src_offset = 0;
445
446 while (src_offset < len) {
447 void *dst_pkt = validated + dst_offset;
448 void *src_pkt = unvalidated + src_offset;
449 u8 cmd = *(uint8_t *)src_pkt;
450 const struct cmd_info *info;
451
452 if (cmd > ARRAY_SIZE(cmd_info)) {
453 DRM_ERROR("0x%08x: packet %d out of bounds\n",
454 src_offset, cmd);
455 return -EINVAL;
456 }
457
458 info = &cmd_info[cmd];
459 if (!info->name) {
460 DRM_ERROR("0x%08x: packet %d invalid\n",
461 src_offset, cmd);
462 return -EINVAL;
463 }
464
465 #if 0
466 DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
467 src_offset, cmd, info->name, info->len);
468 #endif
469
470 if ((is_bin && !info->bin) ||
471 (!is_bin && !info->render)) {
472 DRM_ERROR("0x%08x: packet %d (%s) invalid for %s\n",
473 src_offset, cmd, info->name,
474 is_bin ? "binner" : "render");
475 return -EINVAL;
476 }
477
478 if (src_offset + info->len > len) {
479 DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
480 "exceeds bounds (0x%08x)\n",
481 src_offset, cmd, info->name, info->len,
482 src_offset + len);
483 return -EINVAL;
484 }
485
486 if (cmd != VC4_PACKET_GEM_HANDLES)
487 memcpy(dst_pkt, src_pkt, info->len);
488
489 if (info->func && info->func(exec,
490 dst_pkt + 1,
491 src_pkt + 1)) {
492 DRM_ERROR("0x%08x: packet %d (%s) failed to "
493 "validate\n",
494 src_offset, cmd, info->name);
495 return -EINVAL;
496 }
497
498 src_offset += info->len;
499 /* GEM handle loading doesn't produce HW packets. */
500 if (cmd != VC4_PACKET_GEM_HANDLES)
501 dst_offset += info->len;
502
503 /* When the CL hits halt, it'll stop reading anything else. */
504 if (cmd == VC4_PACKET_HALT)
505 break;
506 }
507
508 if (is_bin) {
509 exec->ct0ea = exec->ct0ca + dst_offset;
510
511 if (!exec->found_start_tile_binning_packet) {
512 DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
513 return -EINVAL;
514 }
515 } else {
516 exec->ct1ea = exec->ct1ca + dst_offset;
517 }
518
519 return 0;
520 }
521
522 static bool
523 reloc_tex(struct exec_info *exec,
524 void *uniform_data_u,
525 struct vc4_texture_sample_info *sample,
526 uint32_t texture_handle_index)
527
528 {
529 struct drm_gem_cma_object *tex;
530 uint32_t unvalidated_p0 = *(uint32_t *)(uniform_data_u +
531 sample->p_offset[0]);
532 uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
533
534 if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
535 return false;
536
537 *validated_p0 = tex->paddr + unvalidated_p0;
538
539 return true;
540 }
541
542 static int
543 validate_shader_rec(struct drm_device *dev,
544 struct exec_info *exec,
545 struct vc4_shader_state *state)
546 {
547 uint32_t *src_handles;
548 void *pkt_u, *pkt_v;
549 enum shader_rec_reloc_type {
550 RELOC_CODE,
551 RELOC_VBO,
552 };
553 struct shader_rec_reloc {
554 enum shader_rec_reloc_type type;
555 uint32_t offset;
556 };
557 static const struct shader_rec_reloc gl_relocs[] = {
558 { RELOC_CODE, 4 }, /* fs */
559 { RELOC_CODE, 16 }, /* vs */
560 { RELOC_CODE, 28 }, /* cs */
561 };
562 static const struct shader_rec_reloc nv_relocs[] = {
563 { RELOC_CODE, 4 }, /* fs */
564 { RELOC_VBO, 12 }
565 };
566 const struct shader_rec_reloc *relocs;
567 struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
568 uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
569 int i;
570 struct vc4_validated_shader_info *validated_shader = NULL;
571
572 if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
573 relocs = nv_relocs;
574 nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
575
576 packet_size = 16;
577 } else {
578 relocs = gl_relocs;
579 nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
580
581 nr_attributes = state->addr & 0x7;
582 if (nr_attributes == 0)
583 nr_attributes = 8;
584 packet_size = gl_shader_rec_size(state->addr);
585 }
586 nr_relocs = nr_fixed_relocs + nr_attributes;
587
588 if (nr_relocs * 4 > exec->shader_rec_size) {
589 DRM_ERROR("overflowed shader recs reading %d handles "
590 "from %d bytes left\n",
591 nr_relocs, exec->shader_rec_size);
592 return -EINVAL;
593 }
594 src_handles = exec->shader_rec_u;
595 exec->shader_rec_u += nr_relocs * 4;
596 exec->shader_rec_size -= nr_relocs * 4;
597
598 if (packet_size > exec->shader_rec_size) {
599 DRM_ERROR("overflowed shader recs copying %db packet "
600 "from %d bytes left\n",
601 packet_size, exec->shader_rec_size);
602 return -EINVAL;
603 }
604 pkt_u = exec->shader_rec_u;
605 pkt_v = exec->shader_rec_v;
606 memcpy(pkt_v, pkt_u, packet_size);
607 exec->shader_rec_u += packet_size;
608 /* Shader recs have to be aligned to 16 bytes (due to the attribute
609 * flags being in the low bytes), so round the next validated shader
610 * rec address up. This should be safe, since we've got so many
611 * relocations in a shader rec packet.
612 */
613 BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
614 exec->shader_rec_v += roundup(packet_size, 16);
615 exec->shader_rec_size -= packet_size;
616
617 for (i = 0; i < nr_relocs; i++) {
618 enum vc4_bo_mode mode;
619
620 if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
621 mode = VC4_MODE_SHADER;
622 else
623 mode = VC4_MODE_RENDER;
624
625 if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i])) {
626 return false;
627 }
628 }
629
630 for (i = 0; i < nr_fixed_relocs; i++) {
631 uint32_t o = relocs[i].offset;
632 uint32_t src_offset = *(uint32_t *)(pkt_u + o);
633 uint32_t *texture_handles_u;
634 void *uniform_data_u;
635 uint32_t tex;
636
637 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
638
639 switch (relocs[i].type) {
640 case RELOC_CODE:
641 kfree(validated_shader);
642 validated_shader = vc4_validate_shader(bo[i],
643 src_offset);
644 if (!validated_shader)
645 goto fail;
646
647 if (validated_shader->uniforms_src_size >
648 exec->uniforms_size) {
649 DRM_ERROR("Uniforms src buffer overflow\n");
650 goto fail;
651 }
652
653 texture_handles_u = exec->uniforms_u;
654 uniform_data_u = (texture_handles_u +
655 validated_shader->num_texture_samples);
656
657 memcpy(exec->uniforms_v, uniform_data_u,
658 validated_shader->uniforms_size);
659
660 for (tex = 0;
661 tex < validated_shader->num_texture_samples;
662 tex++) {
663 if (!reloc_tex(exec,
664 uniform_data_u,
665 &validated_shader->texture_samples[tex],
666 texture_handles_u[tex])) {
667 goto fail;
668 }
669 }
670
671 *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
672
673 exec->uniforms_u += validated_shader->uniforms_src_size;
674 exec->uniforms_v += validated_shader->uniforms_size;
675 exec->uniforms_p += validated_shader->uniforms_size;
676
677 break;
678
679 case RELOC_VBO:
680 break;
681 }
682 }
683
684 for (i = 0; i < nr_attributes; i++) {
685 /* XXX: validation */
686 uint32_t o = 36 + i * 8;
687 *(uint32_t *)(pkt_v + o) = (bo[nr_fixed_relocs + i]->paddr +
688 *(uint32_t *)(pkt_u + o));
689 }
690
691 kfree(validated_shader);
692
693 return 0;
694
695 fail:
696 kfree(validated_shader);
697 return -EINVAL;
698 }
699
700 int
701 vc4_validate_shader_recs(struct drm_device *dev,
702 struct exec_info *exec)
703 {
704 uint32_t i;
705 int ret = 0;
706
707 for (i = 0; i < exec->shader_state_count; i++) {
708 ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
709 if (ret)
710 return ret;
711 }
712
713 return ret;
714 }