/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * Command list validator for VC4.
 *
 * The VC4 has no IOMMU between it and system memory.  So, a user with
 * access to execute command lists could escalate privilege by
 * overwriting system memory (drawing to it as a framebuffer) or
 * reading system memory it shouldn't (reading it as a texture, or
 * uniform data, or vertex data).
 *
 * This validates command lists to ensure that all accesses are within
 * the bounds of the GEM objects referenced.  It explicitly whitelists
 * packets, and looks at the offsets in any address fields to make
 * sure they're constrained within the BOs they reference.
 *
 * Note that because of the validation that's happening anyway, this
 * is where GEM relocation processing happens.
 */
42 #include "vc4_simulator_validate.h"
43 #include "vc4_packet.h"
45 #define VALIDATE_ARGS \
46 struct exec_info *exec, \
51 vc4_use_bo(struct exec_info
*exec
,
53 enum vc4_bo_mode mode
,
54 struct drm_gem_cma_object
**obj
)
58 if (hindex
>= exec
->bo_count
) {
59 DRM_ERROR("BO index %d greater than BO count %d\n",
60 hindex
, exec
->bo_count
);
64 if (exec
->bo
[hindex
].mode
!= mode
) {
65 if (exec
->bo
[hindex
].mode
== VC4_MODE_UNDECIDED
) {
66 exec
->bo
[hindex
].mode
= mode
;
68 DRM_ERROR("BO index %d reused with mode %d vs %d\n",
69 hindex
, exec
->bo
[hindex
].mode
, mode
);
74 *obj
= exec
->bo
[hindex
].bo
;
79 vc4_use_handle(struct exec_info
*exec
,
80 uint32_t gem_handles_packet_index
,
81 enum vc4_bo_mode mode
,
82 struct drm_gem_cma_object
**obj
)
84 return vc4_use_bo(exec
, exec
->bo_index
[gem_handles_packet_index
],
/* Returns the size in bytes of a GL shader record, as encoded in the low
 * bits of its address: bits 0-2 are the attribute count (0 means 8) and
 * bit 3 selects extended (12-byte) attribute records over 8-byte ones,
 * on top of a 36-byte fixed header.
 */
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
	uint32_t attribute_count = pointer_bits & 7;
	bool extended = pointer_bits & 8;

	if (attribute_count == 0)
		attribute_count = 8;

	return 36 + attribute_count * (extended ? 12 : 8);
}
101 validate_start_tile_binning(VALIDATE_ARGS
)
103 if (exec
->found_start_tile_binning_packet
) {
104 DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
107 exec
->found_start_tile_binning_packet
= true;
109 if (!exec
->found_tile_binning_mode_config_packet
) {
110 DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
118 validate_branch_to_sublist(VALIDATE_ARGS
)
120 struct drm_gem_cma_object
*target
;
123 if (!vc4_use_handle(exec
, 0, VC4_MODE_TILE_ALLOC
, &target
))
126 if (target
!= exec
->tile_alloc_bo
) {
127 DRM_ERROR("Juimping to BOs other than tile alloc unsupported\n");
131 offset
= *(uint32_t *)(untrusted
+ 0);
132 if (offset
% exec
->tile_alloc_init_block_size
||
133 offset
/ exec
->tile_alloc_init_block_size
>
134 exec
->bin_tiles_x
* exec
->bin_tiles_y
) {
135 DRM_ERROR("VC4_PACKET_BRANCH_TO_SUB_LIST must jump to initial "
136 "tile allocation space.\n");
140 *(uint32_t *)(validated
+ 0) = target
->paddr
+ offset
;
146 validate_loadstore_tile_buffer_general(VALIDATE_ARGS
)
148 uint32_t packet_b0
= *(uint8_t *)(untrusted
+ 0);
149 struct drm_gem_cma_object
*fbo
;
151 if ((packet_b0
& 0xf) == VC4_LOADSTORE_TILE_BUFFER_NONE
)
154 if (!vc4_use_handle(exec
, 0, VC4_MODE_RENDER
, &fbo
))
157 /* XXX: Validate address offset */
158 *(uint32_t *)(validated
+ 2) =
159 *(uint32_t *)(untrusted
+ 2) + fbo
->paddr
;
165 validate_indexed_prim_list(VALIDATE_ARGS
)
167 struct drm_gem_cma_object
*ib
;
168 uint32_t max_index
= *(uint32_t *)(untrusted
+ 9);
169 uint32_t index_size
= (*(uint8_t *)(untrusted
+ 0) >> 4) ? 2 : 1;
170 uint32_t ib_access_end
= (max_index
+ 1) * index_size
;
172 /* Check overflow condition */
173 if (max_index
== ~0) {
174 DRM_ERROR("unlimited max index\n");
178 if (ib_access_end
< max_index
) {
179 DRM_ERROR("IB access overflow\n");
184 if (!vc4_use_handle(exec
, 0, VC4_MODE_RENDER
, &ib
))
186 if (ib_access_end
> ib
->base
.size
) {
187 DRM_ERROR("IB access out of bounds (%d/%d)\n",
188 ib_access_end
, ib
->base
.size
);
192 *(uint32_t *)(validated
+ 5) =
193 *(uint32_t *)(untrusted
+ 5) + ib
->paddr
;
199 validate_gl_shader_state(VALIDATE_ARGS
)
201 uint32_t i
= exec
->shader_state_count
++;
203 if (i
>= exec
->shader_state_size
) { /* XXX? */
204 DRM_ERROR("More requests for shader states than declared\n");
208 exec
->shader_state
[i
].packet
= VC4_PACKET_GL_SHADER_STATE
;
209 exec
->shader_state
[i
].addr
= *(uint32_t *)untrusted
;
211 if (exec
->shader_state
[i
].addr
& ~0xf) {
212 DRM_ERROR("high bits set in GL shader rec reference\n");
216 *(uint32_t *)validated
= (exec
->shader_rec_p
+
217 exec
->shader_state
[i
].addr
);
219 exec
->shader_rec_p
+=
220 roundup(gl_shader_rec_size(exec
->shader_state
[i
].addr
), 16);
226 validate_nv_shader_state(VALIDATE_ARGS
)
228 uint32_t i
= exec
->shader_state_count
++;
230 if (i
>= exec
->shader_state_size
) {
231 DRM_ERROR("More requests for shader states than declared\n");
235 exec
->shader_state
[i
].packet
= VC4_PACKET_NV_SHADER_STATE
;
236 exec
->shader_state
[i
].addr
= *(uint32_t *)untrusted
;
238 if (exec
->shader_state
[i
].addr
& 15) {
239 DRM_ERROR("NV shader state address 0x%08x misaligned\n",
240 exec
->shader_state
[i
].addr
);
244 *(uint32_t *)validated
= (exec
->shader_state
[i
].addr
+
251 validate_tile_binning_config(VALIDATE_ARGS
)
253 struct drm_gem_cma_object
*tile_allocation
;
254 struct drm_gem_cma_object
*tile_state_data_array
;
256 uint32_t tile_allocation_size
;
258 if (!vc4_use_handle(exec
, 0, VC4_MODE_TILE_ALLOC
, &tile_allocation
) ||
259 !vc4_use_handle(exec
, 1, VC4_MODE_TSDA
, &tile_state_data_array
))
262 if (exec
->found_tile_binning_mode_config_packet
) {
263 DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
266 exec
->found_tile_binning_mode_config_packet
= true;
268 exec
->bin_tiles_x
= *(uint8_t *)(untrusted
+ 12);
269 exec
->bin_tiles_y
= *(uint8_t *)(untrusted
+ 13);
270 flags
= *(uint8_t *)(untrusted
+ 14);
272 if (exec
->bin_tiles_x
== 0 ||
273 exec
->bin_tiles_y
== 0) {
274 DRM_ERROR("Tile binning config of %dx%d too small\n",
275 exec
->bin_tiles_x
, exec
->bin_tiles_y
);
279 /* Our validation relies on the user not getting to set up their own
280 * tile state/tile allocation BO contents.
282 if (!(flags
& VC4_BIN_CONFIG_AUTO_INIT_TSDA
)) {
283 DRM_ERROR("binning config missing "
284 "VC4_BIN_CONFIG_AUTO_INIT_TSDA\n");
288 if (flags
& (VC4_BIN_CONFIG_DB_NON_MS
|
289 VC4_BIN_CONFIG_TILE_BUFFER_64BIT
|
290 VC4_BIN_CONFIG_MS_MODE_4X
)) {
291 DRM_ERROR("unsupported bining config flags 0x%02x\n", flags
);
295 if (*(uint32_t *)(untrusted
+ 0) != 0) {
296 DRM_ERROR("tile allocation offset != 0 unsupported\n");
299 tile_allocation_size
= *(uint32_t *)(untrusted
+ 4);
300 if (tile_allocation_size
> tile_allocation
->base
.size
) {
301 DRM_ERROR("tile allocation size %d > BO size %d",
302 tile_allocation_size
, tile_allocation
->base
.size
);
305 *(uint32_t *)validated
= tile_allocation
->paddr
;
306 exec
->tile_alloc_bo
= tile_allocation
;
308 exec
->tile_alloc_init_block_size
= 1 << (5 + ((flags
>> 5) & 3));
309 if (exec
->bin_tiles_x
* exec
->bin_tiles_y
*
310 exec
->tile_alloc_init_block_size
> tile_allocation_size
) {
311 DRM_ERROR("tile init exceeds tile alloc size (%d vs %d)\n",
312 exec
->bin_tiles_x
* exec
->bin_tiles_y
*
313 exec
->tile_alloc_init_block_size
,
314 tile_allocation_size
);
317 if (*(uint32_t *)(untrusted
+ 8) != 0) {
318 DRM_ERROR("TSDA offset != 0 unsupported\n");
321 if (exec
->bin_tiles_x
* exec
->bin_tiles_y
* 48 >
322 tile_state_data_array
->base
.size
) {
323 DRM_ERROR("TSDA of %db too small for %dx%d bin config\n",
324 tile_state_data_array
->base
.size
,
325 exec
->bin_tiles_x
, exec
->bin_tiles_y
);
327 *(uint32_t *)(validated
+ 8) = tile_state_data_array
->paddr
;
333 validate_tile_rendering_mode_config(VALIDATE_ARGS
)
335 struct drm_gem_cma_object
*fbo
;
337 if (!vc4_use_handle(exec
, 0, VC4_MODE_RENDER
, &fbo
))
340 /* XXX: Validate offsets */
341 *(uint32_t *)validated
=
342 *(uint32_t *)untrusted
+ fbo
->paddr
;
348 validate_tile_coordinates(VALIDATE_ARGS
)
350 uint8_t tile_x
= *(uint8_t *)(untrusted
+ 0);
351 uint8_t tile_y
= *(uint8_t *)(untrusted
+ 1);
353 if (tile_x
>= exec
->bin_tiles_x
||
354 tile_y
>= exec
->bin_tiles_y
) {
355 DRM_ERROR("Tile coordinates %d,%d > bin config %d,%d\n",
367 validate_gem_handles(VALIDATE_ARGS
)
369 memcpy(exec
->bo_index
, untrusted
, sizeof(exec
->bo_index
));
373 static const struct cmd_info
{
378 int (*func
)(struct exec_info
*exec
, void *validated
, void *untrusted
);
380 [VC4_PACKET_HALT
] = { 1, 1, 1, "halt", NULL
},
381 [VC4_PACKET_NOP
] = { 1, 1, 1, "nop", NULL
},
382 [VC4_PACKET_FLUSH
] = { 1, 1, 1, "flush", NULL
},
383 [VC4_PACKET_FLUSH_ALL
] = { 1, 0, 1, "flush all state", NULL
},
384 [VC4_PACKET_START_TILE_BINNING
] = { 1, 0, 1, "start tile binning", validate_start_tile_binning
},
385 [VC4_PACKET_INCREMENT_SEMAPHORE
] = { 1, 0, 1, "increment semaphore", NULL
},
386 [VC4_PACKET_WAIT_ON_SEMAPHORE
] = { 1, 1, 1, "wait on semaphore", NULL
},
387 /* BRANCH_TO_SUB_LIST is actually supported in the binner as well, but
388 * we only use it from the render CL in order to jump into the tile
391 [VC4_PACKET_BRANCH_TO_SUB_LIST
] = { 0, 1, 5, "branch to sublist", validate_branch_to_sublist
},
392 [VC4_PACKET_STORE_MS_TILE_BUFFER
] = { 0, 1, 1, "store MS resolved tile color buffer", NULL
},
393 [VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF
] = { 0, 1, 1, "store MS resolved tile color buffer and EOF", NULL
},
395 [VC4_PACKET_STORE_TILE_BUFFER_GENERAL
] = { 0, 1, 7, "Store Tile Buffer General", validate_loadstore_tile_buffer_general
},
396 [VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
] = { 0, 1, 7, "Load Tile Buffer General", validate_loadstore_tile_buffer_general
},
398 [VC4_PACKET_GL_INDEXED_PRIMITIVE
] = { 1, 1, 14, "Indexed Primitive List", validate_indexed_prim_list
},
400 /* XXX: bounds check verts? */
401 [VC4_PACKET_GL_ARRAY_PRIMITIVE
] = { 1, 1, 10, "Vertex Array Primitives", NULL
},
403 [VC4_PACKET_PRIMITIVE_LIST_FORMAT
] = { 1, 1, 2, "primitive list format", NULL
}, /* XXX: bin valid? */
405 [VC4_PACKET_GL_SHADER_STATE
] = { 1, 1, 5, "GL Shader State", validate_gl_shader_state
},
406 [VC4_PACKET_NV_SHADER_STATE
] = { 1, 1, 5, "NV Shader State", validate_nv_shader_state
},
408 [VC4_PACKET_CONFIGURATION_BITS
] = { 1, 1, 4, "configuration bits", NULL
},
409 [VC4_PACKET_FLAT_SHADE_FLAGS
] = { 1, 1, 5, "flat shade flags", NULL
},
410 [VC4_PACKET_POINT_SIZE
] = { 1, 1, 5, "point size", NULL
},
411 [VC4_PACKET_LINE_WIDTH
] = { 1, 1, 5, "line width", NULL
},
412 [VC4_PACKET_RHT_X_BOUNDARY
] = { 1, 1, 3, "RHT X boundary", NULL
},
413 [VC4_PACKET_DEPTH_OFFSET
] = { 1, 1, 5, "Depth Offset", NULL
},
414 [VC4_PACKET_CLIP_WINDOW
] = { 1, 1, 9, "Clip Window", NULL
},
415 [VC4_PACKET_VIEWPORT_OFFSET
] = { 1, 1, 5, "Viewport Offset", NULL
},
416 [VC4_PACKET_CLIPPER_XY_SCALING
] = { 1, 1, 9, "Clipper XY Scaling", NULL
},
417 /* Note: The docs say this was also 105, but it was 106 in the
418 * initial userland code drop.
420 [VC4_PACKET_CLIPPER_Z_SCALING
] = { 1, 1, 9, "Clipper Z Scale and Offset", NULL
},
422 [VC4_PACKET_TILE_BINNING_MODE_CONFIG
] = { 1, 0, 16, "tile binning configuration", validate_tile_binning_config
},
424 /* XXX: Do we need to validate this one? It's got width/height in it.
426 [VC4_PACKET_TILE_RENDERING_MODE_CONFIG
] = { 0, 1, 11, "tile rendering mode configuration", validate_tile_rendering_mode_config
},
428 [VC4_PACKET_CLEAR_COLORS
] = { 0, 1, 14, "Clear Colors", NULL
},
430 [VC4_PACKET_TILE_COORDINATES
] = { 0, 1, 3, "Tile Coordinates", validate_tile_coordinates
},
432 [VC4_PACKET_GEM_HANDLES
] = { 1, 1, 9, "GEM handles", validate_gem_handles
},
436 vc4_validate_cl(struct drm_device
*dev
,
441 struct exec_info
*exec
)
443 uint32_t dst_offset
= 0;
444 uint32_t src_offset
= 0;
446 while (src_offset
< len
) {
447 void *dst_pkt
= validated
+ dst_offset
;
448 void *src_pkt
= unvalidated
+ src_offset
;
449 u8 cmd
= *(uint8_t *)src_pkt
;
450 const struct cmd_info
*info
;
452 if (cmd
> ARRAY_SIZE(cmd_info
)) {
453 DRM_ERROR("0x%08x: packet %d out of bounds\n",
458 info
= &cmd_info
[cmd
];
460 DRM_ERROR("0x%08x: packet %d invalid\n",
466 DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
467 src_offset
, cmd
, info
->name
, info
->len
);
470 if ((is_bin
&& !info
->bin
) ||
471 (!is_bin
&& !info
->render
)) {
472 DRM_ERROR("0x%08x: packet %d (%s) invalid for %s\n",
473 src_offset
, cmd
, info
->name
,
474 is_bin
? "binner" : "render");
478 if (src_offset
+ info
->len
> len
) {
479 DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
480 "exceeds bounds (0x%08x)\n",
481 src_offset
, cmd
, info
->name
, info
->len
,
486 if (cmd
!= VC4_PACKET_GEM_HANDLES
)
487 memcpy(dst_pkt
, src_pkt
, info
->len
);
489 if (info
->func
&& info
->func(exec
,
492 DRM_ERROR("0x%08x: packet %d (%s) failed to "
494 src_offset
, cmd
, info
->name
);
498 src_offset
+= info
->len
;
499 /* GEM handle loading doesn't produce HW packets. */
500 if (cmd
!= VC4_PACKET_GEM_HANDLES
)
501 dst_offset
+= info
->len
;
503 /* When the CL hits halt, it'll stop reading anything else. */
504 if (cmd
== VC4_PACKET_HALT
)
509 exec
->ct0ea
= exec
->ct0ca
+ dst_offset
;
511 if (!exec
->found_start_tile_binning_packet
) {
512 DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
516 exec
->ct1ea
= exec
->ct1ca
+ dst_offset
;
523 reloc_tex(struct exec_info
*exec
,
524 void *uniform_data_u
,
525 struct vc4_texture_sample_info
*sample
,
526 uint32_t texture_handle_index
)
529 struct drm_gem_cma_object
*tex
;
530 uint32_t unvalidated_p0
= *(uint32_t *)(uniform_data_u
+
531 sample
->p_offset
[0]);
532 uint32_t *validated_p0
= exec
->uniforms_v
+ sample
->p_offset
[0];
534 if (!vc4_use_bo(exec
, texture_handle_index
, VC4_MODE_RENDER
, &tex
))
537 *validated_p0
= tex
->paddr
+ unvalidated_p0
;
543 validate_shader_rec(struct drm_device
*dev
,
544 struct exec_info
*exec
,
545 struct vc4_shader_state
*state
)
547 uint32_t *src_handles
;
549 enum shader_rec_reloc_type
{
553 struct shader_rec_reloc
{
554 enum shader_rec_reloc_type type
;
557 static const struct shader_rec_reloc gl_relocs
[] = {
558 { RELOC_CODE
, 4 }, /* fs */
559 { RELOC_CODE
, 16 }, /* vs */
560 { RELOC_CODE
, 28 }, /* cs */
562 static const struct shader_rec_reloc nv_relocs
[] = {
563 { RELOC_CODE
, 4 }, /* fs */
566 const struct shader_rec_reloc
*relocs
;
567 struct drm_gem_cma_object
*bo
[ARRAY_SIZE(gl_relocs
) + 8];
568 uint32_t nr_attributes
= 0, nr_fixed_relocs
, nr_relocs
, packet_size
;
570 struct vc4_validated_shader_info
*validated_shader
= NULL
;
572 if (state
->packet
== VC4_PACKET_NV_SHADER_STATE
) {
574 nr_fixed_relocs
= ARRAY_SIZE(nv_relocs
);
579 nr_fixed_relocs
= ARRAY_SIZE(gl_relocs
);
581 nr_attributes
= state
->addr
& 0x7;
582 if (nr_attributes
== 0)
584 packet_size
= gl_shader_rec_size(state
->addr
);
586 nr_relocs
= nr_fixed_relocs
+ nr_attributes
;
588 if (nr_relocs
* 4 > exec
->shader_rec_size
) {
589 DRM_ERROR("overflowed shader recs reading %d handles "
590 "from %d bytes left\n",
591 nr_relocs
, exec
->shader_rec_size
);
594 src_handles
= exec
->shader_rec_u
;
595 exec
->shader_rec_u
+= nr_relocs
* 4;
596 exec
->shader_rec_size
-= nr_relocs
* 4;
598 if (packet_size
> exec
->shader_rec_size
) {
599 DRM_ERROR("overflowed shader recs copying %db packet "
600 "from %d bytes left\n",
601 packet_size
, exec
->shader_rec_size
);
604 pkt_u
= exec
->shader_rec_u
;
605 pkt_v
= exec
->shader_rec_v
;
606 memcpy(pkt_v
, pkt_u
, packet_size
);
607 exec
->shader_rec_u
+= packet_size
;
608 /* Shader recs have to be aligned to 16 bytes (due to the attribute
609 * flags being in the low bytes), so round the next validated shader
610 * rec address up. This should be safe, since we've got so many
611 * relocations in a shader rec packet.
613 BUG_ON(roundup(packet_size
, 16) - packet_size
> nr_relocs
* 4);
614 exec
->shader_rec_v
+= roundup(packet_size
, 16);
615 exec
->shader_rec_size
-= packet_size
;
617 for (i
= 0; i
< nr_relocs
; i
++) {
618 enum vc4_bo_mode mode
;
620 if (i
< nr_fixed_relocs
&& relocs
[i
].type
== RELOC_CODE
)
621 mode
= VC4_MODE_SHADER
;
623 mode
= VC4_MODE_RENDER
;
625 if (!vc4_use_bo(exec
, src_handles
[i
], mode
, &bo
[i
])) {
630 for (i
= 0; i
< nr_fixed_relocs
; i
++) {
631 uint32_t o
= relocs
[i
].offset
;
632 uint32_t src_offset
= *(uint32_t *)(pkt_u
+ o
);
633 uint32_t *texture_handles_u
;
634 void *uniform_data_u
;
637 *(uint32_t *)(pkt_v
+ o
) = bo
[i
]->paddr
+ src_offset
;
639 switch (relocs
[i
].type
) {
641 kfree(validated_shader
);
642 validated_shader
= vc4_validate_shader(bo
[i
],
644 if (!validated_shader
)
647 if (validated_shader
->uniforms_src_size
>
648 exec
->uniforms_size
) {
649 DRM_ERROR("Uniforms src buffer overflow\n");
653 texture_handles_u
= exec
->uniforms_u
;
654 uniform_data_u
= (texture_handles_u
+
655 validated_shader
->num_texture_samples
);
657 memcpy(exec
->uniforms_v
, uniform_data_u
,
658 validated_shader
->uniforms_size
);
661 tex
< validated_shader
->num_texture_samples
;
665 &validated_shader
->texture_samples
[tex
],
666 texture_handles_u
[tex
])) {
671 *(uint32_t *)(pkt_v
+ o
+ 4) = exec
->uniforms_p
;
673 exec
->uniforms_u
+= validated_shader
->uniforms_src_size
;
674 exec
->uniforms_v
+= validated_shader
->uniforms_size
;
675 exec
->uniforms_p
+= validated_shader
->uniforms_size
;
684 for (i
= 0; i
< nr_attributes
; i
++) {
685 /* XXX: validation */
686 uint32_t o
= 36 + i
* 8;
687 *(uint32_t *)(pkt_v
+ o
) = (bo
[nr_fixed_relocs
+ i
]->paddr
+
688 *(uint32_t *)(pkt_u
+ o
));
691 kfree(validated_shader
);
696 kfree(validated_shader
);
701 vc4_validate_shader_recs(struct drm_device
*dev
,
702 struct exec_info
*exec
)
707 for (i
= 0; i
< exec
->shader_state_count
; i
++) {
708 ret
= validate_shader_rec(dev
, exec
, &exec
->shader_state
[i
]);