2 * Copyright © 2014 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 #include "pipe/p_defines.h"
28 #include "util/ralloc.h"
29 #include "util/u_inlines.h"
30 #include "util/u_memory.h"
31 #include "util/u_blitter.h"
32 #include "indices/u_primconvert.h"
33 #include "pipe/p_screen.h"
35 #include "vc4_screen.h"
36 #include "vc4_context.h"
37 #include "vc4_resource.h"
40 * Emits a no-op STORE_TILE_BUFFER_GENERAL.
42 * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
43 * some sort before another load is triggered.
46 vc4_store_before_load(struct vc4_context
*vc4
, bool *coords_emitted
)
51 cl_u8(&vc4
->rcl
, VC4_PACKET_STORE_TILE_BUFFER_GENERAL
);
52 cl_u8(&vc4
->rcl
, VC4_LOADSTORE_TILE_BUFFER_NONE
);
53 cl_u8(&vc4
->rcl
, (VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR
|
54 VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR
|
55 VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR
));
56 cl_u32(&vc4
->rcl
, 0); /* no address, since we're in None mode */
58 *coords_emitted
= false;
62 * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
64 * The tile coordinates packet triggers a pending load if there is one, are
65 * used for clipping during rendering, and determine where loads/stores happen
66 * relative to their base address.
69 vc4_tile_coordinates(struct vc4_context
*vc4
, uint32_t x
, uint32_t y
,
75 cl_u8(&vc4
->rcl
, VC4_PACKET_TILE_COORDINATES
);
79 *coords_emitted
= true;
83 vc4_setup_rcl(struct vc4_context
*vc4
)
85 struct vc4_surface
*csurf
= vc4_surface(vc4
->framebuffer
.cbufs
[0]);
86 struct vc4_resource
*ctex
= csurf
? vc4_resource(csurf
->base
.texture
) : NULL
;
87 struct vc4_surface
*zsurf
= vc4_surface(vc4
->framebuffer
.zsbuf
);
88 struct vc4_resource
*ztex
= zsurf
? vc4_resource(zsurf
->base
.texture
) : NULL
;
91 vc4
->resolve
&= ~PIPE_CLEAR_COLOR0
;
93 vc4
->resolve
&= ~(PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
);
94 uint32_t resolve_uncleared
= vc4
->resolve
& ~vc4
->cleared
;
95 uint32_t width
= vc4
->framebuffer
.width
;
96 uint32_t height
= vc4
->framebuffer
.height
;
97 uint32_t stride_in_tiles
= align(width
, 64) / 64;
99 assert(vc4
->draw_min_x
!= ~0 && vc4
->draw_min_y
!= ~0);
100 uint32_t min_x_tile
= vc4
->draw_min_x
/ 64;
101 uint32_t min_y_tile
= vc4
->draw_min_y
/ 64;
102 uint32_t max_x_tile
= (vc4
->draw_max_x
- 1) / 64;
103 uint32_t max_y_tile
= (vc4
->draw_max_y
- 1) / 64;
104 uint32_t xtiles
= max_x_tile
- min_x_tile
+ 1;
105 uint32_t ytiles
= max_y_tile
- min_y_tile
+ 1;
108 fprintf(stderr
, "RCL: resolve 0x%x clear 0x%x resolve uncleared 0x%x\n",
114 uint32_t reloc_size
= 9;
115 uint32_t clear_size
= 14;
116 uint32_t config_size
= 11 + reloc_size
;
117 uint32_t loadstore_size
= 7 + reloc_size
;
118 uint32_t tilecoords_size
= 3;
119 uint32_t branch_size
= 5 + reloc_size
;
120 uint32_t color_store_size
= 1;
121 cl_ensure_space(&vc4
->rcl
,
125 xtiles
* ytiles
* (loadstore_size
* 4 +
126 tilecoords_size
* 3 +
130 cl_u8(&vc4
->rcl
, VC4_PACKET_CLEAR_COLORS
);
131 cl_u32(&vc4
->rcl
, vc4
->clear_color
[0]);
132 cl_u32(&vc4
->rcl
, vc4
->clear_color
[1]);
133 cl_u32(&vc4
->rcl
, vc4
->clear_depth
);
134 cl_u8(&vc4
->rcl
, vc4
->clear_stencil
);
136 /* The rendering mode config determines the pointer that's used for
137 * VC4_PACKET_STORE_MS_TILE_BUFFER address computations. The kernel
138 * could handle a no-relocation rendering mode config and deny those
139 * packets, but instead we just tell the kernel we're doing our color
140 * rendering to the Z buffer, and just don't emit any of those
143 struct vc4_surface
*render_surf
= csurf
? csurf
: zsurf
;
144 struct vc4_resource
*render_tex
= vc4_resource(render_surf
->base
.texture
);
145 cl_start_reloc(&vc4
->rcl
, 1);
146 cl_u8(&vc4
->rcl
, VC4_PACKET_TILE_RENDERING_MODE_CONFIG
);
147 cl_reloc(vc4
, &vc4
->rcl
, render_tex
->bo
, render_surf
->offset
);
148 cl_u16(&vc4
->rcl
, width
);
149 cl_u16(&vc4
->rcl
, height
);
150 cl_u16(&vc4
->rcl
, ((render_surf
->tiling
<<
151 VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT
) |
152 (vc4_rt_format_is_565(render_surf
->base
.format
) ?
153 VC4_RENDER_CONFIG_FORMAT_BGR565
:
154 VC4_RENDER_CONFIG_FORMAT_RGBA8888
)));
156 /* The tile buffer normally gets cleared when the previous tile is
157 * stored. If the clear values changed between frames, then the tile
158 * buffer has stale clear values in it, so we have to do a store in
159 * None mode (no writes) so that we trigger the tile buffer clear.
161 * Excess clearing is only a performance cost, since per-tile contents
162 * will be loaded/stored in the loop below.
164 if (vc4
->cleared
& (PIPE_CLEAR_COLOR0
|
166 PIPE_CLEAR_STENCIL
)) {
167 cl_u8(&vc4
->rcl
, VC4_PACKET_TILE_COORDINATES
);
171 cl_u8(&vc4
->rcl
, VC4_PACKET_STORE_TILE_BUFFER_GENERAL
);
172 cl_u16(&vc4
->rcl
, VC4_LOADSTORE_TILE_BUFFER_NONE
);
173 cl_u32(&vc4
->rcl
, 0); /* no address, since we're in None mode */
176 uint32_t color_hindex
= ctex
? vc4_gem_hindex(vc4
, ctex
->bo
) : 0;
177 uint32_t depth_hindex
= ztex
? vc4_gem_hindex(vc4
, ztex
->bo
) : 0;
178 uint32_t tile_alloc_hindex
= vc4_gem_hindex(vc4
, vc4
->tile_alloc
);
180 for (int y
= min_y_tile
; y
<= max_y_tile
; y
++) {
181 for (int x
= min_x_tile
; x
<= max_x_tile
; x
++) {
182 bool end_of_frame
= (x
== max_x_tile
&&
184 bool coords_emitted
= false;
186 /* Note that the load doesn't actually occur until the
187 * tile coords packet is processed, and only one load
188 * may be outstanding at a time.
190 if (resolve_uncleared
& PIPE_CLEAR_COLOR
) {
191 vc4_store_before_load(vc4
, &coords_emitted
);
193 cl_start_reloc(&vc4
->rcl
, 1);
194 cl_u8(&vc4
->rcl
, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
);
196 VC4_LOADSTORE_TILE_BUFFER_COLOR
|
198 VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT
));
200 vc4_rt_format_is_565(csurf
->base
.format
) ?
201 VC4_LOADSTORE_TILE_BUFFER_BGR565
:
202 VC4_LOADSTORE_TILE_BUFFER_RGBA8888
);
203 cl_reloc_hindex(&vc4
->rcl
, color_hindex
,
206 vc4_tile_coordinates(vc4
, x
, y
, &coords_emitted
);
209 if (resolve_uncleared
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) {
210 vc4_store_before_load(vc4
, &coords_emitted
);
212 cl_start_reloc(&vc4
->rcl
, 1);
213 cl_u8(&vc4
->rcl
, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
);
215 VC4_LOADSTORE_TILE_BUFFER_ZS
|
217 VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT
));
219 cl_reloc_hindex(&vc4
->rcl
, depth_hindex
,
222 vc4_tile_coordinates(vc4
, x
, y
, &coords_emitted
);
225 /* Clipping depends on tile coordinates having been
226 * emitted, so make sure it's happened even if
227 * everything was cleared to start.
229 vc4_tile_coordinates(vc4
, x
, y
, &coords_emitted
);
231 /* Wait for the binner before jumping to the first
234 if (x
== min_x_tile
&& y
== min_y_tile
)
235 cl_u8(&vc4
->rcl
, VC4_PACKET_WAIT_ON_SEMAPHORE
);
237 cl_start_reloc(&vc4
->rcl
, 1);
238 cl_u8(&vc4
->rcl
, VC4_PACKET_BRANCH_TO_SUB_LIST
);
239 cl_reloc_hindex(&vc4
->rcl
, tile_alloc_hindex
,
240 (y
* stride_in_tiles
+ x
) * 32);
242 if (vc4
->resolve
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) {
243 vc4_tile_coordinates(vc4
, x
, y
, &coords_emitted
);
245 cl_start_reloc(&vc4
->rcl
, 1);
246 cl_u8(&vc4
->rcl
, VC4_PACKET_STORE_TILE_BUFFER_GENERAL
);
248 VC4_LOADSTORE_TILE_BUFFER_ZS
|
250 VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT
));
252 VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR
);
253 cl_reloc_hindex(&vc4
->rcl
, depth_hindex
,
256 !(vc4
->resolve
& PIPE_CLEAR_COLOR0
)) ?
257 VC4_LOADSTORE_TILE_BUFFER_EOF
: 0));
259 coords_emitted
= false;
262 if (vc4
->resolve
& PIPE_CLEAR_COLOR0
) {
263 vc4_tile_coordinates(vc4
, x
, y
, &coords_emitted
);
266 VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF
);
269 VC4_PACKET_STORE_MS_TILE_BUFFER
);
272 coords_emitted
= false;
275 /* One of the bits needs to have been set that would
276 * have triggered an EOF.
278 assert(vc4
->resolve
& (PIPE_CLEAR_COLOR0
|
280 PIPE_CLEAR_STENCIL
));
281 /* Any coords emitted must also have been consumed by
284 assert(!coords_emitted
);
288 if (vc4
->resolve
& PIPE_CLEAR_COLOR0
)
291 if (vc4
->resolve
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
))
296 vc4_draw_reset(struct vc4_context
*vc4
)
298 struct vc4_bo
**referenced_bos
= vc4
->bo_pointers
.base
;
299 for (int i
= 0; i
< (vc4
->bo_handles
.next
-
300 vc4
->bo_handles
.base
) / 4; i
++) {
301 vc4_bo_unreference(&referenced_bos
[i
]);
303 vc4_reset_cl(&vc4
->bcl
);
304 vc4_reset_cl(&vc4
->rcl
);
305 vc4_reset_cl(&vc4
->shader_rec
);
306 vc4_reset_cl(&vc4
->uniforms
);
307 vc4_reset_cl(&vc4
->bo_handles
);
308 vc4_reset_cl(&vc4
->bo_pointers
);
309 vc4
->shader_rec_count
= 0;
311 vc4
->needs_flush
= false;
312 vc4
->draw_call_queued
= false;
314 /* We have no hardware context saved between our draw calls, so we
315 * need to flag the next draw as needing all state emitted. Emitting
316 * all state at the start of our draws is also what ensures that we
317 * return to the state we need after a previous tile has finished.
323 vc4
->draw_min_x
= ~0;
324 vc4
->draw_min_y
= ~0;
330 vc4_flush(struct pipe_context
*pctx
)
332 struct vc4_context
*vc4
= vc4_context(pctx
);
334 if (!vc4
->needs_flush
)
337 /* The RCL setup would choke if the draw bounds cause no drawing, so
338 * just drop the drawing if that's the case.
340 if (vc4
->draw_max_x
<= vc4
->draw_min_x
||
341 vc4
->draw_max_y
<= vc4
->draw_min_y
) {
346 /* Increment the semaphore indicating that binning is done and
347 * unblocking the render thread. Note that this doesn't act until the
350 cl_u8(&vc4
->bcl
, VC4_PACKET_INCREMENT_SEMAPHORE
);
351 /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
352 cl_u8(&vc4
->bcl
, VC4_PACKET_FLUSH
);
356 if (vc4_debug
& VC4_DEBUG_CL
) {
357 fprintf(stderr
, "BCL:\n");
358 vc4_dump_cl(vc4
->bcl
.base
, vc4
->bcl
.next
- vc4
->bcl
.base
, false);
359 fprintf(stderr
, "RCL:\n");
360 vc4_dump_cl(vc4
->rcl
.base
, vc4
->rcl
.next
- vc4
->rcl
.base
, true);
363 struct drm_vc4_submit_cl submit
;
364 memset(&submit
, 0, sizeof(submit
));
366 submit
.bo_handles
= (uintptr_t)vc4
->bo_handles
.base
;
367 submit
.bo_handle_count
= (vc4
->bo_handles
.next
-
368 vc4
->bo_handles
.base
) / 4;
369 submit
.bin_cl
= (uintptr_t)vc4
->bcl
.base
;
370 submit
.bin_cl_size
= vc4
->bcl
.next
- vc4
->bcl
.base
;
371 submit
.render_cl
= (uintptr_t)vc4
->rcl
.base
;
372 submit
.render_cl_size
= vc4
->rcl
.next
- vc4
->rcl
.base
;
373 submit
.shader_rec
= (uintptr_t)vc4
->shader_rec
.base
;
374 submit
.shader_rec_size
= vc4
->shader_rec
.next
- vc4
->shader_rec
.base
;
375 submit
.shader_rec_count
= vc4
->shader_rec_count
;
376 submit
.uniforms
= (uintptr_t)vc4
->uniforms
.base
;
377 submit
.uniforms_size
= vc4
->uniforms
.next
- vc4
->uniforms
.base
;
379 if (!(vc4_debug
& VC4_DEBUG_NORAST
)) {
382 #ifndef USE_VC4_SIMULATOR
383 ret
= drmIoctl(vc4
->fd
, DRM_IOCTL_VC4_SUBMIT_CL
, &submit
);
385 ret
= vc4_simulator_flush(vc4
, &submit
);
388 fprintf(stderr
, "VC4 submit failed\n");
393 vc4
->last_emit_seqno
= submit
.seqno
;
395 if (vc4_debug
& VC4_DEBUG_ALWAYS_SYNC
) {
396 if (!vc4_wait_seqno(vc4
->screen
, vc4
->last_emit_seqno
,
397 PIPE_TIMEOUT_INFINITE
)) {
398 fprintf(stderr
, "Wait failed.\n");
407 vc4_pipe_flush(struct pipe_context
*pctx
, struct pipe_fence_handle
**fence
,
410 struct vc4_context
*vc4
= vc4_context(pctx
);
415 struct vc4_fence
*f
= vc4_fence_create(vc4
->screen
,
416 vc4
->last_emit_seqno
);
417 *fence
= (struct pipe_fence_handle
*)f
;
422 * Flushes the current command lists if they reference the given BO.
424 * This helps avoid flushing the command buffers when unnecessary.
427 vc4_cl_references_bo(struct pipe_context
*pctx
, struct vc4_bo
*bo
)
429 struct vc4_context
*vc4
= vc4_context(pctx
);
431 if (!vc4
->needs_flush
)
434 /* Walk all the referenced BOs in the drawing command list to see if
437 struct vc4_bo
**referenced_bos
= vc4
->bo_pointers
.base
;
438 for (int i
= 0; i
< (vc4
->bo_handles
.next
-
439 vc4
->bo_handles
.base
) / 4; i
++) {
440 if (referenced_bos
[i
] == bo
) {
445 /* Also check for the Z/color buffers, since the references to those
446 * are only added immediately before submit.
448 struct vc4_surface
*csurf
= vc4_surface(vc4
->framebuffer
.cbufs
[0]);
450 struct vc4_resource
*ctex
= vc4_resource(csurf
->base
.texture
);
451 if (ctex
->bo
== bo
) {
456 struct vc4_surface
*zsurf
= vc4_surface(vc4
->framebuffer
.zsbuf
);
458 struct vc4_resource
*ztex
=
459 vc4_resource(zsurf
->base
.texture
);
460 if (ztex
->bo
== bo
) {
469 vc4_invalidate_resource(struct pipe_context
*pctx
, struct pipe_resource
*prsc
)
471 struct vc4_context
*vc4
= vc4_context(pctx
);
472 struct pipe_surface
*zsurf
= vc4
->framebuffer
.zsbuf
;
474 if (zsurf
&& zsurf
->texture
== prsc
)
475 vc4
->resolve
&= ~(PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
);
479 vc4_context_destroy(struct pipe_context
*pctx
)
481 struct vc4_context
*vc4
= vc4_context(pctx
);
484 util_blitter_destroy(vc4
->blitter
);
486 if (vc4
->primconvert
)
487 util_primconvert_destroy(vc4
->primconvert
);
489 util_slab_destroy(&vc4
->transfer_pool
);
491 pipe_surface_reference(&vc4
->framebuffer
.cbufs
[0], NULL
);
492 pipe_surface_reference(&vc4
->framebuffer
.zsbuf
, NULL
);
493 vc4_bo_unreference(&vc4
->tile_alloc
);
494 vc4_bo_unreference(&vc4
->tile_state
);
496 vc4_program_fini(pctx
);
501 struct pipe_context
*
502 vc4_context_create(struct pipe_screen
*pscreen
, void *priv
)
504 struct vc4_screen
*screen
= vc4_screen(pscreen
);
505 struct vc4_context
*vc4
;
507 /* Prevent dumping of the shaders built during context setup. */
508 uint32_t saved_shaderdb_flag
= vc4_debug
& VC4_DEBUG_SHADERDB
;
509 vc4_debug
&= ~VC4_DEBUG_SHADERDB
;
511 vc4
= rzalloc(NULL
, struct vc4_context
);
514 struct pipe_context
*pctx
= &vc4
->base
;
516 vc4
->screen
= screen
;
518 pctx
->screen
= pscreen
;
520 pctx
->destroy
= vc4_context_destroy
;
521 pctx
->flush
= vc4_pipe_flush
;
522 pctx
->invalidate_resource
= vc4_invalidate_resource
;
525 vc4_state_init(pctx
);
526 vc4_program_init(pctx
);
527 vc4_query_init(pctx
);
528 vc4_resource_context_init(pctx
);
530 vc4_init_cl(vc4
, &vc4
->bcl
);
531 vc4_init_cl(vc4
, &vc4
->rcl
);
532 vc4_init_cl(vc4
, &vc4
->shader_rec
);
533 vc4_init_cl(vc4
, &vc4
->uniforms
);
534 vc4_init_cl(vc4
, &vc4
->bo_handles
);
535 vc4_init_cl(vc4
, &vc4
->bo_pointers
);
538 vc4
->fd
= screen
->fd
;
540 util_slab_create(&vc4
->transfer_pool
, sizeof(struct vc4_transfer
),
541 16, UTIL_SLAB_SINGLETHREADED
);
542 vc4
->blitter
= util_blitter_create(pctx
);
546 vc4
->primconvert
= util_primconvert_create(pctx
,
547 (1 << PIPE_PRIM_QUADS
) - 1);
548 if (!vc4
->primconvert
)
551 vc4_debug
|= saved_shaderdb_flag
;