/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file v3d_job.c
 *
 * Functions for submitting VC5 render jobs to the kernel.
 */
30 #include "v3d_context.h"
31 /* The OQ/semaphore packets are the same across V3D versions. */
32 #define V3D_VERSION 33
33 #include "broadcom/cle/v3dx_pack.h"
34 #include "broadcom/common/v3d_macros.h"
35 #include "util/hash_table.h"
36 #include "util/ralloc.h"
38 #include "broadcom/clif/clif_dump.h"
41 v3d_job_free(struct v3d_context
*v3d
, struct v3d_job
*job
)
43 set_foreach(job
->bos
, entry
) {
44 struct v3d_bo
*bo
= (struct v3d_bo
*)entry
->key
;
45 v3d_bo_unreference(&bo
);
48 _mesa_hash_table_remove_key(v3d
->jobs
, &job
->key
);
50 if (job
->write_prscs
) {
51 set_foreach(job
->write_prscs
, entry
) {
52 const struct pipe_resource
*prsc
= entry
->key
;
54 _mesa_hash_table_remove_key(v3d
->write_jobs
, prsc
);
58 for (int i
= 0; i
< V3D_MAX_DRAW_BUFFERS
; i
++) {
60 _mesa_hash_table_remove_key(v3d
->write_jobs
,
61 job
->cbufs
[i
]->texture
);
62 pipe_surface_reference(&job
->cbufs
[i
], NULL
);
66 struct v3d_resource
*rsc
= v3d_resource(job
->zsbuf
->texture
);
67 if (rsc
->separate_stencil
)
68 _mesa_hash_table_remove_key(v3d
->write_jobs
,
69 &rsc
->separate_stencil
->base
);
71 _mesa_hash_table_remove_key(v3d
->write_jobs
,
73 pipe_surface_reference(&job
->zsbuf
, NULL
);
79 v3d_destroy_cl(&job
->bcl
);
80 v3d_destroy_cl(&job
->rcl
);
81 v3d_destroy_cl(&job
->indirect
);
82 v3d_bo_unreference(&job
->tile_alloc
);
83 v3d_bo_unreference(&job
->tile_state
);
88 static struct v3d_job
*
89 v3d_job_create(struct v3d_context
*v3d
)
91 struct v3d_job
*job
= rzalloc(v3d
, struct v3d_job
);
95 v3d_init_cl(job
, &job
->bcl
);
96 v3d_init_cl(job
, &job
->rcl
);
97 v3d_init_cl(job
, &job
->indirect
);
100 job
->draw_min_y
= ~0;
104 job
->bos
= _mesa_set_create(job
,
106 _mesa_key_pointer_equal
);
111 v3d_job_add_bo(struct v3d_job
*job
, struct v3d_bo
*bo
)
116 if (_mesa_set_search(job
->bos
, bo
))
119 v3d_bo_reference(bo
);
120 _mesa_set_add(job
->bos
, bo
);
121 job
->referenced_size
+= bo
->size
;
123 uint32_t *bo_handles
= (void *)(uintptr_t)job
->submit
.bo_handles
;
125 if (job
->submit
.bo_handle_count
>= job
->bo_handles_size
) {
126 job
->bo_handles_size
= MAX2(4, job
->bo_handles_size
* 2);
127 bo_handles
= reralloc(job
, bo_handles
,
128 uint32_t, job
->bo_handles_size
);
129 job
->submit
.bo_handles
= (uintptr_t)(void *)bo_handles
;
131 bo_handles
[job
->submit
.bo_handle_count
++] = bo
->handle
;
135 v3d_job_add_write_resource(struct v3d_job
*job
, struct pipe_resource
*prsc
)
137 struct v3d_context
*v3d
= job
->v3d
;
139 if (!job
->write_prscs
) {
140 job
->write_prscs
= _mesa_set_create(job
,
142 _mesa_key_pointer_equal
);
145 _mesa_set_add(job
->write_prscs
, prsc
);
146 _mesa_hash_table_insert(v3d
->write_jobs
, prsc
, job
);
150 v3d_flush_jobs_using_bo(struct v3d_context
*v3d
, struct v3d_bo
*bo
)
152 hash_table_foreach(v3d
->jobs
, entry
) {
153 struct v3d_job
*job
= entry
->data
;
155 if (_mesa_set_search(job
->bos
, bo
))
156 v3d_job_submit(v3d
, job
);
161 v3d_job_add_tf_write_resource(struct v3d_job
*job
, struct pipe_resource
*prsc
)
163 v3d_job_add_write_resource(job
, prsc
);
165 if (!job
->tf_write_prscs
)
166 job
->tf_write_prscs
= _mesa_pointer_set_create(job
);
168 _mesa_set_add(job
->tf_write_prscs
, prsc
);
172 v3d_job_writes_resource_from_tf(struct v3d_job
*job
,
173 struct pipe_resource
*prsc
)
175 if (!job
->tf_enabled
)
178 if (!job
->tf_write_prscs
)
181 return _mesa_set_search(job
->tf_write_prscs
, prsc
) != NULL
;
185 v3d_flush_jobs_writing_resource(struct v3d_context
*v3d
,
186 struct pipe_resource
*prsc
,
189 struct hash_entry
*entry
= _mesa_hash_table_search(v3d
->write_jobs
,
194 struct v3d_job
*job
= entry
->data
;
196 /* For writes from TF in the same job we use the "Wait for TF"
197 * feature provided by the hardware so we don't want to flush.
198 * The exception to this is when the caller is about to map the
199 * resource since in that case we don't have a 'Wait for TF' command
200 * the in command stream. In this scenario the caller is expected
201 * to set 'always_flush' to True.
206 } else if (!v3d
->job
|| v3d
->job
!= job
) {
207 /* Write from a different job: always flush */
210 /* Write from currrent job: flush if not TF */
211 needs_flush
= !v3d_job_writes_resource_from_tf(job
, prsc
);
215 v3d_job_submit(v3d
, job
);
219 v3d_flush_jobs_reading_resource(struct v3d_context
*v3d
,
220 struct pipe_resource
*prsc
)
222 struct v3d_resource
*rsc
= v3d_resource(prsc
);
224 /* We only need to force the flush on TF writes, which is the only
225 * case where we might skip the flush to use the 'Wait for TF'
226 * command. Here we are flushing for a read, which means that the
227 * caller intends to write to the resource, so we don't care if
228 * there was a previous TF write to it.
230 v3d_flush_jobs_writing_resource(v3d
, prsc
, false);
232 hash_table_foreach(v3d
->jobs
, entry
) {
233 struct v3d_job
*job
= entry
->data
;
235 if (_mesa_set_search(job
->bos
, rsc
->bo
)) {
236 v3d_job_submit(v3d
, job
);
237 /* Reminder: v3d->jobs is safe to keep iterating even
238 * after deletion of an entry.
246 v3d_job_set_tile_buffer_size(struct v3d_job
*job
)
248 static const uint8_t tile_sizes
[] = {
255 int tile_size_index
= 0;
257 tile_size_index
+= 2;
259 if (job
->cbufs
[3] || job
->cbufs
[2])
260 tile_size_index
+= 2;
261 else if (job
->cbufs
[1])
264 int max_bpp
= RENDER_TARGET_MAXIMUM_32BPP
;
265 for (int i
= 0; i
< V3D_MAX_DRAW_BUFFERS
; i
++) {
267 struct v3d_surface
*surf
= v3d_surface(job
->cbufs
[i
]);
268 max_bpp
= MAX2(max_bpp
, surf
->internal_bpp
);
271 job
->internal_bpp
= max_bpp
;
272 STATIC_ASSERT(RENDER_TARGET_MAXIMUM_32BPP
== 0);
273 tile_size_index
+= max_bpp
;
275 assert(tile_size_index
< ARRAY_SIZE(tile_sizes
));
276 job
->tile_width
= tile_sizes
[tile_size_index
* 2 + 0];
277 job
->tile_height
= tile_sizes
[tile_size_index
* 2 + 1];
281 * Returns a v3d_job struture for tracking V3D rendering to a particular FBO.
283 * If we've already started rendering to this FBO, then return the same job,
284 * otherwise make a new one. If we're beginning rendering to an FBO, make
285 * sure that any previous reads of the FBO (or writes to its color/Z surfaces)
289 v3d_get_job(struct v3d_context
*v3d
,
290 struct pipe_surface
**cbufs
, struct pipe_surface
*zsbuf
)
292 /* Return the existing job for this FBO if we have one */
293 struct v3d_job_key local_key
= {
302 struct hash_entry
*entry
= _mesa_hash_table_search(v3d
->jobs
,
307 /* Creating a new job. Make sure that any previous jobs reading or
308 * writing these buffers are flushed.
310 struct v3d_job
*job
= v3d_job_create(v3d
);
312 for (int i
= 0; i
< V3D_MAX_DRAW_BUFFERS
; i
++) {
314 v3d_flush_jobs_reading_resource(v3d
, cbufs
[i
]->texture
);
315 pipe_surface_reference(&job
->cbufs
[i
], cbufs
[i
]);
317 if (cbufs
[i
]->texture
->nr_samples
> 1)
322 v3d_flush_jobs_reading_resource(v3d
, zsbuf
->texture
);
323 pipe_surface_reference(&job
->zsbuf
, zsbuf
);
324 if (zsbuf
->texture
->nr_samples
> 1)
328 for (int i
= 0; i
< V3D_MAX_DRAW_BUFFERS
; i
++) {
330 _mesa_hash_table_insert(v3d
->write_jobs
,
331 cbufs
[i
]->texture
, job
);
334 _mesa_hash_table_insert(v3d
->write_jobs
, zsbuf
->texture
, job
);
336 struct v3d_resource
*rsc
= v3d_resource(zsbuf
->texture
);
337 if (rsc
->separate_stencil
) {
338 v3d_flush_jobs_reading_resource(v3d
,
339 &rsc
->separate_stencil
->base
);
340 _mesa_hash_table_insert(v3d
->write_jobs
,
341 &rsc
->separate_stencil
->base
,
346 memcpy(&job
->key
, &local_key
, sizeof(local_key
));
347 _mesa_hash_table_insert(v3d
->jobs
, &job
->key
, job
);
353 v3d_get_job_for_fbo(struct v3d_context
*v3d
)
358 struct pipe_surface
**cbufs
= v3d
->framebuffer
.cbufs
;
359 struct pipe_surface
*zsbuf
= v3d
->framebuffer
.zsbuf
;
360 struct v3d_job
*job
= v3d_get_job(v3d
, cbufs
, zsbuf
);
362 if (v3d
->framebuffer
.samples
>= 1)
365 v3d_job_set_tile_buffer_size(job
);
367 /* The dirty flags are tracking what's been updated while v3d->job has
368 * been bound, so set them all to ~0 when switching between jobs. We
369 * also need to reset all state at the start of rendering.
373 /* If we're binding to uninitialized buffers, no need to load their
374 * contents before drawing.
376 for (int i
= 0; i
< 4; i
++) {
378 struct v3d_resource
*rsc
= v3d_resource(cbufs
[i
]->texture
);
380 job
->clear
|= PIPE_CLEAR_COLOR0
<< i
;
385 struct v3d_resource
*rsc
= v3d_resource(zsbuf
->texture
);
387 job
->clear
|= PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
;
390 job
->draw_tiles_x
= DIV_ROUND_UP(v3d
->framebuffer
.width
,
392 job
->draw_tiles_y
= DIV_ROUND_UP(v3d
->framebuffer
.height
,
401 v3d_clif_dump(struct v3d_context
*v3d
, struct v3d_job
*job
)
403 if (!(V3D_DEBUG
& (V3D_DEBUG_CL
| V3D_DEBUG_CLIF
)))
406 struct clif_dump
*clif
= clif_dump_init(&v3d
->screen
->devinfo
,
408 V3D_DEBUG
& V3D_DEBUG_CL
);
410 set_foreach(job
->bos
, entry
) {
411 struct v3d_bo
*bo
= (void *)entry
->key
;
412 char *name
= ralloc_asprintf(NULL
, "%s_0x%x",
413 bo
->name
, bo
->offset
);
416 clif_dump_add_bo(clif
, name
, bo
->offset
, bo
->size
, bo
->map
);
421 clif_dump(clif
, &job
->submit
);
423 clif_dump_destroy(clif
);
427 * Submits the job to the kernel and then reinitializes it.
430 v3d_job_submit(struct v3d_context
*v3d
, struct v3d_job
*job
)
432 MAYBE_UNUSED
struct v3d_screen
*screen
= v3d
->screen
;
434 if (!job
->needs_flush
)
437 if (v3d
->screen
->devinfo
.ver
>= 41)
442 if (cl_offset(&job
->bcl
) > 0) {
443 if (screen
->devinfo
.ver
>= 41)
444 v3d41_bcl_epilogue(v3d
, job
);
446 v3d33_bcl_epilogue(v3d
, job
);
449 /* While the RCL will implicitly depend on the last RCL to have
450 * finished, we also need to block on any previous TFU job we may have
453 job
->submit
.in_sync_rcl
= v3d
->out_sync
;
455 /* Update the sync object for the last rendering by our context. */
456 job
->submit
.out_sync
= v3d
->out_sync
;
458 job
->submit
.bcl_end
= job
->bcl
.bo
->offset
+ cl_offset(&job
->bcl
);
459 job
->submit
.rcl_end
= job
->rcl
.bo
->offset
+ cl_offset(&job
->rcl
);
461 /* On V3D 4.1, the tile alloc/state setup moved to register writes
462 * instead of binner packets.
464 if (screen
->devinfo
.ver
>= 41) {
465 v3d_job_add_bo(job
, job
->tile_alloc
);
466 job
->submit
.qma
= job
->tile_alloc
->offset
;
467 job
->submit
.qms
= job
->tile_alloc
->size
;
469 v3d_job_add_bo(job
, job
->tile_state
);
470 job
->submit
.qts
= job
->tile_state
->offset
;
473 v3d_clif_dump(v3d
, job
);
475 if (!(V3D_DEBUG
& V3D_DEBUG_NORAST
)) {
478 ret
= v3d_ioctl(v3d
->fd
, DRM_IOCTL_V3D_SUBMIT_CL
, &job
->submit
);
479 static bool warned
= false;
480 if (ret
&& !warned
) {
481 fprintf(stderr
, "Draw call returned %s. "
482 "Expect corruption.\n", strerror(errno
));
488 v3d_job_free(v3d
, job
);
492 v3d_job_compare(const void *a
, const void *b
)
494 return memcmp(a
, b
, sizeof(struct v3d_job_key
)) == 0;
498 v3d_job_hash(const void *key
)
500 return _mesa_hash_data(key
, sizeof(struct v3d_job_key
));
504 v3d_job_init(struct v3d_context
*v3d
)
506 v3d
->jobs
= _mesa_hash_table_create(v3d
,
509 v3d
->write_jobs
= _mesa_hash_table_create(v3d
,
511 _mesa_key_pointer_equal
);