2 * Copyright © 2014-2017 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 * Functions for submitting VC5 render jobs to the kernel.
30 #include "vc5_context.h"
31 #include "util/hash_table.h"
32 #include "util/ralloc.h"
34 #include "broadcom/clif/clif_dump.h"
35 #include "broadcom/cle/v3d_packet_v33_pack.h"
/**
 * Removes @key's entry from @ht, if present.
 *
 * The table API removes by entry, so we have to look the key up first.
 */
static void
remove_from_ht(struct hash_table *ht, void *key)
{
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);

        /* The key may no longer be in the table (e.g. a later job replaced
         * this resource's write_jobs entry), in which case there is nothing
         * to remove and _mesa_hash_table_remove() must not be handed NULL.
         */
        if (entry)
                _mesa_hash_table_remove(ht, entry);
}
45 vc5_job_free(struct vc5_context
*vc5
, struct vc5_job
*job
)
47 struct set_entry
*entry
;
49 set_foreach(job
->bos
, entry
) {
50 struct vc5_bo
*bo
= (struct vc5_bo
*)entry
->key
;
51 vc5_bo_unreference(&bo
);
54 remove_from_ht(vc5
->jobs
, &job
->key
);
56 if (job
->write_prscs
) {
57 struct set_entry
*entry
;
59 set_foreach(job
->write_prscs
, entry
) {
60 const struct pipe_resource
*prsc
= entry
->key
;
62 remove_from_ht(vc5
->write_jobs
, (void *)prsc
);
66 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
68 remove_from_ht(vc5
->write_jobs
, job
->cbufs
[i
]->texture
);
69 pipe_surface_reference(&job
->cbufs
[i
], NULL
);
73 remove_from_ht(vc5
->write_jobs
, job
->zsbuf
->texture
);
74 pipe_surface_reference(&job
->zsbuf
, NULL
);
80 vc5_destroy_cl(&job
->bcl
);
81 vc5_destroy_cl(&job
->rcl
);
82 vc5_destroy_cl(&job
->indirect
);
83 vc5_bo_unreference(&job
->tile_alloc
);
88 static struct vc5_job
*
89 vc5_job_create(struct vc5_context
*vc5
)
91 struct vc5_job
*job
= rzalloc(vc5
, struct vc5_job
);
95 vc5_init_cl(job
, &job
->bcl
);
96 vc5_init_cl(job
, &job
->rcl
);
97 vc5_init_cl(job
, &job
->indirect
);
100 job
->draw_min_y
= ~0;
104 job
->bos
= _mesa_set_create(job
,
106 _mesa_key_pointer_equal
);
111 vc5_job_add_bo(struct vc5_job
*job
, struct vc5_bo
*bo
)
116 if (_mesa_set_search(job
->bos
, bo
))
119 vc5_bo_reference(bo
);
120 _mesa_set_add(job
->bos
, bo
);
122 uint32_t *bo_handles
= (void *)(uintptr_t)job
->submit
.bo_handles
;
124 if (job
->submit
.bo_handle_count
>= job
->bo_handles_size
) {
125 job
->bo_handles_size
= MAX2(4, job
->bo_handles_size
* 2);
126 bo_handles
= reralloc(job
, bo_handles
,
127 uint32_t, job
->bo_handles_size
);
128 job
->submit
.bo_handles
= (uintptr_t)(void *)bo_handles
;
130 bo_handles
[job
->submit
.bo_handle_count
++] = bo
->handle
;
134 vc5_job_add_write_resource(struct vc5_job
*job
, struct pipe_resource
*prsc
)
136 struct vc5_context
*vc5
= job
->vc5
;
138 if (!job
->write_prscs
) {
139 job
->write_prscs
= _mesa_set_create(job
,
141 _mesa_key_pointer_equal
);
144 _mesa_set_add(job
->write_prscs
, prsc
);
145 _mesa_hash_table_insert(vc5
->write_jobs
, prsc
, job
);
149 vc5_flush_jobs_writing_resource(struct vc5_context
*vc5
,
150 struct pipe_resource
*prsc
)
152 struct hash_entry
*entry
= _mesa_hash_table_search(vc5
->write_jobs
,
155 struct vc5_job
*job
= entry
->data
;
156 vc5_job_submit(vc5
, job
);
161 vc5_flush_jobs_reading_resource(struct vc5_context
*vc5
,
162 struct pipe_resource
*prsc
)
164 struct vc5_resource
*rsc
= vc5_resource(prsc
);
166 vc5_flush_jobs_writing_resource(vc5
, prsc
);
168 struct hash_entry
*entry
;
169 hash_table_foreach(vc5
->jobs
, entry
) {
170 struct vc5_job
*job
= entry
->data
;
172 if (_mesa_set_search(job
->bos
, rsc
->bo
)) {
173 vc5_job_submit(vc5
, job
);
174 /* Reminder: vc5->jobs is safe to keep iterating even
175 * after deletion of an entry.
183 vc5_job_set_tile_buffer_size(struct vc5_job
*job
)
185 static const uint8_t tile_sizes
[] = {
192 int tile_size_index
= 0;
194 tile_size_index
+= 2;
196 if (job
->cbufs
[3] || job
->cbufs
[2])
197 tile_size_index
+= 2;
198 else if (job
->cbufs
[1])
201 int max_bpp
= RENDER_TARGET_MAXIMUM_32BPP
;
202 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
204 struct vc5_surface
*surf
= vc5_surface(job
->cbufs
[i
]);
205 max_bpp
= MAX2(max_bpp
, surf
->internal_bpp
);
208 job
->internal_bpp
= max_bpp
;
209 STATIC_ASSERT(RENDER_TARGET_MAXIMUM_32BPP
== 0);
210 tile_size_index
+= max_bpp
;
212 assert(tile_size_index
< ARRAY_SIZE(tile_sizes
));
213 job
->tile_width
= tile_sizes
[tile_size_index
* 2 + 0];
214 job
->tile_height
= tile_sizes
[tile_size_index
* 2 + 1];
 218  * Returns a vc5_job structure for tracking V3D rendering to a particular FBO.
 220  * If we've already started rendering to this FBO, then return the same job,
221 * otherwise make a new one. If we're beginning rendering to an FBO, make
222 * sure that any previous reads of the FBO (or writes to its color/Z surfaces)
226 vc5_get_job(struct vc5_context
*vc5
,
227 struct pipe_surface
**cbufs
, struct pipe_surface
*zsbuf
)
229 /* Return the existing job for this FBO if we have one */
230 struct vc5_job_key local_key
= {
239 struct hash_entry
*entry
= _mesa_hash_table_search(vc5
->jobs
,
244 /* Creating a new job. Make sure that any previous jobs reading or
245 * writing these buffers are flushed.
247 struct vc5_job
*job
= vc5_job_create(vc5
);
249 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
251 vc5_flush_jobs_reading_resource(vc5
, cbufs
[i
]->texture
);
252 pipe_surface_reference(&job
->cbufs
[i
], cbufs
[i
]);
254 if (cbufs
[i
]->texture
->nr_samples
> 1)
259 vc5_flush_jobs_reading_resource(vc5
, zsbuf
->texture
);
260 pipe_surface_reference(&job
->zsbuf
, zsbuf
);
261 if (zsbuf
->texture
->nr_samples
> 1)
265 vc5_job_set_tile_buffer_size(job
);
267 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
269 _mesa_hash_table_insert(vc5
->write_jobs
,
270 cbufs
[i
]->texture
, job
);
273 _mesa_hash_table_insert(vc5
->write_jobs
, zsbuf
->texture
, job
);
275 memcpy(&job
->key
, &local_key
, sizeof(local_key
));
276 _mesa_hash_table_insert(vc5
->jobs
, &job
->key
, job
);
282 vc5_get_job_for_fbo(struct vc5_context
*vc5
)
287 struct pipe_surface
**cbufs
= vc5
->framebuffer
.cbufs
;
288 struct pipe_surface
*zsbuf
= vc5
->framebuffer
.zsbuf
;
289 struct vc5_job
*job
= vc5_get_job(vc5
, cbufs
, zsbuf
);
291 /* The dirty flags are tracking what's been updated while vc5->job has
292 * been bound, so set them all to ~0 when switching between jobs. We
293 * also need to reset all state at the start of rendering.
297 /* If we're binding to uninitialized buffers, no need to load their
298 * contents before drawing.
300 for (int i
= 0; i
< 4; i
++) {
302 struct vc5_resource
*rsc
= vc5_resource(cbufs
[i
]->texture
);
304 job
->cleared
|= PIPE_CLEAR_COLOR0
<< i
;
309 struct vc5_resource
*rsc
= vc5_resource(zsbuf
->texture
);
311 job
->cleared
|= PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
;
314 job
->draw_tiles_x
= DIV_ROUND_UP(vc5
->framebuffer
.width
,
316 job
->draw_tiles_y
= DIV_ROUND_UP(vc5
->framebuffer
.height
,
325 vc5_clif_dump_lookup(void *data
, uint32_t addr
, void **vaddr
)
327 struct vc5_job
*job
= data
;
328 struct set_entry
*entry
;
330 set_foreach(job
->bos
, entry
) {
331 struct vc5_bo
*bo
= (void *)entry
->key
;
333 if (addr
>= bo
->offset
&&
334 addr
< bo
->offset
+ bo
->size
) {
336 *vaddr
= bo
->map
+ addr
- bo
->offset
;
345 vc5_clif_dump(struct vc5_context
*vc5
, struct vc5_job
*job
)
347 if (!(V3D_DEBUG
& V3D_DEBUG_CL
))
350 struct clif_dump
*clif
= clif_dump_init(&vc5
->screen
->devinfo
,
351 stderr
, vc5_clif_dump_lookup
,
354 fprintf(stderr
, "BCL: 0x%08x..0x%08x\n",
355 job
->submit
.bcl_start
, job
->submit
.bcl_end
);
357 clif_dump_add_cl(clif
, job
->submit
.bcl_start
, job
->submit
.bcl_end
);
359 fprintf(stderr
, "RCL: 0x%08x..0x%08x\n",
360 job
->submit
.rcl_start
, job
->submit
.rcl_end
);
361 clif_dump_add_cl(clif
, job
->submit
.rcl_start
, job
->submit
.rcl_end
);
 365  * Submits the job to the kernel and then frees it.
368 vc5_job_submit(struct vc5_context
*vc5
, struct vc5_job
*job
)
370 if (!job
->needs_flush
)
373 /* The RCL setup would choke if the draw bounds cause no drawing, so
374 * just drop the drawing if that's the case.
376 if (job
->draw_max_x
<= job
->draw_min_x
||
377 job
->draw_max_y
<= job
->draw_min_y
) {
383 if (cl_offset(&job
->bcl
) > 0) {
384 vc5_cl_ensure_space_with_branch(&job
->bcl
,
386 cl_packet_length(OCCLUSION_QUERY_COUNTER
));
388 if (job
->oq_enabled
) {
389 /* Disable the OQ at the end of the CL, so that the
390 * draw calls at the start of the CL don't inherit the
393 cl_emit(&job
->bcl
, OCCLUSION_QUERY_COUNTER
, counter
);
396 /* Increment the semaphore indicating that binning is done and
397 * unblocking the render thread. Note that this doesn't act
398 * until the FLUSH completes.
400 cl_emit(&job
->bcl
, INCREMENT_SEMAPHORE
, incr
);
402 /* The FLUSH_ALL emits any unwritten state changes in each
403 * tile. We can use this to reset any state that needs to be
404 * present at the start of the next tile, as we do with
405 * OCCLUSION_QUERY_COUNTER above.
407 cl_emit(&job
->bcl
, FLUSH_ALL_STATE
, flush
);
410 job
->submit
.bcl_end
= job
->bcl
.bo
->offset
+ cl_offset(&job
->bcl
);
411 job
->submit
.rcl_end
= job
->rcl
.bo
->offset
+ cl_offset(&job
->rcl
);
413 vc5_clif_dump(vc5
, job
);
415 if (!(V3D_DEBUG
& V3D_DEBUG_NORAST
)) {
418 #ifndef USE_VC5_SIMULATOR
419 ret
= drmIoctl(vc5
->fd
, DRM_IOCTL_VC5_SUBMIT_CL
, &job
->submit
);
421 ret
= vc5_simulator_flush(vc5
, &job
->submit
, job
);
423 static bool warned
= false;
424 if (ret
&& !warned
) {
425 fprintf(stderr
, "Draw call returned %s. "
426 "Expect corruption.\n", strerror(errno
));
431 if (vc5
->last_emit_seqno
- vc5
->screen
->finished_seqno
> 5) {
432 if (!vc5_wait_seqno(vc5
->screen
,
433 vc5
->last_emit_seqno
- 5,
434 PIPE_TIMEOUT_INFINITE
,
436 fprintf(stderr
, "Job throttling failed\n");
441 vc5_job_free(vc5
, job
);
445 vc5_job_compare(const void *a
, const void *b
)
447 return memcmp(a
, b
, sizeof(struct vc5_job_key
)) == 0;
451 vc5_job_hash(const void *key
)
453 return _mesa_hash_data(key
, sizeof(struct vc5_job_key
));
457 vc5_job_init(struct vc5_context
*vc5
)
459 vc5
->jobs
= _mesa_hash_table_create(vc5
,
462 vc5
->write_jobs
= _mesa_hash_table_create(vc5
,
464 _mesa_key_pointer_equal
);