/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #include "freedreno_context.h"
28 #include "freedreno_blitter.h"
29 #include "freedreno_draw.h"
30 #include "freedreno_fence.h"
31 #include "freedreno_log.h"
32 #include "freedreno_program.h"
33 #include "freedreno_resource.h"
34 #include "freedreno_texture.h"
35 #include "freedreno_state.h"
36 #include "freedreno_gmem.h"
37 #include "freedreno_query.h"
38 #include "freedreno_query_hw.h"
39 #include "freedreno_util.h"
40 #include "util/u_upload_mgr.h"
43 #include "util/u_process.h"
45 #include <sys/types.h>
49 fd_context_flush(struct pipe_context
*pctx
, struct pipe_fence_handle
**fencep
,
52 struct fd_context
*ctx
= fd_context(pctx
);
53 struct pipe_fence_handle
*fence
= NULL
;
54 // TODO we want to lookup batch if it exists, but not create one if not.
55 struct fd_batch
*batch
= fd_context_batch(ctx
);
57 DBG("%p: flush: flags=%x\n", ctx
->batch
, flags
);
59 /* In some sequence of events, we can end up with a last_fence that is
60 * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
63 if ((flags
& PIPE_FLUSH_FENCE_FD
) && ctx
->last_fence
&&
64 !fd_fence_is_fd(ctx
->last_fence
))
65 fd_fence_ref(&ctx
->last_fence
, NULL
);
67 /* if no rendering since last flush, ie. app just decided it needed
68 * a fence, re-use the last one:
70 if (ctx
->last_fence
) {
71 fd_fence_ref(&fence
, ctx
->last_fence
);
72 fd_bc_dump(ctx
->screen
, "%p: reuse last_fence, remaining:\n", ctx
);
77 fd_bc_dump(ctx
->screen
, "%p: NULL batch, remaining:\n", ctx
);
81 /* Take a ref to the batch's fence (batch can be unref'd when flushed: */
82 fd_fence_ref(&fence
, batch
->fence
);
84 if (flags
& PIPE_FLUSH_FENCE_FD
)
85 batch
->needs_out_fence_fd
= true;
87 fd_bc_dump(ctx
->screen
, "%p: flushing %p<%u>, flags=0x%x, pending:\n",
88 ctx
, batch
, batch
->seqno
, flags
);
90 if (!ctx
->screen
->reorder
) {
91 fd_batch_flush(batch
);
92 } else if (flags
& PIPE_FLUSH_DEFERRED
) {
93 fd_bc_flush_deferred(&ctx
->screen
->batch_cache
, ctx
);
95 fd_bc_flush(&ctx
->screen
->batch_cache
, ctx
);
98 fd_bc_dump(ctx
->screen
, "%p: remaining:\n", ctx
);
102 fd_fence_ref(fencep
, fence
);
104 fd_fence_ref(&ctx
->last_fence
, fence
);
106 fd_fence_ref(&fence
, NULL
);
108 if (flags
& PIPE_FLUSH_END_OF_FRAME
)
113 fd_texture_barrier(struct pipe_context
*pctx
, unsigned flags
)
115 if (flags
== PIPE_TEXTURE_BARRIER_FRAMEBUFFER
) {
116 struct fd_context
*ctx
= fd_context(pctx
);
118 if (ctx
->framebuffer_barrier
) {
119 ctx
->framebuffer_barrier(ctx
);
124 /* On devices that could sample from GMEM we could possibly do better.
125 * Or if we knew that we were doing GMEM bypass we could just emit a
126 * cache flush, perhaps? But we don't know if future draws would cause
127 * us to use GMEM, and a flush in bypass isn't the end of the world.
129 fd_context_flush(pctx
, NULL
, 0);
133 fd_memory_barrier(struct pipe_context
*pctx
, unsigned flags
)
135 if (!(flags
& ~PIPE_BARRIER_UPDATE
))
138 fd_context_flush(pctx
, NULL
, 0);
139 /* TODO do we need to check for persistently mapped buffers and fd_bo_cpu_prep()?? */
/**
 * Emit the payload of a string marker: whole dwords first, then any
 * remaining 1-3 bytes zero-padded into a final dword.
 *
 * NOTE(review): assumes the packet header sized for align(len, 4)/4
 * dwords has already been emitted by the caller.
 */
static void
emit_string_tail(struct fd_ringbuffer *ring, const char *string, int len)
{
	const uint32_t *buf = (const void *)string;

	while (len >= 4) {
		OUT_RING(ring, *buf);
		buf++;
		len -= 4;
	}

	/* copy remainder bytes without reading past end of input string: */
	if (len > 0) {
		uint32_t w = 0;
		memcpy(&w, buf, len);
		OUT_RING(ring, w);
	}
}
161 /* for prior to a5xx: */
163 fd_emit_string(struct fd_ringbuffer
*ring
,
164 const char *string
, int len
)
166 /* max packet size is 0x3fff+1 dwords: */
167 len
= MIN2(len
, 0x4000 * 4);
169 OUT_PKT3(ring
, CP_NOP
, align(len
, 4) / 4);
170 emit_string_tail(ring
, string
, len
);
175 fd_emit_string5(struct fd_ringbuffer
*ring
,
176 const char *string
, int len
)
178 /* max packet size is 0x3fff dwords: */
179 len
= MIN2(len
, 0x3fff * 4);
181 OUT_PKT7(ring
, CP_NOP
, align(len
, 4) / 4);
182 emit_string_tail(ring
, string
, len
);
186 * emit marker string as payload of a no-op packet, which can be
187 * decoded by cffdump.
190 fd_emit_string_marker(struct pipe_context
*pctx
, const char *string
, int len
)
192 struct fd_context
*ctx
= fd_context(pctx
);
197 ctx
->batch
->needs_flush
= true;
199 if (ctx
->screen
->gpu_id
>= 500) {
200 fd_emit_string5(ctx
->batch
->draw
, string
, len
);
202 fd_emit_string(ctx
->batch
->draw
, string
, len
);
207 fd_context_destroy(struct pipe_context
*pctx
)
209 struct fd_context
*ctx
= fd_context(pctx
);
214 fd_screen_lock(ctx
->screen
);
215 list_del(&ctx
->node
);
216 fd_screen_unlock(ctx
->screen
);
218 fd_log_process(ctx
, true);
219 assert(list_is_empty(&ctx
->log_chunks
));
221 fd_fence_ref(&ctx
->last_fence
, NULL
);
223 util_copy_framebuffer_state(&ctx
->framebuffer
, NULL
);
224 fd_batch_reference(&ctx
->batch
, NULL
); /* unref current batch */
225 fd_bc_invalidate_context(ctx
);
230 util_blitter_destroy(ctx
->blitter
);
232 if (pctx
->stream_uploader
)
233 u_upload_destroy(pctx
->stream_uploader
);
235 if (ctx
->clear_rs_state
)
236 pctx
->delete_rasterizer_state(pctx
, ctx
->clear_rs_state
);
238 if (ctx
->primconvert
)
239 util_primconvert_destroy(ctx
->primconvert
);
241 slab_destroy_child(&ctx
->transfer_pool
);
243 for (i
= 0; i
< ARRAY_SIZE(ctx
->vsc_pipe_bo
); i
++) {
244 if (!ctx
->vsc_pipe_bo
[i
])
246 fd_bo_del(ctx
->vsc_pipe_bo
[i
]);
249 fd_device_del(ctx
->dev
);
250 fd_pipe_del(ctx
->pipe
);
252 mtx_destroy(&ctx
->gmem_lock
);
254 if (fd_mesa_debug
& (FD_DBG_BSTAT
| FD_DBG_MSGS
)) {
255 printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n",
256 (uint32_t)ctx
->stats
.batch_total
, (uint32_t)ctx
->stats
.batch_sysmem
,
257 (uint32_t)ctx
->stats
.batch_gmem
, (uint32_t)ctx
->stats
.batch_nondraw
,
258 (uint32_t)ctx
->stats
.batch_restore
);
263 fd_set_debug_callback(struct pipe_context
*pctx
,
264 const struct pipe_debug_callback
*cb
)
266 struct fd_context
*ctx
= fd_context(pctx
);
271 memset(&ctx
->debug
, 0, sizeof(ctx
->debug
));
275 fd_get_reset_count(struct fd_context
*ctx
, bool per_context
)
278 enum fd_param_id param
=
279 per_context
? FD_CTX_FAULTS
: FD_GLOBAL_FAULTS
;
280 int ret
= fd_pipe_get_param(ctx
->pipe
, param
, &val
);
285 static enum pipe_reset_status
286 fd_get_device_reset_status(struct pipe_context
*pctx
)
288 struct fd_context
*ctx
= fd_context(pctx
);
289 int context_faults
= fd_get_reset_count(ctx
, true);
290 int global_faults
= fd_get_reset_count(ctx
, false);
291 enum pipe_reset_status status
;
293 if (context_faults
!= ctx
->context_reset_count
) {
294 status
= PIPE_GUILTY_CONTEXT_RESET
;
295 } else if (global_faults
!= ctx
->global_reset_count
) {
296 status
= PIPE_INNOCENT_CONTEXT_RESET
;
298 status
= PIPE_NO_RESET
;
301 ctx
->context_reset_count
= context_faults
;
302 ctx
->global_reset_count
= global_faults
;
307 /* TODO we could combine a few of these small buffers (solid_vbuf,
308 * blit_texcoord_vbuf, and vsc_size_mem, into a single buffer and
309 * save a tiny bit of memory
312 static struct pipe_resource
*
313 create_solid_vertexbuf(struct pipe_context
*pctx
)
315 static const float init_shader_const
[] = {
316 -1.000000, +1.000000, +1.000000,
317 +1.000000, -1.000000, +1.000000,
319 struct pipe_resource
*prsc
= pipe_buffer_create(pctx
->screen
,
320 PIPE_BIND_CUSTOM
, PIPE_USAGE_IMMUTABLE
, sizeof(init_shader_const
));
321 pipe_buffer_write(pctx
, prsc
, 0,
322 sizeof(init_shader_const
), init_shader_const
);
326 static struct pipe_resource
*
327 create_blit_texcoord_vertexbuf(struct pipe_context
*pctx
)
329 struct pipe_resource
*prsc
= pipe_buffer_create(pctx
->screen
,
330 PIPE_BIND_CUSTOM
, PIPE_USAGE_DYNAMIC
, 16);
335 fd_context_setup_common_vbos(struct fd_context
*ctx
)
337 struct pipe_context
*pctx
= &ctx
->base
;
339 ctx
->solid_vbuf
= create_solid_vertexbuf(pctx
);
340 ctx
->blit_texcoord_vbuf
= create_blit_texcoord_vertexbuf(pctx
);
342 /* setup solid_vbuf_state: */
343 ctx
->solid_vbuf_state
.vtx
= pctx
->create_vertex_elements_state(
344 pctx
, 1, (struct pipe_vertex_element
[]){{
345 .vertex_buffer_index
= 0,
347 .src_format
= PIPE_FORMAT_R32G32B32_FLOAT
,
349 ctx
->solid_vbuf_state
.vertexbuf
.count
= 1;
350 ctx
->solid_vbuf_state
.vertexbuf
.vb
[0].stride
= 12;
351 ctx
->solid_vbuf_state
.vertexbuf
.vb
[0].buffer
.resource
= ctx
->solid_vbuf
;
353 /* setup blit_vbuf_state: */
354 ctx
->blit_vbuf_state
.vtx
= pctx
->create_vertex_elements_state(
355 pctx
, 2, (struct pipe_vertex_element
[]){{
356 .vertex_buffer_index
= 0,
358 .src_format
= PIPE_FORMAT_R32G32_FLOAT
,
360 .vertex_buffer_index
= 1,
362 .src_format
= PIPE_FORMAT_R32G32B32_FLOAT
,
364 ctx
->blit_vbuf_state
.vertexbuf
.count
= 2;
365 ctx
->blit_vbuf_state
.vertexbuf
.vb
[0].stride
= 8;
366 ctx
->blit_vbuf_state
.vertexbuf
.vb
[0].buffer
.resource
= ctx
->blit_texcoord_vbuf
;
367 ctx
->blit_vbuf_state
.vertexbuf
.vb
[1].stride
= 12;
368 ctx
->blit_vbuf_state
.vertexbuf
.vb
[1].buffer
.resource
= ctx
->solid_vbuf
;
372 fd_context_cleanup_common_vbos(struct fd_context
*ctx
)
374 struct pipe_context
*pctx
= &ctx
->base
;
376 pctx
->delete_vertex_elements_state(pctx
, ctx
->solid_vbuf_state
.vtx
);
377 pctx
->delete_vertex_elements_state(pctx
, ctx
->blit_vbuf_state
.vtx
);
379 pipe_resource_reference(&ctx
->solid_vbuf
, NULL
);
380 pipe_resource_reference(&ctx
->blit_texcoord_vbuf
, NULL
);
383 struct pipe_context
*
384 fd_context_init(struct fd_context
*ctx
, struct pipe_screen
*pscreen
,
385 const uint8_t *primtypes
, void *priv
, unsigned flags
)
387 struct fd_screen
*screen
= fd_screen(pscreen
);
388 struct pipe_context
*pctx
;
392 /* lower numerical value == higher priority: */
393 if (fd_mesa_debug
& FD_DBG_HIPRIO
)
395 else if (flags
& PIPE_CONTEXT_HIGH_PRIORITY
)
397 else if (flags
& PIPE_CONTEXT_LOW_PRIORITY
)
400 ctx
->screen
= screen
;
401 ctx
->pipe
= fd_pipe_new2(screen
->dev
, FD_PIPE_3D
, prio
);
403 if (fd_device_version(screen
->dev
) >= FD_VERSION_ROBUSTNESS
) {
404 ctx
->context_reset_count
= fd_get_reset_count(ctx
, true);
405 ctx
->global_reset_count
= fd_get_reset_count(ctx
, false);
408 ctx
->primtypes
= primtypes
;
409 ctx
->primtype_mask
= 0;
410 for (i
= 0; i
< PIPE_PRIM_MAX
; i
++)
412 ctx
->primtype_mask
|= (1 << i
);
414 (void) mtx_init(&ctx
->gmem_lock
, mtx_plain
);
416 /* need some sane default in case gallium frontends don't
419 ctx
->sample_mask
= 0xffff;
420 ctx
->active_queries
= true;
423 pctx
->screen
= pscreen
;
425 pctx
->flush
= fd_context_flush
;
426 pctx
->emit_string_marker
= fd_emit_string_marker
;
427 pctx
->set_debug_callback
= fd_set_debug_callback
;
428 pctx
->get_device_reset_status
= fd_get_device_reset_status
;
429 pctx
->create_fence_fd
= fd_create_fence_fd
;
430 pctx
->fence_server_sync
= fd_fence_server_sync
;
431 pctx
->fence_server_signal
= fd_fence_server_signal
;
432 pctx
->texture_barrier
= fd_texture_barrier
;
433 pctx
->memory_barrier
= fd_memory_barrier
;
435 pctx
->stream_uploader
= u_upload_create_default(pctx
);
436 if (!pctx
->stream_uploader
)
438 pctx
->const_uploader
= pctx
->stream_uploader
;
440 slab_create_child(&ctx
->transfer_pool
, &screen
->transfer_pool
);
443 fd_resource_context_init(pctx
);
444 fd_query_context_init(pctx
);
445 fd_texture_init(pctx
);
448 ctx
->blitter
= util_blitter_create(pctx
);
452 ctx
->primconvert
= util_primconvert_create(pctx
, ctx
->primtype_mask
);
453 if (!ctx
->primconvert
)
456 list_inithead(&ctx
->hw_active_queries
);
457 list_inithead(&ctx
->acc_active_queries
);
458 list_inithead(&ctx
->log_chunks
);
460 fd_screen_lock(ctx
->screen
);
461 list_add(&ctx
->node
, &ctx
->screen
->context_list
);
462 fd_screen_unlock(ctx
->screen
);
464 ctx
->current_scissor
= &ctx
->disabled_scissor
;
466 ctx
->log_out
= stdout
;
468 if ((fd_mesa_debug
& FD_DBG_LOG
) &&
469 !(ctx
->record_timestamp
&& ctx
->ts_to_ns
)) {
470 printf("logging not supported!\n");
471 fd_mesa_debug
&= ~FD_DBG_LOG
;
474 #if DETECT_OS_ANDROID
475 if (fd_mesa_debug
& FD_DBG_LOG
) {
476 static unsigned idx
= 0;
478 asprintf(&p
, "/data/fdlog/%s-%d.log", util_get_process_name(), idx
++);
480 FILE *f
= fopen(p
, "w");