case nir_type_uint16:
return BIFROST_BLEND_U16;
default:
- DBG("Unsupported blend shader type for NIR alu type %d", nir_type);
- assert(0);
+ unreachable("Unsupported blend shader type for NIR alu type");
return 0;
}
}
bifrost_compile_shader_nir(s, &program, dev->gpu_id);
} else {
midgard_compile_shader_nir(s, &program, false, 0, dev->gpu_id,
- pan_debug & PAN_DBG_PRECOMPILE);
+ dev->debug & PAN_DBG_PRECOMPILE);
}
/* Prepare the compiled binary for upload */
const struct pipe_blit_info *info)
{
struct panfrost_context *ctx = pan_context(pipe);
+ struct panfrost_device *dev = pan_device(pipe->screen);
if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
- DBG("blit unsupported %s -> %s\n",
- util_format_short_name(info->src.resource->format),
- util_format_short_name(info->dst.resource->format));
+ if (dev->debug & PAN_DBG_MSGS) {
+ fprintf(stderr, "blit unsupported %s -> %s\n",
+ util_format_short_name(info->src.resource->format),
+ util_format_short_name(info->dst.resource->format));
+ }
+
return false;
}
unsigned flags)
{
struct panfrost_context *ctx = pan_context(pipe);
+ struct panfrost_device *dev = pan_device(pipe->screen);
struct util_dynarray fences;
/* We must collect the fences before the flush is done, otherwise we'll
util_dynarray_fini(&fences);
}
- if (pan_debug & PAN_DBG_TRACE)
+ if (dev->debug & PAN_DBG_TRACE)
pandecode_next_frame();
}
enum pipe_shader_type stage)
{
struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
+ struct panfrost_device *dev = pan_device(pctx->screen);
so->base = *cso;
/* Token deep copy to prevent memory corruption */
so->base.tokens = tgsi_dup_tokens(so->base.tokens);
/* Precompile for shader-db if we need to */
- if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
+ if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_shader_state state;
struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
if (cso->base.type == PIPE_SHADER_IR_TGSI) {
- DBG("Deleting TGSI shader leaks duplicated tokens\n");
+ /* TODO: leaks TGSI tokens! */
}
for (unsigned i = 0; i < cso->variant_count; ++i) {
const struct pipe_framebuffer_state *fb)
{
/* AFBC implementation incomplete; hide it */
- if (!(pan_debug & PAN_DBG_AFBC)) return;
+ if (!(device->debug & PAN_DBG_AFBC)) return;
/* Hint AFBC to the resources bound to each color buffer */
break;
default:
- DBG("Skipping query %u\n", query->type);
+ /* TODO: timestamp queries, etc? */
break;
}
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
- DBG("Flushing for occlusion query\n");
panfrost_flush_batches_accessing_bo(ctx, query->bo, PAN_BO_ACCESS_WRITE);
panfrost_bo_wait(query->bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
- DBG("Flushing for primitive query\n");
panfrost_flush_all_batches(ctx, true);
vresult->u64 = query->end - query->start;
break;
default:
- DBG("Skipped query get %u\n", query->type);
+ /* TODO: more queries */
break;
}
free(in_syncs);
if (ret) {
- DBG("Error submitting: %m\n");
+ if (dev->debug & PAN_DBG_MSGS)
+ fprintf(stderr, "Error submitting: %m\n");
+
return errno;
}
/* Trace the job if we're doing that */
- if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
/* Wait so we can get errors reported back */
drmSyncobjWait(dev->fd, &batch->out_sync->syncobj, 1,
INT64_MAX, 0, NULL);
/* Trace gets priority over sync */
- bool minimal = !(pan_debug & PAN_DBG_TRACE);
+ bool minimal = !(dev->debug & PAN_DBG_TRACE);
pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
}
panfrost_batch_submit(struct panfrost_batch *batch)
{
assert(batch);
+ struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
/* Submit the dependencies first. */
util_dynarray_foreach(&batch->dependencies,
ret = panfrost_batch_submit_jobs(batch);
- if (ret)
- DBG("panfrost_batch_submit failed: %d\n", ret);
+ if (ret && (dev->debug & PAN_DBG_MSGS))
+ fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
/* We must reset the damage info of our render targets here even
* though a damage reset normally happens when the DRI layer swaps
{
struct panfrost_bo *bo = panfrost_bo_create(dev, size, flags);
- if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
if (flags & PAN_BO_INVISIBLE)
pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
else if (!(flags & PAN_BO_DELAY_MMAP))
static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static struct pipe_surface *
case PIPE_TEXTURE_2D_ARRAY:
break;
default:
- DBG("Unknown texture target %d\n", template->target);
- assert(0);
+ unreachable("Unknown texture target\n");
}
if (dev->ro && (template->bind &
struct pipe_transfer **out_transfer)
{
struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_device *dev = pan_device(pctx->screen);
struct panfrost_resource *rsrc = pan_resource(resource);
int bytes_per_pixel = util_format_get_blocksize(rsrc->internal_format);
struct panfrost_bo *bo = rsrc->bo;
/* If we haven't already mmaped, now's the time */
panfrost_bo_mmap(bo);
- if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
bool create_new_bo = usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
*/
if (panfrost_pending_batches_access_bo(ctx, bo) ||
!panfrost_bo_wait(bo, 0, PAN_BO_ACCESS_RW)) {
- struct panfrost_device *dev = pan_device(pctx->screen);
/* We want the BO to be MMAPed. */
uint32_t flags = bo->flags & ~PAN_BO_DELAY_MMAP;
struct panfrost_bo *newbo = NULL;
if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
if (rsrc->layout == MALI_TEXTURE_AFBC) {
- DBG("Unimplemented: reads from AFBC");
+ unreachable("Unimplemented: reads from AFBC");
} else if (rsrc->layout == MALI_TEXTURE_TILED) {
panfrost_load_tiled_image(
transfer->map,
if (transfer->usage & PIPE_TRANSFER_WRITE) {
if (prsrc->layout == MALI_TEXTURE_AFBC) {
- DBG("Unimplemented: writes to AFBC\n");
+ unreachable("Unimplemented: writes to AFBC\n");
} else if (prsrc->layout == MALI_TEXTURE_TILED) {
assert(transfer->box.depth == 1);
static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static enum pipe_format
DEBUG_NAMED_VALUE_END
};
-DEBUG_GET_ONCE_FLAGS_OPTION(pan_debug, "PAN_MESA_DEBUG", debug_options, 0)
-
-int pan_debug = 0;
-
static const char *
panfrost_get_name(struct pipe_screen *screen)
{
panfrost_get_param(struct pipe_screen *screen, enum pipe_cap param)
{
/* We expose in-dev stuff for dEQP that we don't want apps to use yet */
- bool is_deqp = pan_debug & PAN_DBG_DEQP;
struct panfrost_device *dev = pan_device(screen);
+ bool is_deqp = dev->debug & PAN_DBG_DEQP;
/* Our GL 3.x implementation is WIP */
- bool is_gl3 = pan_debug & PAN_DBG_GL3;
+ bool is_gl3 = dev->debug & PAN_DBG_GL3;
is_gl3 |= is_deqp;
/* Same with GLES 3 */
- bool is_gles3 = pan_debug & PAN_DBG_GLES3;
+ bool is_gles3 = dev->debug & PAN_DBG_GLES3;
is_gles3 |= is_gl3;
switch (param) {
enum pipe_shader_type shader,
enum pipe_shader_cap param)
{
- bool is_deqp = pan_debug & PAN_DBG_DEQP;
- bool is_fp16 = pan_debug & PAN_DBG_FP16;
struct panfrost_device *dev = pan_device(screen);
+ bool is_deqp = dev->debug & PAN_DBG_DEQP;
+ bool is_fp16 = dev->debug & PAN_DBG_FP16;
if (shader != PIPE_SHADER_VERTEX &&
shader != PIPE_SHADER_FRAGMENT &&
return 0;
default:
- DBG("unknown shader param %d\n", param);
+ /* Other params are unknown */
return 0;
}
unsigned storage_sample_count,
unsigned bind)
{
+ struct panfrost_device *dev = pan_device(screen);
const struct util_format_description *format_desc;
assert(target == PIPE_BUFFER ||
if (scanout && renderable && !util_format_is_rgba8_variant(format_desc))
return false;
- if (pan_debug & (PAN_DBG_GL3 | PAN_DBG_DEQP)) {
+ if (dev->debug & (PAN_DBG_GL3 | PAN_DBG_DEQP)) {
if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC)
return true;
}
panfrost_get_compute_param(struct pipe_screen *pscreen, enum pipe_shader_ir ir_type,
enum pipe_compute_cap param, void *ret)
{
+ struct panfrost_device *dev = pan_device(pscreen);
const char * const ir = "panfrost";
- if (!(pan_debug & PAN_DBG_DEQP))
+ if (!(dev->debug & PAN_DBG_DEQP))
return 0;
#define RET(x) do { \
struct pipe_screen *
panfrost_create_screen(int fd, struct renderonly *ro)
{
- pan_debug = debug_get_option_pan_debug();
-
/* Blacklist apps known to be buggy under Panfrost */
const char *proc = util_get_process_name();
const char *blacklist[] = {
struct panfrost_device *dev = pan_device(&screen->base);
panfrost_open_device(screen, fd, dev);
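+ /* Parse the PAN_MESA_DEBUG environment variable into per-device debug flags */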
+ dev->debug = debug_get_flags_option("PAN_MESA_DEBUG", debug_options, 0);
+
if (ro) {
dev->ro = renderonly_dup(ro);
if (!dev->ro) {
- DBG("Failed to dup renderonly object\n");
+ if (dev->debug & PAN_DBG_MSGS)
+ fprintf(stderr, "Failed to dup renderonly object\n");
+
free(screen);
return NULL;
}
break;
case 0x7093: /* G31 */
case 0x7212: /* G52 */
- if (pan_debug & PAN_DBG_BIFROST)
+ if (dev->debug & PAN_DBG_BIFROST)
break;
/* fallthrough */
return NULL;
}
- if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
- pandecode_initialize(!(pan_debug & PAN_DBG_TRACE));
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
+ pandecode_initialize(!(dev->debug & PAN_DBG_TRACE));
screen->base.destroy = panfrost_destroy_screen;
unsigned thread_tls_alloc;
unsigned quirks;
+ /* Debug flags; see pan_util.h for how to interpret them */
+ unsigned debug;
+
drmVersionPtr kernel_version;
struct renderonly *ro;
#define PAN_DBG_BIFROST 0x0100
#define PAN_DBG_GL3 0x0200
-extern int pan_debug;
-
-#define DBG(fmt, ...) \
- do { if (pan_debug & PAN_DBG_MSGS) \
- fprintf(stderr, "%s:%d: "fmt, \
- __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
-
#endif /* PAN_UTIL_H */