#include "util/u_debug.h"
#include "util/u_memory.h"
-#include "util/u_format.h"
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_s3tc.h"
#include "util/u_video.h"
#include "util/u_screen.h"
#include "util/os_time.h"
#include "pan_context.h"
#include "midgard/midgard_compile.h"
+#include "panfrost-quirks.h"
static const struct debug_named_value debug_options[] = {
{"msgs", PAN_DBG_MSGS, "Print debug messages"},
{"trace", PAN_DBG_TRACE, "Trace the command stream"},
{"deqp", PAN_DBG_DEQP, "Hacks for dEQP"},
{"afbc", PAN_DBG_AFBC, "Enable non-conformant AFBC impl"},
+ {"sync", PAN_DBG_SYNC, "Wait for each job's completion and check for any GPU fault"},
+ {"precompile", PAN_DBG_PRECOMPILE, "Precompile shaders for shader-db"},
DEBUG_NAMED_VALUE_END
};
static const char *
panfrost_get_name(struct pipe_screen *screen)
{
- return "panfrost";
+ return panfrost_model_name(pan_screen(screen)->gpu_id);
}
static const char *
panfrost_get_vendor(struct pipe_screen *screen)
{
- return "panfrost";
+ return "Panfrost";
}
static const char *
case PIPE_CAP_MAX_RENDER_TARGETS:
return is_deqp ? 4 : 1;
+ /* Throttling frames breaks pipelining */
+ case PIPE_CAP_THROTTLE:
+ return 0;
case PIPE_CAP_OCCLUSION_QUERY:
return 1;
case PIPE_CAP_TEXTURE_SWIZZLE:
return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
+ return 1;
+
case PIPE_CAP_TGSI_INSTANCEID:
case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
- return is_deqp ? 1 : 0;
+ return 1;
case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
return is_deqp ? 4 : 0;
return 1;
case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
- return is_deqp ? 256 : 0; /* for GL3 */
+ return 256;
case PIPE_CAP_GLSL_FEATURE_LEVEL:
case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
return is_deqp ? 300 : 120;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
- return is_deqp ? 16 : 0;
+ return 16;
case PIPE_CAP_CUBE_MAP_ARRAY:
return is_deqp;
case PIPE_CAP_MAX_VARYINGS:
return 16;
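+ /* Returning 0 for these asks the state tracker to lower alpha test,
+ * flat shading, two-sided color and user clip planes in the shader
+ * rather than relying on fixed-function hardware */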
+ case PIPE_CAP_ALPHA_TEST:
+ case PIPE_CAP_FLATSHADE:
+ case PIPE_CAP_TWO_SIDED_COLOR:
+ case PIPE_CAP_CLIP_PLANES:
+ return 0;
+
default:
return u_pipe_screen_get_param_defaults(screen, param);
}
/* this is probably not totally correct.. but it's a start: */
switch (param) {
- case PIPE_SHADER_CAP_SCALAR_ISA:
- return 0;
-
case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
return 16;
case PIPE_SHADER_CAP_MAX_OUTPUTS:
- return shader == PIPE_SHADER_FRAGMENT ? 4 : 8;
+ return shader == PIPE_SHADER_FRAGMENT ? 4 : 16;
case PIPE_SHADER_CAP_MAX_TEMPS:
return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */
return PIPE_SHADER_IR_NIR;
case PIPE_SHADER_CAP_SUPPORTED_IRS:
- return (1 << PIPE_SHADER_IR_NIR);
+ return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_NIR_SERIALIZED);
case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
return 32;
if (!format_desc)
return false;
- if (sample_count > 1)
+ /* MSAA 4x is supported, but no more. Technically some revisions of the
+ * hardware can go up to 16x, but we don't support higher modes yet.
+ * Multisampling is only exposed under the dEQP flag while it remains
+ * non-conformant. */
+
+ if (sample_count > 1 && !(pan_debug & PAN_DBG_DEQP))
+ return false;
+
+ if (sample_count > 4)
+ return false;
+
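+ /* Mixed sample counts (storage_sample_count != sample_count) are not
+ * supported */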
+ if (MAX2(sample_count, 1) != MAX2(storage_sample_count, 1))
return false;
/* Format wishlist */
if (scanout && renderable && !util_format_is_rgba8_variant(format_desc))
return false;
- if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN &&
- format_desc->layout != UTIL_FORMAT_LAYOUT_OTHER) {
- /* Compressed formats not yet hooked up. */
- return false;
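+ /* ETC is decoded natively by the hardware; other compressed layouts
+ * are not hooked up yet */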
+ switch (format_desc->layout) {
+ case UTIL_FORMAT_LAYOUT_PLAIN:
+ case UTIL_FORMAT_LAYOUT_OTHER:
+ break;
+ case UTIL_FORMAT_LAYOUT_ETC:
+ return true;
+ default:
+ return false;
}
/* Internally, formats that are depth/stencil renderable are limited.
switch (param) {
case PIPE_COMPUTE_CAP_ADDRESS_BITS:
- /* TODO: We'll want 64-bit pointers soon */
- RET((uint32_t []){ 32 });
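+ /* GPU pointers are 64-bit on all supported Midgard hardware */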
+ RET((uint32_t []){ 64 });
case PIPE_COMPUTE_CAP_IR_TARGET:
if (ret)
{
struct panfrost_screen *screen = pan_screen(pscreen);
panfrost_bo_cache_evict_all(screen);
- pthread_mutex_destroy(&screen->bo_cache_lock);
+ pthread_mutex_destroy(&screen->bo_cache.lock);
+ pthread_mutex_destroy(&screen->active_bos_lock);
drmFreeVersion(screen->kernel_version);
ralloc_free(screen);
}
struct panfrost_fence *old = *p;
if (pipe_reference(&(*p)->reference, &f->reference)) {
- close(old->fd);
+ util_dynarray_foreach(&old->syncfds, int, fd)
+ close(*fd);
+ util_dynarray_fini(&old->syncfds);
free(old);
}
*p = f;
{
struct panfrost_screen *screen = pan_screen(pscreen);
struct panfrost_fence *f = (struct panfrost_fence *)fence;
+ struct util_dynarray syncobjs;
int ret;
- unsigned syncobj;
- /* The fence was already signaled */
- if (f->fd == -1)
+ /* All fences were already signaled */
+ if (!util_dynarray_num_elements(&f->syncfds, int))
return true;
- ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
- if (ret) {
- fprintf(stderr, "Failed to create syncobj to wait on: %m\n");
- return false;
- }
+ util_dynarray_init(&syncobjs, NULL);
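+ /* Import each sync file into a temporary syncobj so the whole set can
+ * be waited on with a single drmSyncobjWait() call */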
+ util_dynarray_foreach(&f->syncfds, int, fd) {
+ uint32_t syncobj;
- ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
- if (ret) {
- fprintf(stderr, "Failed to import fence to syncobj: %m\n");
- return false;
+ ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
+ assert(!ret);
+
+ ret = drmSyncobjImportSyncFile(screen->fd, syncobj, *fd);
+ assert(!ret);
+ util_dynarray_append(&syncobjs, uint32_t, syncobj);
}
uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
if (abs_timeout == OS_TIMEOUT_INFINITE)
abs_timeout = INT64_MAX;
- ret = drmSyncobjWait(screen->fd, &syncobj, 1, abs_timeout, 0, NULL);
+ ret = drmSyncobjWait(screen->fd, util_dynarray_begin(&syncobjs),
+ util_dynarray_num_elements(&syncobjs, uint32_t),
+ abs_timeout, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+ NULL);
- drmSyncobjDestroy(screen->fd, syncobj);
+ util_dynarray_foreach(&syncobjs, uint32_t, syncobj)
+ drmSyncobjDestroy(screen->fd, *syncobj);
return ret >= 0;
}
struct panfrost_fence *
-panfrost_fence_create(struct panfrost_context *ctx)
+panfrost_fence_create(struct panfrost_context *ctx,
+ struct util_dynarray *fences)
{
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
struct panfrost_fence *f = calloc(1, sizeof(*f));
if (!f)
return NULL;
- f->fd = -1;
-
- /* There was no job flushed yet or the batch fence was already
- * signaled, let's return a dummy fence object that returns true
- * directly when ->fence_finish() is called.
- */
- if (!ctx->last_out_sync || ctx->last_out_sync->signaled)
- goto out;
-
- /* Snapshot the last Panfrost's rendering's out fence. We'd rather have
- * another syncobj instead of a sync file, but this is all we get.
- * (HandleToFD/FDToHandle just gives you another syncobj ID for the
- * same syncobj).
- */
- drmSyncobjExportSyncFile(screen->fd, ctx->last_out_sync->syncobj, &f->fd);
- if (f->fd == -1) {
- fprintf(stderr, "export failed: %m\n");
- free(f);
- return NULL;
+ util_dynarray_init(&f->syncfds, NULL);
+
+ /* Export fences from all pending batches. We'd rather hold syncobjs
+ * than sync files, but exporting a sync file is all we get to snapshot
+ * a batch fence's current state (HandleToFD/FDToHandle just give
+ * another handle to the same syncobj). */
+ util_dynarray_foreach(fences, struct panfrost_batch_fence *, fence) {
+ int fd = -1;
+
+ /* The fence is already signaled, no need to export it. */
+ if ((*fence)->signaled)
+ continue;
+
+ drmSyncobjExportSyncFile(screen->fd, (*fence)->syncobj, &fd);
+ if (fd == -1)
+ fprintf(stderr, "export failed: %m\n");
+
+ assert(fd != -1);
+ util_dynarray_append(&f->syncfds, int, fd);
}
-out:
pipe_reference_init(&f->reference, 1);
return f;
return &midgard_nir_options;
}
-static unsigned
-panfrost_query_gpu_version(struct panfrost_screen *screen)
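+/* Active BOs are tracked in a set keyed by GEM handle, which uniquely
+ * identifies a BO within the screen's DRM fd */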
+static uint32_t
+panfrost_active_bos_hash(const void *key)
{
- struct drm_panfrost_get_param get_param = {0,};
- ASSERTED int ret;
+ const struct panfrost_bo *bo = key;
- get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
- assert(!ret);
+ return _mesa_hash_data(&bo->gem_handle, sizeof(bo->gem_handle));
+}
- return get_param.value;
+static bool
+panfrost_active_bos_cmp(const void *keya, const void *keyb)
+{
+ const struct panfrost_bo *a = keya, *b = keyb;
+
+ return a->gem_handle == b->gem_handle;
}
struct pipe_screen *
screen->fd = fd;
- screen->gpu_id = panfrost_query_gpu_version(screen);
- screen->require_sfbd = screen->gpu_id < 0x0750; /* T760 is the first to support MFBD */
+ screen->gpu_id = panfrost_query_gpu_version(screen->fd);
+ screen->core_count = panfrost_query_core_count(screen->fd);
+ screen->thread_tls_alloc = panfrost_query_thread_tls_alloc(screen->fd);
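+ /* Model-specific quirks (e.g. SFBD-only framebuffers before T760) are
+ * derived from the GPU ID */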
+ screen->quirks = panfrost_get_quirks(screen->gpu_id);
screen->kernel_version = drmGetVersion(fd);
/* Check if we're loading against a supported GPU model. */
switch (screen->gpu_id) {
+ case 0x720: /* T720 */
case 0x750: /* T760 */
case 0x820: /* T820 */
case 0x860: /* T860 */
break;
default:
/* Fail to load against untested models */
- debug_printf("panfrost: Unsupported model %X",
- screen->gpu_id);
+ debug_printf("panfrost: Unsupported model %X\n", screen->gpu_id);
return NULL;
}
- pthread_mutex_init(&screen->bo_cache_lock, NULL);
- for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache); ++i)
- list_inithead(&screen->bo_cache[i]);
+ pthread_mutex_init(&screen->active_bos_lock, NULL);
+ screen->active_bos = _mesa_set_create(screen, panfrost_active_bos_hash,
+ panfrost_active_bos_cmp);
+
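+ /* The BO cache holds freed BOs in per-size buckets, with a global LRU
+ * list used to evict the oldest entries */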
+ pthread_mutex_init(&screen->bo_cache.lock, NULL);
+ list_inithead(&screen->bo_cache.lru);
+ for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache.buckets); ++i)
+ list_inithead(&screen->bo_cache.buckets[i]);
if (pan_debug & PAN_DBG_TRACE)
pandecode_initialize();