From 04e8c5bb439d7263df96ce32282b729d770a748c Mon Sep 17 00:00:00 2001
From: Kenneth Graunke
Date: Sun, 19 Aug 2018 10:08:05 -0700
Subject: [PATCH] iris: precompute hashes for cache tracking

Saves a touch of CPU overhead in the new resolve tracking.
---
 src/gallium/drivers/iris/iris_bufmgr.c  | 18 ++++++++++++++----
 src/gallium/drivers/iris/iris_bufmgr.h  |  3 +++
 src/gallium/drivers/iris/iris_resolve.c | 20 +++++++++++---------
 3 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 46e41c9085b..ab81f2dbb92 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -481,6 +481,16 @@ iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
    }
 }
 
+static struct iris_bo *
+bo_calloc(void)
+{
+   struct iris_bo *bo = calloc(1, sizeof(*bo));
+   if (bo) {
+      bo->hash = _mesa_hash_pointer(bo);
+   }
+   return bo;
+}
+
 static struct iris_bo *
 bo_alloc_internal(struct iris_bufmgr *bufmgr,
                   const char *name,
@@ -559,7 +569,7 @@ retry:
          bo->gtt_offset = 0ull;
       }
    } else {
-      bo = calloc(1, sizeof(*bo));
+      bo = bo_calloc();
       if (!bo)
          goto err;
 
@@ -656,7 +666,7 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
 {
    struct iris_bo *bo;
 
-   bo = calloc(1, sizeof(*bo));
+   bo = bo_calloc();
    if (!bo)
       return NULL;
 
@@ -744,7 +754,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
       goto out;
    }
 
-   bo = calloc(1, sizeof(*bo));
+   bo = bo_calloc();
    if (!bo)
       goto out;
 
@@ -1338,7 +1348,7 @@ iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
       goto out;
    }
 
-   bo = calloc(1, sizeof(*bo));
+   bo = bo_calloc();
    if (!bo)
       goto out;
 
diff --git a/src/gallium/drivers/iris/iris_bufmgr.h b/src/gallium/drivers/iris/iris_bufmgr.h
index f966b021ea3..8be545cb04b 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.h
+++ b/src/gallium/drivers/iris/iris_bufmgr.h
@@ -187,6 +187,9 @@ struct iris_bo {
     * Boolean of whether this buffer points into user memory
     */
    bool userptr;
+
+   /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
+   uint32_t hash;
 };
 
 #define BO_ALLOC_ZEROED (1<<0)
diff --git a/src/gallium/drivers/iris/iris_resolve.c b/src/gallium/drivers/iris/iris_resolve.c
index 20b25c7b76b..7b68876643b 100644
--- a/src/gallium/drivers/iris/iris_resolve.c
+++ b/src/gallium/drivers/iris/iris_resolve.c
@@ -193,8 +193,8 @@ void
 iris_cache_flush_for_read(struct iris_batch *batch,
                           struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search(batch->cache.render, bo) ||
-       _mesa_set_search(batch->cache.depth, bo))
+   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
+       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 }
 
@@ -210,7 +210,7 @@ iris_cache_flush_for_render(struct iris_batch *batch,
                             enum isl_format format,
                             enum isl_aux_usage aux_usage)
 {
-   if (_mesa_set_search(batch->cache.depth, bo))
+   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 
    /* Check to see if this bo has been used by a previous rendering operation
@@ -236,7 +236,8 @@ iris_cache_flush_for_render(struct iris_batch *batch,
     * and flush on format changes too. We can always relax this later if we
     * find it to be a performance problem.
     */
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
+   struct hash_entry *entry =
+      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
    if (entry && entry->data != format_aux_tuple(format, aux_usage))
       iris_flush_depth_and_render_caches(batch);
 }
@@ -248,7 +249,8 @@ iris_render_cache_add_bo(struct iris_batch *batch,
                          enum isl_aux_usage aux_usage)
 {
 #ifndef NDEBUG
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
+   struct hash_entry *entry =
+      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
    if (entry) {
       /* Otherwise, someone didn't do a flush_for_render and that would be
        * very bad indeed.
@@ -257,20 +259,20 @@ iris_render_cache_add_bo(struct iris_batch *batch,
    }
 #endif
 
-   _mesa_hash_table_insert(batch->cache.render, bo,
-                           format_aux_tuple(format, aux_usage));
+   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
+                                      format_aux_tuple(format, aux_usage));
 }
 
 void
 iris_cache_flush_for_depth(struct iris_batch *batch,
                            struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search(batch->cache.render, bo))
+   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 }
 
 void
 iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
 {
-   _mesa_set_add(batch->cache.depth, bo);
+   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
 }
-- 
2.30.2
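
Note (not part of the patch): below is a minimal sketch of the pattern the change relies on, computing _mesa_hash_pointer() once at allocation and passing the stored value to the *_pre_hashed() variants on every insert and lookup. It assumes it is compiled inside the Mesa tree so that util/hash_table.h is available; the tracked_bo type and tracked_bo_calloc() helper are hypothetical stand-ins for iris_bo and bo_calloc(), used only for illustration.

/* Illustrative only: precompute the key hash once, reuse it for every
 * hash table operation instead of re-hashing the pointer each time. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "util/hash_table.h"

struct tracked_bo {
   uint32_t hash;   /* hashed once at allocation, reused for all lookups */
};

static struct tracked_bo *
tracked_bo_calloc(void)
{
   struct tracked_bo *bo = calloc(1, sizeof(*bo));
   if (bo)
      bo->hash = _mesa_hash_pointer(bo);   /* the BO's identity is its address */
   return bo;
}

int
main(void)
{
   /* Pointer-keyed table, matching the hash function used above. */
   struct hash_table *cache =
      _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   struct tracked_bo *bo = tracked_bo_calloc();

   /* Insert and search with the stored hash; the table skips hashing the key. */
   _mesa_hash_table_insert_pre_hashed(cache, bo->hash, bo, (void *)"some data");

   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(cache, bo->hash, bo);
   printf("found: %s\n", entry ? (const char *)entry->data : "(nothing)");

   _mesa_hash_table_destroy(cache, NULL);
   free(bo);
   return 0;
}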