X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Futil%2Fdisk_cache.c;h=a92d621927a857f262e15ea100db65a513b755f2;hp=17a6b5e0c4f75bf0ac91815453f8f8894b8f9eb1;hb=848e7b947d0d505d54d27780b052e5532c721678;hpb=6a9020f8dcedd7aa7abc3768d429ce17a6e7865a

diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
index 17a6b5e0c4f..a92d621927a 100644
--- a/src/util/disk_cache.c
+++ b/src/util/disk_cache.c
@@ -31,23 +31,27 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include

 #include "zlib.h"

+#ifdef HAVE_ZSTD
+#include "zstd.h"
+#endif
+
 #include "util/crc32.h"
+#include "util/debug.h"
 #include "util/rand_xor.h"
 #include "util/u_atomic.h"
 #include "util/u_queue.h"
 #include "util/mesa-sha1.h"
 #include "util/ralloc.h"
-#include "main/errors.h"
-#include "util/macros.h"
+#include "util/compiler.h"

 #include "disk_cache.h"

@@ -60,9 +64,27 @@
 /* The number of keys that can be stored in the index. */
 #define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)

+/* The cache version should be bumped whenever a change is made to the
+ * structure of cache entries or the index. This will give any 3rd party
+ * applications reading the cache entries a chance to adjust to the changes.
+ *
+ * - The cache version is checked internally when reading a cache entry. If we
+ *   ever have a mismatch we are in big trouble as this means we had a cache
+ *   collision. In case of such an event please check the skies for giant
+ *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
+ *
+ * - There is no strict requirement that cache versions be backwards
+ *   compatible but effort should be taken to limit disruption where possible.
+ */
+#define CACHE_VERSION 1
+
+/* 3 is the recommended level, with 22 as the absolute maximum */
+#define ZSTD_COMPRESSION_LEVEL 3
+
 struct disk_cache {
    /* The path to the cache directory. */
    char *path;
+   bool path_init_failed;

    /* Thread queue for compressing and writing cache entries to disk */
    struct util_queue cache_queue;
@@ -82,6 +104,13 @@ struct disk_cache {

    /* Maximum size of all cached objects (in bytes). */
    uint64_t max_size;
+
+   /* Driver cache keys. */
+   uint8_t *driver_keys_blob;
+   size_t driver_keys_blob_size;
+
+   disk_cache_put_cb blob_put_cb;
+   disk_cache_get_cb blob_get_cb;
 };

 struct disk_cache_put_job {
@@ -96,6 +125,8 @@ struct disk_cache_put_job {

    /* Size of data to be compressed and written. */
    size_t size;
+
+   struct cache_item_metadata cache_item_metadata;
 };

 /* Create a directory named 'path' if it does not already exist.
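The blob_put_cb/blob_get_cb members added above let a loader route cache traffic through caller-provided callbacks (in the style of EGL_ANDROID_blob_cache) instead of the on-disk path. A minimal sketch of such a callback pair follows; the in-memory table, the entry limit and the function names are illustrative assumptions, and only the call shapes (key, key size, value, value size) are taken from the call sites later in this diff — the authoritative typedefs live in disk_cache.h.

/* Hypothetical in-memory blob store standing in for a real
 * EGL_ANDROID_blob_cache style backend.  Not part of this patch. */
#include <stdlib.h>
#include <string.h>

#define DEMO_MAX_ENTRIES 64

struct demo_blob_entry {
   void *key;   size_t key_size;
   void *value; size_t value_size;
};

static struct demo_blob_entry demo_entries[DEMO_MAX_ENTRIES];
static unsigned demo_count;

/* Mirrors how blob_put_cb is invoked later in this diff:
 * blob_put_cb(key, CACHE_KEY_SIZE, data, size). */
static void
demo_blob_put(const void *key, signed long key_size,
              const void *value, signed long value_size)
{
   if (demo_count >= DEMO_MAX_ENTRIES)
      return;

   struct demo_blob_entry *e = &demo_entries[demo_count];
   e->key = malloc(key_size);
   e->value = malloc(value_size);
   if (!e->key || !e->value) {
      free(e->key);
      free(e->value);
      return;
   }
   memcpy(e->key, key, key_size);
   memcpy(e->value, value, value_size);
   e->key_size = key_size;
   e->value_size = value_size;
   demo_count++;
}

/* Mirrors blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size); returns the
 * number of bytes copied, or 0 when the key is not present. */
static signed long
demo_blob_get(const void *key, signed long key_size,
              void *value, signed long value_size)
{
   for (unsigned i = 0; i < demo_count; i++) {
      if (demo_entries[i].key_size == (size_t)key_size &&
          memcmp(demo_entries[i].key, key, key_size) == 0) {
         size_t n = demo_entries[i].value_size;
         if (n > (size_t)value_size)
            n = (size_t)value_size;
         memcpy(value, demo_entries[i].value, n);
         return (signed long)n;
      }
   }
   return 0;
}

Registering such a pair through disk_cache_set_callbacks() (added at the end of this patch) makes disk_cache_put()/disk_cache_get() bypass the filesystem entirely, as the changes to those functions below show.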
@@ -158,75 +189,15 @@ concatenate_and_mkdir(void *ctx, const char *path, const char *name)
    return NULL;
 }

-static int
-remove_dir(const char *fpath, const struct stat *sb,
-           int typeflag, struct FTW *ftwbuf)
-{
-   if (S_ISREG(sb->st_mode))
-      unlink(fpath);
-   else if (S_ISDIR(sb->st_mode))
-      rmdir(fpath);
-
-   return 0;
-}
-
-static void
-remove_old_cache_directories(void *mem_ctx, const char *path,
-                             const char *timestamp)
-{
-   DIR *dir = opendir(path);
-
-   struct dirent* d_entry;
-   while((d_entry = readdir(dir)) != NULL)
-   {
-      char *full_path =
-         ralloc_asprintf(mem_ctx, "%s/%s", path, d_entry->d_name);
-
-      struct stat sb;
-      if (stat(full_path, &sb) == 0 && S_ISDIR(sb.st_mode) &&
-          strcmp(d_entry->d_name, timestamp) != 0 &&
-          strcmp(d_entry->d_name, "..") != 0 &&
-          strcmp(d_entry->d_name, ".") != 0) {
-         nftw(full_path, remove_dir, 20, FTW_DEPTH);
-      }
-   }
-
-   closedir(dir);
-}
-
-static char *
-create_mesa_cache_dir(void *mem_ctx, const char *path, const char *timestamp,
-                      const char *gpu_name)
-{
-   char *new_path = concatenate_and_mkdir(mem_ctx, path, "mesa");
-   if (new_path == NULL)
-      return NULL;
-
-   /* Create a parent architecture directory so that we don't remove cache
-    * files for other architectures. In theory we could share the cache
-    * between architectures but we have no way of knowing if they were created
-    * by a compatible Mesa version.
-    */
-   new_path = concatenate_and_mkdir(mem_ctx, new_path, get_arch_bitness_str());
-   if (new_path == NULL)
-      return NULL;
-
-   /* Remove cache directories for old Mesa versions */
-   remove_old_cache_directories(mem_ctx, new_path, timestamp);
-
-   new_path = concatenate_and_mkdir(mem_ctx, new_path, timestamp);
-   if (new_path == NULL)
-      return NULL;
-
-   new_path = concatenate_and_mkdir(mem_ctx, new_path, gpu_name);
-   if (new_path == NULL)
-      return NULL;
-
-   return new_path;
-}
+#define DRV_KEY_CPY(_dst, _src, _src_size) \
+do {                                       \
+   memcpy(_dst, _src, _src_size);          \
+   _dst += _src_size;                      \
+} while (0);

 struct disk_cache *
-disk_cache_create(const char *gpu_name, const char *timestamp)
+disk_cache_create(const char *gpu_name, const char *driver_id,
+                  uint64_t driver_flags)
 {
    void *local;
    struct disk_cache *cache = NULL;
@@ -234,9 +205,11 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
    uint64_t max_size;
    int fd = -1;
    struct stat sb;
-   struct statvfs vfs = { 0 };
    size_t size;

+   uint8_t cache_version = CACHE_VERSION;
+   size_t cv_size = sizeof(cache_version);
+
    /* If running as a user other than the real user, disable cache */
    if (geteuid() != getuid())
       return NULL;
@@ -247,24 +220,30 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
       goto fail;

    /* At user request, disable shader cache entirely. */
-   if (getenv("MESA_GLSL_CACHE_DISABLE"))
+   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
+      goto fail;
+
+   cache = rzalloc(NULL, struct disk_cache);
+   if (cache == NULL)
       goto fail;

+   /* Assume failure. */
+   cache->path_init_failed = true;
+
    /* Determine path for cache based on the first defined name as follows:
     *
     *   $MESA_GLSL_CACHE_DIR
-    *   $XDG_CACHE_HOME/mesa
-    *   <pwd.pw_dir>/.cache/mesa
+    *   $XDG_CACHE_HOME/mesa_shader_cache
+    *   <pwd.pw_dir>/.cache/mesa_shader_cache
     */
    path = getenv("MESA_GLSL_CACHE_DIR");
    if (path) {
       if (mkdir_if_needed(path) == -1)
-         goto fail;
+         goto path_fail;

-      path = create_mesa_cache_dir(local, path, timestamp,
-                                   gpu_name);
+      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
       if (path == NULL)
-         goto fail;
+         goto path_fail;
    }

    if (path == NULL) {
@@ -272,12 +251,11 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
       if (xdg_cache_home) {
          if (mkdir_if_needed(xdg_cache_home) == -1)
-            goto fail;
+            goto path_fail;

-         path = create_mesa_cache_dir(local, xdg_cache_home, timestamp,
-                                      gpu_name);
+         path = concatenate_and_mkdir(local, xdg_cache_home, CACHE_DIR_NAME);
          if (path == NULL)
-            goto fail;
+            goto path_fail;
       }
    }

@@ -303,43 +281,39 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
             buf = NULL;
             buf_size *= 2;
          } else {
-            goto fail;
+            goto path_fail;
          }
       }

       path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache");
       if (path == NULL)
-         goto fail;
+         goto path_fail;

-      path = create_mesa_cache_dir(local, path, timestamp, gpu_name);
+      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
       if (path == NULL)
-         goto fail;
+         goto path_fail;
    }

-   cache = ralloc(NULL, struct disk_cache);
-   if (cache == NULL)
-      goto fail;
-
    cache->path = ralloc_strdup(cache, path);
    if (cache->path == NULL)
-      goto fail;
+      goto path_fail;

    path = ralloc_asprintf(local, "%s/index", cache->path);
    if (path == NULL)
-      goto fail;
+      goto path_fail;

    fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
    if (fd == -1)
-      goto fail;
+      goto path_fail;

    if (fstat(fd, &sb) == -1)
-      goto fail;
+      goto path_fail;

    /* Force the index file to be the expected size. */
    size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
    if (sb.st_size != size) {
       if (ftruncate(fd, size) == -1)
-         goto fail;
+         goto path_fail;
    }

    /* We map this shared so that other processes see updates that we
@@ -360,11 +334,9 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
    cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd, 0);
    if (cache->index_mmap == MAP_FAILED)
-      goto fail;
+      goto path_fail;
    cache->index_mmap_size = size;

-   close(fd);
-
    cache->size = (uint64_t *) cache->index_mmap;
    cache->stored_keys = cache->index_mmap + sizeof(uint64_t);

@@ -396,21 +368,64 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
       }
    }

-   /* Default to 1GB or 10% of filesystem for maximum cache size. */
+   /* Default to 1GB for maximum cache size. */
    if (max_size == 0) {
-      statvfs(path, &vfs);
-      max_size = MAX2(1024*1024*1024, vfs.f_blocks * vfs.f_bsize / 10);
+      max_size = 1024*1024*1024;
    }

    cache->max_size = max_size;

-   /* A limit of 32 jobs was choosen as observations of Deus Ex start-up times
-    * showed that we reached at most 11 jobs on an Intel i5-6400 CPU@2.70GHz
-    * (a fairly modest desktop CPU). 1 thread was chosen because we don't
-    * really care about getting things to disk quickly just that it's not
-    * blocking other tasks.
+   /* 4 threads were chosen below because just about all modern CPUs currently
+    * available that run Mesa have *at least* 4 cores. For these CPUs allowing
+    * more threads can result in the queue being processed faster, thus
+    * avoiding excessive memory use due to a backlog of cache entries building
+    * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
+    * flag this should have little negative impact on low core systems.
+    *
+    * The queue will resize automatically when it's full, so adding new jobs
+    * doesn't stall.
+    */
+   util_queue_init(&cache->cache_queue, "disk$", 32, 4,
+                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
+                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
+                   UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);
+
+   cache->path_init_failed = false;
+
+ path_fail:
+
+   if (fd != -1)
+      close(fd);
+
+   cache->driver_keys_blob_size = cv_size;
+
+   /* Create driver id keys */
+   size_t id_size = strlen(driver_id) + 1;
+   size_t gpu_name_size = strlen(gpu_name) + 1;
+   cache->driver_keys_blob_size += id_size;
+   cache->driver_keys_blob_size += gpu_name_size;
+
+   /* We sometimes store entire structs that contain pointers in the cache,
+    * use pointer size as a key to avoid hard to debug issues.
     */
-   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);
+   uint8_t ptr_size = sizeof(void *);
+   size_t ptr_size_size = sizeof(ptr_size);
+   cache->driver_keys_blob_size += ptr_size_size;
+
+   size_t driver_flags_size = sizeof(driver_flags);
+   cache->driver_keys_blob_size += driver_flags_size;
+
+   cache->driver_keys_blob =
+      ralloc_size(cache, cache->driver_keys_blob_size);
+   if (!cache->driver_keys_blob)
+      goto fail;
+
+   uint8_t *drv_key_blob = cache->driver_keys_blob;
+   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size)
+   DRV_KEY_CPY(drv_key_blob, driver_id, id_size)
+   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size)
+   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size)
+   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size)

    /* Seed our rand function */
    s_rand_xorshift128plus(cache->seed_xorshift128plus, true);
@@ -420,8 +435,6 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
    return cache;

 fail:
-   if (fd != -1)
-      close(fd);
    if (cache)
       ralloc_free(cache);
    ralloc_free(local);
@@ -432,7 +445,8 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
 void
 disk_cache_destroy(struct disk_cache *cache)
 {
-   if (cache) {
+   if (cache && !cache->path_init_failed) {
+      util_queue_finish(&cache->cache_queue);
       util_queue_destroy(&cache->cache_queue);
       munmap(cache->index_mmap, cache->index_mmap_size);
    }
@@ -440,6 +454,12 @@ disk_cache_destroy(struct disk_cache *cache)
    ralloc_free(cache);
 }

+void
+disk_cache_wait_for_idle(struct disk_cache *cache)
+{
+   util_queue_finish(&cache->cache_queue);
+}
+
 /* Return a filename within the cache's directory corresponding to 'key'. The
  * returned filename is ralloced with 'cache' as the parent context.
  *
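The driver_keys_blob assembled with DRV_KEY_CPY above is prepended to every cache entry on disk and is also mixed into every cache key (see disk_cache_compute_key at the end of this diff). The sketch below shows how a third-party tool might walk that header; the function is hypothetical, it assumes a well-formed blob produced on the same machine, and the field order simply mirrors the DRV_KEY_CPY sequence: cache version, driver id string, GPU name string, pointer size, driver flags.

/* Hypothetical parser for the driver_keys_blob written at the start of each
 * cache entry.  Field order follows the DRV_KEY_CPY calls above; no real
 * bounds checking is done here. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
demo_dump_driver_keys(const uint8_t *blob)
{
   const uint8_t *p = blob;

   uint8_t version = *p++;                    /* CACHE_VERSION            */

   const char *driver_id = (const char *)p;   /* NUL-terminated string    */
   p += strlen(driver_id) + 1;

   const char *gpu_name = (const char *)p;    /* NUL-terminated string    */
   p += strlen(gpu_name) + 1;

   uint8_t ptr_size = *p++;                   /* sizeof(void *)           */

   uint64_t driver_flags;                     /* raw uint64_t flags       */
   memcpy(&driver_flags, p, sizeof(driver_flags));

   printf("cache version %u, driver %s, gpu %s, ptr size %u, flags 0x%llx\n",
          version, driver_id, gpu_name, ptr_size,
          (unsigned long long)driver_flags);
}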
@@ -451,6 +471,9 @@ get_cache_file(struct disk_cache *cache, const cache_key key)
    char buf[41];
    char *filename;

+   if (cache->path_init_failed)
+      return NULL;
+
    _mesa_sha1_format(buf, key);
    if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                 buf[1], buf + 2) == -1)
@@ -574,7 +597,7 @@ unlink_lru_file_from_directory(const char *path)
    unlink(filename);
    free (filename);

-   return sb.st_size;
+   return sb.st_blocks * 512;
 }

 /* Is entry a directory with a two-character name, (and not the
@@ -679,8 +702,23 @@ disk_cache_remove(struct disk_cache *cache, const cache_key key)
    unlink(filename);
    free(filename);

-   if (sb.st_size)
-      p_atomic_add(cache->size, - (uint64_t)sb.st_size);
+   if (sb.st_blocks)
+      p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
+}
+
+static ssize_t
+read_all(int fd, void *buf, size_t count)
+{
+   char *in = buf;
+   ssize_t read_ret;
+   size_t done;
+
+   for (done = 0; done < count; done += read_ret) {
+      read_ret = read(fd, in + done, count - done);
+      if (read_ret == -1 || read_ret == 0)
+         return -1;
+   }
+   return done;
 }

 static ssize_t
@@ -712,7 +750,28 @@
 deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
                           const char *filename)
 {
-   unsigned char out[BUFSIZE];
+#ifdef HAVE_ZSTD
+   /* from the zstd docs (https://facebook.github.io/zstd/zstd_manual.html):
+    * compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+    */
+   size_t out_size = ZSTD_compressBound(in_data_size);
+   void * out = malloc(out_size);
+
+   size_t ret = ZSTD_compress(out, out_size, in_data, in_data_size,
+                              ZSTD_COMPRESSION_LEVEL);
+   if (ZSTD_isError(ret)) {
+      free(out);
+      return 0;
+   }
+   ssize_t written = write_all(dest, out, ret);
+   if (written == -1) {
+      free(out);
+      return 0;
+   }
+   free(out);
+   return ret;
+#else
+   unsigned char *out;

    /* allocate deflate state */
    z_stream strm;
@@ -729,6 +788,11 @@ deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
    /* compress until end of in_data */
    size_t compressed_size = 0;
    int flush;
+
+   out = malloc(BUFSIZE * sizeof(unsigned char));
+   if (out == NULL)
+      return 0;
+
    do {
       int remaining = in_data_size - BUFSIZE;
       flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
@@ -750,6 +814,7 @@ deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
          ssize_t written = write_all(dest, out, have);
          if (written == -1) {
             (void)deflateEnd(&strm);
+            free(out);
             return 0;
          }
       } while (strm.avail_out == 0);
@@ -764,12 +829,15 @@ deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,

    /* clean up and return */
    (void)deflateEnd(&strm);
+   free(out);

    return compressed_size;
+# endif
 }

 static struct disk_cache_put_job *
 create_put_job(struct disk_cache *cache, const cache_key key,
-               const void *data, size_t size)
+               const void *data, size_t size,
+               struct cache_item_metadata *cache_item_metadata)
 {
    struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
       malloc(sizeof(struct disk_cache_put_job) + size);
@@ -780,15 +848,44 @@ create_put_job(struct disk_cache *cache, const cache_key key,
       dc_job->data = dc_job + 1;
       memcpy(dc_job->data, data, size);
       dc_job->size = size;
+
+      /* Copy the cache item metadata */
+      if (cache_item_metadata) {
+         dc_job->cache_item_metadata.type = cache_item_metadata->type;
+         if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
+            dc_job->cache_item_metadata.num_keys =
+               cache_item_metadata->num_keys;
+            dc_job->cache_item_metadata.keys = (cache_key *)
+               malloc(cache_item_metadata->num_keys * sizeof(cache_key));
+
+            if (!dc_job->cache_item_metadata.keys)
+               goto fail;
+
+            memcpy(dc_job->cache_item_metadata.keys,
+                   cache_item_metadata->keys,
+                   sizeof(cache_key) * cache_item_metadata->num_keys);
+         }
+      } else {
+         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
+         dc_job->cache_item_metadata.keys = NULL;
+      }
    }

    return dc_job;
+
+fail:
+   free(dc_job);
+
+   return NULL;
 }

 static void
 destroy_put_job(void *job, int thread_index)
 {
    if (job) {
+      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
+      free(dc_job->cache_item_metadata.keys);
+
       free(job);
    }
 }
@@ -845,7 +942,17 @@ cache_put(void *job, int thread_index)
     * open with the flock held. So just let that file be responsible
     * for writing the file.
     */
+#ifdef HAVE_FLOCK
    err = flock(fd, LOCK_EX | LOCK_NB);
+#else
+   struct flock lock = {
+      .l_start = 0,
+      .l_len = 0, /* entire file */
+      .l_type = F_WRLCK,
+      .l_whence = SEEK_SET
+   };
+   err = fcntl(fd, F_SETLK, &lock);
+#endif
    if (err == -1)
       goto done;

@@ -864,9 +971,49 @@ cache_put(void *job, int thread_index)
    /* OK, we're now on the hook to write out a file that we know is
     * not in the cache, and is also not being written out to the cache
     * by some other process.
-    *
-    * Create CRC of the data and store at the start of the file. We will
-    * read this when restoring the cache and use it to check for corruption.
+    */
+
+   /* Write the driver_keys_blob, this can be used to find information about
+    * the mesa version that produced the entry or deal with hash collisions,
+    * should that ever become a real problem.
+    */
+   ret = write_all(fd, dc_job->cache->driver_keys_blob,
+                   dc_job->cache->driver_keys_blob_size);
+   if (ret == -1) {
+      unlink(filename_tmp);
+      goto done;
+   }
+
+   /* Write the cache item metadata. This data can be used to deal with
+    * hash collisions, as well as providing useful information to 3rd party
+    * tools reading the cache files.
+    */
+   ret = write_all(fd, &dc_job->cache_item_metadata.type,
+                   sizeof(uint32_t));
+   if (ret == -1) {
+      unlink(filename_tmp);
+      goto done;
+   }
+
+   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
+      ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
+                      sizeof(uint32_t));
+      if (ret == -1) {
+         unlink(filename_tmp);
+         goto done;
+      }
+
+      ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
+                      dc_job->cache_item_metadata.num_keys *
+                      sizeof(cache_key));
+      if (ret == -1) {
+         unlink(filename_tmp);
+         goto done;
+      }
+   }
+
+   /* Create CRC of the data. We will read this when restoring the cache and
+    * use it to check for corruption.
     */
    struct cache_entry_file_data cf_data;
    cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
@@ -895,8 +1042,14 @@ cache_put(void *job, int thread_index)
       goto done;
    }

-   file_size += cf_data_size;
-   p_atomic_add(dc_job->cache->size, file_size);
+   struct stat sb;
+   if (stat(filename, &sb) == -1) {
+      /* Something went wrong; remove the file */
+      unlink(filename);
+      goto done;
+   }
+
+   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
    if (fd_final != -1)
@@ -906,23 +1059,30 @@ cache_put(void *job, int thread_index)
     */
    if (fd != -1)
       close(fd);
-   if (filename_tmp)
-      free(filename_tmp);
-   if (filename)
-      free(filename);
+   free(filename_tmp);
+   free(filename);
 }

 void
 disk_cache_put(struct disk_cache *cache, const cache_key key,
-               const void *data, size_t size)
+               const void *data, size_t size,
+               struct cache_item_metadata *cache_item_metadata)
 {
+   if (cache->blob_put_cb) {
+      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
+      return;
+   }
+
+   if (cache->path_init_failed)
+      return;
+
    struct disk_cache_put_job *dc_job =
-      create_put_job(cache, key, data, size);
+      create_put_job(cache, key, data, size, cache_item_metadata);

    if (dc_job) {
       util_queue_fence_init(&dc_job->fence);
       util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
-                         cache_put, destroy_put_job);
+                         cache_put, destroy_put_job, dc_job->size);
    }
 }

@@ -933,6 +1093,10 @@ static bool
 inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                    uint8_t *out_data, size_t out_data_size)
 {
+#ifdef HAVE_ZSTD
+   size_t ret = ZSTD_decompress(out_data, out_data_size, in_data, in_data_size);
+   return !ZSTD_isError(ret);
+#else
    z_stream strm;

    /* allocate inflate state */
@@ -963,20 +1127,44 @@ inflate_cache_data(uint8_t *in_data, size_t in_data_size,
    /* clean up and return */
    (void)inflateEnd(&strm);
    return true;
+#endif
 }

 void *
 disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
 {
-   int fd = -1, ret, len;
+   int fd = -1, ret;
    struct stat sb;
    char *filename = NULL;
    uint8_t *data = NULL;
    uint8_t *uncompressed_data = NULL;
+   uint8_t *file_header = NULL;

    if (size)
       *size = 0;

+   if (cache->blob_get_cb) {
+      /* This is what Android EGL defines as the maxValueSize in egl_cache_t
+       * class implementation.
+       */
+      const signed long max_blob_size = 64 * 1024;
+      void *blob = malloc(max_blob_size);
+      if (!blob)
+         return NULL;
+
+      signed long bytes =
+         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);
+
+      if (!bytes) {
+         free(blob);
+         return NULL;
+      }
+
+      if (size)
+         *size = bytes;
+      return blob;
+   }
+
    filename = get_cache_file(cache, key);
    if (filename == NULL)
       goto fail;
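With the writes above, an entry on disk now consists of the driver_keys_blob, the cache item metadata, the small CRC header and the compressed payload; the hunk below reads them back in that order. The helper below summarises that layout by computing where the compressed payload starts. It is not part of Mesa: the key size, the CACHE_ITEM_TYPE_GLSL value and the assumption that cache_entry_file_data holds two 32-bit fields (crc32 and uncompressed_size) are stand-ins inferred from how the fields are written and read in this diff.

/* Hypothetical helper that walks an entry's header fields in the order
 * cache_put() writes them and returns the offset of the compressed payload.
 * Assumes a well-formed entry produced on the same machine. */
#include <stdint.h>
#include <string.h>

#define DEMO_CACHE_KEY_SIZE 20   /* SHA-1 digest size; CACHE_KEY_SIZE in Mesa */
#define DEMO_TYPE_GLSL      1    /* stand-in for CACHE_ITEM_TYPE_GLSL         */

static size_t
demo_payload_offset(const uint8_t *entry, size_t driver_keys_blob_size)
{
   size_t off = driver_keys_blob_size;           /* 1. driver_keys_blob        */

   uint32_t type;                                /* 2. cache item metadata     */
   memcpy(&type, entry + off, sizeof(type));
   off += sizeof(type);

   if (type == DEMO_TYPE_GLSL) {
      uint32_t num_keys;
      memcpy(&num_keys, entry + off, sizeof(num_keys));
      off += sizeof(num_keys);
      off += (size_t)num_keys * DEMO_CACHE_KEY_SIZE;
   }

   off += 2 * sizeof(uint32_t);                  /* 3. crc32 + uncompressed_size */

   return off;                                   /* 4. compressed payload start  */
}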
@@ -992,24 +1180,63 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
    if (data == NULL)
       goto fail;

-   /* Load the CRC that was created when the file was written. */
-   struct cache_entry_file_data cf_data;
-   size_t cf_data_size = sizeof(cf_data);
-   assert(sb.st_size > cf_data_size);
-   for (len = 0; len < cf_data_size; len += ret) {
-      ret = read(fd, ((uint8_t *) &cf_data) + len, cf_data_size - len);
+   size_t ck_size = cache->driver_keys_blob_size;
+   file_header = malloc(ck_size);
+   if (!file_header)
+      goto fail;
+
+   if (sb.st_size < ck_size)
+      goto fail;
+
+   ret = read_all(fd, file_header, ck_size);
+   if (ret == -1)
+      goto fail;
+
+   /* Check for extremely unlikely hash collisions */
+   if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
+      assert(!"Mesa cache keys mismatch!");
+      goto fail;
+   }
+
+   size_t cache_item_md_size = sizeof(uint32_t);
+   uint32_t md_type;
+   ret = read_all(fd, &md_type, cache_item_md_size);
+   if (ret == -1)
+      goto fail;
+
+   if (md_type == CACHE_ITEM_TYPE_GLSL) {
+      uint32_t num_keys;
+      cache_item_md_size += sizeof(uint32_t);
+      ret = read_all(fd, &num_keys, sizeof(uint32_t));
       if (ret == -1)
          goto fail;
-   }

-   /* Load the actual cache data. */
-   size_t cache_data_size = sb.st_size - cf_data_size;
-   for (len = 0; len < cache_data_size; len += ret) {
-      ret = read(fd, data + len, cache_data_size - len);
+      /* The cache item metadata is currently just used for distributing
+       * precompiled shaders, they are not used by Mesa so just skip them for
+       * now.
+       * TODO: pass the metadata back to the caller and do some basic
+       * validation.
+       */
+      cache_item_md_size += num_keys * sizeof(cache_key);
+      ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
       if (ret == -1)
          goto fail;
    }

+   /* Load the CRC that was created when the file was written. */
+   struct cache_entry_file_data cf_data;
+   size_t cf_data_size = sizeof(cf_data);
+   ret = read_all(fd, &cf_data, cf_data_size);
+   if (ret == -1)
+      goto fail;
+
+   /* Load the actual cache data. */
+   size_t cache_data_size =
+      sb.st_size - cf_data_size - ck_size - cache_item_md_size;
+   ret = read_all(fd, data, cache_data_size);
+   if (ret == -1)
+      goto fail;
+
    /* Uncompress the cache data */
    uncompressed_data = malloc(cf_data.uncompressed_size);
    if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
@@ -1023,6 +1250,7 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)

    free(data);
    free(filename);
+   free(file_header);
    close(fd);

    if (size)
@@ -1037,6 +1265,8 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
       free(uncompressed_data);
    if (filename)
       free(filename);
+   if (file_header)
+      free(file_header);
    if (fd != -1)
       close(fd);

@@ -1047,9 +1277,17 @@
 void
 disk_cache_put_key(struct disk_cache *cache, const cache_key key)
 {
    const uint32_t *key_chunk = (const uint32_t *) key;
-   int i = *key_chunk & CACHE_INDEX_KEY_MASK;
+   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
    unsigned char *entry;

+   if (cache->blob_put_cb) {
+      cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
+      return;
+   }
+
+   if (cache->path_init_failed)
+      return;
+
    entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
    memcpy(entry, key, CACHE_KEY_SIZE);
@@ -1066,9 +1304,17 @@
 bool
 disk_cache_has_key(struct disk_cache *cache, const cache_key key)
 {
    const uint32_t *key_chunk = (const uint32_t *) key;
-   int i = *key_chunk & CACHE_INDEX_KEY_MASK;
+   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
    unsigned char *entry;

+   if (cache->blob_get_cb) {
+      uint32_t blob;
+      return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
+   }
+
+   if (cache->path_init_failed)
+      return false;
+
    entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

    return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
@@ -1078,7 +1324,21 @@
 void
 disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                        cache_key key)
 {
-   _mesa_sha1_compute(data, size, key);
+   struct mesa_sha1 ctx;
+
+   _mesa_sha1_init(&ctx);
+   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
+                     cache->driver_keys_blob_size);
+   _mesa_sha1_update(&ctx, data, size);
+   _mesa_sha1_final(&ctx, key);
+}
+
+void
+disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
+                         disk_cache_get_cb get)
+{
+   cache->blob_put_cb = put;
+   cache->blob_get_cb = get;
 }

 #endif /* ENABLE_SHADER_CACHE */
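Taken together, the reworked entry points give a driver a small, self-contained API: create a cache keyed by driver id and flags, derive entry keys, and store or fetch blobs either on disk or through registered callbacks. A minimal usage sketch follows; the GPU name, driver id, flags and payload are made-up values and error handling is reduced to the bare minimum.

/* Hypothetical driver-side usage of the entry points touched by this patch. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "disk_cache.h"

static void
demo_use_disk_cache(void)
{
   struct disk_cache *cache =
      disk_cache_create("demo-gpu", "demo-driver-1.0", 0 /* driver_flags */);
   if (!cache)
      return;

   /* Keys now mix in the driver_keys_blob, so identical source produces
    * different keys for different drivers or builds. */
   const char *source = "void main() { }";
   cache_key key;
   disk_cache_compute_key(cache, source, strlen(source), key);

   size_t size = 0;
   void *binary = disk_cache_get(cache, key, &size);
   if (!binary) {
      static const uint8_t fake_binary[16] = { 0 };  /* stand-in payload */

      /* NULL metadata stores the entry as CACHE_ITEM_TYPE_UNKNOWN. */
      disk_cache_put(cache, key, fake_binary, sizeof(fake_binary), NULL);
      disk_cache_put_key(cache, key);

      /* Writes are queued; flush them explicitly if required. */
      disk_cache_wait_for_idle(cache);
   } else {
      free(binary);   /* disk_cache_get() hands back a malloc'ed copy */
   }

   disk_cache_destroy(cache);
}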