/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifdef ENABLE_SHADER_CACHE

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <pwd.h>
#include <errno.h>
#include <dirent.h>
#include "zlib.h"

#include "util/crc32.h"
#include "util/debug.h"
#include "util/rand_xor.h"
#include "util/u_atomic.h"
#include "util/u_queue.h"
#include "util/mesa-sha1.h"
#include "util/ralloc.h"
#include "main/compiler.h"
#include "main/errors.h"

#include "disk_cache.h"
/* Number of bits to mask off from a cache key to get an index. */
#define CACHE_INDEX_KEY_BITS 16

/* Mask for computing an index from a key. */
#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1)

/* The number of keys that can be stored in the index. */
#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)
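
/* For illustration: with CACHE_INDEX_KEY_BITS == 16 the index holds 65536
 * slots of CACHE_KEY_SIZE bytes each, and a key's slot is found by masking
 * its first 32-bit chunk, as disk_cache_put_key()/disk_cache_has_key() do
 * below:
 *
 *    const uint32_t *key_chunk = (const uint32_t *) key;
 *    int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
 *    unsigned char *entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
 */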
/* The cache version should be bumped whenever a change is made to the
 * structure of cache entries or the index. This will give any 3rd party
 * applications reading the cache entries a chance to adjust to the changes.
 *
 * - The cache version is checked internally when reading a cache entry. If we
 *   ever have a mismatch we are in big trouble as this means we had a cache
 *   collision. In case of such an event please check the skies for giant
 *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
 *
 * - There is no strict requirement that cache versions be backwards
 *   compatible but effort should be taken to limit disruption where possible.
 */
#define CACHE_VERSION 1
struct disk_cache {
   /* The path to the cache directory. */
   char *path;
   bool path_init_failed;

   /* Thread queue for compressing and writing cache entries to disk */
   struct util_queue cache_queue;

   /* Seed for rand, which is used to pick a random directory */
   uint64_t seed_xorshift128plus[2];

   /* A pointer to the mmapped index file within the cache directory. */
   uint8_t *index_mmap;
   size_t index_mmap_size;

   /* Pointer to total size of all objects in cache (within index_mmap) */
   uint64_t *size;

   /* Pointer to stored keys, (within index_mmap). */
   uint8_t *stored_keys;

   /* Maximum size of all cached objects (in bytes). */
   uint64_t max_size;

   /* Driver cache keys. */
   uint8_t *driver_keys_blob;
   size_t driver_keys_blob_size;

   disk_cache_put_cb blob_put_cb;
   disk_cache_get_cb blob_get_cb;
};
struct disk_cache_put_job {
   struct util_queue_fence fence;

   struct disk_cache *cache;

   /* Key for the cache entry being written. */
   cache_key key;

   /* Copy of cache data to be compressed and written. */
   void *data;

   /* Size of data to be compressed and written. */
   size_t size;

   struct cache_item_metadata cache_item_metadata;
};
/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                         "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}
/* Concatenate an existing path and a new name to form a new path. If the new
 * path does not exist as a directory, create it then return the resulting
 * name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path> does not exist or is not a directory
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;
   struct stat sb;

   if (stat(path, &sb) != 0 || !S_ISDIR(sb.st_mode))
      return NULL;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_if_needed(new_path) == 0)
      return new_path;
   else
      return NULL;
}
#define DRV_KEY_CPY(_dst, _src, _src_size) \
   do {                                    \
      memcpy(_dst, _src, _src_size);       \
      _dst += _src_size;                   \
   } while (0);
struct disk_cache *
disk_cache_create(const char *gpu_name, const char *driver_id,
                  uint64_t driver_flags)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *path, *max_size_str;
   uint64_t max_size;
   int fd = -1;
   struct stat sb;
   size_t size;

   uint8_t cache_version = CACHE_VERSION;
   size_t cv_size = sizeof(cache_version);

   /* If running as a user other than the real user, disable the cache. */
   if (geteuid() != getuid())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   /* At user request, disable shader cache entirely. */
   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
      goto fail;

   cache = rzalloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   /* Assume failure. */
   cache->path_init_failed = true;

   /* Determine path for cache based on the first defined name as follows:
    *
    *   $MESA_GLSL_CACHE_DIR
    *   $XDG_CACHE_HOME/mesa_shader_cache
    *   <pwd.pw_dir>/.cache/mesa_shader_cache
    */
   path = getenv("MESA_GLSL_CACHE_DIR");
   if (path) {
      if (mkdir_if_needed(path) == -1)
         goto path_fail;

      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
      if (path == NULL)
         goto path_fail;
   }

   if (path == NULL) {
      char *xdg_cache_home = getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            goto path_fail;

         path = concatenate_and_mkdir(local, xdg_cache_home, CACHE_DIR_NAME);
         if (path == NULL)
            goto path_fail;
      }
   }

   if (path == NULL) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(local, buf_size);
         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            goto path_fail;
         }
      }

      path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache");
      if (path == NULL)
         goto path_fail;

      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
      if (path == NULL)
         goto path_fail;
   }

   cache->path = ralloc_strdup(cache, path);
   if (cache->path == NULL)
      goto path_fail;

   path = ralloc_asprintf(local, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
      if (ftruncate(fd, size) == -1)
         goto path_fail;
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);

   max_size = 0;

   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }

   /* Default to 1GB for maximum cache size. */
   if (max_size == 0) {
      max_size = 1024*1024*1024;
   }

   cache->max_size = max_size;

   /* 4 threads were chosen below because just about all modern CPUs currently
    * available that run Mesa have *at least* 4 cores. For these CPUs allowing
    * more threads can result in the queue being processed faster, thus
    * avoiding excessive memory use due to a backlog of cache entries building
    * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
    * flag this should have little negative impact on low core systems.
    *
    * The queue will resize automatically when it's full, so adding new jobs
    * doesn't stall.
    */
   util_queue_init(&cache->cache_queue, "disk$", 32, 4,
                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                   UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);

   cache->path_init_failed = false;

 path_fail:

   if (fd != -1) {
      close(fd);
      fd = -1; /* avoid a double close via the fail path below */
   }

   cache->driver_keys_blob_size = cv_size;

   /* Create driver id keys */
   size_t id_size = strlen(driver_id) + 1;
   size_t gpu_name_size = strlen(gpu_name) + 1;
   cache->driver_keys_blob_size += id_size;
   cache->driver_keys_blob_size += gpu_name_size;

   /* We sometimes store entire structs that contain pointers in the cache;
    * use pointer size as a key to avoid hard-to-debug issues.
    */
   uint8_t ptr_size = sizeof(void *);
   size_t ptr_size_size = sizeof(ptr_size);
   cache->driver_keys_blob_size += ptr_size_size;

   size_t driver_flags_size = sizeof(driver_flags);
   cache->driver_keys_blob_size += driver_flags_size;

   cache->driver_keys_blob =
      ralloc_size(cache, cache->driver_keys_blob_size);
   if (!cache->driver_keys_blob)
      goto fail;

   uint8_t *drv_key_blob = cache->driver_keys_blob;
   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size)
   DRV_KEY_CPY(drv_key_blob, driver_id, id_size)
   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size)
   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size)
   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size)

   /* Seed our rand function */
   s_rand_xorshift128plus(cache->seed_xorshift128plus, true);

   ralloc_free(local);

   return cache;

 fail:
   if (fd != -1)
      close(fd);
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}
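
/* A minimal usage sketch (hypothetical driver strings; error handling
 * elided):
 *
 *    struct disk_cache *cache =
 *       disk_cache_create("example-gpu", "example-driver-1.0", 0);
 *    if (cache) {
 *       ... disk_cache_put() / disk_cache_get() ...
 *       disk_cache_destroy(cache);
 *    }
 *
 * Note that a NULL return (e.g. setuid processes) and a cache with
 * path_init_failed set (no usable directory) are distinct: the latter still
 * supports the blob callbacks installed via disk_cache_set_callbacks().
 */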
void
disk_cache_destroy(struct disk_cache *cache)
{
   if (cache && !cache->path_init_failed) {
      util_queue_finish(&cache->cache_queue);
      util_queue_destroy(&cache->cache_queue);
      munmap(cache->index_mmap, cache->index_mmap_size);
   }

   ralloc_free(cache);
}
/* Return a filename within the cache's directory corresponding to 'key'. The
 * returned filename is heap-allocated with asprintf(); the caller is
 * responsible for freeing it.
 *
 * Returns NULL if out of memory.
 */
static char *
get_cache_file(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}
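
/* For example, a key whose SHA-1 formats to "0123456789..." maps to
 * "<cache->path>/01/23456789...": the first two hex digits name the
 * subdirectory and the remaining 38 digits name the file.
 */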
/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * get_cache_file above.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}
/* Given a directory path and predicate function, find the entry with
 * the oldest access time in that directory for which the predicate
 * returns true.
 *
 * Returns: A malloc'ed string for the path to the chosen file, (or
 * NULL on any error). The caller should free the string when
 * finished.
 */
static char *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *entry;
   char *filename;
   char *lru_name = NULL;
   time_t lru_atime = 0;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   while (1) {
      entry = readdir(dir);
      if (entry == NULL)
         break;

      struct stat sb;
      if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) {
         if (!lru_atime || (sb.st_atime < lru_atime)) {
            size_t len = strlen(entry->d_name);

            if (!predicate(dir_path, &sb, entry->d_name, len))
               continue;

            char *tmp = realloc(lru_name, len + 1);
            if (tmp) {
               lru_name = tmp;
               memcpy(lru_name, entry->d_name, len + 1);
               lru_atime = sb.st_atime;
            }
         }
      }
   }

   if (lru_name == NULL) {
      closedir(dir);
      return NULL;
   }

   if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0)
      filename = NULL;

   free(lru_name);
   closedir(dir);

   return filename;
}
/* Is entry a regular file, and not having a name with a trailing
 * ".tmp"?
 */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}
/* Returns the size of the deleted file, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct stat sb;
   char *filename;

   filename = choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (filename == NULL)
      return 0;

   if (stat(filename, &sb) == -1) {
      free(filename);
      return 0;
   }

   unlink(filename);
   free(filename);

   return sb.st_blocks * 512;
}
/* Is entry a directory with a two-character name, (and not the
 * special name of ".."). We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;

   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}
static void
evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * Provides pseudo-LRU eviction to reduce checking all cache files.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   dir_path = choose_lru_file_matching(cache->path,
                                       is_two_character_sub_directory);
   if (dir_path == NULL)
      return;

   size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size)
      p_atomic_add(cache->size, - (uint64_t)size);
}
void
disk_cache_remove(struct disk_cache *cache, const cache_key key)
{
   struct stat sb;

   char *filename = get_cache_file(cache, key);
   if (filename == NULL) {
      return;
   }

   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
}
static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}
/* From the zlib docs:
 *    "If the memory is available, buffers sizes on the order of 128K or 256K
 *    bytes should be used."
 */
#define BUFSIZE 256 * 1024
/**
 * Compresses cache entry in memory and writes it to disk. Returns the size
 * of the data written to disk.
 */
static size_t
deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
                          const char *filename)
{
   unsigned char *out;

   /* allocate deflate state */
   z_stream strm;
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = (uint8_t *) in_data;
   strm.avail_in = in_data_size;

   int ret = deflateInit(&strm, Z_BEST_COMPRESSION);
   if (ret != Z_OK)
      return 0;

   /* compress until end of in_data */
   size_t compressed_size = 0;
   int flush;

   out = malloc(BUFSIZE * sizeof(unsigned char));
   if (out == NULL)
      return 0;

   do {
      int remaining = in_data_size - BUFSIZE;
      flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
      in_data_size -= BUFSIZE;

      /* Run deflate() on input until the output buffer is not full (which
       * means there is no more data to deflate).
       */
      do {
         strm.avail_out = BUFSIZE;
         strm.next_out = out;

         ret = deflate(&strm, flush);    /* no bad return value */
         assert(ret != Z_STREAM_ERROR);  /* state not clobbered */

         size_t have = BUFSIZE - strm.avail_out;
         compressed_size += have;

         ssize_t written = write_all(dest, out, have);
         if (written == -1) {
            (void)deflateEnd(&strm);
            free(out);
            return 0;
         }
      } while (strm.avail_out == 0);

      /* all input should be used */
      assert(strm.avail_in == 0);

   } while (flush != Z_FINISH);

   /* stream should be complete */
   assert(ret == Z_STREAM_END);

   /* clean up and return */
   (void)deflateEnd(&strm);
   free(out);
   return compressed_size;
}
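
/* For reference, cache_put() below lays an entry file out as:
 *
 *    [driver_keys_blob]               (version/driver id/gpu name/flags)
 *    [uint32_t metadata type]
 *    [uint32_t num_keys + keys]       (CACHE_ITEM_TYPE_GLSL only)
 *    [struct cache_entry_file_data]   (crc32 + uncompressed_size)
 *    [deflate-compressed data]
 *
 * disk_cache_get() parses this same layout in order.
 */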
static struct disk_cache_put_job *
create_put_job(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
      malloc(sizeof(struct disk_cache_put_job) + size);

   if (dc_job) {
      dc_job->cache = cache;
      memcpy(dc_job->key, key, sizeof(cache_key));
      dc_job->data = dc_job + 1;
      memcpy(dc_job->data, data, size);
      dc_job->size = size;

      /* Copy the cache item metadata */
      if (cache_item_metadata) {
         dc_job->cache_item_metadata.type = cache_item_metadata->type;
         if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
            dc_job->cache_item_metadata.num_keys =
               cache_item_metadata->num_keys;
            dc_job->cache_item_metadata.keys = (cache_key *)
               malloc(cache_item_metadata->num_keys * sizeof(cache_key));

            if (!dc_job->cache_item_metadata.keys)
               goto fail;

            memcpy(dc_job->cache_item_metadata.keys,
                   cache_item_metadata->keys,
                   sizeof(cache_key) * cache_item_metadata->num_keys);
         }
      } else {
         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
         dc_job->cache_item_metadata.keys = NULL;
      }
   }

   return dc_job;

fail:
   free(dc_job);

   return NULL;
}
static void
destroy_put_job(void *job, int thread_index)
{
   if (job) {
      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
      free(dc_job->cache_item_metadata.keys);

      free(job);
   }
}
struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};
static void
cache_put(void *job, int thread_index)
{
   int fd = -1, fd_final = -1, err, ret;
   unsigned i = 0;
   char *filename = NULL, *filename_tmp = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   filename = get_cache_file(dc_job->cache, dc_job->key);
   if (filename == NULL)
      goto done;

   /* If the cache is too large, evict something else first. */
   while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
          i < 8) {
      evict_lru_item(dc_job->cache);
      i++;
   }

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
#ifdef HAVE_FLOCK
   err = flock(fd, LOCK_EX | LOCK_NB);
#else
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */

   /* Write the driver_keys_blob; this can be used to find information about
    * the mesa version that produced the entry or deal with hash collisions,
    * should that ever become a real problem.
    */
   ret = write_all(fd, dc_job->cache->driver_keys_blob,
                   dc_job->cache->driver_keys_blob_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   ret = write_all(fd, &dc_job->cache_item_metadata.type,
                   sizeof(uint32_t));
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
                      sizeof(uint32_t));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }

      ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
                      dc_job->cache_item_metadata.num_keys *
                      sizeof(cache_key));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }
   }

   /* Create CRC of the data. We will read this when restoring the cache and
    * use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
   cf_data.uncompressed_size = dc_job->size;

   size_t cf_data_size = sizeof(cf_data);
   ret = write_all(fd, &cf_data, cf_data_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
                                                fd, filename_tmp);
   if (file_size == 0) {
      unlink(filename_tmp);
      goto done;
   }
   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file. */
      unlink(filename);
      goto done;
   }

   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   free(filename);
}
void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      return;
   }

   if (cache->path_init_failed)
      return;

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size, cache_item_metadata);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job, dc_job->size);
   }
}
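
/* A round-trip sketch (hypothetical blob buffer; error handling elided).
 * Writes are asynchronous, so a test must drain the queue before expecting
 * disk_cache_get() to hit:
 *
 *    cache_key key;
 *    disk_cache_compute_key(cache, blob, blob_size, key);
 *    disk_cache_put(cache, key, blob, blob_size, NULL);
 *    util_queue_finish(&cache->cache_queue);
 *    size_t size;
 *    uint8_t *data = disk_cache_get(cache, key, &size);
 *    ...
 *    free(data);
 */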
/**
 * Decompresses cache entry, returns true if successful.
 */
static bool
inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                   uint8_t *out_data, size_t out_data_size)
{
   z_stream strm;

   /* allocate inflate state */
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = in_data;
   strm.avail_in = in_data_size;
   strm.next_out = out_data;
   strm.avail_out = out_data_size;

   int ret = inflateInit(&strm);
   if (ret != Z_OK)
      return false;

   ret = inflate(&strm, Z_NO_FLUSH);
   assert(ret != Z_STREAM_ERROR); /* state not clobbered */

   /* Unless there was an error we should have decompressed everything in one
    * go as we know the uncompressed file size.
    */
   if (ret != Z_STREAM_END) {
      (void)inflateEnd(&strm);
      return false;
   }
   assert(strm.avail_out == 0);

   /* clean up and return */
   (void)inflateEnd(&strm);
   return true;
}
void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   int fd = -1, ret;
   struct stat sb;
   char *filename = NULL;
   uint8_t *data = NULL;
   uint8_t *uncompressed_data = NULL;
   uint8_t *file_header = NULL;

   if (size)
      *size = 0;

   if (cache->blob_get_cb) {
      /* This is what Android EGL defines as the maxValueSize in egl_cache_t
       * class implementation.
       */
      const signed long max_blob_size = 64 * 1024;
      void *blob = malloc(max_blob_size);
      if (!blob)
         return NULL;

      signed long bytes =
         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);

      if (!bytes) {
         free(blob);
         return NULL;
      }

      if (size)
         *size = bytes;
      return blob;
   }

   filename = get_cache_file(cache, key);
   if (filename == NULL)
      goto fail;

   fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   size_t ck_size = cache->driver_keys_blob_size;
   file_header = malloc(ck_size);
   if (!file_header)
      goto fail;

   if (sb.st_size < ck_size)
      goto fail;

   ret = read_all(fd, file_header, ck_size);
   if (ret == -1)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   size_t cache_item_md_size = sizeof(uint32_t);
   uint32_t md_type;
   ret = read_all(fd, &md_type, cache_item_md_size);
   if (ret == -1)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys;
      cache_item_md_size += sizeof(uint32_t);
      ret = read_all(fd, &num_keys, sizeof(uint32_t));
      if (ret == -1)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders; they are not used by Mesa so just skip them for
       * now.
       * TODO: pass the metadata back to the caller and do some basic
       * sanity checks.
       */
      cache_item_md_size += num_keys * sizeof(cache_key);
      ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
      if (ret == -1)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data cf_data;
   size_t cf_data_size = sizeof(cf_data);
   ret = read_all(fd, &cf_data, cf_data_size);
   if (ret == -1)
      goto fail;

   /* Load the actual cache data. */
   size_t cache_data_size =
      sb.st_size - cf_data_size - ck_size - cache_item_md_size;
   ret = read_all(fd, data, cache_data_size);
   if (ret == -1)
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data.uncompressed_size);
   if (!uncompressed_data)
      goto fail;

   if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
                           cf_data.uncompressed_size))
      goto fail;

   /* Check the data for corruption */
   if (cf_data.crc32 != util_hash_crc32(uncompressed_data,
                                        cf_data.uncompressed_size))
      goto fail;

   free(data);
   free(filename);
   free(file_header);
   close(fd);

   if (size)
      *size = cf_data.uncompressed_size;

   return uncompressed_data;

 fail:
   if (data)
      free(data);
   if (uncompressed_data)
      free(uncompressed_data);
   if (filename)
      free(filename);
   if (file_header)
      free(file_header);
   if (fd != -1)
      close(fd);

   return NULL;
}
void
disk_cache_put_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
      return;
   }

   if (cache->path_init_failed)
      return;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   memcpy(entry, key, CACHE_KEY_SIZE);
}
/* This function lets us test whether a given key was previously
 * stored in the cache with disk_cache_put_key(). The implementation
 * is efficient by not using syscalls or hitting the disk. It's not
 * race-free, but the races are benign. If we race with someone else
 * calling disk_cache_put_key, then that's just an extra cache miss and an
 * extra recompile.
 */
bool
disk_cache_has_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_get_cb) {
      uint32_t blob;
      return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
   }

   if (cache->path_init_failed)
      return false;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
}
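
/* For example, a driver can record a compile cheaply and later decide
 * whether a blocking disk read is worthwhile:
 *
 *    disk_cache_put_key(cache, key);
 *    ...
 *    if (disk_cache_has_key(cache, key))
 *       ... the entry is likely present; call disk_cache_get() ...
 */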
void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
                     cache->driver_keys_blob_size);
   _mesa_sha1_update(&ctx, data, size);
   _mesa_sha1_final(&ctx, key);
}
void
disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
                         disk_cache_get_cb get)
{
   cache->blob_put_cb = put;
   cache->blob_get_cb = get;
}
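
/* A sketch of routing the cache through external storage, assuming the
 * callback typedefs in disk_cache.h mirror the EGL_ANDROID_blob_cache
 * set/get signatures (my_put/my_get are hypothetical):
 *
 *    static void my_put(const void *key, signed long key_size,
 *                       const void *value, signed long value_size);
 *    static signed long my_get(const void *key, signed long key_size,
 *                              void *value, signed long value_size);
 *
 *    disk_cache_set_callbacks(cache, my_put, my_get);
 *
 * Once set, disk_cache_put()/disk_cache_get() bypass the on-disk path and
 * route blobs through these callbacks, as the checks above show.
 */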
#endif /* ENABLE_SHADER_CACHE */