/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifdef ENABLE_SHADER_CACHE

#include <ctype.h>
#include <ftw.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <pwd.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/mman.h>
#include "zlib.h"

#include "util/crc32.h"
#include "util/u_atomic.h"
#include "util/u_queue.h"
#include "util/mesa-sha1.h"
#include "util/ralloc.h"
#include "main/errors.h"
#include "util/macros.h"

#include "disk_cache.h"
/* Number of bits to mask off from a cache key to get an index. */
#define CACHE_INDEX_KEY_BITS 16

/* Mask for computing an index from a key. */
#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1)

/* The number of keys that can be stored in the index. */
#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)
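
/* Illustrative sketch: the index slot for a key is its first 32 bits masked
 * down to CACHE_INDEX_KEY_BITS, exactly as disk_cache_put_key() and
 * disk_cache_has_key() compute it below:
 *
 *    int i = *(const uint32_t *) key & CACHE_INDEX_KEY_MASK;
 */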
struct disk_cache {
   /* The path to the cache directory. */
   char *path;

   /* Thread queue for compressing and writing cache entries to disk */
   struct util_queue cache_queue;

   /* A pointer to the mmapped index file within the cache directory. */
   uint8_t *index_mmap;
   size_t index_mmap_size;

   /* Pointer to total size of all objects in cache (within index_mmap) */
   uint64_t *size;

   /* Pointer to stored keys, (within index_mmap). */
   unsigned char *stored_keys;

   /* Maximum size of all cached objects (in bytes). */
   uint64_t max_size;
};
struct disk_cache_put_job {
   struct util_queue_fence fence;

   struct disk_cache *cache;

   /* Key describing the cache entry to write. */
   cache_key key;

   /* Copy of cache data to be compressed and written. */
   void *data;

   /* Size of data to be compressed and written. */
   size_t size;
};
/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *         -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                         "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}
/* Concatenate an existing path and a new name to form a new path.  If the
 * new path does not exist as a directory, create it then return the
 * resulting name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path> does not exist or is not a directory
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;
   struct stat sb;

   if (stat(path, &sb) != 0 || ! S_ISDIR(sb.st_mode))
      return NULL;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_if_needed(new_path) == 0)
      return new_path;
   else
      return NULL;
}
/* Callback for nftw() below: unlink regular files and remove directories
 * once their contents have been deleted.
 */
static int
remove_dir(const char *fpath, const struct stat *sb,
           int typeflag, struct FTW *ftwbuf)
{
   if (S_ISREG(sb->st_mode))
      unlink(fpath);
   else if (S_ISDIR(sb->st_mode))
      rmdir(fpath);

   return 0;
}
/* Remove any cache directory whose name does not match the current
 * timestamp, i.e. directories created by other Mesa versions.
 */
static void
remove_old_cache_directories(void *mem_ctx, const char *path,
                             const char *timestamp)
{
   DIR *dir = opendir(path);
   if (dir == NULL)
      return;

   struct dirent *d_entry;
   while ((d_entry = readdir(dir)) != NULL)
   {
      char *full_path =
         ralloc_asprintf(mem_ctx, "%s/%s", path, d_entry->d_name);

      struct stat sb;
      if (stat(full_path, &sb) == 0 && S_ISDIR(sb.st_mode) &&
          strcmp(d_entry->d_name, timestamp) != 0 &&
          strcmp(d_entry->d_name, "..") != 0 &&
          strcmp(d_entry->d_name, ".") != 0) {
         nftw(full_path, remove_dir, 20, FTW_DEPTH);
      }
   }

   closedir(dir);
}
static char *
create_mesa_cache_dir(void *mem_ctx, const char *path, const char *timestamp,
                      const char *gpu_name)
{
   char *new_path = concatenate_and_mkdir(mem_ctx, path, "mesa");
   if (new_path == NULL)
      return NULL;

   /* Create a parent architecture directory so that we don't remove cache
    * files for other architectures. In theory we could share the cache
    * between architectures but we have no way of knowing if they were
    * created by a compatible Mesa version.
    */
   new_path = concatenate_and_mkdir(mem_ctx, new_path, get_arch_bitness_str());
   if (new_path == NULL)
      return NULL;

   /* Remove cache directories for old Mesa versions */
   remove_old_cache_directories(mem_ctx, new_path, timestamp);

   new_path = concatenate_and_mkdir(mem_ctx, new_path, timestamp);
   if (new_path == NULL)
      return NULL;

   new_path = concatenate_and_mkdir(mem_ctx, new_path, gpu_name);
   if (new_path == NULL)
      return NULL;

   return new_path;
}
struct disk_cache *
disk_cache_create(const char *gpu_name, const char *timestamp)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *path, *max_size_str;
   uint64_t max_size;
   int fd = -1;
   struct stat sb;
   struct statvfs vfs = { 0 };
   size_t size;

   /* If running as a user other than the real user, disable the cache. */
   if (geteuid() != getuid())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   /* At user request, disable shader cache entirely. */
   if (getenv("MESA_GLSL_CACHE_DISABLE"))
      goto fail;

   /* Determine path for cache based on the first defined name as follows:
    *
    *   $MESA_GLSL_CACHE_DIR
    *   $XDG_CACHE_HOME/mesa
    *   <pwd.pw_dir>/.cache/mesa
    */
   path = getenv("MESA_GLSL_CACHE_DIR");
   if (path) {
      if (mkdir_if_needed(path) == -1)
         goto fail;

      path = create_mesa_cache_dir(local, path, timestamp, gpu_name);
      if (path == NULL)
         goto fail;
   }

   if (path == NULL) {
      char *xdg_cache_home = getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            goto fail;

         path = create_mesa_cache_dir(local, xdg_cache_home, timestamp,
                                      gpu_name);
         if (path == NULL)
            goto fail;
      }
   }

   if (path == NULL) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(local, buf_size);
         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            goto fail;
         }
      }

      path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache");
      if (path == NULL)
         goto fail;

      path = create_mesa_cache_dir(local, path, timestamp, gpu_name);
      if (path == NULL)
         goto fail;
   }

   cache = ralloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   cache->path = ralloc_strdup(cache, path);
   if (cache->path == NULL)
      goto fail;

   path = ralloc_asprintf(local, "%s/index", cache->path);
   if (path == NULL)
      goto fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   /* Force the index file to be the expected size. */
   size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
      if (ftruncate(fd, size) == -1)
         goto fail;
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto fail;
   cache->index_mmap_size = size;

   close(fd);

   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);

   max_size = 0;

   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }

   /* Default to 1GB or 10% of filesystem for maximum cache size. */
   if (max_size == 0) {
      statvfs(path, &vfs);
      max_size = MAX2(1024*1024*1024, vfs.f_blocks * vfs.f_bsize / 10);
   }

   cache->max_size = max_size;

   /* A limit of 32 jobs was chosen as observations of Deus Ex start-up times
    * showed that we reached at most 11 jobs on an Intel i5-6400 CPU@2.70GHz
    * (a fairly modest desktop CPU). 1 thread was chosen because we don't
    * really care about getting things to disk quickly just that it's not
    * blocking other tasks.
    */
   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);

   ralloc_free(local);

   return cache;

 fail:
   if (fd != -1)
      close(fd);
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}
void
disk_cache_destroy(struct disk_cache *cache)
{
   if (cache) {
      util_queue_destroy(&cache->cache_queue);
      munmap(cache->index_mmap, cache->index_mmap_size);
   }

   ralloc_free(cache);
}
/* Return a filename within the cache's directory corresponding to 'key'.
 * The returned filename is malloc'ed, so the caller is responsible for
 * freeing it.
 *
 * Returns NULL if out of memory.
 */
static char *
get_cache_file(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}
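
/* For illustration (hypothetical digest): a key whose SHA-1 formats to
 * "ab0123..." maps to the file "<cache->path>/ab/0123...", i.e. the first
 * two hex characters name a subdirectory and the remaining 38 characters
 * name the file within it.
 */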
/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * get_cache_file above.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}
/* Given a directory path and predicate function, find the entry with
 * the oldest access time in that directory for which the predicate
 * returns true.
 *
 * Returns: A malloc'ed string for the path to the chosen file, (or
 * NULL on any error). The caller should free the string when
 * finished with it.
 */
static char *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *entry;
   char *filename;
   char *lru_name = NULL;
   time_t lru_atime = 0;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   while (1) {
      entry = readdir(dir);
      if (entry == NULL)
         break;

      struct stat sb;
      if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) {
         if (!lru_atime || (sb.st_atime < lru_atime)) {
            size_t len = strlen(entry->d_name);

            if (!predicate(dir_path, &sb, entry->d_name, len))
               continue;

            char *tmp = realloc(lru_name, len + 1);
            if (tmp == NULL) {
               free(lru_name);
               closedir(dir);
               return NULL;
            }
            lru_name = tmp;

            memcpy(lru_name, entry->d_name, len + 1);
            lru_atime = sb.st_atime;
         }
      }
   }

   if (lru_name == NULL) {
      closedir(dir);
      return NULL;
   }

   if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0)
      filename = NULL;

   free(lru_name);
   closedir(dir);

   return filename;
}
/* Is entry a regular file, and not having a name with a trailing
 * ".tmp"?
 */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}
/* Returns the size of the deleted file, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct stat sb;
   char *filename;

   filename = choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (filename == NULL)
      return 0;

   if (stat(filename, &sb) == -1) {
      free(filename);
      return 0;
   }

   unlink(filename);
   free(filename);

   return sb.st_size;
}
/* Is entry a directory with a two-character name, (and not the
 * special name of "..").  We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;

   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}
static void
evict_lru_item(struct disk_cache *cache)
{
   const char hex[] = "0123456789abcdef";
   char *dir_path;
   int a, b;
   size_t size;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * Provides pseudo-LRU eviction to reduce checking all cache files.
    */
   a = rand() % 16;
   b = rand() % 16;

   if (asprintf(&dir_path, "%s/%c%c", cache->path, hex[a], hex[b]) < 0)
      return;

   size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   dir_path = choose_lru_file_matching(cache->path,
                                       is_two_character_sub_directory);
   if (dir_path == NULL)
      return;

   size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size)
      p_atomic_add(cache->size, - (uint64_t)size);
}
void
disk_cache_remove(struct disk_cache *cache, const cache_key key)
{
   struct stat sb;

   char *filename = get_cache_file(cache, key);
   if (filename == NULL) {
      return;
   }

   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_size)
      p_atomic_add(cache->size, - (uint64_t)sb.st_size);
}
/* From the zlib docs:
 *    "If the memory is available, buffer sizes on the order of 128K or 256K
 *    bytes should be used."
 */
#define BUFSIZE 256 * 1024

/**
 * Compresses cache entry in memory and writes it to disk. Returns the size
 * of the data written to disk, or 0 on any error.
 */
static size_t
deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
                          const char *filename)
{
   unsigned char out[BUFSIZE];

   /* allocate deflate state */
   z_stream strm;
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = (uint8_t *) in_data;
   strm.avail_in = in_data_size;

   int ret = deflateInit(&strm, Z_BEST_COMPRESSION);
   if (ret != Z_OK)
      return 0;

   /* compress until end of in_data */
   size_t compressed_size = 0;
   int flush;
   do {
      int remaining = in_data_size - BUFSIZE;
      flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
      in_data_size -= BUFSIZE;

      /* Run deflate() on input until the output buffer is not full (which
       * means there is no more data to deflate).
       */
      do {
         strm.avail_out = BUFSIZE;
         strm.next_out = out;

         ret = deflate(&strm, flush); /* no bad return value */
         assert(ret != Z_STREAM_ERROR); /* state not clobbered */

         size_t have = BUFSIZE - strm.avail_out;
         compressed_size += have;

         ssize_t written = 0;
         for (size_t len = 0; len < have; len += written) {
            written = write(dest, out + len, have - len);
            if (written == -1) {
               (void)deflateEnd(&strm);
               return 0;
            }
         }
      } while (strm.avail_out == 0);

      /* all input should be used */
      assert(strm.avail_in == 0);

   } while (flush != Z_FINISH);

   /* stream should be complete */
   assert(ret == Z_STREAM_END);

   /* clean up and return */
   (void)deflateEnd(&strm);
   return compressed_size;
}
static struct disk_cache_put_job *
create_put_job(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size)
{
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
      malloc(sizeof(struct disk_cache_put_job) + size);

   if (dc_job) {
      dc_job->cache = cache;
      memcpy(dc_job->key, key, sizeof(cache_key));
      dc_job->data = dc_job + 1;
      memcpy(dc_job->data, data, size);
      dc_job->size = size;
   }

   return dc_job;
}
static void
destroy_put_job(void *job, int thread_index)
{
   free(job);
}
struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};
static void
cache_put(void *job, int thread_index)
{
   int fd = -1, fd_final = -1, err, ret;
   size_t len;
   int i = 0;
   char *filename = NULL, *filename_tmp = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   filename = get_cache_file(dc_job->cache, dc_job->key);
   if (filename == NULL)
      goto done;

   /* If the cache is too large, evict something else first.  Cap the number
    * of attempts so that a failed eviction cannot loop forever.
    */
   while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
          i < 8) {
      evict_lru_item(dc_job->cache);
      i++;
   }

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
   err = flock(fd, LOCK_EX | LOCK_NB);
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    *
    * Create CRC of the data and store at the start of the file. We will
    * read this when restoring the cache and use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
   cf_data.uncompressed_size = dc_job->size;

   size_t cf_data_size = sizeof(cf_data);
   for (len = 0; len < cf_data_size; len += ret) {
      ret = write(fd, ((uint8_t *) &cf_data) + len, cf_data_size - len);
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
                                                fd, filename_tmp);
   if (file_size == 0) {
      unlink(filename_tmp);
      goto done;
   }
   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   file_size += cf_data_size;
   p_atomic_add(dc_job->cache->size, file_size);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   if (filename_tmp)
      free(filename_tmp);
   if (filename)
      free(filename);
}
void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size)
{
   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job);
   }
}
/**
 * Decompresses cache entry, returns true if successful.
 */
static bool
inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                   uint8_t *out_data, size_t out_data_size)
{
   z_stream strm;

   /* allocate inflate state */
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = in_data;
   strm.avail_in = in_data_size;
   strm.next_out = out_data;
   strm.avail_out = out_data_size;

   int ret = inflateInit(&strm);
   if (ret != Z_OK)
      return false;

   ret = inflate(&strm, Z_NO_FLUSH);
   assert(ret != Z_STREAM_ERROR); /* state not clobbered */

   /* Unless there was an error we should have decompressed everything in one
    * go as we know the uncompressed file size.
    */
   if (ret != Z_STREAM_END) {
      (void)inflateEnd(&strm);
      return false;
   }
   assert(strm.avail_out == 0);

   /* clean up and return */
   (void)inflateEnd(&strm);
   return true;
}
void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   int fd = -1, ret, len;
   struct stat sb;
   char *filename = NULL;
   uint8_t *data = NULL;
   uint8_t *uncompressed_data = NULL;

   filename = get_cache_file(cache, key);
   if (filename == NULL)
      goto fail;

   fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data cf_data;
   size_t cf_data_size = sizeof(cf_data);
   assert(sb.st_size > cf_data_size);
   for (len = 0; len < cf_data_size; len += ret) {
      ret = read(fd, ((uint8_t *) &cf_data) + len, cf_data_size - len);
      if (ret == -1)
         goto fail;
   }

   /* Load the actual cache data. */
   size_t cache_data_size = sb.st_size - cf_data_size;
   for (len = 0; len < cache_data_size; len += ret) {
      ret = read(fd, data + len, cache_data_size - len);
      if (ret == -1)
         goto fail;
   }

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data.uncompressed_size);
   if (uncompressed_data == NULL)
      goto fail;
   if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
                           cf_data.uncompressed_size))
      goto fail;

   /* Check the data for corruption */
   if (cf_data.crc32 != util_hash_crc32(uncompressed_data,
                                        cf_data.uncompressed_size))
      goto fail;

   free(data);
   free(filename);
   close(fd);

   *size = cf_data.uncompressed_size;

   return uncompressed_data;

 fail:
   if (data)
      free(data);
   if (uncompressed_data)
      free(uncompressed_data);
   if (filename)
      free(filename);
   if (fd != -1)
      close(fd);

   return NULL;
}
void
disk_cache_put_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = *key_chunk & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   memcpy(entry, key, CACHE_KEY_SIZE);
}
/* This function lets us test whether a given key was previously
 * stored in the cache with disk_cache_put_key(). The implementation
 * is efficient: it avoids syscalls and never hits the disk. It's not
 * race-free, but the races are benign. If we race with someone else
 * calling disk_cache_put_key, then that's just an extra cache miss and an
 * extra recompile.
 */
bool
disk_cache_has_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = *key_chunk & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
}
void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   _mesa_sha1_compute(data, size, key);
}

#endif /* ENABLE_SHADER_CACHE */
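
/* Illustrative usage sketch. The identifiers gpu_name, build_timestamp,
 * source, source_size, blob and blob_size below are hypothetical
 * placeholders; only the disk_cache_* calls come from this file:
 *
 *    struct disk_cache *cache = disk_cache_create(gpu_name, build_timestamp);
 *
 *    cache_key key;
 *    disk_cache_compute_key(cache, source, source_size, key);
 *
 *    size_t size;
 *    void *binary = disk_cache_get(cache, key, &size);
 *    if (binary == NULL) {
 *       // Miss: compile the shader, then store the result under the key.
 *       disk_cache_put(cache, key, blob, blob_size);
 *       disk_cache_put_key(cache, key);
 *    }
 *
 *    free(binary);   // disk_cache_get() returns malloc'ed memory.
 *    disk_cache_destroy(cache);
 *
 * disk_cache_has_key() gives a cheap, index-only hint of whether a key was
 * previously recorded with disk_cache_put_key().
 */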