radv: convert all COMPUTE operations to the RADV_META_SAVE_XXX flags
src/amd/vulkan/radv_meta.c
/*
 * Copyright © 2016 Red Hat
 * based on intel anv code:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_meta.h"

#include <fcntl.h>
#include <limits.h>
#include <pwd.h>
#include <sys/stat.h>

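/* Callers must pass exactly one of RADV_META_SAVE_GRAPHICS_PIPELINE or
 * RADV_META_SAVE_COMPUTE_PIPELINE (the asserts below enforce this),
 * optionally combined with the descriptor, push-constant and render-pass
 * save flags.
 */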
void
radv_meta_save(struct radv_meta_saved_state *state,
	       struct radv_cmd_buffer *cmd_buffer, uint32_t flags)
{
	assert(flags & (RADV_META_SAVE_GRAPHICS_PIPELINE |
			RADV_META_SAVE_COMPUTE_PIPELINE));

	state->flags = flags;

	if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
		assert(!(state->flags & RADV_META_SAVE_COMPUTE_PIPELINE));

		state->old_pipeline = cmd_buffer->state.pipeline;

		/* Save all viewports. */
		state->viewport.count = cmd_buffer->state.dynamic.viewport.count;
		typed_memcpy(state->viewport.viewports,
			     cmd_buffer->state.dynamic.viewport.viewports,
			     MAX_VIEWPORTS);

		/* Save all scissors. */
		state->scissor.count = cmd_buffer->state.dynamic.scissor.count;
		typed_memcpy(state->scissor.scissors,
			     cmd_buffer->state.dynamic.scissor.scissors,
			     MAX_SCISSORS);

		/* The most common meta operations all want to have the
		 * viewport reset and any scissors disabled. The rest of the
		 * dynamic state should have no effect.
		 */
		cmd_buffer->state.dynamic.viewport.count = 0;
		cmd_buffer->state.dynamic.scissor.count = 0;
		cmd_buffer->state.dirty |= (1 << VK_DYNAMIC_STATE_VIEWPORT) |
					   (1 << VK_DYNAMIC_STATE_SCISSOR);
	}

	if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
		assert(!(state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE));

		state->old_pipeline = cmd_buffer->state.compute_pipeline;
	}

	if (state->flags & RADV_META_SAVE_DESCRIPTORS) {
		state->old_descriptor_set0 = cmd_buffer->state.descriptors[0];
	}

	if (state->flags & RADV_META_SAVE_CONSTANTS) {
		memcpy(state->push_constants, cmd_buffer->push_constants,
		       MAX_PUSH_CONSTANTS_SIZE);
	}

	if (state->flags & RADV_META_SAVE_PASS) {
		state->pass = cmd_buffer->state.pass;
		state->subpass = cmd_buffer->state.subpass;
		state->framebuffer = cmd_buffer->state.framebuffer;
		state->attachments = cmd_buffer->state.attachments;
		state->render_area = cmd_buffer->state.render_area;
	}
}

void
radv_meta_restore(const struct radv_meta_saved_state *state,
		  struct radv_cmd_buffer *cmd_buffer)
{
	if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
		radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
				     VK_PIPELINE_BIND_POINT_GRAPHICS,
				     radv_pipeline_to_handle(state->old_pipeline));

		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;

		/* Restore all viewports. */
		cmd_buffer->state.dynamic.viewport.count = state->viewport.count;
		typed_memcpy(cmd_buffer->state.dynamic.viewport.viewports,
			     state->viewport.viewports,
			     MAX_VIEWPORTS);

		/* Restore all scissors. */
		cmd_buffer->state.dynamic.scissor.count = state->scissor.count;
		typed_memcpy(cmd_buffer->state.dynamic.scissor.scissors,
			     state->scissor.scissors,
			     MAX_SCISSORS);

		cmd_buffer->state.dirty |= (1 << VK_DYNAMIC_STATE_VIEWPORT) |
					   (1 << VK_DYNAMIC_STATE_SCISSOR);
	}

	if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
		radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
				     VK_PIPELINE_BIND_POINT_COMPUTE,
				     radv_pipeline_to_handle(state->old_pipeline));
	}

	if (state->flags & RADV_META_SAVE_DESCRIPTORS) {
		cmd_buffer->state.descriptors[0] = state->old_descriptor_set0;
		cmd_buffer->state.descriptors_dirty |= (1 << 0);
	}

	if (state->flags & RADV_META_SAVE_CONSTANTS) {
		memcpy(cmd_buffer->push_constants, state->push_constants,
		       MAX_PUSH_CONSTANTS_SIZE);
		cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;

		if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
			cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_ALL_GRAPHICS;
		}
	}

	if (state->flags & RADV_META_SAVE_PASS) {
		cmd_buffer->state.pass = state->pass;
		cmd_buffer->state.subpass = state->subpass;
		cmd_buffer->state.framebuffer = state->framebuffer;
		cmd_buffer->state.attachments = state->attachments;
		cmd_buffer->state.render_area = state->render_area;
		if (state->subpass)
			radv_emit_framebuffer_state(cmd_buffer);
	}
}
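
/* Typical usage, sketched from how the compute-based meta operations pair
 * these calls after this change (illustrative outline, not code from this
 * file; the exact flag set varies per operation):
 *
 *	struct radv_meta_saved_state saved_state;
 *
 *	radv_meta_save(&saved_state, cmd_buffer,
 *		       RADV_META_SAVE_COMPUTE_PIPELINE |
 *		       RADV_META_SAVE_DESCRIPTORS |
 *		       RADV_META_SAVE_CONSTANTS);
 *
 *	... bind the meta compute pipeline, descriptor set and push
 *	    constants, then dispatch ...
 *
 *	radv_meta_restore(&saved_state, cmd_buffer);
 */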

VkImageViewType
radv_meta_get_view_type(const struct radv_image *image)
{
	switch (image->type) {
	case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
	case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
	case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
	default:
		unreachable("bad VkImageType");
	}
}

/**
 * When creating a destination VkImageView, this function provides the needed
 * VkImageViewCreateInfo::subresourceRange::baseArrayLayer.
 */
uint32_t
radv_meta_get_iview_layer(const struct radv_image *dest_image,
			  const VkImageSubresourceLayers *dest_subresource,
			  const VkOffset3D *dest_offset)
{
	switch (dest_image->type) {
	case VK_IMAGE_TYPE_1D:
	case VK_IMAGE_TYPE_2D:
		return dest_subresource->baseArrayLayer;
	case VK_IMAGE_TYPE_3D:
		/* HACK: Vulkan does not allow attaching a 3D image to a framebuffer,
		 * but meta does it anyway. When doing so, we translate the
		 * destination's z offset into an array offset.
		 */
		return dest_offset->z;
	default:
		assert(!"bad VkImageType");
		return 0;
	}
}
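
/* Illustrative sketch (not code from this file): a meta pass writing to one
 * slice of a 3D image creates its destination view as a 2D view whose base
 * layer comes from the helper above; unrelated fields are elided.
 *
 *	const VkImageViewCreateInfo iview_info = {
 *		.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
 *		.viewType = VK_IMAGE_VIEW_TYPE_2D,
 *		...
 *		.subresourceRange = {
 *			...
 *			.baseArrayLayer =
 *				radv_meta_get_iview_layer(dest_image,
 *							  &region->dstSubresource,
 *							  &region->dstOffsets[0]),
 *			.layerCount = 1,
 *		},
 *	};
 */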

static void *
meta_alloc(void *_device, size_t size, size_t alignment,
	   VkSystemAllocationScope allocationScope)
{
	struct radv_device *device = _device;
	return device->alloc.pfnAllocation(device->alloc.pUserData, size, alignment,
					   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}

static void *
meta_realloc(void *_device, void *original, size_t size, size_t alignment,
	     VkSystemAllocationScope allocationScope)
{
	struct radv_device *device = _device;
	return device->alloc.pfnReallocation(device->alloc.pUserData, original,
					     size, alignment,
					     VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}

static void
meta_free(void *_device, void *data)
{
	struct radv_device *device = _device;
	device->alloc.pfnFree(device->alloc.pUserData, data);
}

static bool
radv_builtin_cache_path(char *path)
{
	char *xdg_cache_home = getenv("XDG_CACHE_HOME");
	const char *suffix = "/radv_builtin_shaders";
	const char *suffix2 = "/.cache/radv_builtin_shaders";
	struct passwd pwd, *result;
	char path2[PATH_MAX + 1]; /* PATH_MAX is not a hard limit, but it suffices here. */

	if (xdg_cache_home) {
		if (strlen(xdg_cache_home) + strlen(suffix) > PATH_MAX)
			return false;

		strcpy(path, xdg_cache_home);
		strcat(path, suffix);
		return true;
	}

	getpwuid_r(getuid(), &pwd, path2, PATH_MAX - strlen(suffix2), &result);
	if (!result)
		return false;

	strcpy(path, pwd.pw_dir);
	strcat(path, "/.cache");
	mkdir(path, 0755);

	strcat(path, suffix);
	return true;
}
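
/* Resolved locations for the two branches above (paths are illustrative):
 *
 *	XDG_CACHE_HOME=/home/alice/.cache  ->  /home/alice/.cache/radv_builtin_shaders
 *	unset, with home dir /home/alice   ->  /home/alice/.cache/radv_builtin_shaders
 */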

static void
radv_load_meta_pipeline(struct radv_device *device)
{
	char path[PATH_MAX + 1];
	struct stat st;
	void *data = NULL;

	if (!radv_builtin_cache_path(path))
		return;

	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return;
	if (fstat(fd, &st))
		goto fail;
	data = malloc(st.st_size);
	if (!data)
		goto fail;
	if (read(fd, data, st.st_size) == -1)
		goto fail;

	radv_pipeline_cache_load(&device->meta_state.cache, data, st.st_size);
fail:
	free(data);
	close(fd);
}

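/* Write the cache out with a mkstemp() + rename() pair: the temporary file
 * is fully written before being renamed over the final path, so a concurrent
 * reader sees either the old cache or the complete new one, never a partial
 * file.
 */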
static void
radv_store_meta_pipeline(struct radv_device *device)
{
	char path[PATH_MAX + 1], path2[PATH_MAX + 7];
	size_t size;
	void *data = NULL;

	if (!device->meta_state.cache.modified)
		return;

	if (radv_GetPipelineCacheData(radv_device_to_handle(device),
				      radv_pipeline_cache_to_handle(&device->meta_state.cache),
				      &size, NULL))
		return;

	if (!radv_builtin_cache_path(path))
		return;

	strcpy(path2, path);
	strcat(path2, "XXXXXX");
	int fd = mkstemp(path2);
	if (fd < 0)
		return;
	data = malloc(size);
	if (!data)
		goto fail;

	if (radv_GetPipelineCacheData(radv_device_to_handle(device),
				      radv_pipeline_cache_to_handle(&device->meta_state.cache),
				      &size, data))
		goto fail;
	if (write(fd, data, size) == -1)
		goto fail;

	rename(path2, path);
fail:
	free(data);
	close(fd);
	unlink(path2);
}

VkResult
radv_device_init_meta(struct radv_device *device)
{
	VkResult result;

	device->meta_state.alloc = (VkAllocationCallbacks) {
		.pUserData = device,
		.pfnAllocation = meta_alloc,
		.pfnReallocation = meta_realloc,
		.pfnFree = meta_free,
	};

	device->meta_state.cache.alloc = device->meta_state.alloc;
	radv_pipeline_cache_init(&device->meta_state.cache, device);
	radv_load_meta_pipeline(device);

	result = radv_device_init_meta_clear_state(device);
	if (result != VK_SUCCESS)
		goto fail_clear;

	result = radv_device_init_meta_resolve_state(device);
	if (result != VK_SUCCESS)
		goto fail_resolve;

	result = radv_device_init_meta_blit_state(device);
	if (result != VK_SUCCESS)
		goto fail_blit;

	result = radv_device_init_meta_blit2d_state(device);
	if (result != VK_SUCCESS)
		goto fail_blit2d;

	result = radv_device_init_meta_bufimage_state(device);
	if (result != VK_SUCCESS)
		goto fail_bufimage;

	result = radv_device_init_meta_depth_decomp_state(device);
	if (result != VK_SUCCESS)
		goto fail_depth_decomp;

	result = radv_device_init_meta_buffer_state(device);
	if (result != VK_SUCCESS)
		goto fail_buffer;

	result = radv_device_init_meta_query_state(device);
	if (result != VK_SUCCESS)
		goto fail_query;

	result = radv_device_init_meta_fast_clear_flush_state(device);
	if (result != VK_SUCCESS)
		goto fail_fast_clear;

	result = radv_device_init_meta_resolve_compute_state(device);
	if (result != VK_SUCCESS)
		goto fail_resolve_compute;

	result = radv_device_init_meta_resolve_fragment_state(device);
	if (result != VK_SUCCESS)
		goto fail_resolve_fragment;
	return VK_SUCCESS;

	/* Unwind in the reverse of the init order above; each label tears
	 * down everything that was successfully initialized before the
	 * failing step.
	 */
fail_resolve_fragment:
	radv_device_finish_meta_resolve_compute_state(device);
fail_resolve_compute:
	radv_device_finish_meta_fast_clear_flush_state(device);
fail_fast_clear:
	radv_device_finish_meta_query_state(device);
fail_query:
	radv_device_finish_meta_buffer_state(device);
fail_buffer:
	radv_device_finish_meta_depth_decomp_state(device);
fail_depth_decomp:
	radv_device_finish_meta_bufimage_state(device);
fail_bufimage:
	radv_device_finish_meta_blit2d_state(device);
fail_blit2d:
	radv_device_finish_meta_blit_state(device);
fail_blit:
	radv_device_finish_meta_resolve_state(device);
fail_resolve:
	radv_device_finish_meta_clear_state(device);
fail_clear:
	radv_pipeline_cache_finish(&device->meta_state.cache);
	return result;
}

void
radv_device_finish_meta(struct radv_device *device)
{
	radv_device_finish_meta_clear_state(device);
	radv_device_finish_meta_resolve_state(device);
	radv_device_finish_meta_blit_state(device);
	radv_device_finish_meta_blit2d_state(device);
	radv_device_finish_meta_bufimage_state(device);
	radv_device_finish_meta_depth_decomp_state(device);
	radv_device_finish_meta_query_state(device);
	radv_device_finish_meta_buffer_state(device);
	radv_device_finish_meta_fast_clear_flush_state(device);
	radv_device_finish_meta_resolve_compute_state(device);
	radv_device_finish_meta_resolve_fragment_state(device);

	radv_store_meta_pipeline(device);
	radv_pipeline_cache_finish(&device->meta_state.cache);
}

nir_ssa_def *radv_meta_gen_rect_vertices_comp2(nir_builder *vs_b, nir_ssa_def *comp2)
{
	nir_intrinsic_instr *vertex_id = nir_intrinsic_instr_create(vs_b->shader, nir_intrinsic_load_vertex_id_zero_base);
	nir_ssa_dest_init(&vertex_id->instr, &vertex_id->dest, 1, 32, "vertexid");
	nir_builder_instr_insert(vs_b, &vertex_id->instr);

	/* vertex 0: (-1.0, -1.0)
	 * vertex 1: (-1.0,  1.0)
	 * vertex 2: ( 1.0, -1.0)
	 *
	 * so channel 0 is vertex_id != 2 ? -1.0 : 1.0
	 *    channel 1 is vertex_id != 1 ? -1.0 : 1.0
	 */
	nir_ssa_def *c0cmp = nir_ine(vs_b, &vertex_id->dest.ssa,
				     nir_imm_int(vs_b, 2));
	nir_ssa_def *c1cmp = nir_ine(vs_b, &vertex_id->dest.ssa,
				     nir_imm_int(vs_b, 1));

	nir_ssa_def *comp[4];
	comp[0] = nir_bcsel(vs_b, c0cmp,
			    nir_imm_float(vs_b, -1.0),
			    nir_imm_float(vs_b, 1.0));

	comp[1] = nir_bcsel(vs_b, c1cmp,
			    nir_imm_float(vs_b, -1.0),
			    nir_imm_float(vs_b, 1.0));
	comp[2] = comp2;
	comp[3] = nir_imm_float(vs_b, 1.0);
	nir_ssa_def *outvec = nir_vec(vs_b, comp, 4);

	return outvec;
}
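
/* Only three corners are emitted here: RADV's meta pipelines are created to
 * draw them as a hardware rectangle-list primitive, in which the fourth
 * corner is inferred, so a single 3-vertex draw covers the whole rect.
 */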

nir_ssa_def *radv_meta_gen_rect_vertices(nir_builder *vs_b)
{
	return radv_meta_gen_rect_vertices_comp2(vs_b, nir_imm_float(vs_b, 0.0));
}

/* vertex shader that generates its own vertices from the vertex id,
 * so no vertex buffers need to be bound */
nir_shader *
radv_meta_build_nir_vs_generate_vertices(void)
{
	const struct glsl_type *vec4 = glsl_vec4_type();

	nir_builder b;
	nir_variable *v_position;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "meta_vs_gen_verts");

	nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&b);

	v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
					 "gl_Position");
	v_position->data.location = VARYING_SLOT_POS;

	nir_store_var(&b, v_position, outvec, 0xf);

	return b.shader;
}
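
/* Sketch of the usual call site (not code from this file): since the shader
 * above derives everything from the vertex id, meta passes draw with no
 * vertex buffers bound, e.g.
 *
 *	radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);
 */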

nir_shader *
radv_meta_build_nir_fs_noop(void)
{
	nir_builder b;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
	b.shader->info.name = ralloc_asprintf(b.shader, "meta_noop_fs");

	return b.shader;
}

void radv_meta_build_resolve_shader_core(nir_builder *b,
					 bool is_integer,
					 int samples,
					 nir_variable *input_img,
					 nir_variable *color,
					 nir_ssa_def *img_coord)
{
	/* Fetch sample 0 unconditionally; for non-integer multisampled
	 * sources, average all of the samples only when they are not all
	 * identical.
	 */
	nir_ssa_def *tmp;
	nir_if *outer_if = NULL;

	nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
	tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
	tex->op = nir_texop_txf_ms;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(img_coord);
	tex->src[1].src_type = nir_tex_src_ms_index;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0));
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;
	tex->texture = nir_deref_var_create(tex, input_img);
	tex->sampler = NULL;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(b, &tex->instr);

	tmp = &tex->dest.ssa;

	if (!is_integer && samples > 1) {
		nir_tex_instr *tex_all_same = nir_tex_instr_create(b->shader, 1);
		tex_all_same->sampler_dim = GLSL_SAMPLER_DIM_MS;
		tex_all_same->op = nir_texop_samples_identical;
		tex_all_same->src[0].src_type = nir_tex_src_coord;
		tex_all_same->src[0].src = nir_src_for_ssa(img_coord);
		tex_all_same->dest_type = nir_type_float;
		tex_all_same->is_array = false;
		tex_all_same->coord_components = 2;
		tex_all_same->texture = nir_deref_var_create(tex_all_same, input_img);
		tex_all_same->sampler = NULL;

		nir_ssa_dest_init(&tex_all_same->instr, &tex_all_same->dest, 1, 32, "tex");
		nir_builder_instr_insert(b, &tex_all_same->instr);

		/* True when the samples are known not to be all identical;
		 * only then is the full average worth computing.
		 */
		nir_ssa_def *not_all_same = nir_ieq(b, &tex_all_same->dest.ssa, nir_imm_int(b, 0));
		nir_if *if_stmt = nir_if_create(b->shader);
		if_stmt->condition = nir_src_for_ssa(not_all_same);
		nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

		b->cursor = nir_after_cf_list(&if_stmt->then_list);
		for (int i = 1; i < samples; i++) {
			nir_tex_instr *tex_add = nir_tex_instr_create(b->shader, 2);
			tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS;
			tex_add->op = nir_texop_txf_ms;
			tex_add->src[0].src_type = nir_tex_src_coord;
			tex_add->src[0].src = nir_src_for_ssa(img_coord);
			tex_add->src[1].src_type = nir_tex_src_ms_index;
			tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(b, i));
			tex_add->dest_type = nir_type_float;
			tex_add->is_array = false;
			tex_add->coord_components = 2;
			tex_add->texture = nir_deref_var_create(tex_add, input_img);
			tex_add->sampler = NULL;

			nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex");
			nir_builder_instr_insert(b, &tex_add->instr);

			tmp = nir_fadd(b, tmp, &tex_add->dest.ssa);
		}

		tmp = nir_fdiv(b, tmp, nir_imm_float(b, samples));
		nir_store_var(b, color, tmp, 0xf);
		b->cursor = nir_after_cf_list(&if_stmt->else_list);
		outer_if = if_stmt;
	}
	/* Else (or single-sampled / integer) path: sample 0 is the result. */
	nir_store_var(b, color, &tex->dest.ssa, 0xf);

	if (outer_if)
		b->cursor = nir_after_cf_node(&outer_if->cf_node);
}
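
/* GLSL-style pseudocode for the shader fragment built above (sketch only;
 * is_integer and samples are compile-time values here, and
 * allSamplesIdentical() is a made-up helper standing in for
 * nir_texop_samples_identical):
 *
 *	color = texelFetch(input_img, img_coord, 0);
 *	if (!is_integer && samples > 1 &&
 *	    !allSamplesIdentical(input_img, img_coord)) {
 *		vec4 sum = color;
 *		for (int i = 1; i < samples; i++)
 *			sum += texelFetch(input_img, img_coord, i);
 *		color = sum / samples;
 *	}
 */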