radv: add a compute shader implementation for buffer to image
[mesa.git] / src / amd / vulkan / radv_meta_bufimage.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24 #include "radv_meta.h"
25 #include "nir/nir_builder.h"
26
27 /*
28 * Compute shader implementation of image->buffer copy.
29 */
30
/* Build the image->buffer copy compute shader.
 *
 * Descriptor layout: set 0, binding 0 = source 2D sampled image,
 *                    set 0, binding 1 = destination storage texel buffer.
 * Push constants: ivec2 source offset at byte 0, destination pitch at
 * byte 8 (in texels of the buffer view).
 *
 * Each invocation of the 16x16 workgroup fetches one texel at
 * (global_id + offset) from the image and stores it at linear buffer
 * index global_id.y * pitch + global_id.x.
 */
static nir_shader *
build_nir_itob_compute_shader(struct radv_device *dev)
{
	nir_builder b;
	/* Source: plain (non-array, non-shadow) 2D float image. */
	const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
								 false,
								 false,
								 GLSL_TYPE_FLOAT);
	/* Destination: float texel buffer, written via image_store. */
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_BUF,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "meta_itob_cs");
	b.shader->info->cs.local_size[0] = 16;
	b.shader->info->cs.local_size[1] = 16;
	b.shader->info->cs.local_size[2] = 1;
	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      sampler_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;

	/* global_id = workgroup_id * workgroup_size + local_invocation_id */
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info->cs.local_size[0],
						b.shader->info->cs.local_size[1],
						b.shader->info->cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

	/* Load the ivec2 source offset from push constant bytes 0..7. */
	nir_intrinsic_instr *offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	offset->num_components = 2;
	nir_ssa_dest_init(&offset->instr, &offset->dest, 2, 32, "offset");
	nir_builder_instr_insert(&b, &offset->instr);

	/* Load the destination pitch from push constant bytes 8..11. */
	nir_intrinsic_instr *stride = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	stride->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8));
	stride->num_components = 1;
	nir_ssa_dest_init(&stride->instr, &stride->dest, 1, 32, "stride");
	nir_builder_instr_insert(&b, &stride->instr);

	nir_ssa_def *img_coord = nir_iadd(&b, global_id, &offset->dest.ssa);

	/* Unfiltered fetch (txf, lod 0) of the source texel; only the first
	 * two components of img_coord are consumed (coord_components = 2). */
	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
	tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
	tex->op = nir_texop_txf;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(img_coord);
	tex->src[1].src_type = nir_tex_src_lod;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;
	tex->texture = nir_deref_var_create(tex, input_img);
	tex->sampler = NULL;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	/* Linear buffer index = y * pitch + x. */
	nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
	nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);

	nir_ssa_def *tmp = nir_imul(&b, pos_y, &stride->dest.ssa);
	tmp = nir_iadd(&b, tmp, pos_x);

	/* Buffer image stores still take a 4-component coord; splat. */
	nir_ssa_def *coord = nir_vec4(&b, tmp, tmp, tmp, tmp);

	nir_ssa_def *outval = &tex->dest.ssa;
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
	store->src[0] = nir_src_for_ssa(coord);
	store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32)); /* unused sample index */
	store->src[2] = nir_src_for_ssa(outval);
	store->variables[0] = nir_deref_var_create(store, output_img);

	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}
117
/* Image to buffer - writes go through a texel buffer, not image accessors. */
119 static VkResult
120 radv_device_init_meta_itob_state(struct radv_device *device)
121 {
122 VkResult result;
123 struct radv_shader_module cs = { .nir = NULL };
124
125 zero(device->meta_state.itob);
126
127 cs.nir = build_nir_itob_compute_shader(device);
128
129 /*
130 * two descriptors one for the image being sampled
131 * one for the buffer being written.
132 */
133 VkDescriptorSetLayoutCreateInfo ds_create_info = {
134 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
135 .bindingCount = 2,
136 .pBindings = (VkDescriptorSetLayoutBinding[]) {
137 {
138 .binding = 0,
139 .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
140 .descriptorCount = 1,
141 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
142 .pImmutableSamplers = NULL
143 },
144 {
145 .binding = 1,
146 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
147 .descriptorCount = 1,
148 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
149 .pImmutableSamplers = NULL
150 },
151 }
152 };
153
154 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
155 &ds_create_info,
156 &device->meta_state.alloc,
157 &device->meta_state.itob.img_ds_layout);
158 if (result != VK_SUCCESS)
159 goto fail;
160
161
162 VkPipelineLayoutCreateInfo pl_create_info = {
163 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
164 .setLayoutCount = 1,
165 .pSetLayouts = &device->meta_state.itob.img_ds_layout,
166 .pushConstantRangeCount = 1,
167 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 12},
168 };
169
170 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
171 &pl_create_info,
172 &device->meta_state.alloc,
173 &device->meta_state.itob.img_p_layout);
174 if (result != VK_SUCCESS)
175 goto fail;
176
177 /* compute shader */
178
179 VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
180 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
181 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
182 .module = radv_shader_module_to_handle(&cs),
183 .pName = "main",
184 .pSpecializationInfo = NULL,
185 };
186
187 VkComputePipelineCreateInfo vk_pipeline_info = {
188 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
189 .stage = pipeline_shader_stage,
190 .flags = 0,
191 .layout = device->meta_state.itob.img_p_layout,
192 };
193
194 result = radv_CreateComputePipelines(radv_device_to_handle(device),
195 radv_pipeline_cache_to_handle(&device->meta_state.cache),
196 1, &vk_pipeline_info, NULL,
197 &device->meta_state.itob.pipeline);
198 if (result != VK_SUCCESS)
199 goto fail;
200
201 ralloc_free(cs.nir);
202 return VK_SUCCESS;
203 fail:
204 ralloc_free(cs.nir);
205 return result;
206 }
207
208 static void
209 radv_device_finish_meta_itob_state(struct radv_device *device)
210 {
211 if (device->meta_state.itob.img_p_layout) {
212 radv_DestroyPipelineLayout(radv_device_to_handle(device),
213 device->meta_state.itob.img_p_layout,
214 &device->meta_state.alloc);
215 }
216 if (device->meta_state.itob.img_ds_layout) {
217 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
218 device->meta_state.itob.img_ds_layout,
219 &device->meta_state.alloc);
220 }
221 if (device->meta_state.itob.pipeline) {
222 radv_DestroyPipeline(radv_device_to_handle(device),
223 device->meta_state.itob.pipeline,
224 &device->meta_state.alloc);
225 }
226 }
227
/* Build the buffer->image copy compute shader.
 *
 * Descriptor layout: set 0, binding 0 = source storage texel buffer,
 *                    set 0, binding 1 = destination storage 2D image.
 * Push constants: ivec2 destination offset at byte 0, source pitch at
 * byte 8 (in texels of the buffer view).
 *
 * Each invocation of the 16x16 workgroup fetches the texel at linear
 * buffer index global_id.y * pitch + global_id.x and stores it to the
 * image at (global_id + offset).
 */
static nir_shader *
build_nir_btoi_compute_shader(struct radv_device *dev)
{
	nir_builder b;
	/* Source: float texel buffer, read with txf. */
	const struct glsl_type *buf_type = glsl_sampler_type(GLSL_SAMPLER_DIM_BUF,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	/* Destination: plain 2D float image, written via image_store. */
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "meta_btoi_cs");
	b.shader->info->cs.local_size[0] = 16;
	b.shader->info->cs.local_size[1] = 16;
	b.shader->info->cs.local_size[2] = 1;
	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      buf_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;

	/* global_id = workgroup_id * workgroup_size + local_invocation_id */
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info->cs.local_size[0],
						b.shader->info->cs.local_size[1],
						b.shader->info->cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

	/* Load the ivec2 destination offset from push constant bytes 0..7. */
	nir_intrinsic_instr *offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	offset->num_components = 2;
	nir_ssa_dest_init(&offset->instr, &offset->dest, 2, 32, "offset");
	nir_builder_instr_insert(&b, &offset->instr);

	/* Load the source pitch from push constant bytes 8..11. */
	nir_intrinsic_instr *stride = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	stride->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8));
	stride->num_components = 1;
	nir_ssa_dest_init(&stride->instr, &stride->dest, 1, 32, "stride");
	nir_builder_instr_insert(&b, &stride->instr);

	/* Linear buffer index = y * pitch + x. */
	nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
	nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);

	nir_ssa_def *tmp = nir_imul(&b, pos_y, &stride->dest.ssa);
	tmp = nir_iadd(&b, tmp, pos_x);

	nir_ssa_def *buf_coord = nir_vec4(&b, tmp, tmp, tmp, tmp);

	nir_ssa_def *img_coord = nir_iadd(&b, global_id, &offset->dest.ssa);

	/* Unfiltered fetch of the source texel from the buffer; only one
	 * coord component is consumed (coord_components = 1). */
	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
	tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
	tex->op = nir_texop_txf;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(buf_coord);
	tex->src[1].src_type = nir_tex_src_lod;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 1;
	tex->texture = nir_deref_var_create(tex, input_img);
	tex->sampler = NULL;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	nir_ssa_def *outval = &tex->dest.ssa;
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
	store->src[0] = nir_src_for_ssa(img_coord);
	store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32)); /* unused sample index */
	store->src[2] = nir_src_for_ssa(outval);
	store->variables[0] = nir_deref_var_create(store, output_img);

	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}
312
/* Buffer to image - reads come from a texel buffer, not image accessors. */
314 static VkResult
315 radv_device_init_meta_btoi_state(struct radv_device *device)
316 {
317 VkResult result;
318 struct radv_shader_module cs = { .nir = NULL };
319
320 zero(device->meta_state.btoi);
321
322 cs.nir = build_nir_btoi_compute_shader(device);
323
324 /*
325 * two descriptors one for the image being sampled
326 * one for the buffer being written.
327 */
328 VkDescriptorSetLayoutCreateInfo ds_create_info = {
329 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
330 .bindingCount = 2,
331 .pBindings = (VkDescriptorSetLayoutBinding[]) {
332 {
333 .binding = 0,
334 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
335 .descriptorCount = 1,
336 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
337 .pImmutableSamplers = NULL
338 },
339 {
340 .binding = 1,
341 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
342 .descriptorCount = 1,
343 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
344 .pImmutableSamplers = NULL
345 },
346 }
347 };
348
349 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
350 &ds_create_info,
351 &device->meta_state.alloc,
352 &device->meta_state.btoi.img_ds_layout);
353 if (result != VK_SUCCESS)
354 goto fail;
355
356
357 VkPipelineLayoutCreateInfo pl_create_info = {
358 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
359 .setLayoutCount = 1,
360 .pSetLayouts = &device->meta_state.btoi.img_ds_layout,
361 .pushConstantRangeCount = 1,
362 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 12},
363 };
364
365 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
366 &pl_create_info,
367 &device->meta_state.alloc,
368 &device->meta_state.btoi.img_p_layout);
369 if (result != VK_SUCCESS)
370 goto fail;
371
372 /* compute shader */
373
374 VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
375 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
376 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
377 .module = radv_shader_module_to_handle(&cs),
378 .pName = "main",
379 .pSpecializationInfo = NULL,
380 };
381
382 VkComputePipelineCreateInfo vk_pipeline_info = {
383 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
384 .stage = pipeline_shader_stage,
385 .flags = 0,
386 .layout = device->meta_state.btoi.img_p_layout,
387 };
388
389 result = radv_CreateComputePipelines(radv_device_to_handle(device),
390 radv_pipeline_cache_to_handle(&device->meta_state.cache),
391 1, &vk_pipeline_info, NULL,
392 &device->meta_state.btoi.pipeline);
393 if (result != VK_SUCCESS)
394 goto fail;
395
396 ralloc_free(cs.nir);
397 return VK_SUCCESS;
398 fail:
399 ralloc_free(cs.nir);
400 return result;
401 }
402
403 static void
404 radv_device_finish_meta_btoi_state(struct radv_device *device)
405 {
406 if (device->meta_state.btoi.img_p_layout) {
407 radv_DestroyPipelineLayout(radv_device_to_handle(device),
408 device->meta_state.btoi.img_p_layout,
409 &device->meta_state.alloc);
410 }
411 if (device->meta_state.btoi.img_ds_layout) {
412 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
413 device->meta_state.btoi.img_ds_layout,
414 &device->meta_state.alloc);
415 }
416 if (device->meta_state.btoi.pipeline) {
417 radv_DestroyPipeline(radv_device_to_handle(device),
418 device->meta_state.btoi.pipeline,
419 &device->meta_state.alloc);
420 }
421 }
422
/* Destroy all buffer<->image meta state (both itob and btoi halves). */
void
radv_device_finish_meta_bufimage_state(struct radv_device *device)
{
	radv_device_finish_meta_itob_state(device);
	radv_device_finish_meta_btoi_state(device);
}
429
430 VkResult
431 radv_device_init_meta_bufimage_state(struct radv_device *device)
432 {
433 VkResult result;
434
435 result = radv_device_init_meta_itob_state(device);
436 if (result != VK_SUCCESS)
437 return result;
438
439 result = radv_device_init_meta_btoi_state(device);
440 if (result != VK_SUCCESS) {
441 radv_device_finish_meta_itob_state(device);
442 return result;
443 }
444 return VK_SUCCESS;
445 }
446
/* Save the compute state that the meta buffer<->image copies clobber
 * (pipeline, descriptor set and 12 bytes of push constants). */
void
radv_meta_begin_bufimage(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_meta_saved_compute_state *save)
{
	/* 12 == size of the push constant range used by itob/btoi. */
	radv_meta_save_compute(save, cmd_buffer, 12);
}
453
/* Restore the compute state saved by radv_meta_begin_bufimage(). */
void
radv_meta_end_bufimage(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_meta_saved_compute_state *save)
{
	/* 12 == size of the push constant range used by itob/btoi. */
	radv_meta_restore_compute(save, cmd_buffer, 12);
}
460
461 static void
462 create_iview(struct radv_cmd_buffer *cmd_buffer,
463 struct radv_meta_blit2d_surf *surf,
464 VkImageUsageFlags usage,
465 struct radv_image_view *iview)
466 {
467
468 radv_image_view_init(iview, cmd_buffer->device,
469 &(VkImageViewCreateInfo) {
470 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
471 .image = radv_image_to_handle(surf->image),
472 .viewType = VK_IMAGE_VIEW_TYPE_2D,
473 .format = surf->format,
474 .subresourceRange = {
475 .aspectMask = surf->aspect_mask,
476 .baseMipLevel = surf->level,
477 .levelCount = 1,
478 .baseArrayLayer = surf->layer,
479 .layerCount = 1
480 },
481 }, cmd_buffer, usage);
482 }
483
484 static void
485 create_bview(struct radv_cmd_buffer *cmd_buffer,
486 struct radv_buffer *buffer,
487 unsigned offset,
488 VkFormat format,
489 struct radv_buffer_view *bview)
490 {
491 radv_buffer_view_init(bview, cmd_buffer->device,
492 &(VkBufferViewCreateInfo) {
493 .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
494 .flags = 0,
495 .buffer = radv_buffer_to_handle(buffer),
496 .format = format,
497 .offset = offset,
498 .range = VK_WHOLE_SIZE,
499 }, cmd_buffer);
500
501 }
502
/* Transient objects used for one image->buffer meta copy. */
struct itob_temps {
	struct radv_image_view src_iview;  /* view of the sampled source image */
	struct radv_buffer_view dst_bview; /* texel buffer view of the destination */
	VkDescriptorSet set;               /* temp descriptor set binding both */
};
508
509 static void
510 itob_bind_descriptors(struct radv_cmd_buffer *cmd_buffer,
511 struct itob_temps *tmp)
512 {
513 struct radv_device *device = cmd_buffer->device;
514 VkDevice vk_device = radv_device_to_handle(cmd_buffer->device);
515
516 radv_temp_descriptor_set_create(device, cmd_buffer,
517 device->meta_state.itob.img_ds_layout,
518 &tmp->set);
519
520 radv_UpdateDescriptorSets(vk_device,
521 2, /* writeCount */
522 (VkWriteDescriptorSet[]) {
523 {
524 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
525 .dstSet = tmp->set,
526 .dstBinding = 0,
527 .dstArrayElement = 0,
528 .descriptorCount = 1,
529 .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
530 .pImageInfo = (VkDescriptorImageInfo[]) {
531 {
532 .sampler = VK_NULL_HANDLE,
533 .imageView = radv_image_view_to_handle(&tmp->src_iview),
534 .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
535 },
536 }
537 },
538 {
539 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
540 .dstSet = tmp->set,
541 .dstBinding = 1,
542 .dstArrayElement = 0,
543 .descriptorCount = 1,
544 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
545 .pTexelBufferView = (VkBufferView[]) { radv_buffer_view_to_handle(&tmp->dst_bview) },
546 }
547 }, 0, NULL);
548
549 radv_CmdBindDescriptorSets(radv_cmd_buffer_to_handle(cmd_buffer),
550 VK_PIPELINE_BIND_POINT_COMPUTE,
551 device->meta_state.itob.img_p_layout, 0, 1,
552 &tmp->set, 0, NULL);
553 }
554
555 static void
556 itob_bind_pipeline(struct radv_cmd_buffer *cmd_buffer)
557 {
558 VkPipeline pipeline =
559 cmd_buffer->device->meta_state.itob.pipeline;
560
561 if (cmd_buffer->state.compute_pipeline != radv_pipeline_from_handle(pipeline)) {
562 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
563 VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
564 }
565 }
566
567 void
568 radv_meta_image_to_buffer(struct radv_cmd_buffer *cmd_buffer,
569 struct radv_meta_blit2d_surf *src,
570 struct radv_meta_blit2d_buffer *dst,
571 unsigned num_rects,
572 struct radv_meta_blit2d_rect *rects)
573 {
574 struct radv_device *device = cmd_buffer->device;
575 struct itob_temps temps;
576
577 create_iview(cmd_buffer, src, VK_IMAGE_USAGE_SAMPLED_BIT, &temps.src_iview);
578 create_bview(cmd_buffer, dst->buffer, dst->offset, dst->format, &temps.dst_bview);
579 itob_bind_descriptors(cmd_buffer, &temps);
580
581 itob_bind_pipeline(cmd_buffer);
582
583 for (unsigned r = 0; r < num_rects; ++r) {
584 unsigned push_constants[3] = {
585 rects[r].src_x,
586 rects[r].src_y,
587 dst->pitch
588 };
589 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
590 device->meta_state.itob.img_p_layout,
591 VK_SHADER_STAGE_COMPUTE_BIT, 0, 12,
592 push_constants);
593
594 radv_unaligned_dispatch(cmd_buffer, rects[r].width, rects[r].height, 1);
595 }
596 radv_temp_descriptor_set_destroy(cmd_buffer->device, temps.set);
597 }
598
/* Transient objects used for one buffer->image meta copy. */
struct btoi_temps {
	struct radv_buffer_view src_bview; /* texel buffer view of the source */
	struct radv_image_view dst_iview;  /* storage view of the destination image */
	VkDescriptorSet set;               /* temp descriptor set binding both */
};
604
605 static void
606 btoi_bind_descriptors(struct radv_cmd_buffer *cmd_buffer,
607 struct btoi_temps *tmp)
608 {
609 struct radv_device *device = cmd_buffer->device;
610 VkDevice vk_device = radv_device_to_handle(cmd_buffer->device);
611
612 radv_temp_descriptor_set_create(device, cmd_buffer,
613 device->meta_state.btoi.img_ds_layout,
614 &tmp->set);
615
616 radv_UpdateDescriptorSets(vk_device,
617 2, /* writeCount */
618 (VkWriteDescriptorSet[]) {
619 {
620 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
621 .dstSet = tmp->set,
622 .dstBinding = 0,
623 .dstArrayElement = 0,
624 .descriptorCount = 1,
625 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
626 .pTexelBufferView = (VkBufferView[]) { radv_buffer_view_to_handle(&tmp->src_bview) },
627 },
628 {
629 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
630 .dstSet = tmp->set,
631 .dstBinding = 1,
632 .dstArrayElement = 0,
633 .descriptorCount = 1,
634 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
635 .pImageInfo = (VkDescriptorImageInfo[]) {
636 {
637 .sampler = NULL,
638 .imageView = radv_image_view_to_handle(&tmp->dst_iview),
639 .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
640 },
641 }
642 }
643 }, 0, NULL);
644
645 radv_CmdBindDescriptorSets(radv_cmd_buffer_to_handle(cmd_buffer),
646 VK_PIPELINE_BIND_POINT_COMPUTE,
647 device->meta_state.btoi.img_p_layout, 0, 1,
648 &tmp->set, 0, NULL);
649 }
650
651 static void
652 btoi_bind_pipeline(struct radv_cmd_buffer *cmd_buffer)
653 {
654 VkPipeline pipeline =
655 cmd_buffer->device->meta_state.btoi.pipeline;
656
657 if (cmd_buffer->state.compute_pipeline != radv_pipeline_from_handle(pipeline)) {
658 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
659 VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
660 }
661 }
662
663 void
664 radv_meta_buffer_to_image_cs(struct radv_cmd_buffer *cmd_buffer,
665 struct radv_meta_blit2d_buffer *src,
666 struct radv_meta_blit2d_surf *dst,
667 unsigned num_rects,
668 struct radv_meta_blit2d_rect *rects)
669 {
670 struct radv_device *device = cmd_buffer->device;
671 struct btoi_temps temps;
672
673 create_bview(cmd_buffer, src->buffer, src->offset, src->format, &temps.src_bview);
674 create_iview(cmd_buffer, dst, VK_IMAGE_USAGE_STORAGE_BIT, &temps.dst_iview);
675 btoi_bind_descriptors(cmd_buffer, &temps);
676
677 btoi_bind_pipeline(cmd_buffer);
678
679 for (unsigned r = 0; r < num_rects; ++r) {
680 unsigned push_constants[3] = {
681 rects[r].dst_x,
682 rects[r].dst_y,
683 src->pitch
684 };
685 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
686 device->meta_state.btoi.img_p_layout,
687 VK_SHADER_STAGE_COMPUTE_BIT, 0, 12,
688 push_constants);
689
690 radv_unaligned_dispatch(cmd_buffer, rects[r].width, rects[r].height, 1);
691 }
692 radv_temp_descriptor_set_destroy(cmd_buffer->device, temps.set);
693 }