radv: change base alignment for allocated memory.
[mesa.git] / src / amd / vulkan / radv_meta_decompress.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "nir/nir_builder.h"
30 #include "sid.h"
/**
 * Vertex attributes used by all pipelines.
 */
struct vertex_attrs {
	/* Vertex position in pixel coordinates.  Only three vertices are
	 * uploaded per draw; the pipeline's use_rectlist option expands
	 * them into a 3DPRIM_RECTLIST primitive. */
	float position[2]; /**< 3DPRIM_RECTLIST */
};
37
38 /* passthrough vertex shader */
39 static nir_shader *
40 build_nir_vs(void)
41 {
42 const struct glsl_type *vec4 = glsl_vec4_type();
43
44 nir_builder b;
45 nir_variable *a_position;
46 nir_variable *v_position;
47
48 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
49 b.shader->info->name = ralloc_strdup(b.shader, "meta_depth_decomp_vs");
50
51 a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
52 "a_position");
53 a_position->data.location = VERT_ATTRIB_GENERIC0;
54
55 v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
56 "gl_Position");
57 v_position->data.location = VARYING_SLOT_POS;
58
59 nir_copy_var(&b, v_position, a_position);
60
61 return b.shader;
62 }
63
64 /* simple passthrough shader */
65 static nir_shader *
66 build_nir_fs(void)
67 {
68 nir_builder b;
69
70 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
71 b.shader->info->name = ralloc_asprintf(b.shader,
72 "meta_depth_decomp_noop_fs");
73
74 return b.shader;
75 }
76
77 static VkResult
78 create_pass(struct radv_device *device)
79 {
80 VkResult result;
81 VkDevice device_h = radv_device_to_handle(device);
82 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
83 VkAttachmentDescription attachment;
84
85 attachment.format = VK_FORMAT_UNDEFINED;
86 attachment.samples = 1;
87 attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
88 attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
89 attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
90 attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
91
92 result = radv_CreateRenderPass(device_h,
93 &(VkRenderPassCreateInfo) {
94 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
95 .attachmentCount = 1,
96 .pAttachments = &attachment,
97 .subpassCount = 1,
98 .pSubpasses = &(VkSubpassDescription) {
99 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
100 .inputAttachmentCount = 0,
101 .colorAttachmentCount = 0,
102 .pColorAttachments = NULL,
103 .pResolveAttachments = NULL,
104 .pDepthStencilAttachment = &(VkAttachmentReference) {
105 .attachment = 0,
106 .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
107 },
108 .preserveAttachmentCount = 0,
109 .pPreserveAttachments = NULL,
110 },
111 .dependencyCount = 0,
112 },
113 alloc,
114 &device->meta_state.depth_decomp.pass);
115
116 return result;
117 }
118
119 static VkResult
120 create_pipeline(struct radv_device *device,
121 VkShaderModule vs_module_h)
122 {
123 VkResult result;
124 VkDevice device_h = radv_device_to_handle(device);
125
126 struct radv_shader_module fs_module = {
127 .nir = build_nir_fs(),
128 };
129
130 if (!fs_module.nir) {
131 /* XXX: Need more accurate error */
132 result = VK_ERROR_OUT_OF_HOST_MEMORY;
133 goto cleanup;
134 }
135
136 const VkGraphicsPipelineCreateInfo pipeline_create_info = {
137 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
138 .stageCount = 2,
139 .pStages = (VkPipelineShaderStageCreateInfo[]) {
140 {
141 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
142 .stage = VK_SHADER_STAGE_VERTEX_BIT,
143 .module = vs_module_h,
144 .pName = "main",
145 },
146 {
147 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
148 .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
149 .module = radv_shader_module_to_handle(&fs_module),
150 .pName = "main",
151 },
152 },
153 .pVertexInputState = &(VkPipelineVertexInputStateCreateInfo) {
154 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
155 .vertexBindingDescriptionCount = 1,
156 .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
157 {
158 .binding = 0,
159 .stride = sizeof(struct vertex_attrs),
160 .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
161 },
162 },
163 .vertexAttributeDescriptionCount = 1,
164 .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
165 {
166 /* Position */
167 .location = 0,
168 .binding = 0,
169 .format = VK_FORMAT_R32G32_SFLOAT,
170 .offset = offsetof(struct vertex_attrs, position),
171 },
172 },
173 },
174 .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
175 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
176 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
177 .primitiveRestartEnable = false,
178 },
179 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
180 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
181 .viewportCount = 0,
182 .scissorCount = 0,
183 },
184 .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
185 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
186 .depthClampEnable = false,
187 .rasterizerDiscardEnable = false,
188 .polygonMode = VK_POLYGON_MODE_FILL,
189 .cullMode = VK_CULL_MODE_NONE,
190 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
191 },
192 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
193 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
194 .rasterizationSamples = 1,
195 .sampleShadingEnable = false,
196 .pSampleMask = NULL,
197 .alphaToCoverageEnable = false,
198 .alphaToOneEnable = false,
199 },
200 .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
201 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
202 .logicOpEnable = false,
203 .attachmentCount = 0,
204 .pAttachments = NULL,
205 },
206 .pDepthStencilState = &(VkPipelineDepthStencilStateCreateInfo) {
207 .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
208 .depthTestEnable = false,
209 .depthWriteEnable = false,
210 .depthBoundsTestEnable = false,
211 .stencilTestEnable = false,
212 },
213 .pDynamicState = NULL,
214 .renderPass = device->meta_state.depth_decomp.pass,
215 .subpass = 0,
216 };
217
218 result = radv_graphics_pipeline_create(device_h,
219 radv_pipeline_cache_to_handle(&device->meta_state.cache),
220 &pipeline_create_info,
221 &(struct radv_graphics_pipeline_create_info) {
222 .use_rectlist = true,
223 .db_flush_depth_inplace = true,
224 .db_flush_stencil_inplace = true,
225 },
226 &device->meta_state.alloc,
227 &device->meta_state.depth_decomp.decompress_pipeline);
228 if (result != VK_SUCCESS)
229 goto cleanup;
230
231 result = radv_graphics_pipeline_create(device_h,
232 radv_pipeline_cache_to_handle(&device->meta_state.cache),
233 &pipeline_create_info,
234 &(struct radv_graphics_pipeline_create_info) {
235 .use_rectlist = true,
236 .db_flush_depth_inplace = true,
237 .db_flush_stencil_inplace = true,
238 .db_resummarize = true,
239 },
240 &device->meta_state.alloc,
241 &device->meta_state.depth_decomp.resummarize_pipeline);
242 if (result != VK_SUCCESS)
243 goto cleanup;
244
245 goto cleanup;
246
247 cleanup:
248 ralloc_free(fs_module.nir);
249 return result;
250 }
251
252 void
253 radv_device_finish_meta_depth_decomp_state(struct radv_device *device)
254 {
255 struct radv_meta_state *state = &device->meta_state;
256 VkDevice device_h = radv_device_to_handle(device);
257 VkRenderPass pass_h = device->meta_state.depth_decomp.pass;
258 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
259
260 if (pass_h)
261 radv_DestroyRenderPass(device_h, pass_h,
262 &device->meta_state.alloc);
263
264 VkPipeline pipeline_h = state->depth_decomp.decompress_pipeline;
265 if (pipeline_h) {
266 radv_DestroyPipeline(device_h, pipeline_h, alloc);
267 }
268 pipeline_h = state->depth_decomp.resummarize_pipeline;
269 if (pipeline_h) {
270 radv_DestroyPipeline(device_h, pipeline_h, alloc);
271 }
272 }
273
274 VkResult
275 radv_device_init_meta_depth_decomp_state(struct radv_device *device)
276 {
277 VkResult res = VK_SUCCESS;
278
279 zero(device->meta_state.depth_decomp);
280
281 struct radv_shader_module vs_module = { .nir = build_nir_vs() };
282 if (!vs_module.nir) {
283 /* XXX: Need more accurate error */
284 res = VK_ERROR_OUT_OF_HOST_MEMORY;
285 goto fail;
286 }
287
288 res = create_pass(device);
289 if (res != VK_SUCCESS)
290 goto fail;
291
292 VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
293 res = create_pipeline(device, vs_module_h);
294 if (res != VK_SUCCESS)
295 goto fail;
296
297 goto cleanup;
298
299 fail:
300 radv_device_finish_meta_depth_decomp_state(device);
301
302 cleanup:
303 ralloc_free(vs_module.nir);
304
305 return res;
306 }
307
308 static void
309 emit_depth_decomp(struct radv_cmd_buffer *cmd_buffer,
310 const VkOffset2D *dest_offset,
311 const VkExtent2D *depth_decomp_extent,
312 VkPipeline pipeline_h)
313 {
314 struct radv_device *device = cmd_buffer->device;
315 VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
316 uint32_t offset;
317 const struct vertex_attrs vertex_data[3] = {
318 {
319 .position = {
320 dest_offset->x,
321 dest_offset->y,
322 },
323 },
324 {
325 .position = {
326 dest_offset->x,
327 dest_offset->y + depth_decomp_extent->height,
328 },
329 },
330 {
331 .position = {
332 dest_offset->x + depth_decomp_extent->width,
333 dest_offset->y,
334 },
335 },
336 };
337
338 radv_cmd_buffer_upload_data(cmd_buffer, sizeof(vertex_data), 16, vertex_data, &offset);
339 struct radv_buffer vertex_buffer = {
340 .device = device,
341 .size = sizeof(vertex_data),
342 .bo = cmd_buffer->upload.upload_bo,
343 .offset = offset,
344 };
345
346 VkBuffer vertex_buffer_h = radv_buffer_to_handle(&vertex_buffer);
347
348 radv_CmdBindVertexBuffers(cmd_buffer_h,
349 /*firstBinding*/ 0,
350 /*bindingCount*/ 1,
351 (VkBuffer[]) { vertex_buffer_h },
352 (VkDeviceSize[]) { 0 });
353
354 RADV_FROM_HANDLE(radv_pipeline, pipeline, pipeline_h);
355
356 if (cmd_buffer->state.pipeline != pipeline) {
357 radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
358 pipeline_h);
359 }
360
361 radv_CmdDraw(cmd_buffer_h, 3, 1, 0, 0);
362 }
363
364
/* Run one of the depth decompress/resummarize pipelines over every
 * layer selected by 'subresourceRange' at its baseMipLevel.  For each
 * layer a single-attachment framebuffer is created, the meta render
 * pass is begun, a full-extent rect is drawn (emit_depth_decomp), and
 * the framebuffer is destroyed again.  Graphics and render-pass state
 * are saved up front and restored at the end. */
static void radv_process_depth_image_inplace(struct radv_cmd_buffer *cmd_buffer,
					     struct radv_image *image,
					     VkImageSubresourceRange *subresourceRange,
					     VkPipeline pipeline_h)
{
	struct radv_meta_saved_state saved_state;
	struct radv_meta_saved_pass_state saved_pass_state;
	VkDevice device_h = radv_device_to_handle(cmd_buffer->device);
	VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
	/* Dimensions of the selected mip level. */
	uint32_t width = radv_minify(image->extent.width,
				     subresourceRange->baseMipLevel);
	uint32_t height = radv_minify(image->extent.height,
				      subresourceRange->baseMipLevel);

	/* No HTILE metadata means there is nothing to (de)compress.  Note
	 * this early-out happens before any state is saved. */
	if (!image->htile.size)
		return;
	radv_meta_save_pass(&saved_pass_state, cmd_buffer);

	radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);

	for (uint32_t layer = 0; layer < radv_get_layerCount(image, subresourceRange); layer++) {
		struct radv_image_view iview;

		/* Depth-only view of exactly one mip level / one layer. */
		radv_image_view_init(&iview, cmd_buffer->device,
				     &(VkImageViewCreateInfo) {
					     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					     .image = radv_image_to_handle(image),
					     .format = image->vk_format,
					     .subresourceRange = {
						     .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
						     .baseMipLevel = subresourceRange->baseMipLevel,
						     .levelCount = 1,
						     .baseArrayLayer = subresourceRange->baseArrayLayer + layer,
						     .layerCount = 1,
					     },
				     },
				     cmd_buffer, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);


		/* NOTE(review): the radv_CreateFramebuffer result is ignored;
		 * on allocation failure fb_h would be used uninitialized —
		 * TODO confirm whether meta code treats this as infallible. */
		VkFramebuffer fb_h;
		radv_CreateFramebuffer(device_h,
				       &(VkFramebufferCreateInfo) {
					       .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
					       .attachmentCount = 1,
					       .pAttachments = (VkImageView[]) {
						       radv_image_view_to_handle(&iview)
					       },
					       .width = width,
					       .height = height,
					       .layers = 1
				       },
				       &cmd_buffer->pool->alloc,
				       &fb_h);

		radv_CmdBeginRenderPass(cmd_buffer_h,
					&(VkRenderPassBeginInfo) {
						.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
						.renderPass = cmd_buffer->device->meta_state.depth_decomp.pass,
						.framebuffer = fb_h,
						.renderArea = {
							.offset = {
								0,
								0,
							},
							.extent = {
								width,
								height,
							}
						},
						.clearValueCount = 0,
						.pClearValues = NULL,
					},
					VK_SUBPASS_CONTENTS_INLINE);

		/* Full-extent rect draw performs the actual HTILE operation. */
		emit_depth_decomp(cmd_buffer, &(VkOffset2D){0, 0 }, &(VkExtent2D){width, height}, pipeline_h);
		radv_CmdEndRenderPass(cmd_buffer_h);

		radv_DestroyFramebuffer(device_h, fb_h,
					&cmd_buffer->pool->alloc);
	}
	radv_meta_restore(&saved_state, cmd_buffer);
	radv_meta_restore_pass(&saved_pass_state, cmd_buffer);
}
448
449 void radv_decompress_depth_image_inplace(struct radv_cmd_buffer *cmd_buffer,
450 struct radv_image *image,
451 VkImageSubresourceRange *subresourceRange)
452 {
453 assert(cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL);
454 radv_process_depth_image_inplace(cmd_buffer, image, subresourceRange,
455 cmd_buffer->device->meta_state.depth_decomp.decompress_pipeline);
456 }
457
458 void radv_resummarize_depth_image_inplace(struct radv_cmd_buffer *cmd_buffer,
459 struct radv_image *image,
460 VkImageSubresourceRange *subresourceRange)
461 {
462 assert(cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL);
463 radv_process_depth_image_inplace(cmd_buffer, image, subresourceRange,
464 cmd_buffer->device->meta_state.depth_decomp.resummarize_pipeline);
465 }