radv: drop entrypoint split out.
[mesa.git] / src / amd / vulkan / radv_meta_fast_clear.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "nir/nir_builder.h"
30 #include "sid.h"
/**
 * Vertex attributes used by all pipelines.
 *
 * emit_fast_clear_flush() fills three of these per draw, giving a
 * right triangle covering the full render area; the pipelines are
 * created with use_rectlist = true so the triangle is expanded to a
 * screen rect.
 */
struct vertex_attrs {
	float position[2]; /**< 3DPRIM_RECTLIST (Intel terminology; here: rectlist corner in pixel coords) */
	float tex_position[2]; /**< duplicated position, passed through to the FS as VARYING_SLOT_VAR0 */
};
38
39 /* passthrough vertex shader */
40 static nir_shader *
41 build_nir_vs(void)
42 {
43 const struct glsl_type *vec4 = glsl_vec4_type();
44
45 nir_builder b;
46 nir_variable *a_position;
47 nir_variable *v_position;
48 nir_variable *a_tex_position;
49 nir_variable *v_tex_position;
50
51 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
52 b.shader->info.name = ralloc_strdup(b.shader, "meta_fast_clear_vs");
53
54 a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
55 "a_position");
56 a_position->data.location = VERT_ATTRIB_GENERIC0;
57
58 v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
59 "gl_Position");
60 v_position->data.location = VARYING_SLOT_POS;
61
62 a_tex_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
63 "a_tex_position");
64 a_tex_position->data.location = VERT_ATTRIB_GENERIC1;
65
66 v_tex_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
67 "v_tex_position");
68 v_tex_position->data.location = VARYING_SLOT_VAR0;
69
70 nir_copy_var(&b, v_position, a_position);
71 nir_copy_var(&b, v_tex_position, a_tex_position);
72
73 return b.shader;
74 }
75
76 /* simple passthrough shader */
77 static nir_shader *
78 build_nir_fs(void)
79 {
80 const struct glsl_type *vec4 = glsl_vec4_type();
81 nir_builder b;
82 nir_variable *v_tex_position; /* vec4, varying texture coordinate */
83 nir_variable *f_color; /* vec4, fragment output color */
84
85 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
86 b.shader->info.name = ralloc_asprintf(b.shader,
87 "meta_fast_clear_fs");
88
89 v_tex_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
90 "v_tex_position");
91 v_tex_position->data.location = VARYING_SLOT_VAR0;
92
93 f_color = nir_variable_create(b.shader, nir_var_shader_out, vec4,
94 "f_color");
95 f_color->data.location = FRAG_RESULT_DATA0;
96
97 nir_copy_var(&b, f_color, v_tex_position);
98
99 return b.shader;
100 }
101
102 static VkResult
103 create_pass(struct radv_device *device)
104 {
105 VkResult result;
106 VkDevice device_h = radv_device_to_handle(device);
107 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
108 VkAttachmentDescription attachment;
109
110 attachment.format = VK_FORMAT_UNDEFINED;
111 attachment.samples = 1;
112 attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
113 attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
114 attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
115 attachment.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
116
117 result = radv_CreateRenderPass(device_h,
118 &(VkRenderPassCreateInfo) {
119 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
120 .attachmentCount = 1,
121 .pAttachments = &attachment,
122 .subpassCount = 1,
123 .pSubpasses = &(VkSubpassDescription) {
124 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
125 .inputAttachmentCount = 0,
126 .colorAttachmentCount = 1,
127 .pColorAttachments = (VkAttachmentReference[]) {
128 {
129 .attachment = 0,
130 .layout = VK_IMAGE_LAYOUT_GENERAL,
131 },
132 },
133 .pResolveAttachments = NULL,
134 .pDepthStencilAttachment = &(VkAttachmentReference) {
135 .attachment = VK_ATTACHMENT_UNUSED,
136 },
137 .preserveAttachmentCount = 0,
138 .pPreserveAttachments = NULL,
139 },
140 .dependencyCount = 0,
141 },
142 alloc,
143 &device->meta_state.fast_clear_flush.pass);
144
145 return result;
146 }
147
/* Build the two fast-clear flush pipelines (CMASK eliminate and FMASK
 * decompress).  Both share the vertex shader passed in by the caller, the
 * same trivial fragment shader, and identical fixed-function state; they
 * differ only in the radv-specific custom_blend_mode, which is what selects
 * the CB hardware operation actually performed by the draw.
 *
 * On success both pipeline handles are stored in
 * device->meta_state.fast_clear_flush.  On failure everything created here
 * is destroyed again before returning; the FS NIR is freed on every path.
 */
static VkResult
create_pipeline(struct radv_device *device,
		VkShaderModule vs_module_h)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);

	struct radv_shader_module fs_module = {
		.nir = build_nir_fs(),
	};

	if (!fs_module.nir) {
		/* XXX: Need more accurate error */
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto cleanup;
	}

	const VkPipelineShaderStageCreateInfo stages[2] = {
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_VERTEX_BIT,
			.module = vs_module_h,
			.pName = "main",
		},
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
			.module = radv_shader_module_to_handle(&fs_module),
			.pName = "main",
		},
	};

	/* One interleaved vertex buffer: position + tex coordinate,
	 * matching struct vertex_attrs.
	 */
	const VkPipelineVertexInputStateCreateInfo vi_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		.vertexBindingDescriptionCount = 1,
		.pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
			{
				.binding = 0,
				.stride = sizeof(struct vertex_attrs),
				.inputRate = VK_VERTEX_INPUT_RATE_VERTEX
			},
		},
		.vertexAttributeDescriptionCount = 2,
		.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
			{
				/* Position */
				.location = 0,
				.binding = 0,
				.format = VK_FORMAT_R32G32_SFLOAT,
				.offset = offsetof(struct vertex_attrs, position),
			},
			{
				/* Texture Coordinate */
				.location = 1,
				.binding = 0,
				.format = VK_FORMAT_R32G32_SFLOAT,
				.offset = offsetof(struct vertex_attrs, tex_position),
			},
		}
	};

	/* Topology is nominally a strip; use_rectlist below overrides it to
	 * the hardware rectlist primitive.
	 */
	const VkPipelineInputAssemblyStateCreateInfo ia_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
		.primitiveRestartEnable = false,
	};

	/* All channels enabled, no blending: the custom blend mode does the
	 * actual metadata work.
	 */
	const VkPipelineColorBlendStateCreateInfo blend_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		.logicOpEnable = false,
		.attachmentCount = 1,
		.pAttachments = (VkPipelineColorBlendAttachmentState []) {
			{
				.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
				VK_COLOR_COMPONENT_G_BIT |
				VK_COLOR_COMPONENT_B_BIT |
				VK_COLOR_COMPONENT_A_BIT,
			},
		}
	};
	const VkPipelineRasterizationStateCreateInfo rs_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		.depthClampEnable = false,
		.rasterizerDiscardEnable = false,
		.polygonMode = VK_POLYGON_MODE_FILL,
		.cullMode = VK_CULL_MODE_NONE,
		.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
	};

	/* Pipeline 1: CMASK eliminate. */
	result = radv_graphics_pipeline_create(device_h,
					       radv_pipeline_cache_to_handle(&device->meta_state.cache),
					       &(VkGraphicsPipelineCreateInfo) {
						       .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
						       .stageCount = 2,
						       .pStages = stages,

						       .pVertexInputState = &vi_state,
						       .pInputAssemblyState = &ia_state,

						       .pViewportState = &(VkPipelineViewportStateCreateInfo) {
							       .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
							       .viewportCount = 0,
							       .scissorCount = 0,
						       },
						       .pRasterizationState = &rs_state,
						       .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
							       .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
							       .rasterizationSamples = 1,
							       .sampleShadingEnable = false,
							       .pSampleMask = NULL,
							       .alphaToCoverageEnable = false,
							       .alphaToOneEnable = false,
						       },
						       .pColorBlendState = &blend_state,
						       .pDynamicState = NULL,
						       .renderPass = device->meta_state.fast_clear_flush.pass,
						       .subpass = 0,
					       },
					       &(struct radv_graphics_pipeline_create_info) {
						       .use_rectlist = true,
						       .custom_blend_mode = V_028808_CB_ELIMINATE_FAST_CLEAR,
					       },
					       &device->meta_state.alloc,
					       &device->meta_state.fast_clear_flush.cmask_eliminate_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	/* Pipeline 2: FMASK decompress — identical state, different custom
	 * blend mode.
	 */
	result = radv_graphics_pipeline_create(device_h,
					       radv_pipeline_cache_to_handle(&device->meta_state.cache),
					       &(VkGraphicsPipelineCreateInfo) {
						       .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
						       .stageCount = 2,
						       .pStages = stages,

						       .pVertexInputState = &vi_state,
						       .pInputAssemblyState = &ia_state,

						       .pViewportState = &(VkPipelineViewportStateCreateInfo) {
							       .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
							       .viewportCount = 0,
							       .scissorCount = 0,
						       },
						       .pRasterizationState = &rs_state,
						       .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
							       .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
							       .rasterizationSamples = 1,
							       .sampleShadingEnable = false,
							       .pSampleMask = NULL,
							       .alphaToCoverageEnable = false,
							       .alphaToOneEnable = false,
						       },
						       .pColorBlendState = &blend_state,
						       .pDynamicState = NULL,
						       .renderPass = device->meta_state.fast_clear_flush.pass,
						       .subpass = 0,
					       },
					       &(struct radv_graphics_pipeline_create_info) {
						       .use_rectlist = true,
						       .custom_blend_mode = V_028808_CB_FMASK_DECOMPRESS,
					       },
					       &device->meta_state.alloc,
					       &device->meta_state.fast_clear_flush.fmask_decompress_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup_cmask;

	goto cleanup;
cleanup_cmask:
	/* Second pipeline failed: tear down the first so the caller sees
	 * all-or-nothing.
	 */
	radv_DestroyPipeline(device_h, device->meta_state.fast_clear_flush.cmask_eliminate_pipeline, &device->meta_state.alloc);
cleanup:
	ralloc_free(fs_module.nir);
	return result;
}
320
321 void
322 radv_device_finish_meta_fast_clear_flush_state(struct radv_device *device)
323 {
324 struct radv_meta_state *state = &device->meta_state;
325 VkDevice device_h = radv_device_to_handle(device);
326 VkRenderPass pass_h = device->meta_state.fast_clear_flush.pass;
327 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
328
329 if (pass_h)
330 radv_DestroyRenderPass(device_h, pass_h,
331 &device->meta_state.alloc);
332
333 VkPipeline pipeline_h = state->fast_clear_flush.cmask_eliminate_pipeline;
334 if (pipeline_h) {
335 radv_DestroyPipeline(device_h, pipeline_h, alloc);
336 }
337
338 pipeline_h = state->fast_clear_flush.fmask_decompress_pipeline;
339 if (pipeline_h) {
340 radv_DestroyPipeline(device_h, pipeline_h, alloc);
341 }
342 }
343
344 VkResult
345 radv_device_init_meta_fast_clear_flush_state(struct radv_device *device)
346 {
347 VkResult res = VK_SUCCESS;
348
349 zero(device->meta_state.fast_clear_flush);
350
351 struct radv_shader_module vs_module = { .nir = build_nir_vs() };
352 if (!vs_module.nir) {
353 /* XXX: Need more accurate error */
354 res = VK_ERROR_OUT_OF_HOST_MEMORY;
355 goto fail;
356 }
357
358 res = create_pass(device);
359 if (res != VK_SUCCESS)
360 goto fail;
361
362 VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
363 res = create_pipeline(device, vs_module_h);
364 if (res != VK_SUCCESS)
365 goto fail;
366
367 goto cleanup;
368
369 fail:
370 radv_device_finish_meta_fast_clear_flush_state(device);
371
372 cleanup:
373 ralloc_free(vs_module.nir);
374
375 return res;
376 }
377
/* Record the actual flush draw into an already-begun render pass.
 *
 * Uploads a 3-vertex rectlist covering resolve_extent (positions are in
 * pixel coordinates; the meta pipelines use use_rectlist), binds it plus
 * the fmask-decompress or cmask-eliminate pipeline, and draws.  CB +
 * CB-meta flushes are requested both before the draw (so the metadata the
 * draw reads is coherent) and after it, followed by an explicit cache
 * flush emit.
 */
static void
emit_fast_clear_flush(struct radv_cmd_buffer *cmd_buffer,
		      const VkExtent2D *resolve_extent,
		      bool fmask_decompress)
{
	struct radv_device *device = cmd_buffer->device;
	VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
	uint32_t offset;
	/* Triangle (0,0) / (0,h) / (w,0); rectlist expansion covers the
	 * full w x h area.
	 */
	const struct vertex_attrs vertex_data[3] = {
		{
			.position = {
				0,
				0,
			},
			.tex_position = {
				0,
				0,
			},
		},
		{
			.position = {
				0,
				resolve_extent->height,
			},
			.tex_position = {
				0,
				resolve_extent->height,
			},
		},
		{
			.position = {
				resolve_extent->width,
				0,
			},
			.tex_position = {
				resolve_extent->width,
				0,
			},
		},
	};

	/* Flush CB and CB metadata caches before the metadata-reading draw. */
	cmd_buffer->state.flush_bits |= (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META);
	radv_cmd_buffer_upload_data(cmd_buffer, sizeof(vertex_data), 16, vertex_data, &offset);
	/* Temporary buffer object aliasing the cmd buffer's upload BO at the
	 * offset just written; only lives for the bind call below.
	 */
	struct radv_buffer vertex_buffer = {
		.device = device,
		.size = sizeof(vertex_data),
		.bo = cmd_buffer->upload.upload_bo,
		.offset = offset,
	};

	VkBuffer vertex_buffer_h = radv_buffer_to_handle(&vertex_buffer);

	radv_CmdBindVertexBuffers(cmd_buffer_h,
				  /*firstBinding*/ 0,
				  /*bindingCount*/ 1,
				  (VkBuffer[]) { vertex_buffer_h },
				  (VkDeviceSize[]) { 0 });

	VkPipeline pipeline_h;
	if (fmask_decompress)
		pipeline_h = device->meta_state.fast_clear_flush.fmask_decompress_pipeline;
	else
		pipeline_h = device->meta_state.fast_clear_flush.cmask_eliminate_pipeline;
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pipeline_h);

	/* Skip the bind if this pipeline is already current. */
	if (cmd_buffer->state.pipeline != pipeline) {
		radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
				     pipeline_h);
	}

	radv_CmdDraw(cmd_buffer_h, 3, 1, 0, 0);
	/* Flush again after the draw and emit the cache flush immediately so
	 * later reads see the eliminated/decompressed data.
	 */
	cmd_buffer->state.flush_bits |= (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META);
	si_emit_cache_flush(cmd_buffer);
}
454
/**
 * Eliminate the CMASK fast-clear metadata of \p image in place (and
 * decompress FMASK too when the image has one), by drawing a full-image
 * rect with the appropriate meta pipeline.  No-op when the image has no
 * CMASK or the device disallows fast clears.
 *
 * Saves and restores the command buffer's graphics and render-pass state
 * around the meta draw.  Only mip 0 / layer 0 is covered here —
 * NOTE(review): levelCount/layerCount are hard-coded to 1; confirm callers
 * never fast-clear other subresources.
 */
void
radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer *cmd_buffer,
				    struct radv_image *image)
{
	struct radv_meta_saved_state saved_state;
	struct radv_meta_saved_pass_state saved_pass_state;
	VkDevice device_h = radv_device_to_handle(cmd_buffer->device);
	VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);

	/* Nothing to eliminate without CMASK metadata. */
	if (!image->cmask.size)
		return;

	if (!cmd_buffer->device->allow_fast_clears)
		return;

	radv_meta_save_pass(&saved_pass_state, cmd_buffer);
	radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);

	/* Color view of the image's base subresource, used as the sole
	 * framebuffer attachment.
	 */
	struct radv_image_view iview;
	radv_image_view_init(&iview, cmd_buffer->device,
			     &(VkImageViewCreateInfo) {
				     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				     .image = radv_image_to_handle(image),
				     .format = image->vk_format,
				     .subresourceRange = {
					     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
					     .baseMipLevel = 0,
					     .levelCount = 1,
					     .baseArrayLayer = 0,
					     .layerCount = 1,
				     },
			     },
			     cmd_buffer, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);

	VkFramebuffer fb_h;
	radv_CreateFramebuffer(device_h,
			       &(VkFramebufferCreateInfo) {
				       .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
				       .attachmentCount = 1,
				       .pAttachments = (VkImageView[]) {
					       radv_image_view_to_handle(&iview)
				       },
				       .width = image->extent.width,
				       .height = image->extent.height,
				       .layers = 1
			       },
			       &cmd_buffer->pool->alloc,
			       &fb_h);

	radv_CmdBeginRenderPass(cmd_buffer_h,
				&(VkRenderPassBeginInfo) {
					.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
					.renderPass = cmd_buffer->device->meta_state.fast_clear_flush.pass,
					.framebuffer = fb_h,
					.renderArea = {
						.offset = {
							0,
							0,
						},
						.extent = {
							image->extent.width,
							image->extent.height,
						}
					},
					.clearValueCount = 0,
					.pClearValues = NULL,
				},
				VK_SUBPASS_CONTENTS_INLINE);

	/* FMASK present => use the fmask-decompress pipeline. */
	emit_fast_clear_flush(cmd_buffer,
			      &(VkExtent2D) { image->extent.width, image->extent.height },
			      image->fmask.size > 0);
	radv_CmdEndRenderPass(cmd_buffer_h);

	radv_DestroyFramebuffer(device_h, fb_h,
				&cmd_buffer->pool->alloc);

	radv_meta_restore(&saved_state, cmd_buffer);
	radv_meta_restore_pass(&saved_pass_state, cmd_buffer);
}