/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_debug.h"
#include "radv_private.h"
#include "vk_format.h"
#include "vk_util.h"
#include "radv_radeon_winsys.h"
#include "sid.h"
#include "gfx9d.h"
#include "util/debug.h"
#include "util/u_atomic.h"
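
/* Choose the surface (tiling) mode for a new image. Linear is used only
 * when the application explicitly asks for VK_IMAGE_TILING_LINEAR, or for
 * 1D and very thin 2D textures on VI and older where linear is at least as
 * good; everything else, including all multisampled images, is 2D tiled. */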
static unsigned
radv_choose_tiling(struct radv_device *device,
		   const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) {
		assert(pCreateInfo->samples <= 1);
		return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Textures with a very small height are recommended to be linear,
	 * but only on VI and older: making them linear on GFX9 causes hangs
	 * in some VK CTS tests. */
	if (!vk_format_is_compressed(pCreateInfo->format) &&
	    !vk_format_is_depth_or_stencil(pCreateInfo->format) &&
	    device->physical_device->rad_info.chip_class <= VI) {
		if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* MSAA resources must be 2D tiled. */
	if (pCreateInfo->samples > 1)
		return RADEON_SURF_MODE_2D;

	return RADEON_SURF_MODE_2D;
}
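
/* Translate the VkImageCreateInfo into the winsys radeon_surf description:
 * block dimensions, bytes per element, surface type flags, and whether DCC
 * (Delta Color Compression) may stay enabled for this image. */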
static int
radv_init_surface(struct radv_device *device,
		  struct radeon_surf *surface,
		  const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	unsigned array_mode = radv_choose_tiling(device, create_info);
	const struct vk_format_description *desc =
		vk_format_description(pCreateInfo->format);
	bool is_depth, is_stencil, blendable;

	is_depth = vk_format_has_depth(desc);
	is_stencil = vk_format_has_stencil(desc);

	surface->blk_w = vk_format_get_blockwidth(pCreateInfo->format);
	surface->blk_h = vk_format_get_blockheight(pCreateInfo->format);

	surface->bpe = vk_format_get_blocksize(vk_format_depth_only(pCreateInfo->format));
	/* align byte per element on dword */
	if (surface->bpe == 3) {
		surface->bpe = 4;
	}
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (pCreateInfo->imageType) {
	case VK_IMAGE_TYPE_1D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case VK_IMAGE_TYPE_2D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case VK_IMAGE_TYPE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	default:
		unreachable("unhandled image type");
	}

	if (is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;
	}

	if (is_stencil)
		surface->flags |= RADEON_SURF_SBUFFER;

	surface->flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	bool dcc_compatible_formats = radv_is_colorbuffer_format_supported(pCreateInfo->format, &blendable);
	if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
		const struct VkImageFormatListCreateInfoKHR *format_list =
			(const struct VkImageFormatListCreateInfoKHR *)
				vk_find_struct_const(pCreateInfo->pNext,
						     IMAGE_FORMAT_LIST_CREATE_INFO_KHR);
		if (format_list) {
			/* compatibility is transitive, so we only need to check
			 * one format with everything else. */
			for (unsigned i = 0; i < format_list->viewFormatCount; ++i) {
				if (!radv_dcc_formats_compatible(pCreateInfo->format,
				                                 format_list->pViewFormats[i]))
					dcc_compatible_formats = false;
			}
		} else {
			dcc_compatible_formats = false;
		}
	}

	if ((pCreateInfo->usage & (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
	                           VK_IMAGE_USAGE_STORAGE_BIT)) ||
	    !dcc_compatible_formats ||
	    (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) ||
	    pCreateInfo->mipLevels > 1 || pCreateInfo->arrayLayers > 1 ||
	    device->physical_device->rad_info.chip_class < VI ||
	    create_info->scanout || (device->debug_flags & RADV_DEBUG_NO_DCC))
		surface->flags |= RADEON_SURF_DISABLE_DCC;

	if (create_info->scanout)
		surface->flags |= RADEON_SURF_SCANOUT;

	return 0;
}

#define ATI_VENDOR_ID 0x1002

static uint32_t si_get_bo_metadata_word1(struct radv_device *device)
{
	return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id;
}

static inline unsigned
si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
{
	if (stencil)
		return image->surface.u.legacy.stencil_tiling_index[level];
	else
		return image->surface.u.legacy.tiling_index[level];
}

static unsigned radv_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

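/* Fill a 4-dword buffer resource descriptor. NUM_RECORDS (state[2]) is
 * stored in elements when a stride is set on every generation except VI,
 * where the raw byte range is kept instead. */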
static void
radv_make_buffer_descriptor(struct radv_device *device,
			    struct radv_buffer *buffer,
			    VkFormat vk_format,
			    unsigned offset,
			    unsigned range,
			    uint32_t *state)
{
	const struct vk_format_description *desc;
	unsigned stride;
	uint64_t gpu_address = device->ws->buffer_get_va(buffer->bo);
	uint64_t va = gpu_address + buffer->offset;
	unsigned num_format, data_format;
	int first_non_void;

	desc = vk_format_description(vk_format);
	first_non_void = vk_format_get_first_non_void_channel(vk_format);
	stride = desc->block.bits / 8;

	num_format = radv_translate_buffer_numformat(desc, first_non_void);
	data_format = radv_translate_buffer_dataformat(desc, first_non_void);

	va += offset;
	state[0] = va;
	state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		   S_008F04_STRIDE(stride);

	if (device->physical_device->rad_info.chip_class != VI && stride) {
		range /= stride;
	}

	state[2] = range;
	state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) |
		   S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) |
		   S_008F0C_DST_SEL_Z(radv_map_swizzle(desc->swizzle[2])) |
		   S_008F0C_DST_SEL_W(radv_map_swizzle(desc->swizzle[3])) |
		   S_008F0C_NUM_FORMAT(num_format) |
		   S_008F0C_DATA_FORMAT(data_format);
}

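/* Patch the fields of an image descriptor that depend on where the image
 * is currently bound: base address, tile swizzle, and the DCC/HTILE
 * metadata address. The immutable fields are produced by
 * si_make_texture_descriptor(). */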
static void
si_set_mutable_tex_desc_fields(struct radv_device *device,
			       struct radv_image *image,
			       const struct legacy_surf_level *base_level_info,
			       unsigned base_level, unsigned first_level,
			       unsigned block_width, bool is_stencil,
			       uint32_t *state)
{
	uint64_t gpu_address = image->bo ? device->ws->buffer_get_va(image->bo) + image->offset : 0;
	uint64_t va = gpu_address;
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	uint64_t meta_va = 0;

	if (chip_class >= GFX9) {
		if (is_stencil)
			va += image->surface.u.gfx9.stencil_offset;
		else
			va += image->surface.u.gfx9.surf_offset;
	} else
		va += base_level_info->offset;

	state[0] = va >> 8;
	if (chip_class >= GFX9 ||
	    base_level_info->mode == RADEON_SURF_MODE_2D)
		state[0] |= image->surface.tile_swizzle;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);

	if (chip_class >= VI) {
		state[6] &= C_008F28_COMPRESSION_EN;
		state[7] = 0;
		if (image->surface.dcc_size && first_level < image->surface.num_dcc_levels) {
			meta_va = gpu_address + image->dcc_offset;
			if (chip_class <= VI)
				meta_va += base_level_info->dcc_offset;
			state[6] |= S_008F28_COMPRESSION_EN(1);
			state[7] = meta_va >> 8;
			state[7] |= image->surface.tile_swizzle;
		}
	}

	if (chip_class >= GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH_GFX9;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (image->dcc_offset)
				meta = image->surface.u.gfx9.dcc;
			else
				meta = image->surface.u.gfx9.htile;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		/* SI-CI-VI */
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(image, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH_GFX6;
		state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
	}
}

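/* Map a Vulkan image/view type to the SQ_RSRC_IMG_* resource dimension.
 * Cube views of storage images are encoded as 2D arrays, since image
 * stores use 2D-array rather than cube addressing. */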
static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
			     unsigned nr_layers, unsigned nr_samples, bool is_storage_image, bool gfx9)
{
	if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		return is_storage_image ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE;

	/* GFX9 allocates 1D textures as 2D. */
	if (gfx9 && image_type == VK_IMAGE_TYPE_1D)
		image_type = VK_IMAGE_TYPE_2D;

	switch (image_type) {
	case VK_IMAGE_TYPE_1D:
		return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D;
	case VK_IMAGE_TYPE_2D:
		if (nr_samples > 1)
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY : V_008F1C_SQ_RSRC_IMG_2D_MSAA;
		else
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_2D;
	case VK_IMAGE_TYPE_3D:
		if (view_type == VK_IMAGE_VIEW_TYPE_3D)
			return V_008F1C_SQ_RSRC_IMG_3D;
		else
			return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
	default:
		unreachable("illegal image type");
	}
}

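/* Pick the BC_SWIZZLE setting so that the border color's alpha channel
 * ends up in the right place after the view's component swizzle has been
 * applied. */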
static unsigned gfx9_border_color_swizzle(const unsigned char swizzle[4])
{
	unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;

	if (swizzle[3] == VK_SWIZZLE_X) {
		/* For the pre-defined border color values (white, opaque
		 * black, transparent black), the only thing that matters is
		 * that the alpha channel winds up in the correct place
		 * (because the RGB channels are all the same) so either of
		 * these enumerations will work.
		 */
		if (swizzle[2] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_WZYX;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ;
	} else if (swizzle[0] == VK_SWIZZLE_X) {
		if (swizzle[1] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ;
	} else if (swizzle[1] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ;
	} else if (swizzle[2] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW;
	}

	return bc_swizzle;
}

/**
 * Build the sampler view descriptor for a texture.
 */
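/* Writes the 8-dword (256-bit) image resource descriptor to state[0..7].
 * For multisampled images with an FMASK surface it also fills
 * fmask_state[0..7]; otherwise fmask_state is zeroed if non-NULL. */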
static void
si_make_texture_descriptor(struct radv_device *device,
			   struct radv_image *image,
			   bool is_storage_image,
			   VkImageViewType view_type,
			   VkFormat vk_format,
			   const VkComponentMapping *mapping,
			   unsigned first_level, unsigned last_level,
			   unsigned first_layer, unsigned last_layer,
			   unsigned width, unsigned height, unsigned depth,
			   uint32_t *state,
			   uint32_t *fmask_state)
{
	const struct vk_format_description *desc;
	enum vk_swizzle swizzle[4];
	int first_non_void;
	unsigned num_format, data_format, type;

	desc = vk_format_description(vk_format);

	if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) {
		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
		vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle);
	} else {
		vk_format_compose_swizzles(mapping, desc->swizzle, swizzle);
	}

	first_non_void = vk_format_get_first_non_void_channel(vk_format);

	num_format = radv_translate_tex_numformat(vk_format, desc, first_non_void);
	if (num_format == ~0) {
		num_format = 0;
	}

	data_format = radv_translate_tex_dataformat(vk_format, desc, first_non_void);
	if (data_format == ~0) {
		data_format = 0;
	}

	type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
			    is_storage_image, device->physical_device->rad_info.chip_class >= GFX9);
	if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
		height = 1;
		depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY ||
		   type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		if (view_type != VK_IMAGE_VIEW_TYPE_3D)
			depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_CUBE)
		depth = image->info.array_size / 6;

	state[0] = 0;
	state[1] = (S_008F14_DATA_FORMAT_GFX6(data_format) |
		    S_008F14_NUM_FORMAT_GFX6(num_format));
	state[2] = (S_008F18_WIDTH(width - 1) |
		    S_008F18_HEIGHT(height - 1) |
		    S_008F18_PERF_MOD(4));
	state[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle[0])) |
		    S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) |
		    S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) |
		    S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle[3])) |
		    S_008F1C_BASE_LEVEL(image->info.samples > 1 ?
					0 : first_level) |
		    S_008F1C_LAST_LEVEL(image->info.samples > 1 ?
					util_logbase2(image->info.samples) :
					last_level) |
		    S_008F1C_TYPE(type));
	state[4] = 0;
	state[5] = S_008F24_BASE_ARRAY(first_layer);
	state[6] = 0;
	state[7] = 0;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned bc_swizzle = gfx9_border_color_swizzle(desc->swizzle);

		/* Depth is the last accessible layer on GFX9.
		 * The hw doesn't need to know the total number of layers.
		 */
		if (type == V_008F1C_SQ_RSRC_IMG_3D)
			state[4] |= S_008F20_DEPTH(depth - 1);
		else
			state[4] |= S_008F20_DEPTH(last_layer);

		state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
		state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ?
					     util_logbase2(image->info.samples) :
					     image->info.levels - 1);
	} else {
		state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
		state[4] |= S_008F20_DEPTH(depth - 1);
		state[5] |= S_008F24_LAST_ARRAY(last_layer);
	}

	if (image->dcc_offset) {
		unsigned swap = radv_translate_colorswap(vk_format, FALSE);

		state[6] = S_008F28_ALPHA_IS_ON_MSB(swap <= 1);
	} else {
		/* The last dword is unused by hw. The shader uses it to clear
		 * bits in the first dword of sampler state.
		 */
		if (device->physical_device->rad_info.chip_class <= CIK && image->info.samples <= 1) {
			if (first_level == last_level)
				state[7] = C_008F30_MAX_ANISO_RATIO;
			else
				state[7] = 0xffffffff;
		}
	}

	/* Initialize the sampler view for FMASK. */
	if (image->fmask.size) {
		uint32_t fmask_format, num_format;
		uint64_t gpu_address = device->ws->buffer_get_va(image->bo);
		uint64_t va;

		va = gpu_address + image->offset + image->fmask.offset;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK;
			switch (image->info.samples) {
			case 2:
				num_format = V_008F14_IMG_FMASK_8_2_2;
				break;
			case 4:
				num_format = V_008F14_IMG_FMASK_8_4_4;
				break;
			case 8:
				num_format = V_008F14_IMG_FMASK_32_8_8;
				break;
			default:
				unreachable("invalid nr_samples");
			}
		} else {
			switch (image->info.samples) {
			case 2:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
				break;
			case 4:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
				break;
			case 8:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
				break;
			default:
				assert(0);
				fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
			}
			num_format = V_008F14_IMG_NUM_FORMAT_UINT;
		}

		fmask_state[0] = va >> 8;
		fmask_state[0] |= image->fmask.tile_swizzle;
		fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
				 S_008F14_DATA_FORMAT_GFX6(fmask_format) |
				 S_008F14_NUM_FORMAT_GFX6(num_format);
		fmask_state[2] = S_008F18_WIDTH(width - 1) |
				 S_008F18_HEIGHT(height - 1);
		fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
				 S_008F1C_TYPE(radv_tex_dim(image->type, view_type, 1, 0, false, false));
		fmask_state[4] = 0;
		fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
		fmask_state[6] = 0;
		fmask_state[7] = 0;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.fmask.swizzle_mode);
			fmask_state[4] |= S_008F20_DEPTH(last_layer) |
					  S_008F20_PITCH_GFX9(image->surface.u.gfx9.fmask.epitch);
			fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(image->surface.u.gfx9.cmask.pipe_aligned) |
					  S_008F24_META_RB_ALIGNED(image->surface.u.gfx9.cmask.rb_aligned);
		} else {
			fmask_state[3] |= S_008F1C_TILING_INDEX(image->fmask.tile_mode_index);
			fmask_state[4] |= S_008F20_DEPTH(depth - 1) |
					  S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1);
			fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer);
		}
	} else if (fmask_state)
		memset(fmask_state, 0, 8 * 4);
}

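/* Build the opaque driver metadata that travels with a shared image, so
 * that other userspace drivers can reconstruct an equivalent descriptor
 * for the same BO. */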
static void
radv_query_opaque_metadata(struct radv_device *device,
			   struct radv_image *image,
			   struct radeon_bo_metadata *md)
{
	static const VkComponentMapping fixedmapping;
	uint32_t desc[8], i;

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */
	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(device);

	si_make_texture_descriptor(device, image, false,
				   (VkImageViewType)image->type, image->vk_format,
				   &fixedmapping, 0, image->info.levels - 1, 0,
				   image->info.array_size,
				   image->info.width, image->info.height,
				   image->info.depth,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(device, image, &image->surface.u.legacy.level[0], 0, 0,
				       image->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = image->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (device->physical_device->rad_info.chip_class <= VI) {
		for (i = 0; i <= image->info.levels - 1; i++)
			md->metadata[10 + i] = image->surface.u.legacy.level[i].offset >> 8;
		md->size_metadata = (11 + image->info.levels - 1) * 4;
	}
}

void
radv_init_metadata(struct radv_device *device,
		   struct radv_image *image,
		   struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &image->surface;

	memset(metadata, 0, sizeof(*metadata));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}

	radv_query_opaque_metadata(device, image, metadata);
}

/* The number of samples can be specified independently of the texture. */
static void
radv_image_get_fmask_info(struct radv_device *device,
			  struct radv_image *image,
			  unsigned nr_samples,
			  struct radv_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = {};
	struct ac_surf_info info = image->info;

	memset(out, 0, sizeof(*out));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.fmask_alignment;
		out->size = image->surface.u.gfx9.fmask_size;
		return;
	}

	fmask.blk_w = image->surface.blk_w;
	fmask.blk_h = image->surface.blk_h;
	info.samples = 1;
	fmask.flags = image->surface.flags | RADEON_SURF_FMASK;

	if (!image->shareable)
		info.surf_index = &device->fmask_mrt_offset_counter;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		return;
	}

	device->ws->surface_init(device->ws, &info, &fmask);
	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;

	assert(!out->tile_swizzle || !image->shareable);
}

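/* The radv_image_alloc_* helpers below append a metadata surface (FMASK,
 * CMASK, DCC or HTILE) to the end of the image allocation, growing
 * image->size and image->alignment accordingly. */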
static void
radv_image_alloc_fmask(struct radv_device *device,
		       struct radv_image *image)
{
	radv_image_get_fmask_info(device, image, image->info.samples, &image->fmask);

	image->fmask.offset = align64(image->size, image->fmask.alignment);
	image->size = image->fmask.offset + image->fmask.size;
	image->alignment = MAX2(image->alignment, image->fmask.alignment);
}

static void
radv_image_get_cmask_info(struct radv_device *device,
			  struct radv_image *image,
			  struct radv_cmask_info *out)
{
	unsigned pipe_interleave_bytes = device->physical_device->rad_info.pipe_interleave_bytes;
	unsigned num_pipes = device->physical_device->rad_info.num_tile_pipes;
	unsigned cl_width, cl_height;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.cmask_alignment;
		out->size = image->surface.u.gfx9.cmask_size;
		return;
	}

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(image->info.width, cl_width * 8);
	unsigned height = align(image->info.height, cl_height * 8);
	unsigned slice_elements = (width * height) / (8 * 8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128 * 128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (image->type == VK_IMAGE_TYPE_3D ? image->info.depth : image->info.array_size) *
		    align(slice_bytes, base_align);
}

static void
radv_image_alloc_cmask(struct radv_device *device,
		       struct radv_image *image)
{
	uint32_t clear_value_size = 0;

	radv_image_get_cmask_info(device, image, &image->cmask);

	image->cmask.offset = align64(image->size, image->cmask.alignment);
	/* + 8 for storing the clear values */
	if (!image->clear_value_offset) {
		image->clear_value_offset = image->cmask.offset + image->cmask.size;
		clear_value_size = 8;
	}
	image->size = image->cmask.offset + image->cmask.size + clear_value_size;
	image->alignment = MAX2(image->alignment, image->cmask.alignment);
}

static void
radv_image_alloc_dcc(struct radv_device *device,
		     struct radv_image *image)
{
	image->dcc_offset = align64(image->size, image->surface.dcc_alignment);
	/* + 16 for storing the clear values + dcc pred */
	image->clear_value_offset = image->dcc_offset + image->surface.dcc_size;
	image->dcc_pred_offset = image->clear_value_offset + 8;
	image->size = image->dcc_offset + image->surface.dcc_size + 16;
	image->alignment = MAX2(image->alignment, image->surface.dcc_alignment);
}

static void
radv_image_alloc_htile(struct radv_device *device,
		       struct radv_image *image)
{
	if ((device->debug_flags & RADV_DEBUG_NO_HIZ) || image->info.levels > 1) {
		image->surface.htile_size = 0;
		return;
	}

	image->htile_offset = align64(image->size, image->surface.htile_alignment);

	/* + 8 for storing the clear values */
	image->clear_value_offset = image->htile_offset + image->surface.htile_size;
	image->size = image->clear_value_offset + 8;
	image->alignment = align64(image->alignment, image->surface.htile_alignment);
}

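/* Create a radv_image: compute the surface layout, append whichever
 * metadata surfaces (DCC, CMASK, FMASK, HTILE) the image qualifies for,
 * and back sparse images with a virtual buffer. */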
VkResult
radv_image_create(VkDevice _device,
		  const struct radv_image_create_info *create_info,
		  const VkAllocationCallbacks* alloc,
		  VkImage *pImage)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	struct radv_image *image = NULL;
	bool can_cmask_dcc = false;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

	radv_assert(pCreateInfo->mipLevels > 0);
	radv_assert(pCreateInfo->arrayLayers > 0);
	radv_assert(pCreateInfo->samples > 0);
	radv_assert(pCreateInfo->extent.width > 0);
	radv_assert(pCreateInfo->extent.height > 0);
	radv_assert(pCreateInfo->extent.depth > 0);

	image = vk_alloc2(&device->alloc, alloc, sizeof(*image), 8,
			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!image)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(image, 0, sizeof(*image));
	image->type = pCreateInfo->imageType;
	image->info.width = pCreateInfo->extent.width;
	image->info.height = pCreateInfo->extent.height;
	image->info.depth = pCreateInfo->extent.depth;
	image->info.samples = pCreateInfo->samples;
	image->info.array_size = pCreateInfo->arrayLayers;
	image->info.levels = pCreateInfo->mipLevels;

	image->vk_format = pCreateInfo->format;
	image->tiling = pCreateInfo->tiling;
	image->usage = pCreateInfo->usage;
	image->flags = pCreateInfo->flags;

	image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
	if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
		for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
			if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL_KHR)
				image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
			else
				image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
	}

	image->shareable = vk_find_struct_const(pCreateInfo->pNext,
						EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
	if (!vk_format_is_depth(pCreateInfo->format) && !create_info->scanout && !image->shareable) {
		image->info.surf_index = &device->image_mrt_offset_counter;
	}

	radv_init_surface(device, &image->surface, create_info);

	device->ws->surface_init(device->ws, &image->info, &image->surface);

	image->size = image->surface.surf_size;
	image->alignment = image->surface.surf_alignment;

	if (image->exclusive || image->queue_family_mask == 1)
		can_cmask_dcc = true;

	if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
	    image->surface.dcc_size && can_cmask_dcc)
		radv_image_alloc_dcc(device, image);
	else
		image->surface.dcc_size = 0;

	if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
	    pCreateInfo->mipLevels == 1 &&
	    !image->surface.dcc_size && image->info.depth == 1 && can_cmask_dcc &&
	    !image->surface.is_linear)
		radv_image_alloc_cmask(device, image);

	if (image->info.samples > 1 && vk_format_is_color(pCreateInfo->format)) {
		radv_image_alloc_fmask(device, image);
	} else if (vk_format_is_depth(pCreateInfo->format)) {
		radv_image_alloc_htile(device, image);
	}

	if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
		image->alignment = MAX2(image->alignment, 4096);
		image->size = align64(image->size, image->alignment);
		image->offset = 0;

		image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment,
						      0, RADEON_FLAG_VIRTUAL);
		if (!image->bo) {
			vk_free2(&device->alloc, alloc, image);
			return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
		}
	}

	*pImage = radv_image_to_handle(image);

	return VK_SUCCESS;
}

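/* Build either the sampled or the storage descriptor pair for an image
 * view; the storage variant differs in that cube views are encoded as 2D
 * arrays (see radv_tex_dim()). */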
static void
radv_image_view_make_descriptor(struct radv_image_view *iview,
				struct radv_device *device,
				const VkComponentMapping *components,
				bool is_storage_image)
{
	struct radv_image *image = iview->image;
	bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
	uint32_t blk_w;
	uint32_t *descriptor;
	uint32_t *fmask_descriptor;
	uint32_t hw_level = 0;

	if (is_storage_image) {
		descriptor = iview->storage_descriptor;
		fmask_descriptor = iview->storage_fmask_descriptor;
	} else {
		descriptor = iview->descriptor;
		fmask_descriptor = iview->fmask_descriptor;
	}

	assert(image->surface.blk_w % vk_format_get_blockwidth(image->vk_format) == 0);
	blk_w = image->surface.blk_w / vk_format_get_blockwidth(image->vk_format) * vk_format_get_blockwidth(iview->vk_format);

	if (device->physical_device->rad_info.chip_class >= GFX9)
		hw_level = iview->base_mip;
	si_make_texture_descriptor(device, image, is_storage_image,
				   iview->type,
				   iview->vk_format,
				   components,
				   hw_level, hw_level + iview->level_count - 1,
				   iview->base_layer,
				   iview->base_layer + iview->layer_count - 1,
				   iview->extent.width,
				   iview->extent.height,
				   iview->extent.depth,
				   descriptor,
				   fmask_descriptor);

	const struct legacy_surf_level *base_level_info = NULL;
	if (device->physical_device->rad_info.chip_class <= GFX9) {
		if (is_stencil)
			base_level_info = &image->surface.u.legacy.stencil_level[iview->base_mip];
		else
			base_level_info = &image->surface.u.legacy.level[iview->base_mip];
	}
	si_set_mutable_tex_desc_fields(device, image,
				       base_level_info,
				       iview->base_mip,
				       iview->base_mip,
				       blk_w, is_stencil, descriptor);
}

void
radv_image_view_init(struct radv_image_view *iview,
		     struct radv_device *device,
		     const VkImageViewCreateInfo* pCreateInfo)
{
	RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
	const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

	switch (image->type) {
	case VK_IMAGE_TYPE_1D:
	case VK_IMAGE_TYPE_2D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <= image->info.array_size);
		break;
	case VK_IMAGE_TYPE_3D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1
		       <= radv_minify(image->info.depth, range->baseMipLevel));
		break;
	default:
		unreachable("bad VkImageType");
	}

	iview->image = image;
	iview->bo = image->bo;
	iview->type = pCreateInfo->viewType;
	iview->vk_format = pCreateInfo->format;
	iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;

	if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
		iview->vk_format = vk_format_stencil_only(iview->vk_format);
	} else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
		iview->vk_format = vk_format_depth_only(iview->vk_format);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		iview->extent = (VkExtent3D) {
			.width = image->info.width,
			.height = image->info.height,
			.depth = image->info.depth,
		};
	} else {
		iview->extent = (VkExtent3D) {
			.width = radv_minify(image->info.width, range->baseMipLevel),
			.height = radv_minify(image->info.height, range->baseMipLevel),
			.depth = radv_minify(image->info.depth, range->baseMipLevel),
		};
	}

	if (iview->vk_format != image->vk_format) {
		iview->extent.width = round_up_u32(iview->extent.width * vk_format_get_blockwidth(iview->vk_format),
						   vk_format_get_blockwidth(image->vk_format));
		iview->extent.height = round_up_u32(iview->extent.height * vk_format_get_blockheight(iview->vk_format),
						    vk_format_get_blockheight(image->vk_format));
	}

	iview->base_layer = range->baseArrayLayer;
	iview->layer_count = radv_get_layerCount(image, range);
	iview->base_mip = range->baseMipLevel;
	iview->level_count = radv_get_levelCount(image, range);

	radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, false);
	radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, true);
}

bool radv_layout_has_htile(const struct radv_image *image,
			   VkImageLayout layout,
			   unsigned queue_mask)
{
	return image->surface.htile_size &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

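/* Currently identical to radv_layout_has_htile(); kept as a separate entry
 * point so callers can distinguish "HTILE exists for this layout" from
 * "HTILE is kept compressed", should the two conditions ever diverge. */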
bool radv_layout_is_htile_compressed(const struct radv_image *image,
				     VkImageLayout layout,
				     unsigned queue_mask)
{
	return image->surface.htile_size &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

bool radv_layout_can_fast_clear(const struct radv_image *image,
				VkImageLayout layout,
				unsigned queue_mask)
{
	return layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

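/* Compute the mask of queue families that may access an image: concurrent
 * images use the mask built at creation time, external access opens the
 * image to every family, and VK_QUEUE_FAMILY_IGNORED falls back to the
 * queue family of the current command buffer. */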
unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
{
	if (!image->exclusive)
		return image->queue_family_mask;
	if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
		return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
	if (family == VK_QUEUE_FAMILY_IGNORED)
		return 1u << queue_family;
	return 1u << family;
}

VkResult
radv_CreateImage(VkDevice device,
		 const VkImageCreateInfo *pCreateInfo,
		 const VkAllocationCallbacks *pAllocator,
		 VkImage *pImage)
{
	return radv_image_create(device,
				 &(struct radv_image_create_info) {
					 .vk_info = pCreateInfo,
					 .scanout = false,
				 },
				 pAllocator,
				 pImage);
}

void
radv_DestroyImage(VkDevice _device, VkImage _image,
		  const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_image, image, _image);

	if (!image)
		return;

	if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
		device->ws->buffer_destroy(image->bo);

	vk_free2(&device->alloc, pAllocator, image);
}

void radv_GetImageSubresourceLayout(
	VkDevice _device,
	VkImage _image,
	const VkImageSubresource* pSubresource,
	VkSubresourceLayout* pLayout)
{
	RADV_FROM_HANDLE(radv_image, image, _image);
	RADV_FROM_HANDLE(radv_device, device, _device);
	int level = pSubresource->mipLevel;
	int layer = pSubresource->arrayLayer;
	struct radeon_surf *surface = &image->surface;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		pLayout->offset = surface->u.gfx9.offset[level] + surface->u.gfx9.surf_slice_size * layer;
		pLayout->rowPitch = surface->u.gfx9.surf_pitch * surface->bpe;
		pLayout->arrayPitch = surface->u.gfx9.surf_slice_size;
		pLayout->depthPitch = surface->u.gfx9.surf_slice_size;
		pLayout->size = surface->u.gfx9.surf_slice_size;
		if (image->type == VK_IMAGE_TYPE_3D)
			pLayout->size *= u_minify(image->info.depth, level);
	} else {
		pLayout->offset = surface->u.legacy.level[level].offset + surface->u.legacy.level[level].slice_size * layer;
		pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe;
		pLayout->arrayPitch = surface->u.legacy.level[level].slice_size;
		pLayout->depthPitch = surface->u.legacy.level[level].slice_size;
		pLayout->size = surface->u.legacy.level[level].slice_size;
		if (image->type == VK_IMAGE_TYPE_3D)
			pLayout->size *= u_minify(image->info.depth, level);
	}
}

VkResult
radv_CreateImageView(VkDevice _device,
		     const VkImageViewCreateInfo *pCreateInfo,
		     const VkAllocationCallbacks *pAllocator,
		     VkImageView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_image_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (view == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_image_view_init(view, device, pCreateInfo);

	*pView = radv_image_view_to_handle(view);

	return VK_SUCCESS;
}

void
radv_DestroyImageView(VkDevice _device, VkImageView _iview,
		      const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_image_view, iview, _iview);

	if (!iview)
		return;

	vk_free2(&device->alloc, pAllocator, iview);
}

void radv_buffer_view_init(struct radv_buffer_view *view,
			   struct radv_device *device,
			   const VkBufferViewCreateInfo* pCreateInfo)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);

	view->bo = buffer->bo;
	view->range = pCreateInfo->range == VK_WHOLE_SIZE ?
		buffer->size - pCreateInfo->offset : pCreateInfo->range;
	view->vk_format = pCreateInfo->format;

	radv_make_buffer_descriptor(device, buffer, view->vk_format,
				    pCreateInfo->offset, view->range, view->state);
}

VkResult
radv_CreateBufferView(VkDevice _device,
		      const VkBufferViewCreateInfo *pCreateInfo,
		      const VkAllocationCallbacks *pAllocator,
		      VkBufferView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_buffer_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!view)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_buffer_view_init(view, device, pCreateInfo);

	*pView = radv_buffer_view_to_handle(view);

	return VK_SUCCESS;
}

void
radv_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
		       const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_buffer_view, view, bufferView);

	if (!view)
		return;

	vk_free2(&device->alloc, pAllocator, view);
}