/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "isl/isl.h"

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/* The higher compiler layers use the GL enums for image formats even if
 * they come in from SPIR-V or Vulkan.  We need to turn them into an ISL
 * enum before we can use them.
 */
static enum isl_format
isl_format_for_gl_format(uint32_t gl_format)
{
   switch (gl_format) {
   case GL_R8:             return ISL_FORMAT_R8_UNORM;
   case GL_R8_SNORM:       return ISL_FORMAT_R8_SNORM;
   case GL_R8UI:           return ISL_FORMAT_R8_UINT;
   case GL_R8I:            return ISL_FORMAT_R8_SINT;
   case GL_RG8:            return ISL_FORMAT_R8G8_UNORM;
   case GL_RG8_SNORM:      return ISL_FORMAT_R8G8_SNORM;
   case GL_RG8UI:          return ISL_FORMAT_R8G8_UINT;
   case GL_RG8I:           return ISL_FORMAT_R8G8_SINT;
   case GL_RGBA8:          return ISL_FORMAT_R8G8B8A8_UNORM;
   case GL_RGBA8_SNORM:    return ISL_FORMAT_R8G8B8A8_SNORM;
   case GL_RGBA8UI:        return ISL_FORMAT_R8G8B8A8_UINT;
   case GL_RGBA8I:         return ISL_FORMAT_R8G8B8A8_SINT;
   case GL_R11F_G11F_B10F: return ISL_FORMAT_R11G11B10_FLOAT;
   case GL_RGB10_A2:       return ISL_FORMAT_R10G10B10A2_UNORM;
   case GL_RGB10_A2UI:     return ISL_FORMAT_R10G10B10A2_UINT;
   case GL_R16:            return ISL_FORMAT_R16_UNORM;
   case GL_R16_SNORM:      return ISL_FORMAT_R16_SNORM;
   case GL_R16F:           return ISL_FORMAT_R16_FLOAT;
   case GL_R16UI:          return ISL_FORMAT_R16_UINT;
   case GL_R16I:           return ISL_FORMAT_R16_SINT;
   case GL_RG16:           return ISL_FORMAT_R16G16_UNORM;
   case GL_RG16_SNORM:     return ISL_FORMAT_R16G16_SNORM;
   case GL_RG16F:          return ISL_FORMAT_R16G16_FLOAT;
   case GL_RG16UI:         return ISL_FORMAT_R16G16_UINT;
   case GL_RG16I:          return ISL_FORMAT_R16G16_SINT;
   case GL_RGBA16:         return ISL_FORMAT_R16G16B16A16_UNORM;
   case GL_RGBA16_SNORM:   return ISL_FORMAT_R16G16B16A16_SNORM;
   case GL_RGBA16F:        return ISL_FORMAT_R16G16B16A16_FLOAT;
   case GL_RGBA16UI:       return ISL_FORMAT_R16G16B16A16_UINT;
   case GL_RGBA16I:        return ISL_FORMAT_R16G16B16A16_SINT;
   case GL_R32F:           return ISL_FORMAT_R32_FLOAT;
   case GL_R32UI:          return ISL_FORMAT_R32_UINT;
   case GL_R32I:           return ISL_FORMAT_R32_SINT;
   case GL_RG32F:          return ISL_FORMAT_R32G32_FLOAT;
   case GL_RG32UI:         return ISL_FORMAT_R32G32_UINT;
   case GL_RG32I:          return ISL_FORMAT_R32G32_SINT;
   case GL_RGBA32F:        return ISL_FORMAT_R32G32B32A32_FLOAT;
   case GL_RGBA32UI:       return ISL_FORMAT_R32G32B32A32_UINT;
   case GL_RGBA32I:        return ISL_FORMAT_R32G32B32A32_SINT;
   case GL_NONE:           return ISL_FORMAT_UNSUPPORTED;
   default:
      assert(!"Invalid image format");
      return ISL_FORMAT_UNSUPPORTED;
   }
}

static nir_ssa_def *
_load_image_param(nir_builder *b, nir_deref_instr *deref, unsigned offset)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_image_deref_load_param_intel);
   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   nir_intrinsic_set_base(load, offset / 4);

   switch (offset) {
   case BRW_IMAGE_PARAM_OFFSET_OFFSET:
   case BRW_IMAGE_PARAM_SWIZZLING_OFFSET:
      load->num_components = 2;
      break;
   case BRW_IMAGE_PARAM_TILING_OFFSET:
   case BRW_IMAGE_PARAM_SIZE_OFFSET:
      load->num_components = 3;
      break;
   case BRW_IMAGE_PARAM_STRIDE_OFFSET:
      load->num_components = 4;
      break;
   default:
      unreachable("Invalid param offset");
   }

   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, 32, NULL);

   nir_builder_instr_insert(b, &load->instr);
   return &load->dest.ssa;
}

#define load_image_param(b, d, o) \
   _load_image_param(b, d, BRW_IMAGE_PARAM_##o##_OFFSET)
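
/* The helper macro above token-pastes the parameter name into the offset
 * enum, e.g. load_image_param(b, deref, SIZE) expands to
 * _load_image_param(b, deref, BRW_IMAGE_PARAM_SIZE_OFFSET) and loads the
 * 3-component size vector from the image's parameter block.
 */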

static nir_ssa_def *
image_coord_is_in_bounds(nir_builder *b, nir_deref_instr *deref,
                         nir_ssa_def *coord)
{
   nir_ssa_def *size = load_image_param(b, deref, SIZE);
   nir_ssa_def *cmp = nir_ilt(b, coord, size);

   unsigned coord_comps = glsl_get_sampler_coordinate_components(deref->type);
   nir_ssa_def *in_bounds = nir_imm_true(b);
   for (unsigned i = 0; i < coord_comps; i++)
      in_bounds = nir_iand(b, in_bounds, nir_channel(b, cmp, i));

   return in_bounds;
}
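
/* Note: the check above only validates the upper bound of each coordinate
 * component; nir_ilt() is a signed comparison, so negative coordinates are
 * not explicitly rejected here.
 */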

/** Calculate the offset in memory of the texel given by \p coord.
 *
 * This is meant to be used with untyped surface messages to access a tiled
 * surface, which involves manually taking into account the tiling and
 * swizzling modes of the surface, so it will hopefully not happen very often.
 *
 * The tiling algorithm implemented here matches either the X or Y tiling
 * layouts supported by the hardware depending on the tiling coefficients
 * passed to the program as uniforms.  See Volume 1 Part 2 Section 4.5
 * "Address Tiling Function" of the IVB PRM for an in-depth explanation of
 * the hardware tiling format.
 */
static nir_ssa_def *
image_address(nir_builder *b, const struct gen_device_info *devinfo,
              nir_deref_instr *deref, nir_ssa_def *coord)
{
   if (glsl_get_sampler_dim(deref->type) == GLSL_SAMPLER_DIM_1D &&
       glsl_sampler_type_is_array(deref->type)) {
      /* It's easier if 1D arrays are treated like 2D arrays */
      coord = nir_vec3(b, nir_channel(b, coord, 0),
                          nir_imm_int(b, 0),
                          nir_channel(b, coord, 1));
   } else {
      unsigned dims = glsl_get_sampler_coordinate_components(deref->type);
      coord = nir_channels(b, coord, (1 << dims) - 1);
   }

   nir_ssa_def *offset = load_image_param(b, deref, OFFSET);
   nir_ssa_def *tiling = load_image_param(b, deref, TILING);
   nir_ssa_def *stride = load_image_param(b, deref, STRIDE);

   /* Shift the coordinates by the fixed surface offset.  It may be non-zero
    * if the image is a single slice of a higher-dimensional surface, or if a
    * non-zero mipmap level of the surface is bound to the pipeline.  The
    * offset needs to be applied here rather than at surface state set-up time
    * because the desired slice-level may start mid-tile, so simply shifting
    * the surface base address wouldn't give a well-formed tiled surface in
    * the general case.
    */
   nir_ssa_def *xypos = (coord->num_components == 1) ?
                        nir_vec2(b, coord, nir_imm_int(b, 0)) :
                        nir_channels(b, coord, 0x3);
   xypos = nir_iadd(b, xypos, offset);

   /* The layout of 3-D textures in memory is sort-of like a tiling
    * format.  At each miplevel, the slices are arranged in rows of
    * 2^level slices per row.  The slice row is stored in tmp.y and
    * the slice within the row is stored in tmp.x.
    *
    * The layout of 2-D array textures and cubemaps is much simpler:
    * Depending on whether the ARYSPC_LOD0 layout is in use it will be
    * stored in memory as an array of slices, each one being a 2-D
    * arrangement of miplevels, or as a 2-D arrangement of miplevels,
    * each one being an array of slices.  In either case the separation
    * between slices of the same LOD is equal to the qpitch value
    * provided as stride.w.
    *
    * This code can be made to handle both 2-D arrays and 3-D textures
    * by passing in the miplevel as tile.z for 3-D textures and 0 in
    * tile.z for 2-D array textures.
    *
    * See Volume 1 Part 1 of the Gen7 PRM, sections 6.18.4.7 "Surface
    * Arrays" and 6.18.6 "3D Surfaces" for a more extensive discussion
    * of the hardware 3D texture and 2D array layouts.
    */
   if (coord->num_components > 2) {
      /* Decompose z into a major (tmp.y) and a minor (tmp.x)
       * index.
       */
      nir_ssa_def *z = nir_channel(b, coord, 2);
      nir_ssa_def *z_x = nir_ubfe(b, z, nir_imm_int(b, 0),
                                  nir_channel(b, tiling, 2));
      nir_ssa_def *z_y = nir_ushr(b, z, nir_channel(b, tiling, 2));

      /* Take into account the horizontal (tmp.x) and vertical (tmp.y)
       * slice offset.
       */
      xypos = nir_iadd(b, xypos, nir_imul(b, nir_vec2(b, z_x, z_y),
                                             nir_channels(b, stride, 0xc)));
   }
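
   /* For illustration, with a hypothetical tile.z == 2 (four slices per
    * slice row) the decomposition above splits z == 11 into
    * z_x == 11 & 3 == 3 (position within the row) and z_y == 11 >> 2 == 2
    * (the slice row).
    */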

   nir_ssa_def *addr;
   if (coord->num_components > 1) {
      /* Calculate the major/minor x and y indices.  In order to
       * accommodate both X and Y tiling, the Y-major tiling format is
       * treated as being a bunch of narrow X-tiles placed next to each
       * other.  This means that the tile width for Y-tiling is actually
       * the width of one sub-column of the Y-major tile where each 4K
       * tile has 8 512B sub-columns.
       *
       * The major Y value is the row of tiles in which the pixel lives.
       * The major X value is the tile sub-column in which the pixel
       * lives; for X tiling, this is the same as the tile column, for Y
       * tiling, each tile has 8 sub-columns.  The minor X and Y indices
       * are the position within the sub-column.
       */

      /* Calculate the minor x and y indices. */
      nir_ssa_def *minor = nir_ubfe(b, xypos, nir_imm_int(b, 0),
                                    nir_channels(b, tiling, 0x3));
      nir_ssa_def *major = nir_ushr(b, xypos, nir_channels(b, tiling, 0x3));

      /* Calculate the texel index from the start of the tile row and the
       * vertical coordinate of the row.
       *
       * tmp.x = (major.x << tile.y << tile.x) +
       *         (minor.y << tile.x) + minor.x
       * tmp.y = major.y << tile.y
       */
      nir_ssa_def *idx_x, *idx_y;
      idx_x = nir_ishl(b, nir_channel(b, major, 0), nir_channel(b, tiling, 1));
      idx_x = nir_iadd(b, idx_x, nir_channel(b, minor, 1));
      idx_x = nir_ishl(b, idx_x, nir_channel(b, tiling, 0));
      idx_x = nir_iadd(b, idx_x, nir_channel(b, minor, 0));
      idx_y = nir_ishl(b, nir_channel(b, major, 1), nir_channel(b, tiling, 1));
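
      /* Worked example with hypothetical coefficients tile.x == 3 and
       * tile.y == 2 (an 8x4-texel tile): for xypos == (13, 6) we get
       * minor == (5, 2) and major == (1, 1), so
       * idx_x == (((1 << 2) + 2) << 3) + 5 == 53 and idx_y == 1 << 2 == 4,
       * matching the formulas above.
       */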

      /* Add it to the start of the tile row. */
      nir_ssa_def *idx;
      idx = nir_imul(b, idx_y, nir_channel(b, stride, 1));
      idx = nir_iadd(b, idx, idx_x);

      /* Multiply by the Bpp value. */
      addr = nir_imul(b, idx, nir_channel(b, stride, 0));

      if (devinfo->gen < 8 && !devinfo->is_baytrail) {
         /* Take into account the two dynamically specified shifts.  Both are
          * used to implement swizzling of X-tiled surfaces.  For Y-tiled
          * surfaces only one bit needs to be XOR-ed with bit 6 of the memory
          * address, so a swz value of 0xff (actually interpreted as 31 by the
          * hardware) will be provided to cause the relevant bit of tmp.y to
          * be zero and turn the first XOR into the identity.  For linear
          * surfaces or platforms lacking address swizzling both shifts will
          * be 0xff, causing the relevant bits of both tmp.x and tmp.y to be
          * zero, which effectively disables swizzling.
          */
         nir_ssa_def *swizzle = load_image_param(b, deref, SWIZZLING);
         nir_ssa_def *shift0 = nir_ushr(b, addr, nir_channel(b, swizzle, 0));
         nir_ssa_def *shift1 = nir_ushr(b, addr, nir_channel(b, swizzle, 1));

         /* XOR tmp.x and tmp.y with bit 6 of the memory address. */
         nir_ssa_def *bit = nir_iand(b, nir_ixor(b, shift0, shift1),
                                        nir_imm_int(b, 1 << 6));
         addr = nir_ixor(b, addr, bit);
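
         /* In effect, bit 6 of the address becomes
          * addr[6] ^ addr[6 + swz.x] ^ addr[6 + swz.y]; e.g. hypothetical
          * shift values of 3 and 4 would fold address bits 9 and 10 into
          * bit 6.
          */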
      }
   } else {
      /* Multiply by the Bpp/stride value.  Note that the addr.y may be
       * non-zero even if the image is one-dimensional because a vertical
       * offset may have been applied above to select a non-zero slice or
       * level of a higher-dimensional texture.
       */
      nir_ssa_def *idx;
      idx = nir_imul(b, nir_channel(b, xypos, 1), nir_channel(b, stride, 1));
      idx = nir_iadd(b, nir_channel(b, xypos, 0), idx);
      addr = nir_imul(b, idx, nir_channel(b, stride, 0));
   }

   return addr;
}

struct format_info {
   const struct isl_format_layout *fmtl;
   unsigned chans;
   unsigned bits[4];
};

static struct format_info
get_format_info(enum isl_format fmt)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);

   return (struct format_info) {
      .fmtl = fmtl,
      .chans = isl_format_get_num_channels(fmt),
      .bits = {
         fmtl->channels.r.bits,
         fmtl->channels.g.bits,
         fmtl->channels.b.bits,
         fmtl->channels.a.bits
      },
   };
}

static nir_ssa_def *
nir_zero_vec(nir_builder *b, unsigned num_components)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   return nir_build_imm(b, num_components, 32, v);
}

static nir_ssa_def *
convert_color_for_load(nir_builder *b, const struct gen_device_info *devinfo,
                       nir_ssa_def *color,
                       enum isl_format image_fmt, enum isl_format lower_fmt,
                       unsigned dest_components)
{
   if (image_fmt == lower_fmt)
      goto expand_vec;

   if (image_fmt == ISL_FORMAT_R11G11B10_FLOAT) {
      assert(lower_fmt == ISL_FORMAT_R32_UINT);
      color = nir_format_unpack_11f11f10f(b, color);
      goto expand_vec;
   }

   struct format_info image = get_format_info(image_fmt);
   struct format_info lower = get_format_info(lower_fmt);

   const bool needs_sign_extension =
      isl_format_has_snorm_channel(image_fmt) ||
      isl_format_has_sint_channel(image_fmt);

   /* We only check the red channel to detect if we need to pack/unpack */
   assert(image.bits[0] != lower.bits[0] ||
          memcmp(image.bits, lower.bits, sizeof(image.bits)) == 0);

   if (image.bits[0] != lower.bits[0] && lower_fmt == ISL_FORMAT_R32_UINT) {
      if (needs_sign_extension)
         color = nir_format_unpack_sint(b, color, image.bits, image.chans);
      else
         color = nir_format_unpack_uint(b, color, image.bits, image.chans);
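
      /* e.g. an RGBA8 image lowered to R32_UINT: a packed texel 0xddccbbaa
       * unpacks to the four 8-bit channels (0xaa, 0xbb, 0xcc, 0xdd) before
       * the normalized-to-float conversion below.
       */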
   } else {
      /* All these formats are homogeneous */
      for (unsigned i = 1; i < image.chans; i++)
         assert(image.bits[i] == image.bits[0]);

      /* On IVB, we rely on the undocumented behavior that typed reads from
       * surfaces of the unsupported R8 and R16 formats return useful data in
       * their least significant bits.  However, the data in the high bits is
       * garbage so we have to discard it.
       */
      if (devinfo->gen == 7 && !devinfo->is_haswell &&
          (lower_fmt == ISL_FORMAT_R16_UINT ||
           lower_fmt == ISL_FORMAT_R8_UINT))
         color = nir_format_mask_uvec(b, color, lower.bits);

      if (image.bits[0] != lower.bits[0]) {
         color = nir_format_bitcast_uvec_unmasked(b, color, lower.bits[0],
                                                  image.bits[0]);
      }

      if (needs_sign_extension)
         color = nir_format_sign_extend_ivec(b, color, image.bits);
   }

   switch (image.fmtl->channels.r.type) {
   case ISL_UNORM:
      assert(isl_format_has_uint_channel(lower_fmt));
      color = nir_format_unorm_to_float(b, color, image.bits);
      break;

   case ISL_SNORM:
      assert(isl_format_has_uint_channel(lower_fmt));
      color = nir_format_snorm_to_float(b, color, image.bits);
      break;

   case ISL_SFLOAT:
      if (image.bits[0] == 16)
         color = nir_unpack_half_2x16_split_x(b, color);
      break;

   case ISL_UINT:
   case ISL_SINT:
      break;

   default:
      unreachable("Invalid image channel type");
   }

expand_vec:
   assert(dest_components == 1 || dest_components == 4);
   assert(color->num_components <= dest_components);
   if (color->num_components == dest_components)
      return color;

   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < color->num_components; i++)
      comps[i] = nir_channel(b, color, i);

   for (unsigned i = color->num_components; i < 3; i++)
      comps[i] = nir_imm_int(b, 0);

   if (color->num_components < 4) {
      if (isl_format_has_int_channel(image_fmt))
         comps[3] = nir_imm_int(b, 1);
      else
         comps[3] = nir_imm_float(b, 1);
   }

   return nir_vec(b, comps, dest_components);
}
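
/* Loads are lowered in one of two ways: if the hardware has a matching typed
 * surface format we keep the typed load and just convert the returned color,
 * otherwise (64- and 128-bit formats) we emit a bounds-checked raw untyped
 * read at a manually computed address.
 */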
static bool
lower_image_load_instr(nir_builder *b,
                       const struct gen_device_info *devinfo,
                       nir_intrinsic_instr *intrin)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   const enum isl_format image_fmt =
      isl_format_for_gl_format(var->data.image.format);

   if (isl_has_matching_typed_storage_image_format(devinfo, image_fmt)) {
      const enum isl_format lower_fmt =
         isl_lower_storage_image_format(devinfo, image_fmt);
      const unsigned dest_components = intrin->num_components;

      /* Use an undef to hold the uses of the load while we do the color
       * conversion.
       */
      nir_ssa_def *placeholder = nir_ssa_undef(b, 4, 32);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(placeholder));

      intrin->num_components = isl_format_get_num_channels(lower_fmt);
      intrin->dest.ssa.num_components = intrin->num_components;

      b->cursor = nir_after_instr(&intrin->instr);

      nir_ssa_def *color = convert_color_for_load(b, devinfo,
                                                  &intrin->dest.ssa,
                                                  image_fmt, lower_fmt,
                                                  dest_components);

      nir_ssa_def_rewrite_uses(placeholder, nir_src_for_ssa(color));
      nir_instr_remove(placeholder->parent_instr);
   } else {
      const struct isl_format_layout *image_fmtl =
         isl_format_get_layout(image_fmt);
      /* We have a matching typed format for everything 32b and below */
      assert(image_fmtl->bpb == 64 || image_fmtl->bpb == 128);
      enum isl_format raw_fmt = (image_fmtl->bpb == 64) ?
                                ISL_FORMAT_R32G32_UINT :
                                ISL_FORMAT_R32G32B32A32_UINT;
      const unsigned dest_components = intrin->num_components;

      b->cursor = nir_instr_remove(&intrin->instr);

      nir_ssa_def *coord = intrin->src[1].ssa;

      nir_ssa_def *do_load = image_coord_is_in_bounds(b, deref, coord);
      if (devinfo->gen == 7 && !devinfo->is_haswell) {
         /* Check whether the first stride component (i.e. the Bpp value)
          * is greater than four, which on Gen7 indicates that a surface of
          * type RAW has been bound for untyped access.  Reading or writing
          * to a surface of a type other than RAW using untyped surface
          * messages causes a hang on IVB and VLV.
          */
         nir_ssa_def *stride = load_image_param(b, deref, STRIDE);
         nir_ssa_def *is_raw =
            nir_ilt(b, nir_imm_int(b, 4), nir_channel(b, stride, 0));
         do_load = nir_iand(b, do_load, is_raw);
      }
      nir_push_if(b, do_load);

      nir_ssa_def *addr = image_address(b, devinfo, deref, coord);
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader,
                                    nir_intrinsic_image_deref_load_raw_intel);
      load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
      load->src[1] = nir_src_for_ssa(addr);
      load->num_components = image_fmtl->bpb / 32;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, 32, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_push_else(b, NULL);

      nir_ssa_def *zero = nir_zero_vec(b, load->num_components);

      nir_pop_if(b, NULL);

      nir_ssa_def *value = nir_if_phi(b, &load->dest.ssa, zero);

      nir_ssa_def *color = convert_color_for_load(b, devinfo, value,
                                                  image_fmt, raw_fmt,
                                                  dest_components);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(color));
   }

   return true;
}
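
/* The inverse of convert_color_for_load(): convert a color in the logical
 * image format into the representation the lowered format actually stores.
 */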
static nir_ssa_def *
convert_color_for_store(nir_builder *b, const struct gen_device_info *devinfo,
                        nir_ssa_def *color,
                        enum isl_format image_fmt, enum isl_format lower_fmt)
{
   struct format_info image = get_format_info(image_fmt);
   struct format_info lower = get_format_info(lower_fmt);

   color = nir_channels(b, color, (1 << image.chans) - 1);

   if (image_fmt == lower_fmt)
      return color;

   if (image_fmt == ISL_FORMAT_R11G11B10_FLOAT) {
      assert(lower_fmt == ISL_FORMAT_R32_UINT);
      return nir_format_pack_11f11f10f(b, color);
   }

   switch (image.fmtl->channels.r.type) {
   case ISL_UNORM:
      assert(isl_format_has_uint_channel(lower_fmt));
      color = nir_format_float_to_unorm(b, color, image.bits);
      break;

   case ISL_SNORM:
      assert(isl_format_has_uint_channel(lower_fmt));
      color = nir_format_float_to_snorm(b, color, image.bits);
      break;

   case ISL_SFLOAT:
      if (image.bits[0] == 16) {
         nir_ssa_def *f16comps[4];
         for (unsigned i = 0; i < image.chans; i++) {
            f16comps[i] = nir_pack_half_2x16_split(b, nir_channel(b, color, i),
                                                   nir_imm_float(b, 0));
         }
         color = nir_vec(b, f16comps, image.chans);
      }
      break;

   case ISL_UINT:
      color = nir_format_clamp_uint(b, color, image.bits);
      break;

   case ISL_SINT:
      color = nir_format_clamp_sint(b, color, image.bits);
      break;

   default:
      unreachable("Invalid image channel type");
   }

   if (image.bits[0] < 32 &&
       (isl_format_has_snorm_channel(image_fmt) ||
        isl_format_has_sint_channel(image_fmt)))
      color = nir_format_mask_uvec(b, color, image.bits);

   if (image.bits[0] != lower.bits[0] && lower_fmt == ISL_FORMAT_R32_UINT) {
      color = nir_format_pack_uint(b, color, image.bits, image.chans);
   } else {
      /* All these formats are homogeneous */
      for (unsigned i = 1; i < image.chans; i++)
         assert(image.bits[i] == image.bits[0]);

      if (image.bits[0] != lower.bits[0]) {
         color = nir_format_bitcast_uvec_unmasked(b, color, image.bits[0],
                                                  lower.bits[0]);
      }
   }

   return color;
}
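
/* Stores mirror the load path: with a matching typed format we keep the
 * typed write and only convert the color beforehand; for 64- and 128-bit
 * formats we emit a bounds-checked raw untyped write instead.
 */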
static bool
lower_image_store_instr(nir_builder *b,
                        const struct gen_device_info *devinfo,
                        nir_intrinsic_instr *intrin)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   /* For write-only surfaces, we trust that the hardware can just do the
    * conversion for us.
    */
   if (var->data.image.access & ACCESS_NON_READABLE)
      return false;

   const enum isl_format image_fmt =
      isl_format_for_gl_format(var->data.image.format);

   if (isl_has_matching_typed_storage_image_format(devinfo, image_fmt)) {
      const enum isl_format lower_fmt =
         isl_lower_storage_image_format(devinfo, image_fmt);

      /* Color conversion goes before the store */
      b->cursor = nir_before_instr(&intrin->instr);

      nir_ssa_def *color = convert_color_for_store(b, devinfo,
                                                   intrin->src[3].ssa,
                                                   image_fmt, lower_fmt);
      intrin->num_components = isl_format_get_num_channels(lower_fmt);
      nir_instr_rewrite_src(&intrin->instr, &intrin->src[3],
                            nir_src_for_ssa(color));
   } else {
      const struct isl_format_layout *image_fmtl =
         isl_format_get_layout(image_fmt);
      /* We have a matching typed format for everything 32b and below */
      assert(image_fmtl->bpb == 64 || image_fmtl->bpb == 128);
      enum isl_format raw_fmt = (image_fmtl->bpb == 64) ?
                                ISL_FORMAT_R32G32_UINT :
                                ISL_FORMAT_R32G32B32A32_UINT;

      b->cursor = nir_instr_remove(&intrin->instr);

      nir_ssa_def *coord = intrin->src[1].ssa;

      nir_ssa_def *do_store = image_coord_is_in_bounds(b, deref, coord);
      if (devinfo->gen == 7 && !devinfo->is_haswell) {
         /* Check whether the first stride component (i.e. the Bpp value)
          * is greater than four, which on Gen7 indicates that a surface of
          * type RAW has been bound for untyped access.  Reading or writing
          * to a surface of a type other than RAW using untyped surface
          * messages causes a hang on IVB and VLV.
          */
         nir_ssa_def *stride = load_image_param(b, deref, STRIDE);
         nir_ssa_def *is_raw =
            nir_ilt(b, nir_imm_int(b, 4), nir_channel(b, stride, 0));
         do_store = nir_iand(b, do_store, is_raw);
      }
      nir_push_if(b, do_store);

      nir_ssa_def *addr = image_address(b, devinfo, deref, coord);
      nir_ssa_def *color = convert_color_for_store(b, devinfo,
                                                   intrin->src[3].ssa,
                                                   image_fmt, raw_fmt);

      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader,
                                    nir_intrinsic_image_deref_store_raw_intel);
      store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
      store->src[1] = nir_src_for_ssa(addr);
      store->src[2] = nir_src_for_ssa(color);
      store->num_components = image_fmtl->bpb / 32;
      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   }

   return true;
}
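
/* IVB typed atomics do not respect null surfaces, so the atomic is wrapped
 * in a check that the image is actually bound (see the comment inside);
 * newer hardware needs no lowering here.
 */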
static bool
lower_image_atomic_instr(nir_builder *b,
                         const struct gen_device_info *devinfo,
                         nir_intrinsic_instr *intrin)
{
   if (devinfo->is_haswell || devinfo->gen >= 8)
      return false;

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   b->cursor = nir_instr_remove(&intrin->instr);

   /* Use an undef to hold the uses of the load conversion. */
   nir_ssa_def *placeholder = nir_ssa_undef(b, 4, 32);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(placeholder));

   /* Check the first component of the size field to find out if the
    * image is bound.  Necessary on IVB for typed atomics because
    * they don't seem to respect null surfaces and will happily
    * corrupt or read random memory when no image is bound.
    */
   nir_ssa_def *size = load_image_param(b, deref, SIZE);
   nir_ssa_def *zero = nir_imm_int(b, 0);
   nir_push_if(b, nir_ine(b, nir_channel(b, size, 0), zero));

   nir_builder_instr_insert(b, &intrin->instr);

   nir_pop_if(b, NULL);

   nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, zero);
   nir_ssa_def_rewrite_uses(placeholder, nir_src_for_ssa(result));

   return true;
}

static bool
lower_image_size_instr(nir_builder *b,
                       const struct gen_device_info *devinfo,
                       nir_intrinsic_instr *intrin)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   /* For write-only images, we have an actual image surface so we fall back
    * and let the back-end emit a TXS for this.
    */
   if (var->data.image.access & ACCESS_NON_READABLE)
      return false;

   /* If we have a matching typed format, then we have an actual image surface
    * so we fall back and let the back-end emit a TXS for this.
    */
   const enum isl_format image_fmt =
      isl_format_for_gl_format(var->data.image.format);
   if (isl_has_matching_typed_storage_image_format(devinfo, image_fmt))
      return false;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_ssa_def *size = load_image_param(b, deref, SIZE);

   nir_ssa_def *comps[4] = { NULL, NULL, NULL, NULL };

   enum glsl_sampler_dim dim = glsl_get_sampler_dim(deref->type);
   unsigned coord_comps = glsl_get_sampler_coordinate_components(deref->type);
   for (unsigned c = 0; c < coord_comps; c++) {
      if (c == 2 && dim == GLSL_SAMPLER_DIM_CUBE) {
         comps[2] = nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6));
      } else {
         comps[c] = nir_channel(b, size, c);
      }
   }

   for (unsigned c = coord_comps; c < intrin->dest.ssa.num_components; ++c)
      comps[c] = nir_imm_int(b, 1);

   nir_ssa_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(vec));

   return true;
}
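
/* Entry point: walk every image load/store/atomic/size intrinsic in the
 * shader and lower the ones the hardware cannot handle directly.
 */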
bool
brw_nir_lower_image_load_store(nir_shader *shader,
                               const struct gen_device_info *devinfo)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl == NULL)
         continue;

      nir_foreach_block_safe(block, function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);

         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_image_deref_load:
               if (lower_image_load_instr(&b, devinfo, intrin))
                  progress = true;
               break;

            case nir_intrinsic_image_deref_store:
               if (lower_image_store_instr(&b, devinfo, intrin))
                  progress = true;
               break;

            case nir_intrinsic_image_deref_atomic_add:
            case nir_intrinsic_image_deref_atomic_min:
            case nir_intrinsic_image_deref_atomic_max:
            case nir_intrinsic_image_deref_atomic_and:
            case nir_intrinsic_image_deref_atomic_or:
            case nir_intrinsic_image_deref_atomic_xor:
            case nir_intrinsic_image_deref_atomic_exchange:
            case nir_intrinsic_image_deref_atomic_comp_swap:
               if (lower_image_atomic_instr(&b, devinfo, intrin))
                  progress = true;
               break;

            case nir_intrinsic_image_deref_size:
               if (lower_image_size_instr(&b, devinfo, intrin))
                  progress = true;
               break;

            default:
               break;
            }
         }
      }

      nir_metadata_preserve(function->impl, nir_metadata_none);
   }

   return progress;
}

void
brw_nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin,
                                nir_ssa_def *index)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   switch (intrin->intrinsic) {
#define CASE(op) \
   case nir_intrinsic_image_deref_##op: \
      intrin->intrinsic = nir_intrinsic_image_##op; \
      break;
   CASE(load)
   CASE(store)
   CASE(atomic_add)
   CASE(atomic_min)
   CASE(atomic_max)
   CASE(atomic_and)
   CASE(atomic_or)
   CASE(atomic_xor)
   CASE(atomic_exchange)
   CASE(atomic_comp_swap)
   CASE(atomic_fadd)
   CASE(size)
   CASE(samples)
   CASE(load_raw_intel)
   CASE(store_raw_intel)
#undef CASE
   default:
      unreachable("Unhandled image intrinsic");
   }

   nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
   nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
   nir_intrinsic_set_access(intrin, var->data.image.access);
   nir_intrinsic_set_format(intrin, var->data.image.format);

   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(index));
}