/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
/* Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this
 * module is responsible for identifying type 3 formats (hardware dependent)
 * and inserting appropriate ALU code to perform the conversion from the
 * packed type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using
 * the smallest precision necessary to store the pixel losslessly.
 */
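/* For example, under these rules an RGBA8 UNORM render target unpacks to a
 * f16 vec4, RGBA16UI to a u16 vec4, and RGBA32F to a f32 vec4 (illustrative
 * cases derived from the rules above, not an exhaustive list). */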
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"
/* Determines the unpacked type best suiting a given format, so the rest of
 * the pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}
enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}
enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}
/* Software packs/unpacks, by format class. Packs take in the pixel value
 * typed as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so just need to
 * replicate to fill */
static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}
static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}
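/* For instance, an RG32F pixel (x, y) packs to the i32vec4 (x, y, x, y), and
 * unpacking simply takes the first two channels again (example derived from
 * the helpers above). */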
/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */
static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}
static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}
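/* So a RGBA16F pixel (r, g, b, a) occupies two 32-bit words, (g << 16) | r
 * and (a << 16) | b, with the pair replicated to fill the vec4 (worked
 * example derived from the pack above). */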
/* And likewise for x8. pan_fill_4 fills a 4-channel vector with a n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */
static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}
static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}
static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}
static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}
/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */
static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}
static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2f16(b, unpacked);
}
/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, spacing it out so we're aligned to bytes and on top. So
 * pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */
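/* Worked example of the formula above: a channel value of 0.5 scales to
 * 0.5 * 15.0 = 7.5, rounds to even as 8, and lands in the byte's top nibble
 * as 0x80; unpacking shifts back down and yields 8.0 / 15.0 ~= 0.533. */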
static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}
static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_u2u16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}
static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0,
                                1.0 / 15.0, 1.0 / 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}
/* UNORM RGB5_A1 and RGB565 are similar */
static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0,
                                1.0 / 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0,
                                1.0 / 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}
/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */
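/* Spelled out byte by byte (layout derived from the pack below):
 *
 *      byte 0 = R[9:2], byte 1 = G[9:2], byte 2 = B[9:2],
 *      byte 3 = (A[1:0] << 6) | (B[1:0] << 4) | (G[1:0] << 2) | R[1:0]
 */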
static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b,
                        nir_fmul(b, nir_fsat(b, v), scale))));

        /* Top 8 bits of each channel, packed into the bottom 3 bytes */
        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        /* Bottom 2 bits of each channel, packed into the top byte */
        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}
static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_u2u16(b, bytes);

        /* Recover the low 2 bits of each channel from the top byte */
        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2i16(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0,
                        1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2f16(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}
/* On the other hand, the pure int RGB10_A2 is identical to the spec */
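/* i.e. the packed word is r | (g << 10) | (b << 20) | (a << 30), matching the
 * standard RGB10_A2UI layout, so no tilebuffer-specific shuffling is needed. */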
static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                        nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                        nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}
static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                        nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_u2u16(b, mask);
}
/* Generic dispatches for un/pack regardless of format */
static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
                return true;
        default:
                return false;
        }
}
static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ?
                                pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}
static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                        nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}
void
pan_lower_framebuffer(nir_shader *shader,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* Blend shaders are represented as special fragment shaders */
        assert(shader->info.stage == MESA_SHADER_FRAGMENT);

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || is_store))
                                        continue;

                                /* Don't worry about MRT */
                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.location != FRAG_RESULT_COLOR)
                                        continue;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, quirks);
                                }

                                nir_instr_remove(instr);
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }
}
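/* A minimal sketch of a call site (hypothetical; real drivers wire this up
 * through their own blend shader compile paths, and `rt_format` here is an
 * assumed variable, not part of this module):
 *
 *      const struct util_format_description *desc =
 *              util_format_description(rt_format);
 *
 *      if (pan_format_class_load(desc, quirks) == PAN_FORMAT_SOFTWARE ||
 *          pan_format_class_store(desc, quirks) == PAN_FORMAT_SOFTWARE)
 *              pan_lower_framebuffer(shader, desc, quirks);
 */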