/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * Although it's called a load/store "vectorization" pass, this also combines
 * intersecting and identical loads/stores. It currently supports derefs, ubo,
 * ssbo and push constant loads/stores.
 *
 * This doesn't handle copy_deref intrinsics and assumes that
 * nir_lower_alu_to_scalar() has been called and that the IR is free from ALU
 * modifiers. It also assumes that derefs have explicitly laid out types.
 *
 * After vectorization, the backend may want to call nir_lower_alu_to_scalar()
 * and nir_lower_pack(). Also, this pass creates cast instructions taking
 * derefs as a source, and some parts of NIR may not be able to handle that
 * well.
 *
 * There are a few situations where this doesn't vectorize as well as it could:
 * - It won't turn four consecutive vec3 loads into 3 vec4 loads.
 * - It doesn't do global vectorization.
 * Handling these cases probably wouldn't provide much benefit though.
 *
 * This probably doesn't handle big-endian GPUs correctly.
 */

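/* A minimal usage sketch (illustrative only; the callback body and its
 * heuristic below are hypothetical, not taken from this file). The callback's
 * parameter order mirrors the call made in new_bitsize_acceptable():
 *
 *    static bool
 *    mem_vectorize_cb(unsigned align, unsigned bit_size,
 *                     unsigned num_components, unsigned high_offset,
 *                     nir_intrinsic_instr *low, nir_intrinsic_instr *high)
 *    {
 *       // e.g. only allow combined accesses that stay naturally aligned
 *       return align % (bit_size / 8u) == 0;
 *    }
 *
 *    nir_opt_load_store_vectorize(shader, nir_var_mem_ssbo | nir_var_mem_ubo,
 *                                 mem_vectorize_cb, (nir_variable_mode)0);
 */
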
#include "nir.h"
#include "nir_deref.h"
#include "nir_builder.h"
#include "nir_worklist.h"

#include "util/u_dynarray.h"

struct intrinsic_info {
   nir_variable_mode mode; /* 0 if the mode is obtained from the deref. */
   nir_intrinsic_op op;
   bool is_atomic;
   /* Indices into nir_intrinsic::src[] or -1 if not applicable. */
   int resource_src; /* resource (e.g. from vulkan_resource_index) */
   int base_src; /* offset which it loads/stores from */
   int deref_src; /* deref which it loads/stores from */
   int value_src; /* the data it is storing */
};

static const struct intrinsic_info *
get_info(nir_intrinsic_op op) {
   switch (op) {
#define INFO(mode, op, atomic, res, base, deref, val) \
case nir_intrinsic_##op: {\
   static const struct intrinsic_info op##_info = {mode, nir_intrinsic_##op, atomic, res, base, deref, val};\
   return &op##_info;\
}
#define LOAD(mode, op, res, base, deref) INFO(mode, load_##op, false, res, base, deref, -1)
#define STORE(mode, op, res, base, deref, val) INFO(mode, store_##op, false, res, base, deref, val)
#define ATOMIC(mode, type, op, res, base, deref, val) INFO(mode, type##_atomic_##op, true, res, base, deref, val)
   LOAD(nir_var_mem_push_const, push_constant, -1, 0, -1)
   LOAD(nir_var_mem_ubo, ubo, 0, 1, -1)
   LOAD(nir_var_mem_ssbo, ssbo, 0, 1, -1)
   STORE(nir_var_mem_ssbo, ssbo, 1, 2, -1, 0)
   LOAD(0, deref, -1, -1, 0)
   STORE(0, deref, -1, -1, 0, 1)
   LOAD(nir_var_mem_shared, shared, -1, 0, -1)
   STORE(nir_var_mem_shared, shared, -1, 1, -1, 0)
   LOAD(nir_var_mem_global, global, -1, 0, -1)
   STORE(nir_var_mem_global, global, -1, 1, -1, 0)
   ATOMIC(nir_var_mem_ssbo, ssbo, add, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, imin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, umin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, imax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, umax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, and, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, or, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, xor, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, exchange, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, comp_swap, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fadd, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fmin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fmax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fcomp_swap, 0, 1, -1, 2)
   ATOMIC(0, deref, add, -1, -1, 0, 1)
   ATOMIC(0, deref, imin, -1, -1, 0, 1)
   ATOMIC(0, deref, umin, -1, -1, 0, 1)
   ATOMIC(0, deref, imax, -1, -1, 0, 1)
   ATOMIC(0, deref, umax, -1, -1, 0, 1)
   ATOMIC(0, deref, and, -1, -1, 0, 1)
   ATOMIC(0, deref, or, -1, -1, 0, 1)
   ATOMIC(0, deref, xor, -1, -1, 0, 1)
   ATOMIC(0, deref, exchange, -1, -1, 0, 1)
   ATOMIC(0, deref, comp_swap, -1, -1, 0, 1)
   ATOMIC(0, deref, fadd, -1, -1, 0, 1)
   ATOMIC(0, deref, fmin, -1, -1, 0, 1)
   ATOMIC(0, deref, fmax, -1, -1, 0, 1)
   ATOMIC(0, deref, fcomp_swap, -1, -1, 0, 1)
   ATOMIC(nir_var_mem_shared, shared, add, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, imin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, umin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, imax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, umax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, and, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, or, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, xor, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, exchange, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, comp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fadd, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fmin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fmax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fcomp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, add, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, imin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, umin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, imax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, umax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, and, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, or, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, xor, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, exchange, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, comp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fadd, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fmin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fmax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fcomp_swap, -1, 0, -1, 1)
   default:
      break;
#undef ATOMIC
#undef STORE
#undef LOAD
#undef INFO
   }
   return NULL;
}

/*
 * Information used to compare memory operations.
 * It canonically represents an offset as:
 * `offset_defs[0]*offset_defs_mul[0] + offset_defs[1]*offset_defs_mul[1] + ...`
 * "offset_defs" is sorted in ascending order by the ssa definition's index.
 * "resource" or "var" may be NULL.
 */
struct entry_key {
   nir_ssa_def *resource;
   nir_variable *var;
   unsigned offset_def_count;
   nir_ssa_def **offset_defs;
   uint64_t *offset_defs_mul;
};

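/* Worked example (hypothetical SSA values): an access at "i * 24 + j * 4 + 16"
 * is keyed with offset_defs = {i, j} (sorted by ssa index) and
 * offset_defs_mul = {24, 4}; the constant 16 is folded into struct entry's
 * "offset" field. Entries with equal keys can then be ordered and merged by
 * comparing their constant offsets alone. */
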
/* Information on a single memory operation. */
struct entry {
   struct list_head head;
   unsigned index;

   struct entry_key *key;
   union {
      uint64_t offset; /* sign-extended */
      int64_t offset_signed;
   };
   uint32_t best_align;

   nir_instr *instr;
   nir_intrinsic_instr *intrin;
   const struct intrinsic_info *info;
   enum gl_access_qualifier access;
   bool is_store;

   nir_deref_instr *deref;
};

struct vectorize_ctx {
   nir_variable_mode modes;
   nir_should_vectorize_mem_func callback;
   nir_variable_mode robust_modes;
   struct list_head entries[nir_num_variable_modes];
   struct hash_table *loads[nir_num_variable_modes];
   struct hash_table *stores[nir_num_variable_modes];
};

static uint32_t hash_entry_key(const void *key_)
{
   /* this is careful to not include pointers in the hash calculation so that
    * the order of the hash table walk is deterministic */
   struct entry_key *key = (struct entry_key*)key_;

   uint32_t hash = _mesa_fnv32_1a_offset_bias;
   if (key->resource)
      hash = _mesa_fnv32_1a_accumulate(hash, key->resource->index);
   if (key->var) {
      hash = _mesa_fnv32_1a_accumulate(hash, key->var->index);
      unsigned mode = key->var->data.mode;
      hash = _mesa_fnv32_1a_accumulate(hash, mode);
   }

   for (unsigned i = 0; i < key->offset_def_count; i++)
      hash = _mesa_fnv32_1a_accumulate(hash, key->offset_defs[i]->index);

   hash = _mesa_fnv32_1a_accumulate_block(
      hash, key->offset_defs_mul, key->offset_def_count * sizeof(uint64_t));

   return hash;
}

static bool entry_key_equals(const void *a_, const void *b_)
{
   struct entry_key *a = (struct entry_key*)a_;
   struct entry_key *b = (struct entry_key*)b_;

   if (a->var != b->var || a->resource != b->resource)
      return false;

   if (a->offset_def_count != b->offset_def_count)
      return false;

   size_t offset_def_size = a->offset_def_count * sizeof(nir_ssa_def *);
   size_t offset_def_mul_size = a->offset_def_count * sizeof(uint64_t);
   if (a->offset_def_count &&
       (memcmp(a->offset_defs, b->offset_defs, offset_def_size) ||
        memcmp(a->offset_defs_mul, b->offset_defs_mul, offset_def_mul_size)))
      return false;

   return true;
}

static void delete_entry_dynarray(struct hash_entry *entry)
{
   struct util_dynarray *arr = (struct util_dynarray *)entry->data;
   ralloc_free(arr);
}

static int sort_entries(const void *a_, const void *b_)
{
   struct entry *a = *(struct entry*const*)a_;
   struct entry *b = *(struct entry*const*)b_;

   if (a->offset_signed > b->offset_signed)
      return 1;
   else if (a->offset_signed < b->offset_signed)
      return -1;
   else
      return 0;
}

static unsigned
get_bit_size(struct entry *entry)
{
   unsigned size = entry->is_store ?
                   entry->intrin->src[entry->info->value_src].ssa->bit_size :
                   entry->intrin->dest.ssa.bit_size;
   return size == 1 ? 32u : size;
}

269 /* If "def" is from an alu instruction with the opcode "op" and one of it's
270 * sources is a constant, update "def" to be the non-constant source, fill "c"
271 * with the constant and return true. */
273 parse_alu(nir_ssa_def
**def
, nir_op op
, uint64_t *c
)
275 nir_ssa_scalar scalar
;
279 if (!nir_ssa_scalar_is_alu(scalar
) || nir_ssa_scalar_alu_op(scalar
) != op
)
282 nir_ssa_scalar src0
= nir_ssa_scalar_chase_alu_src(scalar
, 0);
283 nir_ssa_scalar src1
= nir_ssa_scalar_chase_alu_src(scalar
, 1);
284 if (op
!= nir_op_ishl
&& nir_ssa_scalar_is_const(src0
) && src1
.comp
== 0) {
285 *c
= nir_ssa_scalar_as_uint(src0
);
287 } else if (nir_ssa_scalar_is_const(src1
) && src0
.comp
== 0) {
288 *c
= nir_ssa_scalar_as_uint(src1
);
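/* For example (hypothetical defs): if *def is "a * 16", calling
 * parse_alu(&def, nir_op_imul, &c) sets c = 16, updates *def to "a" and
 * returns true. For nir_op_ishl only a constant second source is folded,
 * since a constant first source is the shifted value, not the amount. */
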
/* Parses an offset expression such as "a * 16 + 4" and "(a * 16 + 4) * 64 + 32". */
static void
parse_offset(nir_ssa_def **base, uint64_t *base_mul, uint64_t *offset)
{
   if ((*base)->parent_instr->type == nir_instr_type_load_const) {
      *offset = nir_src_comp_as_uint(nir_src_for_ssa(*base), 0);
      *base = NULL;
      return;
   }

   uint64_t mul = 1;
   uint64_t add = 0;
   bool progress = false;
   do {
      uint64_t mul2 = 1, add2 = 0;

      progress = parse_alu(base, nir_op_imul, &mul2);
      mul *= mul2;

      mul2 = 0;
      progress |= parse_alu(base, nir_op_ishl, &mul2);
      mul <<= mul2;

      add2 = 0;
      progress |= parse_alu(base, nir_op_iadd, &add2);
      add += add2 * mul;
   } while (progress);

   *base_mul = mul;
   *offset = add;
}

static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4u : glsl_get_bit_size(type) / 8u;
}

static unsigned
get_array_stride(const struct glsl_type *type)
{
   unsigned explicit_stride = glsl_get_explicit_stride(type);
   if ((glsl_type_is_matrix(type) &&
        glsl_matrix_type_is_row_major(type)) ||
       (glsl_type_is_vector(type) && explicit_stride == 0))
      return type_scalar_size_bytes(type);
   return explicit_stride;
}

static uint64_t
mask_sign_extend(uint64_t val, unsigned bit_size)
{
   return (int64_t)(val << (64 - bit_size)) >> (64 - bit_size);
}

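/* e.g. mask_sign_extend(0xffff, 16) == UINT64_MAX: the value is shifted into
 * the top bits and the arithmetic right shift then replicates the sign bit,
 * yielding the 64-bit sign-extended -1. */
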
static unsigned
add_to_entry_key(nir_ssa_def **offset_defs, uint64_t *offset_defs_mul,
                 unsigned offset_def_count, nir_ssa_def *def, uint64_t mul)
{
   mul = mask_sign_extend(mul, def->bit_size);

   for (unsigned i = 0; i <= offset_def_count; i++) {
      if (i == offset_def_count || def->index > offset_defs[i]->index) {
         /* insert before i */
         memmove(offset_defs + i + 1, offset_defs + i,
                 (offset_def_count - i) * sizeof(nir_ssa_def *));
         memmove(offset_defs_mul + i + 1, offset_defs_mul + i,
                 (offset_def_count - i) * sizeof(uint64_t));
         offset_defs[i] = def;
         offset_defs_mul[i] = mul;
         return 1;
      } else if (def->index == offset_defs[i]->index) {
         /* merge with offset_def at i */
         offset_defs_mul[i] += mul;
         return 0;
      }
   }
   unreachable("Unreachable.");
}

static struct entry_key *
create_entry_key_from_deref(void *mem_ctx,
                            struct vectorize_ctx *ctx,
                            nir_deref_path *path,
                            uint64_t *offset_base)
{
   unsigned path_len = 0;
   while (path->path[path_len])
      path_len++;

   nir_ssa_def *offset_defs_stack[32];
   uint64_t offset_defs_mul_stack[32];
   nir_ssa_def **offset_defs = offset_defs_stack;
   uint64_t *offset_defs_mul = offset_defs_mul_stack;
   if (path_len > 32) {
      offset_defs = malloc(path_len * sizeof(nir_ssa_def *));
      offset_defs_mul = malloc(path_len * sizeof(uint64_t));
   }
   unsigned offset_def_count = 0;

   struct entry_key* key = ralloc(mem_ctx, struct entry_key);
   key->resource = NULL;
   key->var = NULL;
   *offset_base = 0;

   for (unsigned i = 0; i < path_len; i++) {
      nir_deref_instr *parent = i ? path->path[i - 1] : NULL;
      nir_deref_instr *deref = path->path[i];

      switch (deref->deref_type) {
      case nir_deref_type_var: {
         assert(!parent);
         key->var = deref->var;
         break;
      }
      case nir_deref_type_array:
      case nir_deref_type_ptr_as_array: {
         assert(parent);
         nir_ssa_def *index = deref->arr.index.ssa;
         uint32_t stride;
         if (deref->deref_type == nir_deref_type_ptr_as_array)
            stride = nir_deref_instr_ptr_as_array_stride(deref);
         else
            stride = get_array_stride(parent->type);

         nir_ssa_def *base = index;
         uint64_t offset = 0, base_mul = 1;
         parse_offset(&base, &base_mul, &offset);
         offset = mask_sign_extend(offset, index->bit_size);

         *offset_base += offset * stride;
         if (base) {
            offset_def_count += add_to_entry_key(offset_defs, offset_defs_mul,
                                                 offset_def_count,
                                                 base, base_mul * stride);
         }
         break;
      }
      case nir_deref_type_struct: {
         assert(parent);
         int offset = glsl_get_struct_field_offset(parent->type, deref->strct.index);
         *offset_base += offset;
         break;
      }
      case nir_deref_type_cast: {
         if (!parent)
            key->resource = deref->parent.ssa;
         break;
      }
      default:
         unreachable("Unhandled deref type");
      }
   }

   key->offset_def_count = offset_def_count;
   key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, offset_def_count);
   key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, offset_def_count);
   memcpy(key->offset_defs, offset_defs, offset_def_count * sizeof(nir_ssa_def *));
   memcpy(key->offset_defs_mul, offset_defs_mul, offset_def_count * sizeof(uint64_t));

   if (offset_defs != offset_defs_stack)
      free(offset_defs);
   if (offset_defs_mul != offset_defs_mul_stack)
      free(offset_defs_mul);

   return key;
}

static unsigned
parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
                            nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
{
   uint64_t new_mul;
   uint64_t new_offset;
   parse_offset(&base, &new_mul, &new_offset);
   *offset += new_offset * base_mul;

   if (!base)
      return 0;

   base_mul *= new_mul;

   assert(left >= 1);

   if (left >= 2) {
      nir_ssa_scalar scalar;
      scalar.def = base;
      scalar.comp = 0;
      if (nir_ssa_scalar_is_alu(scalar) && nir_ssa_scalar_alu_op(scalar) == nir_op_iadd) {
         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (src0.comp == 0 && src1.comp == 0) {
            unsigned amount = parse_entry_key_from_offset(key, size, left - 1, src0.def, base_mul, offset);
            amount += parse_entry_key_from_offset(key, size + amount, left - amount, src1.def, base_mul, offset);
            return amount;
         }
      }
   }

   return add_to_entry_key(key->offset_defs, key->offset_defs_mul, size, base, base_mul);
}

static struct entry_key *
create_entry_key_from_offset(void *mem_ctx, nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
{
   struct entry_key *key = ralloc(mem_ctx, struct entry_key);
   key->resource = NULL;
   key->var = NULL;
   if (base) {
      nir_ssa_def *offset_defs[32];
      uint64_t offset_defs_mul[32];
      key->offset_defs = offset_defs;
      key->offset_defs_mul = offset_defs_mul;

      key->offset_def_count = parse_entry_key_from_offset(key, 0, 32, base, base_mul, offset);

      key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, key->offset_def_count);
      key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, key->offset_def_count);
      memcpy(key->offset_defs, offset_defs, key->offset_def_count * sizeof(nir_ssa_def *));
      memcpy(key->offset_defs_mul, offset_defs_mul, key->offset_def_count * sizeof(uint64_t));
   } else {
      key->offset_def_count = 0;
      key->offset_defs = NULL;
      key->offset_defs_mul = NULL;
   }
   return key;
}

static nir_variable_mode
get_variable_mode(struct entry *entry)
{
   if (entry->info->mode)
      return entry->info->mode;
   assert(entry->deref);
   return entry->deref->mode;
}

static unsigned
mode_to_index(nir_variable_mode mode)
{
   assert(util_bitcount(mode) == 1);

   /* Globals and SSBOs should be tracked together */
   if (mode == nir_var_mem_global)
      mode = nir_var_mem_ssbo;

   return ffs(mode) - 1;
}

static nir_variable_mode
aliasing_modes(nir_variable_mode modes)
{
   /* Global and SSBO can alias */
   if (modes & (nir_var_mem_ssbo | nir_var_mem_global))
      modes |= nir_var_mem_ssbo | nir_var_mem_global;
   return modes;
}

static struct entry *
create_entry(struct vectorize_ctx *ctx,
             const struct intrinsic_info *info,
             nir_intrinsic_instr *intrin)
{
   struct entry *entry = rzalloc(ctx, struct entry);
   entry->intrin = intrin;
   entry->instr = &intrin->instr;
   entry->info = info;
   entry->best_align = UINT32_MAX;
   entry->is_store = entry->info->value_src >= 0;

   if (entry->info->deref_src >= 0) {
      entry->deref = nir_src_as_deref(intrin->src[entry->info->deref_src]);
      nir_deref_path path;
      nir_deref_path_init(&path, entry->deref, NULL);
      entry->key = create_entry_key_from_deref(entry, ctx, &path, &entry->offset);
      nir_deref_path_finish(&path);
   } else {
      nir_ssa_def *base = entry->info->base_src >= 0 ?
                          intrin->src[entry->info->base_src].ssa : NULL;
      uint64_t offset = 0;
      if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_BASE])
         offset += nir_intrinsic_base(intrin);
      entry->key = create_entry_key_from_offset(entry, base, 1, &offset);
      entry->offset = offset;

      if (base)
         entry->offset = mask_sign_extend(entry->offset, base->bit_size);
   }

   if (entry->info->resource_src >= 0)
      entry->key->resource = intrin->src[entry->info->resource_src].ssa;

   if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS])
      entry->access = nir_intrinsic_access(intrin);
   else if (entry->key->var)
      entry->access = entry->key->var->data.access;

   uint32_t restrict_modes = nir_var_shader_in | nir_var_shader_out;
   restrict_modes |= nir_var_shader_temp | nir_var_function_temp;
   restrict_modes |= nir_var_uniform | nir_var_mem_push_const;
   restrict_modes |= nir_var_system_value | nir_var_mem_shared;
   if (get_variable_mode(entry) & restrict_modes)
      entry->access |= ACCESS_RESTRICT;

   return entry;
}

static nir_deref_instr *
cast_deref(nir_builder *b, unsigned num_components, unsigned bit_size, nir_deref_instr *deref)
{
   if (glsl_get_components(deref->type) == num_components &&
       type_scalar_size_bytes(deref->type)*8u == bit_size)
      return deref;

   enum glsl_base_type types[] = {
      GLSL_TYPE_UINT8, GLSL_TYPE_UINT16, GLSL_TYPE_UINT, GLSL_TYPE_UINT64};
   enum glsl_base_type base = types[ffs(bit_size / 8u) - 1u];
   const struct glsl_type *type = glsl_vector_type(base, num_components);

   if (deref->type == type)
      return deref;

   return nir_build_deref_cast(b, &deref->dest.ssa, deref->mode, type, 0);
}

/* Return true if the write mask "write_mask" of a store with "old_bit_size"
 * bits per element can be represented for a store with "new_bit_size" bits per
 * element. */
static bool
writemask_representable(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
{
   while (write_mask) {
      int start, count;
      u_bit_scan_consecutive_range(&write_mask, &start, &count);
      start *= old_bit_size;
      count *= old_bit_size;
      if (start % new_bit_size != 0)
         return false;
      if (count % new_bit_size != 0)
         return false;
   }
   return true;
}

, uint64_t b
)
static uint32_t
get_best_align(struct entry *entry)
{
   if (entry->best_align != UINT32_MAX)
      return entry->best_align;

   uint64_t best_align = entry->offset;
   for (unsigned i = 0; i < entry->key->offset_def_count; i++) {
      if (!best_align)
         best_align = entry->key->offset_defs_mul[i];
      else if (entry->key->offset_defs_mul[i])
         best_align = gcd(best_align, entry->key->offset_defs_mul[i]);
   }

   if (nir_intrinsic_infos[entry->intrin->intrinsic].index_map[NIR_INTRINSIC_ALIGN_MUL])
      best_align = MAX2(best_align, nir_intrinsic_align(entry->intrin));

   /* ensure the result is a power of two that fits in a int32_t */
   entry->best_align = gcd(best_align, 1u << 30);

   return entry->best_align;
}

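/* e.g. for an access at "i * 24 + 20": gcd(20, 24) == 4, so at least 4-byte
 * alignment is known whatever the value of i; the final gcd with 1u << 30
 * rounds that down to the largest power of two dividing it. */
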
/* Return true if "new_bit_size" is a usable bit size for a vectorized load/store
 * of "low" and "high". */
static bool
new_bitsize_acceptable(struct vectorize_ctx *ctx, unsigned new_bit_size,
                       struct entry *low, struct entry *high, unsigned size)
{
   if (size % new_bit_size != 0)
      return false;

   unsigned new_num_components = size / new_bit_size;
   if (!nir_num_components_valid(new_num_components))
      return false;

   unsigned high_offset = high->offset_signed - low->offset_signed;

   /* check nir_extract_bits limitations */
   unsigned common_bit_size = MIN2(get_bit_size(low), get_bit_size(high));
   common_bit_size = MIN2(common_bit_size, new_bit_size);
   if (high_offset > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(high_offset * 8) - 1)));
   if (new_bit_size / common_bit_size > NIR_MAX_VEC_COMPONENTS)
      return false;

   if (!ctx->callback(get_best_align(low), new_bit_size, new_num_components,
                      high_offset, low->intrin, high->intrin))
      return false;

   if (low->is_store) {
      unsigned low_size = low->intrin->num_components * get_bit_size(low);
      unsigned high_size = high->intrin->num_components * get_bit_size(high);

      if (low_size % new_bit_size != 0)
         return false;
      if (high_size % new_bit_size != 0)
         return false;

      unsigned write_mask = nir_intrinsic_write_mask(low->intrin);
      if (!writemask_representable(write_mask, low_size, new_bit_size))
         return false;

      write_mask = nir_intrinsic_write_mask(high->intrin);
      if (!writemask_representable(write_mask, high_size, new_bit_size))
         return false;
   }

   return true;
}

/* Updates a write mask, "write_mask", so that it can be used with a
 * "new_bit_size"-bit store instead of an "old_bit_size"-bit store. */
static uint32_t
update_writemask(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
{
   uint32_t res = 0;
   while (write_mask) {
      int start, count;
      u_bit_scan_consecutive_range(&write_mask, &start, &count);
      start = start * old_bit_size / new_bit_size;
      count = count * old_bit_size / new_bit_size;
      res |= ((1 << count) - 1) << start;
   }
   return res;
}

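/* e.g. update_writemask(0b0011, 16, 32) == 0b0001: two consecutive 16-bit
 * channels become a single 32-bit channel. */
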
static nir_deref_instr *subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
{
   /* avoid adding another deref to the path */
   if (deref->deref_type == nir_deref_type_ptr_as_array &&
       nir_src_is_const(deref->arr.index) &&
       offset % nir_deref_instr_ptr_as_array_stride(deref) == 0) {
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      nir_ssa_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
                                          deref->dest.ssa.bit_size);
      return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
   }

   if (deref->deref_type == nir_deref_type_array &&
       nir_src_is_const(deref->arr.index)) {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      unsigned stride = glsl_get_explicit_stride(parent->type);
      if (offset % stride == 0)
         return nir_build_deref_array_imm(
            b, parent, nir_src_as_int(deref->arr.index) - offset / stride);
   }

   deref = nir_build_deref_cast(b, &deref->dest.ssa, deref->mode,
                                glsl_scalar_type(GLSL_TYPE_UINT8), 1);
   return nir_build_deref_ptr_as_array(
      b, deref, nir_imm_intN_t(b, -offset, deref->dest.ssa.bit_size));
}

static bool update_align(struct entry *entry)
{
   bool has_align_index =
      nir_intrinsic_infos[entry->intrin->intrinsic].index_map[NIR_INTRINSIC_ALIGN_MUL];
   if (has_align_index) {
      unsigned align = get_best_align(entry);
      if (align != nir_intrinsic_align(entry->intrin)) {
         nir_intrinsic_set_align(entry->intrin, align, 0);
         return true;
      }
   }
   return false;
}

static void
vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
                struct entry *low, struct entry *high,
                struct entry *first, struct entry *second,
                unsigned new_bit_size, unsigned new_num_components,
                unsigned high_start)
{
   unsigned low_bit_size = get_bit_size(low);
   unsigned high_bit_size = get_bit_size(high);
   bool low_bool = low->intrin->dest.ssa.bit_size == 1;
   bool high_bool = high->intrin->dest.ssa.bit_size == 1;
   nir_ssa_def *data = &first->intrin->dest.ssa;

   b->cursor = nir_after_instr(first->instr);

   /* update the load's destination size and extract data for each of the original loads */
   data->num_components = new_num_components;
   data->bit_size = new_bit_size;

   nir_ssa_def *low_def = nir_extract_bits(
      b, &data, 1, 0, low->intrin->num_components, low_bit_size);
   nir_ssa_def *high_def = nir_extract_bits(
      b, &data, 1, high_start, high->intrin->num_components, high_bit_size);

   /* convert booleans */
   low_def = low_bool ? nir_i2b(b, low_def) : nir_mov(b, low_def);
   high_def = high_bool ? nir_i2b(b, high_def) : nir_mov(b, high_def);

   /* update uses */
   if (first == low) {
      nir_ssa_def_rewrite_uses_after(&low->intrin->dest.ssa, nir_src_for_ssa(low_def),
                                     high_def->parent_instr);
      nir_ssa_def_rewrite_uses(&high->intrin->dest.ssa, nir_src_for_ssa(high_def));
   } else {
      nir_ssa_def_rewrite_uses(&low->intrin->dest.ssa, nir_src_for_ssa(low_def));
      nir_ssa_def_rewrite_uses_after(&high->intrin->dest.ssa, nir_src_for_ssa(high_def),
                                     high_def->parent_instr);
   }

   /* update the intrinsic */
   first->intrin->num_components = new_num_components;

   const struct intrinsic_info *info = first->info;

   /* update the offset */
   if (first != low && info->base_src >= 0) {
      /* let nir_opt_algebraic() remove this addition. this doesn't have many
       * issues with subtracting 16 from expressions like "(i + 1) * 16" because
       * nir_opt_algebraic() turns them into "i * 16 + 16" */
      b->cursor = nir_before_instr(first->instr);

      nir_ssa_def *new_base = first->intrin->src[info->base_src].ssa;
      new_base = nir_iadd_imm(b, new_base, -(int)(high_start / 8u));

      nir_instr_rewrite_src(first->instr, &first->intrin->src[info->base_src],
                            nir_src_for_ssa(new_base));
   }

   /* update the deref */
   if (info->deref_src >= 0) {
      b->cursor = nir_before_instr(first->instr);

      nir_deref_instr *deref = nir_src_as_deref(first->intrin->src[info->deref_src]);
      if (first != low && high_start != 0)
         deref = subtract_deref(b, deref, high_start / 8u);
      first->deref = cast_deref(b, new_num_components, new_bit_size, deref);

      nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
                            nir_src_for_ssa(&first->deref->dest.ssa));
   }

   /* update base/align */
   bool has_base_index =
      nir_intrinsic_infos[first->intrin->intrinsic].index_map[NIR_INTRINSIC_BASE];

   if (first != low && has_base_index)
      nir_intrinsic_set_base(first->intrin, nir_intrinsic_base(low->intrin));

   first->key = low->key;
   first->offset = low->offset;
   first->best_align = get_best_align(low);

   update_align(first);

   list_del(&second->head);
   nir_instr_remove(second->instr);
}

static void
vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
                 struct entry *low, struct entry *high,
                 struct entry *first, struct entry *second,
                 unsigned new_bit_size, unsigned new_num_components,
                 unsigned high_start)
{
   ASSERTED unsigned low_size = low->intrin->num_components * get_bit_size(low);
   assert(low_size % new_bit_size == 0);

   b->cursor = nir_before_instr(second->instr);

   /* get new writemasks */
   uint32_t low_write_mask = nir_intrinsic_write_mask(low->intrin);
   uint32_t high_write_mask = nir_intrinsic_write_mask(high->intrin);
   low_write_mask = update_writemask(low_write_mask, get_bit_size(low), new_bit_size);
   high_write_mask = update_writemask(high_write_mask, get_bit_size(high), new_bit_size);
   high_write_mask <<= high_start / new_bit_size;

   uint32_t write_mask = low_write_mask | high_write_mask;

   /* convert booleans */
   nir_ssa_def *low_val = low->intrin->src[low->info->value_src].ssa;
   nir_ssa_def *high_val = high->intrin->src[high->info->value_src].ssa;
   low_val = low_val->bit_size == 1 ? nir_b2i(b, low_val, 32) : low_val;
   high_val = high_val->bit_size == 1 ? nir_b2i(b, high_val, 32) : high_val;

   /* combine the data */
   nir_ssa_def *data_channels[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < new_num_components; i++) {
      bool set_low = low_write_mask & (1 << i);
      bool set_high = high_write_mask & (1 << i);

      if (set_low && (!set_high || low == second)) {
         unsigned offset = i * new_bit_size;
         data_channels[i] = nir_extract_bits(b, &low_val, 1, offset, 1, new_bit_size);
      } else if (set_high) {
         assert(!set_low || high == second);
         unsigned offset = i * new_bit_size - high_start;
         data_channels[i] = nir_extract_bits(b, &high_val, 1, offset, 1, new_bit_size);
      } else {
         data_channels[i] = nir_ssa_undef(b, 1, new_bit_size);
      }
   }
   nir_ssa_def *data = nir_vec(b, data_channels, new_num_components);

   /* update the intrinsic */
   nir_intrinsic_set_write_mask(second->intrin, write_mask);
   second->intrin->num_components = data->num_components;

   const struct intrinsic_info *info = second->info;
   assert(info->value_src >= 0);
   nir_instr_rewrite_src(second->instr, &second->intrin->src[info->value_src],
                         nir_src_for_ssa(data));

   /* update the offset */
   if (second != low && info->base_src >= 0)
      nir_instr_rewrite_src(second->instr, &second->intrin->src[info->base_src],
                            low->intrin->src[info->base_src]);

   /* update the deref */
   if (info->deref_src >= 0) {
      b->cursor = nir_before_instr(second->instr);
      second->deref = cast_deref(b, new_num_components, new_bit_size,
                                 nir_src_as_deref(low->intrin->src[info->deref_src]));
      nir_instr_rewrite_src(second->instr, &second->intrin->src[info->deref_src],
                            nir_src_for_ssa(&second->deref->dest.ssa));
   }

   /* update base/align */
   bool has_base_index =
      nir_intrinsic_infos[second->intrin->intrinsic].index_map[NIR_INTRINSIC_BASE];

   if (second != low && has_base_index)
      nir_intrinsic_set_base(second->intrin, nir_intrinsic_base(low->intrin));

   second->key = low->key;
   second->offset = low->offset;
   second->best_align = get_best_align(low);

   update_align(second);

   list_del(&first->head);
   nir_instr_remove(first->instr);
}

/* Returns true if it can prove that "a" and "b" point to different resources. */
static bool
resources_different(nir_ssa_def *a, nir_ssa_def *b)
{
   if (!a || !b)
      return false;

   if (a->parent_instr->type == nir_instr_type_load_const &&
       b->parent_instr->type == nir_instr_type_load_const) {
      return nir_src_as_uint(nir_src_for_ssa(a)) != nir_src_as_uint(nir_src_for_ssa(b));
   }

   if (a->parent_instr->type == nir_instr_type_intrinsic &&
       b->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *aintrin = nir_instr_as_intrinsic(a->parent_instr);
      nir_intrinsic_instr *bintrin = nir_instr_as_intrinsic(b->parent_instr);
      if (aintrin->intrinsic == nir_intrinsic_vulkan_resource_index &&
          bintrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
         return nir_intrinsic_desc_set(aintrin) != nir_intrinsic_desc_set(bintrin) ||
                nir_intrinsic_binding(aintrin) != nir_intrinsic_binding(bintrin) ||
                resources_different(aintrin->src[0].ssa, bintrin->src[0].ssa);
      }
   }

   return false;
}

static int64_t
compare_entries(struct entry *a, struct entry *b)
{
   if (!entry_key_equals(a->key, b->key))
      return INT64_MAX;
   return b->offset_signed - a->offset_signed;
}

static bool
may_alias(struct entry *a, struct entry *b)
{
   assert(mode_to_index(get_variable_mode(a)) ==
          mode_to_index(get_variable_mode(b)));

   /* if the resources/variables are definitively different and both have
    * ACCESS_RESTRICT, we can assume they do not alias. */
   bool res_different = a->key->var != b->key->var ||
                        resources_different(a->key->resource, b->key->resource);
   if (res_different && (a->access & ACCESS_RESTRICT) && (b->access & ACCESS_RESTRICT))
      return false;

   /* we can't compare offsets if the resources/variables might be different */
   if (a->key->var != b->key->var || a->key->resource != b->key->resource)
      return true;

   /* use adjacency information */
   /* TODO: we can look closer at the entry keys */
   int64_t diff = compare_entries(a, b);
   if (diff != INT64_MAX) {
      /* with atomics, intrin->num_components can be 0 */
      if (diff < 0)
         return llabs(diff) < MAX2(b->intrin->num_components, 1u) * (get_bit_size(b) / 8u);
      else
         return diff < MAX2(a->intrin->num_components, 1u) * (get_bit_size(a) / 8u);
   }

   /* TODO: we can use deref information */

   return true;
}

static bool
check_for_aliasing(struct vectorize_ctx *ctx, struct entry *first, struct entry *second)
{
   nir_variable_mode mode = get_variable_mode(first);
   if (mode & (nir_var_uniform | nir_var_system_value |
               nir_var_mem_push_const | nir_var_mem_ubo))
      return false;

   unsigned mode_index = mode_to_index(mode);
   if (first->is_store) {
      /* find first entry that aliases "first" */
      list_for_each_entry_from(struct entry, next, first, &ctx->entries[mode_index], head) {
         if (next == first)
            continue;
         if (next == second)
            return false;
         if (may_alias(first, next))
            return true;
      }
   } else {
      /* find previous store that aliases this load */
      list_for_each_entry_from_rev(struct entry, prev, second, &ctx->entries[mode_index], head) {
         if (prev == second)
            continue;
         if (prev == first)
            return false;
         if (prev->is_store && may_alias(second, prev))
            return true;
      }
   }

   return false;
}

static bool
check_for_robustness(struct vectorize_ctx *ctx, struct entry *low)
{
   nir_variable_mode mode = get_variable_mode(low);
   if (mode & ctx->robust_modes) {
      unsigned low_bit_size = get_bit_size(low);
      unsigned low_size = low->intrin->num_components * low_bit_size;

      /* don't attempt to vectorize accesses if the offset can overflow. */
      /* TODO: handle indirect accesses. */
      return low->offset_signed < 0 && low->offset_signed + low_size >= 0;
   }

   return false;
}

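/* Illustration: for a robust-access mode, an entry whose signed offset is
 * negative but whose extent reaches zero or above wraps the unsigned offset,
 * and the bounds checks of a combined access could then differ from the
 * original per-access checks, so such entries are not vectorized. */
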
static bool
is_strided_vector(const struct glsl_type *type)
{
   if (glsl_type_is_vector(type)) {
      unsigned explicit_stride = glsl_get_explicit_stride(type);
      return explicit_stride != 0 && explicit_stride !=
             type_scalar_size_bytes(glsl_get_array_element(type));
   } else {
      return false;
   }
}

static bool
try_vectorize(nir_function_impl *impl, struct vectorize_ctx *ctx,
              struct entry *low, struct entry *high,
              struct entry *first, struct entry *second)
{
   if (!(get_variable_mode(first) & ctx->modes) ||
       !(get_variable_mode(second) & ctx->modes))
      return false;

   if (check_for_aliasing(ctx, first, second))
      return false;

   if (check_for_robustness(ctx, low))
      return false;

   /* we can only vectorize non-volatile loads/stores of the same type and with
    * the same access */
   if (first->info != second->info || first->access != second->access ||
       (first->access & ACCESS_VOLATILE) || first->info->is_atomic)
      return false;

   /* don't attempt to vectorize accesses of row-major matrix columns */
   if (first->deref) {
      const struct glsl_type *first_type = first->deref->type;
      const struct glsl_type *second_type = second->deref->type;
      if (is_strided_vector(first_type) || is_strided_vector(second_type))
         return false;
   }

   /* gather information */
   uint64_t diff = high->offset_signed - low->offset_signed;
   unsigned low_bit_size = get_bit_size(low);
   unsigned high_bit_size = get_bit_size(high);
   unsigned low_size = low->intrin->num_components * low_bit_size;
   unsigned high_size = high->intrin->num_components * high_bit_size;
   unsigned new_size = MAX2(diff * 8u + high_size, low_size);

   /* find a good bit size for the new load/store */
   unsigned new_bit_size = 0;
   if (new_bitsize_acceptable(ctx, low_bit_size, low, high, new_size)) {
      new_bit_size = low_bit_size;
   } else if (low_bit_size != high_bit_size &&
              new_bitsize_acceptable(ctx, high_bit_size, low, high, new_size)) {
      new_bit_size = high_bit_size;
   } else {
      new_bit_size = 64;
      for (; new_bit_size >= 8; new_bit_size /= 2) {
         /* don't repeat trying out bitsizes */
         if (new_bit_size == low_bit_size || new_bit_size == high_bit_size)
            continue;
         if (new_bitsize_acceptable(ctx, new_bit_size, low, high, new_size))
            break;
      }
      if (new_bit_size < 8)
         return false;
   }
   unsigned new_num_components = new_size / new_bit_size;

   /* vectorize the loads/stores */
   nir_builder b;
   nir_builder_init(&b, impl);

   if (first->is_store)
      vectorize_stores(&b, ctx, low, high, first, second,
                       new_bit_size, new_num_components, diff * 8u);
   else
      vectorize_loads(&b, ctx, low, high, first, second,
                      new_bit_size, new_num_components, diff * 8u);

   return true;
}

static bool
vectorize_entries(struct vectorize_ctx *ctx, nir_function_impl *impl, struct hash_table *ht)
{
   if (!ht)
      return false;

   bool progress = false;
   hash_table_foreach(ht, entry) {
      struct util_dynarray *arr = entry->data;
      if (!arr->size)
         continue;

      qsort(util_dynarray_begin(arr),
            util_dynarray_num_elements(arr, struct entry *),
            sizeof(struct entry *), &sort_entries);

      unsigned i = 0;
      for (; i < util_dynarray_num_elements(arr, struct entry *) - 1; i++) {
         struct entry *low = *util_dynarray_element(arr, struct entry *, i);
         struct entry *high = *util_dynarray_element(arr, struct entry *, i + 1);

         uint64_t diff = high->offset_signed - low->offset_signed;
         if (diff > get_bit_size(low) / 8u * low->intrin->num_components) {
            progress |= update_align(low);
            continue;
         }

         struct entry *first = low->index < high->index ? low : high;
         struct entry *second = low->index < high->index ? high : low;

         if (try_vectorize(impl, ctx, low, high, first, second)) {
            *util_dynarray_element(arr, struct entry *, i) = NULL;
            *util_dynarray_element(arr, struct entry *, i + 1) = low->is_store ? second : first;
            progress = true;
         } else {
            progress |= update_align(low);
         }
      }

      struct entry *last = *util_dynarray_element(arr, struct entry *, i);
      progress |= update_align(last);
   }

   _mesa_hash_table_clear(ht, delete_entry_dynarray);

   return progress;
}

static bool
handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *impl, nir_instr *instr)
{
   unsigned modes = 0;
   bool acquire = true;
   bool release = true;
   if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier:
         modes = nir_var_mem_ssbo | nir_var_mem_shared | nir_var_mem_global;
         break;
      /* prevent speculative loads/stores */
      case nir_intrinsic_discard_if:
      case nir_intrinsic_discard:
         modes = nir_var_all;
         break;
      case nir_intrinsic_memory_barrier_buffer:
         modes = nir_var_mem_ssbo | nir_var_mem_global;
         break;
      case nir_intrinsic_memory_barrier_shared:
         modes = nir_var_mem_shared;
         break;
      case nir_intrinsic_scoped_memory_barrier:
         modes = nir_intrinsic_memory_modes(intrin);
         acquire = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE;
         release = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE;
         switch (nir_intrinsic_memory_scope(intrin)) {
         case NIR_SCOPE_INVOCATION:
         case NIR_SCOPE_SUBGROUP:
            /* a barrier should never be required for correctness with these scopes */
            modes = 0;
            break;
         default:
            break;
         }
         break;
      default:
         return false;
      }
   } else if (instr->type == nir_instr_type_call) {
      modes = nir_var_all;
   } else {
      return false;
   }

   while (modes) {
      unsigned mode_index = u_bit_scan(&modes);
      if ((1 << mode_index) == nir_var_mem_global) {
         /* Global should be rolled in with SSBO */
         assert(list_is_empty(&ctx->entries[mode_index]));
         assert(ctx->loads[mode_index] == NULL);
         assert(ctx->stores[mode_index] == NULL);
         continue;
      }

      if (acquire)
         *progress |= vectorize_entries(ctx, impl, ctx->loads[mode_index]);
      if (release)
         *progress |= vectorize_entries(ctx, impl, ctx->stores[mode_index]);
   }

   return true;
}

static bool
process_block(nir_function_impl *impl, struct vectorize_ctx *ctx, nir_block *block)
{
   bool progress = false;

   for (unsigned i = 0; i < nir_num_variable_modes; i++) {
      list_inithead(&ctx->entries[i]);
      if (ctx->loads[i])
         _mesa_hash_table_clear(ctx->loads[i], delete_entry_dynarray);
      if (ctx->stores[i])
         _mesa_hash_table_clear(ctx->stores[i], delete_entry_dynarray);
   }

   /* create entries */
   unsigned next_index = 0;

   nir_foreach_instr_safe(instr, block) {
      if (handle_barrier(ctx, &progress, impl, instr))
         continue;

      /* gather information */
      if (instr->type != nir_instr_type_intrinsic)
         continue;
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      const struct intrinsic_info *info = get_info(intrin->intrinsic);
      if (!info)
         continue;

      nir_variable_mode mode = info->mode;
      if (!mode)
         mode = nir_src_as_deref(intrin->src[info->deref_src])->mode;
      if (!(mode & aliasing_modes(ctx->modes)))
         continue;
      unsigned mode_index = mode_to_index(mode);

      struct entry *entry = create_entry(ctx, info, intrin);
      entry->index = next_index++;

      list_addtail(&entry->head, &ctx->entries[mode_index]);

      /* add the entry to a hash table */

      struct hash_table *adj_ht = NULL;
      if (entry->is_store) {
         if (!ctx->stores[mode_index])
            ctx->stores[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
         adj_ht = ctx->stores[mode_index];
      } else {
         if (!ctx->loads[mode_index])
            ctx->loads[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
         adj_ht = ctx->loads[mode_index];
      }

      uint32_t key_hash = hash_entry_key(entry->key);
      struct hash_entry *adj_entry = _mesa_hash_table_search_pre_hashed(adj_ht, key_hash, entry->key);
      struct util_dynarray *arr;
      if (adj_entry && adj_entry->data) {
         arr = (struct util_dynarray *)adj_entry->data;
      } else {
         arr = ralloc(ctx, struct util_dynarray);
         util_dynarray_init(arr, arr);
         _mesa_hash_table_insert_pre_hashed(adj_ht, key_hash, entry->key, arr);
      }
      util_dynarray_append(arr, struct entry *, entry);
   }

   /* sort and combine entries */
   for (unsigned i = 0; i < nir_num_variable_modes; i++) {
      progress |= vectorize_entries(ctx, impl, ctx->loads[i]);
      progress |= vectorize_entries(ctx, impl, ctx->stores[i]);
   }

   return progress;
}

bool
nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
                             nir_should_vectorize_mem_func callback,
                             nir_variable_mode robust_modes)
{
   bool progress = false;

   struct vectorize_ctx *ctx = rzalloc(NULL, struct vectorize_ctx);
   ctx->modes = modes;
   ctx->callback = callback;
   ctx->robust_modes = robust_modes;

   nir_index_vars(shader, NULL, modes);

   nir_foreach_function(function, shader) {
      if (function->impl) {
         if (modes & nir_var_function_temp)
            nir_index_vars(shader, function->impl, nir_var_function_temp);

         nir_foreach_block(block, function->impl)
            progress |= process_block(function->impl, ctx, block);

         nir_metadata_preserve(function->impl,
                               nir_metadata_block_index |
                               nir_metadata_dominance |
                               nir_metadata_live_ssa_defs);
      }
   }

   ralloc_free(ctx);
   return progress;
}