/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/*
 * Although it's called a load/store "vectorization" pass, this also combines
 * intersecting and identical loads/stores. It currently supports derefs, ubo,
 * ssbo and push constant loads/stores.
 *
 * This doesn't handle copy_deref intrinsics and assumes that
 * nir_lower_alu_to_scalar() has been called and that the IR is free from ALU
 * modifiers. It also assumes that derefs have explicitly laid out types.
 *
 * After vectorization, the backend may want to call nir_opt_algebraic() and
 * nir_lower_pack(). Also, this pass creates cast instructions taking derefs
 * as a source, and some parts of NIR may not be able to handle that well.
 *
 * There are a few situations where this doesn't vectorize as well as it could:
 * - It won't turn four consecutive vec3 loads into 3 vec4 loads.
 * - It doesn't do global vectorization.
 * Handling these cases probably wouldn't provide much benefit though.
 *
 * This probably doesn't handle big-endian GPUs correctly.
 */
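/*
 * Illustrative example (not taken from real compiler output): two adjacent
 * loads such as
 *
 *    a = load_ssbo(buf, offset)       ; 4 bytes at "offset"
 *    b = load_ssbo(buf, offset + 4)   ; 4 bytes at "offset + 4"
 *
 * are rewritten into a single 8-byte load at "offset"; the two original
 * values are then recovered from the combined result with nir_extract_bits().
 */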
#include "nir.h"
#include "nir_deref.h"
#include "nir_builder.h"
#include "nir_worklist.h"
#include "util/u_dynarray.h"

#include <stdlib.h>
struct intrinsic_info {
   nir_variable_mode mode; /* 0 if the mode is obtained from the deref. */
   nir_intrinsic_op op;
   bool is_atomic;
   /* Indices into nir_intrinsic::src[] or -1 if not applicable. */
   int resource_src; /* resource (e.g. from vulkan_resource_index) */
   int base_src;     /* offset which it loads/stores from */
   int deref_src;    /* deref which it loads/stores from */
   int value_src;    /* the data it is storing */
};
static const struct intrinsic_info *
get_info(nir_intrinsic_op op)
{
   switch (op) {
#define INFO(mode, op, atomic, res, base, deref, val) \
   case nir_intrinsic_##op: {\
      static const struct intrinsic_info op##_info = {mode, nir_intrinsic_##op, atomic, res, base, deref, val};\
      return &op##_info;\
   }
#define LOAD(mode, op, res, base, deref) INFO(mode, load_##op, false, res, base, deref, -1)
#define STORE(mode, op, res, base, deref, val) INFO(mode, store_##op, false, res, base, deref, val)
#define ATOMIC(mode, type, op, res, base, deref, val) INFO(mode, type##_atomic_##op, true, res, base, deref, val)
   LOAD(nir_var_mem_push_const, push_constant, -1, 0, -1)
   LOAD(nir_var_mem_ubo, ubo, 0, 1, -1)
   LOAD(nir_var_mem_ssbo, ssbo, 0, 1, -1)
   STORE(nir_var_mem_ssbo, ssbo, 1, 2, -1, 0)
   LOAD(0, deref, -1, -1, 0)
   STORE(0, deref, -1, -1, 0, 1)
   LOAD(nir_var_mem_shared, shared, -1, 0, -1)
   STORE(nir_var_mem_shared, shared, -1, 1, -1, 0)
   LOAD(nir_var_mem_global, global, -1, 0, -1)
   STORE(nir_var_mem_global, global, -1, 1, -1, 0)
   ATOMIC(nir_var_mem_ssbo, ssbo, add, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, imin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, umin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, imax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, umax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, and, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, or, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, xor, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, exchange, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, comp_swap, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fadd, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fmin, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fmax, 0, 1, -1, 2)
   ATOMIC(nir_var_mem_ssbo, ssbo, fcomp_swap, 0, 1, -1, 2)
   ATOMIC(0, deref, add, -1, -1, 0, 1)
   ATOMIC(0, deref, imin, -1, -1, 0, 1)
   ATOMIC(0, deref, umin, -1, -1, 0, 1)
   ATOMIC(0, deref, imax, -1, -1, 0, 1)
   ATOMIC(0, deref, umax, -1, -1, 0, 1)
   ATOMIC(0, deref, and, -1, -1, 0, 1)
   ATOMIC(0, deref, or, -1, -1, 0, 1)
   ATOMIC(0, deref, xor, -1, -1, 0, 1)
   ATOMIC(0, deref, exchange, -1, -1, 0, 1)
   ATOMIC(0, deref, comp_swap, -1, -1, 0, 1)
   ATOMIC(0, deref, fadd, -1, -1, 0, 1)
   ATOMIC(0, deref, fmin, -1, -1, 0, 1)
   ATOMIC(0, deref, fmax, -1, -1, 0, 1)
   ATOMIC(0, deref, fcomp_swap, -1, -1, 0, 1)
   ATOMIC(nir_var_mem_shared, shared, add, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, imin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, umin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, imax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, umax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, and, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, or, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, xor, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, exchange, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, comp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fadd, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fmin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fmax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_shared, shared, fcomp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, add, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, imin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, umin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, imax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, umax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, and, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, or, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, xor, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, exchange, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, comp_swap, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fadd, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fmin, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fmax, -1, 0, -1, 1)
   ATOMIC(nir_var_mem_global, global, fcomp_swap, -1, 0, -1, 1)
   default:
      break;
   }
   return NULL;
}
/*
 * Information used to compare memory operations.
 * It canonically represents an offset as:
 * `offset_defs[0]*offset_defs_mul[0] + offset_defs[1]*offset_defs_mul[1] + ...`
 * "offset_defs" is sorted in ascending order by the ssa definition's index.
 * "resource" or "var" may be NULL.
 */
struct entry_key {
   nir_ssa_def *resource;
   nir_variable *var;
   unsigned offset_def_count;
   nir_ssa_def **offset_defs;
   uint64_t *offset_defs_mul;
};
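/*
 * For example (illustrative only): an access at offset "i * 16 + 4" ends up
 * with offset_defs = {i} and offset_defs_mul = {16}, while the constant 4 is
 * folded into entry::offset rather than into the key.
 */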
/* Information on a single memory operation. */
struct entry {
   struct list_head head;
   unsigned index;

   struct entry_key *key;
   union {
      uint64_t offset; /* sign-extended */
      int64_t offset_signed;
   };
   uint32_t best_align;

   nir_instr *instr;
   nir_intrinsic_instr *intrin;
   const struct intrinsic_info *info;
   enum gl_access_qualifier access;
   bool is_store;

   nir_deref_instr *deref;
};
struct vectorize_ctx {
   nir_variable_mode modes;
   nir_should_vectorize_mem_func callback;
   nir_variable_mode robust_modes;
   struct list_head entries[nir_num_variable_modes];
   struct hash_table *loads[nir_num_variable_modes];
   struct hash_table *stores[nir_num_variable_modes];
};
static uint32_t hash_entry_key(const void *key_)
{
   /* this is careful to not include pointers in the hash calculation so that
    * the order of the hash table walk is deterministic */
   struct entry_key *key = (struct entry_key*)key_;

   uint32_t hash = 0;
   if (key->resource)
      hash = XXH32(&key->resource->index, sizeof(key->resource->index), hash);
   if (key->var) {
      hash = XXH32(&key->var->index, sizeof(key->var->index), hash);
      unsigned mode = key->var->data.mode;
      hash = XXH32(&mode, sizeof(mode), hash);
   }

   for (unsigned i = 0; i < key->offset_def_count; i++)
      hash = XXH32(&key->offset_defs[i]->index, sizeof(key->offset_defs[i]->index), hash);

   hash = XXH32(key->offset_defs_mul, key->offset_def_count * sizeof(uint64_t), hash);

   return hash;
}
static bool entry_key_equals(const void *a_, const void *b_)
{
   struct entry_key *a = (struct entry_key*)a_;
   struct entry_key *b = (struct entry_key*)b_;

   if (a->var != b->var || a->resource != b->resource)
      return false;

   if (a->offset_def_count != b->offset_def_count)
      return false;

   size_t offset_def_size = a->offset_def_count * sizeof(nir_ssa_def *);
   size_t offset_def_mul_size = a->offset_def_count * sizeof(uint64_t);
   if (a->offset_def_count &&
       (memcmp(a->offset_defs, b->offset_defs, offset_def_size) ||
        memcmp(a->offset_defs_mul, b->offset_defs_mul, offset_def_mul_size)))
      return false;

   return true;
}
static void delete_entry_dynarray(struct hash_entry *entry)
{
   struct util_dynarray *arr = (struct util_dynarray *)entry->data;
   ralloc_free(arr);
}
static int sort_entries(const void *a_, const void *b_)
{
   struct entry *a = *(struct entry*const*)a_;
   struct entry *b = *(struct entry*const*)b_;

   if (a->offset_signed > b->offset_signed)
      return 1;
   else if (a->offset_signed < b->offset_signed)
      return -1;
   else
      return 0;
}
static unsigned
get_bit_size(struct entry *entry)
{
   unsigned size = entry->is_store ?
                   entry->intrin->src[entry->info->value_src].ssa->bit_size :
                   entry->intrin->dest.ssa.bit_size;
   return size == 1 ? 32u : size;
}
/* If "def" is from an alu instruction with the opcode "op" and one of its
 * sources is a constant, update "def" to be the non-constant source, fill "c"
 * with the constant and return true. */
static bool
parse_alu(nir_ssa_def **def, nir_op op, uint64_t *c)
{
   nir_ssa_scalar scalar;
   scalar.def = *def;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != op)
      return false;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
   if (op != nir_op_ishl && nir_ssa_scalar_is_const(src0) && src1.comp == 0) {
      *c = nir_ssa_scalar_as_uint(src0);
      *def = src1.def;
   } else if (nir_ssa_scalar_is_const(src1) && src0.comp == 0) {
      *c = nir_ssa_scalar_as_uint(src1);
      *def = src0.def;
   } else {
      return false;
   }

   return true;
}
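/*
 * For example (illustrative only): with *def pointing at "iadd ssa_1, 4" and
 * op == nir_op_iadd, parse_alu() sets *c = 4, updates *def to ssa_1 and
 * returns true.
 */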
/* Parses an offset expression such as "a * 16 + 4" and "(a * 16 + 4) * 64 + 32". */
static bool
parse_offset(nir_ssa_def **base, uint64_t *base_mul, uint64_t *offset)
{
   if ((*base)->parent_instr->type == nir_instr_type_load_const) {
      *offset = nir_src_comp_as_uint(nir_src_for_ssa(*base), 0);
      *base = NULL;
      return true;
   }

   uint64_t mul = 1;
   uint64_t add = 0;
   bool progress = false;
   while (true) {
      uint64_t mul2 = 1, add2 = 0;

      progress = parse_alu(base, nir_op_imul, &mul2);
      mul *= mul2;

      mul2 = 0;
      progress |= parse_alu(base, nir_op_ishl, &mul2);
      mul <<= mul2;

      add2 = 0;
      progress |= parse_alu(base, nir_op_iadd, &add2);
      add += add2 * mul;

      if (!progress)
         break;
   }

   *base_mul = mul;
   *offset = add;

   return progress;
}
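/*
 * Worked example (illustrative only): for "(a * 16 + 4) * 64 + 32" the loop
 * above peels the iadd/imul chain, leaving *base = a, *base_mul = 1024 and
 * *offset = 288 (i.e. 4 * 64 + 32).
 */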
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4u : glsl_get_bit_size(type) / 8u;
}

static uint64_t
mask_sign_extend(uint64_t val, unsigned bit_size)
{
   return (int64_t)(val << (64 - bit_size)) >> (64 - bit_size);
}
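/*
 * For example (illustrative only): mask_sign_extend(0x80, 8) yields
 * 0xffffffffffffff80, i.e. -128 reinterpreted as a 64-bit value.
 */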
static unsigned
add_to_entry_key(nir_ssa_def **offset_defs, uint64_t *offset_defs_mul,
                 unsigned offset_def_count, nir_ssa_def *def, uint64_t mul)
{
   mul = mask_sign_extend(mul, def->bit_size);

   for (unsigned i = 0; i <= offset_def_count; i++) {
      if (i == offset_def_count || def->index > offset_defs[i]->index) {
         /* insert before i */
         memmove(offset_defs + i + 1, offset_defs + i,
                 (offset_def_count - i) * sizeof(nir_ssa_def *));
         memmove(offset_defs_mul + i + 1, offset_defs_mul + i,
                 (offset_def_count - i) * sizeof(uint64_t));
         offset_defs[i] = def;
         offset_defs_mul[i] = mul;
         return 1;
      } else if (def->index == offset_defs[i]->index) {
         /* merge with offset_def at i */
         offset_defs_mul[i] += mul;
         return 0;
      }
   }

   unreachable("Unreachable.");
   return 0;
}
static struct entry_key *
create_entry_key_from_deref(void *mem_ctx,
                            struct vectorize_ctx *ctx,
                            nir_deref_path *path,
                            uint64_t *offset_base)
{
   unsigned path_len = 0;
   while (path->path[path_len])
      path_len++;

   nir_ssa_def *offset_defs_stack[32];
   uint64_t offset_defs_mul_stack[32];
   nir_ssa_def **offset_defs = offset_defs_stack;
   uint64_t *offset_defs_mul = offset_defs_mul_stack;
   if (path_len > 32) {
      offset_defs = malloc(path_len * sizeof(nir_ssa_def *));
      offset_defs_mul = malloc(path_len * sizeof(uint64_t));
   }
   unsigned offset_def_count = 0;

   struct entry_key* key = ralloc(mem_ctx, struct entry_key);
   key->resource = NULL;
   key->var = NULL;

   for (unsigned i = 0; i < path_len; i++) {
      nir_deref_instr *parent = i ? path->path[i - 1] : NULL;
      nir_deref_instr *deref = path->path[i];

      switch (deref->deref_type) {
      case nir_deref_type_var: {
         key->var = deref->var;
         break;
      }
      case nir_deref_type_array:
      case nir_deref_type_ptr_as_array: {
         nir_ssa_def *index = deref->arr.index.ssa;
         uint32_t stride = nir_deref_instr_array_stride(deref);

         nir_ssa_def *base = index;
         uint64_t offset = 0, base_mul = 1;
         parse_offset(&base, &base_mul, &offset);
         offset = mask_sign_extend(offset, index->bit_size);

         *offset_base += offset * stride;
         if (base) {
            offset_def_count += add_to_entry_key(offset_defs, offset_defs_mul,
                                                 offset_def_count, base,
                                                 base_mul * stride);
         }
         break;
      }
      case nir_deref_type_struct: {
         int offset = glsl_get_struct_field_offset(parent->type, deref->strct.index);
         *offset_base += offset;
         break;
      }
      case nir_deref_type_cast: {
         if (!parent)
            key->resource = deref->parent.ssa;
         break;
      }
      default:
         unreachable("Unhandled deref type");
      }
   }

   key->offset_def_count = offset_def_count;
   key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, offset_def_count);
   key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, offset_def_count);
   memcpy(key->offset_defs, offset_defs, offset_def_count * sizeof(nir_ssa_def *));
   memcpy(key->offset_defs_mul, offset_defs_mul, offset_def_count * sizeof(uint64_t));

   if (offset_defs != offset_defs_stack)
      free(offset_defs);
   if (offset_defs_mul != offset_defs_mul_stack)
      free(offset_defs_mul);

   return key;
}
static unsigned
parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
                            nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
{
   uint64_t new_mul;
   uint64_t new_offset;
   parse_offset(&base, &new_mul, &new_offset);
   *offset += new_offset * base_mul;

   if (!base)
      return 0;

   base_mul *= new_mul;

   assert(left >= 1);

   if (left >= 2) {
      nir_ssa_scalar scalar;
      scalar.def = base;
      scalar.comp = 0;
      if (nir_ssa_scalar_is_alu(scalar) && nir_ssa_scalar_alu_op(scalar) == nir_op_iadd) {
         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (src0.comp == 0 && src1.comp == 0) {
            unsigned amount = parse_entry_key_from_offset(key, size, left - 1, src0.def, base_mul, offset);
            amount += parse_entry_key_from_offset(key, size + amount, left - amount, src1.def, base_mul, offset);
            return amount;
         }
      }
   }

   return add_to_entry_key(key->offset_defs, key->offset_defs_mul, size, base, base_mul);
}
static struct entry_key *
create_entry_key_from_offset(void *mem_ctx, nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
{
   struct entry_key *key = ralloc(mem_ctx, struct entry_key);
   key->resource = NULL;
   key->var = NULL;
   if (base) {
      nir_ssa_def *offset_defs[32];
      uint64_t offset_defs_mul[32];
      key->offset_defs = offset_defs;
      key->offset_defs_mul = offset_defs_mul;

      key->offset_def_count = parse_entry_key_from_offset(key, 0, 32, base, base_mul, offset);

      key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, key->offset_def_count);
      key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, key->offset_def_count);
      memcpy(key->offset_defs, offset_defs, key->offset_def_count * sizeof(nir_ssa_def *));
      memcpy(key->offset_defs_mul, offset_defs_mul, key->offset_def_count * sizeof(uint64_t));
   } else {
      key->offset_def_count = 0;
      key->offset_defs = NULL;
      key->offset_defs_mul = NULL;
   }
   return key;
}
static nir_variable_mode
get_variable_mode(struct entry *entry)
{
   if (entry->info->mode)
      return entry->info->mode;
   assert(entry->deref);
   return entry->deref->mode;
}

static unsigned
mode_to_index(nir_variable_mode mode)
{
   assert(util_bitcount(mode) == 1);

   /* Globals and SSBOs should be tracked together */
   if (mode == nir_var_mem_global)
      mode = nir_var_mem_ssbo;

   return ffs(mode) - 1;
}
static nir_variable_mode
aliasing_modes(nir_variable_mode modes)
{
   /* Global and SSBO can alias */
   if (modes & (nir_var_mem_ssbo | nir_var_mem_global))
      modes |= nir_var_mem_ssbo | nir_var_mem_global;
   return modes;
}
static struct entry *
create_entry(struct vectorize_ctx *ctx,
             const struct intrinsic_info *info,
             nir_intrinsic_instr *intrin)
{
   struct entry *entry = rzalloc(ctx, struct entry);
   entry->intrin = intrin;
   entry->instr = &intrin->instr;
   entry->info = info;
   entry->best_align = UINT32_MAX;
   entry->is_store = entry->info->value_src >= 0;

   if (entry->info->deref_src >= 0) {
      entry->deref = nir_src_as_deref(intrin->src[entry->info->deref_src]);
      nir_deref_path path;
      nir_deref_path_init(&path, entry->deref, NULL);
      entry->key = create_entry_key_from_deref(entry, ctx, &path, &entry->offset);
      nir_deref_path_finish(&path);
   } else {
      nir_ssa_def *base = entry->info->base_src >= 0 ?
                          intrin->src[entry->info->base_src].ssa : NULL;
      uint64_t offset = 0;
      if (nir_intrinsic_has_base(intrin))
         offset += nir_intrinsic_base(intrin);
      entry->key = create_entry_key_from_offset(entry, base, 1, &offset);
      entry->offset = offset;

      if (base)
         entry->offset = mask_sign_extend(entry->offset, base->bit_size);
   }

   if (entry->info->resource_src >= 0)
      entry->key->resource = intrin->src[entry->info->resource_src].ssa;

   if (nir_intrinsic_has_access(intrin))
      entry->access = nir_intrinsic_access(intrin);
   else if (entry->key->var)
      entry->access = entry->key->var->data.access;

   uint32_t restrict_modes = nir_var_shader_in | nir_var_shader_out;
   restrict_modes |= nir_var_shader_temp | nir_var_function_temp;
   restrict_modes |= nir_var_uniform | nir_var_mem_push_const;
   restrict_modes |= nir_var_system_value | nir_var_mem_shared;
   if (get_variable_mode(entry) & restrict_modes)
      entry->access |= ACCESS_RESTRICT;

   return entry;
}
static nir_deref_instr *
cast_deref(nir_builder *b, unsigned num_components, unsigned bit_size, nir_deref_instr *deref)
{
   if (glsl_get_components(deref->type) == num_components &&
       type_scalar_size_bytes(deref->type)*8u == bit_size)
      return deref;

   enum glsl_base_type types[] = {
      GLSL_TYPE_UINT8, GLSL_TYPE_UINT16, GLSL_TYPE_UINT, GLSL_TYPE_UINT64};
   enum glsl_base_type base = types[ffs(bit_size / 8u) - 1u];
   const struct glsl_type *type = glsl_vector_type(base, num_components);

   if (deref->type == type)
      return deref;

   return nir_build_deref_cast(b, &deref->dest.ssa, deref->mode, type, 0);
}
/* Return true if the write mask "write_mask" of a store with "old_bit_size"
 * bits per element can be represented for a store with "new_bit_size" bits per
 * element. */
static bool
writemask_representable(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
{
   while (write_mask) {
      int start, count;
      u_bit_scan_consecutive_range(&write_mask, &start, &count);
      start *= old_bit_size;
      count *= old_bit_size;
      if (start % new_bit_size != 0)
         return false;
      if (count % new_bit_size != 0)
         return false;
   }
   return true;
}
, uint64_t b
)
static uint32_t
get_best_align(struct entry *entry)
{
   if (entry->best_align != UINT32_MAX)
      return entry->best_align;

   uint64_t best_align = entry->offset;
   for (unsigned i = 0; i < entry->key->offset_def_count; i++) {
      if (!best_align)
         best_align = entry->key->offset_defs_mul[i];
      else if (entry->key->offset_defs_mul[i])
         best_align = gcd(best_align, entry->key->offset_defs_mul[i]);
   }

   if (nir_intrinsic_has_align_mul(entry->intrin))
      best_align = MAX2(best_align, nir_intrinsic_align(entry->intrin));

   /* ensure the result is a power of two that fits in a int32_t */
   entry->best_align = gcd(best_align, 1u << 30);

   return entry->best_align;
}
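/*
 * For example (illustrative only): an entry with constant offset 24 and a
 * single offset_defs_mul of 16 gets best_align = gcd(24, 16) = 8.
 */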
/* Return true if "new_bit_size" is a usable bit size for a vectorized load/store
 * of "low" and "high". */
static bool
new_bitsize_acceptable(struct vectorize_ctx *ctx, unsigned new_bit_size,
                       struct entry *low, struct entry *high, unsigned size)
{
   if (size % new_bit_size != 0)
      return false;

   unsigned new_num_components = size / new_bit_size;
   if (!nir_num_components_valid(new_num_components))
      return false;

   unsigned high_offset = high->offset_signed - low->offset_signed;

   /* check nir_extract_bits limitations */
   unsigned common_bit_size = MIN2(get_bit_size(low), get_bit_size(high));
   common_bit_size = MIN2(common_bit_size, new_bit_size);
   if (high_offset > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(high_offset * 8) - 1)));
   if (new_bit_size / common_bit_size > NIR_MAX_VEC_COMPONENTS)
      return false;

   if (!ctx->callback(get_best_align(low), new_bit_size, new_num_components,
                      high_offset, low->intrin, high->intrin))
      return false;

   if (low->is_store) {
      unsigned low_size = low->intrin->num_components * get_bit_size(low);
      unsigned high_size = high->intrin->num_components * get_bit_size(high);

      if (low_size % new_bit_size != 0)
         return false;
      if (high_size % new_bit_size != 0)
         return false;

      unsigned write_mask = nir_intrinsic_write_mask(low->intrin);
      if (!writemask_representable(write_mask, low_size, new_bit_size))
         return false;

      write_mask = nir_intrinsic_write_mask(high->intrin);
      if (!writemask_representable(write_mask, high_size, new_bit_size))
         return false;
   }

   return true;
}
/* Updates a write mask, "write_mask", so that it can be used with a
 * "new_bit_size"-bit store instead of a "old_bit_size"-bit store. */
static uint32_t
update_writemask(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
{
   uint32_t res = 0;
   while (write_mask) {
      int start, count;
      u_bit_scan_consecutive_range(&write_mask, &start, &count);
      start = start * old_bit_size / new_bit_size;
      count = count * old_bit_size / new_bit_size;
      res |= ((1 << count) - 1) << start;
   }
   return res;
}
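/*
 * For example (illustrative only): write_mask 0b0011 of a 32-bit store
 * becomes 0b0001 when rewritten for a 64-bit store, since the two 32-bit
 * components collapse into one 64-bit component.
 */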
static nir_deref_instr *subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
{
   /* avoid adding another deref to the path */
   if (deref->deref_type == nir_deref_type_ptr_as_array &&
       nir_src_is_const(deref->arr.index) &&
       offset % nir_deref_instr_array_stride(deref) == 0) {
      unsigned stride = nir_deref_instr_array_stride(deref);
      nir_ssa_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
                                          deref->dest.ssa.bit_size);
      return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
   }

   if (deref->deref_type == nir_deref_type_array &&
       nir_src_is_const(deref->arr.index)) {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      unsigned stride = glsl_get_explicit_stride(parent->type);
      if (offset % stride == 0)
         return nir_build_deref_array_imm(
            b, parent, nir_src_as_int(deref->arr.index) - offset / stride);
   }

   deref = nir_build_deref_cast(b, &deref->dest.ssa, deref->mode,
                                glsl_scalar_type(GLSL_TYPE_UINT8), 1);
   return nir_build_deref_ptr_as_array(
      b, deref, nir_imm_intN_t(b, -offset, deref->dest.ssa.bit_size));
}
static bool update_align(struct entry *entry)
{
   if (nir_intrinsic_has_align_mul(entry->intrin)) {
      unsigned align = get_best_align(entry);
      if (align != nir_intrinsic_align(entry->intrin)) {
         nir_intrinsic_set_align(entry->intrin, align, 0);
         return true;
      }
   }
   return false;
}
static void
vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
                struct entry *low, struct entry *high,
                struct entry *first, struct entry *second,
                unsigned new_bit_size, unsigned new_num_components,
                unsigned high_start)
{
   unsigned low_bit_size = get_bit_size(low);
   unsigned high_bit_size = get_bit_size(high);
   bool low_bool = low->intrin->dest.ssa.bit_size == 1;
   bool high_bool = high->intrin->dest.ssa.bit_size == 1;
   nir_ssa_def *data = &first->intrin->dest.ssa;

   b->cursor = nir_after_instr(first->instr);

   /* update the load's destination size and extract data for each of the original loads */
   data->num_components = new_num_components;
   data->bit_size = new_bit_size;

   nir_ssa_def *low_def = nir_extract_bits(
      b, &data, 1, 0, low->intrin->num_components, low_bit_size);
   nir_ssa_def *high_def = nir_extract_bits(
      b, &data, 1, high_start, high->intrin->num_components, high_bit_size);

   /* convert booleans */
   low_def = low_bool ? nir_i2b(b, low_def) : nir_mov(b, low_def);
   high_def = high_bool ? nir_i2b(b, high_def) : nir_mov(b, high_def);

   /* update uses */
   if (first == low) {
      nir_ssa_def_rewrite_uses_after(&low->intrin->dest.ssa, nir_src_for_ssa(low_def),
                                     high_def->parent_instr);
      nir_ssa_def_rewrite_uses(&high->intrin->dest.ssa, nir_src_for_ssa(high_def));
   } else {
      nir_ssa_def_rewrite_uses(&low->intrin->dest.ssa, nir_src_for_ssa(low_def));
      nir_ssa_def_rewrite_uses_after(&high->intrin->dest.ssa, nir_src_for_ssa(high_def),
                                     high_def->parent_instr);
   }

   /* update the intrinsic */
   first->intrin->num_components = new_num_components;

   const struct intrinsic_info *info = first->info;

   /* update the offset */
   if (first != low && info->base_src >= 0) {
      /* let nir_opt_algebraic() remove this addition. this doesn't cause many
       * issues with subtracting 16 from expressions like "(i + 1) * 16" because
       * nir_opt_algebraic() turns them into "i * 16 + 16" */
      b->cursor = nir_before_instr(first->instr);

      nir_ssa_def *new_base = first->intrin->src[info->base_src].ssa;
      new_base = nir_iadd_imm(b, new_base, -(int)(high_start / 8u));

      nir_instr_rewrite_src(first->instr, &first->intrin->src[info->base_src],
                            nir_src_for_ssa(new_base));
   }

   /* update the deref */
   if (info->deref_src >= 0) {
      b->cursor = nir_before_instr(first->instr);

      nir_deref_instr *deref = nir_src_as_deref(first->intrin->src[info->deref_src]);
      if (first != low && high_start != 0)
         deref = subtract_deref(b, deref, high_start / 8u);
      first->deref = cast_deref(b, new_num_components, new_bit_size, deref);

      nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
                            nir_src_for_ssa(&first->deref->dest.ssa));
   }

   /* update base/align */
   if (first != low && nir_intrinsic_has_base(first->intrin))
      nir_intrinsic_set_base(first->intrin, nir_intrinsic_base(low->intrin));

   first->key = low->key;
   first->offset = low->offset;
   first->best_align = get_best_align(low);

   update_align(first);

   /* "second" is now redundant */
   list_del(&second->head);
   nir_instr_remove(second->instr);
}
static void
vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
                 struct entry *low, struct entry *high,
                 struct entry *first, struct entry *second,
                 unsigned new_bit_size, unsigned new_num_components,
                 unsigned high_start)
{
   ASSERTED unsigned low_size = low->intrin->num_components * get_bit_size(low);
   assert(low_size % new_bit_size == 0);

   b->cursor = nir_before_instr(second->instr);

   /* get new writemasks */
   uint32_t low_write_mask = nir_intrinsic_write_mask(low->intrin);
   uint32_t high_write_mask = nir_intrinsic_write_mask(high->intrin);
   low_write_mask = update_writemask(low_write_mask, get_bit_size(low), new_bit_size);
   high_write_mask = update_writemask(high_write_mask, get_bit_size(high), new_bit_size);
   high_write_mask <<= high_start / new_bit_size;

   uint32_t write_mask = low_write_mask | high_write_mask;

   /* convert booleans */
   nir_ssa_def *low_val = low->intrin->src[low->info->value_src].ssa;
   nir_ssa_def *high_val = high->intrin->src[high->info->value_src].ssa;
   low_val = low_val->bit_size == 1 ? nir_b2i(b, low_val, 32) : low_val;
   high_val = high_val->bit_size == 1 ? nir_b2i(b, high_val, 32) : high_val;

   /* combine the data */
   nir_ssa_def *data_channels[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < new_num_components; i++) {
      bool set_low = low_write_mask & (1 << i);
      bool set_high = high_write_mask & (1 << i);

      if (set_low && (!set_high || low == second)) {
         unsigned offset = i * new_bit_size;
         data_channels[i] = nir_extract_bits(b, &low_val, 1, offset, 1, new_bit_size);
      } else if (set_high) {
         assert(!set_low || high == second);
         unsigned offset = i * new_bit_size - high_start;
         data_channels[i] = nir_extract_bits(b, &high_val, 1, offset, 1, new_bit_size);
      } else {
         data_channels[i] = nir_ssa_undef(b, 1, new_bit_size);
      }
   }
   nir_ssa_def *data = nir_vec(b, data_channels, new_num_components);

   /* update the intrinsic */
   nir_intrinsic_set_write_mask(second->intrin, write_mask);
   second->intrin->num_components = data->num_components;

   const struct intrinsic_info *info = second->info;
   assert(info->value_src >= 0);
   nir_instr_rewrite_src(second->instr, &second->intrin->src[info->value_src],
                         nir_src_for_ssa(data));

   /* update the offset */
   if (second != low && info->base_src >= 0)
      nir_instr_rewrite_src(second->instr, &second->intrin->src[info->base_src],
                            low->intrin->src[info->base_src]);

   /* update the deref */
   if (info->deref_src >= 0) {
      b->cursor = nir_before_instr(second->instr);
      second->deref = cast_deref(b, new_num_components, new_bit_size,
                                 nir_src_as_deref(low->intrin->src[info->deref_src]));
      nir_instr_rewrite_src(second->instr, &second->intrin->src[info->deref_src],
                            nir_src_for_ssa(&second->deref->dest.ssa));
   }

   /* update base/align */
   if (second != low && nir_intrinsic_has_base(second->intrin))
      nir_intrinsic_set_base(second->intrin, nir_intrinsic_base(low->intrin));

   second->key = low->key;
   second->offset = low->offset;
   second->best_align = get_best_align(low);

   update_align(second);

   /* "first" is now redundant */
   list_del(&first->head);
   nir_instr_remove(first->instr);
}
/* Returns true if it can prove that "a" and "b" point to different resources. */
static bool
resources_different(nir_ssa_def *a, nir_ssa_def *b)
{
   if (!a || !b)
      return false;

   if (a->parent_instr->type == nir_instr_type_load_const &&
       b->parent_instr->type == nir_instr_type_load_const) {
      return nir_src_as_uint(nir_src_for_ssa(a)) != nir_src_as_uint(nir_src_for_ssa(b));
   }

   if (a->parent_instr->type == nir_instr_type_intrinsic &&
       b->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *aintrin = nir_instr_as_intrinsic(a->parent_instr);
      nir_intrinsic_instr *bintrin = nir_instr_as_intrinsic(b->parent_instr);
      if (aintrin->intrinsic == nir_intrinsic_vulkan_resource_index &&
          bintrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
         return nir_intrinsic_desc_set(aintrin) != nir_intrinsic_desc_set(bintrin) ||
                nir_intrinsic_binding(aintrin) != nir_intrinsic_binding(bintrin) ||
                resources_different(aintrin->src[0].ssa, bintrin->src[0].ssa);
      }
   }

   return false;
}
static int64_t
compare_entries(struct entry *a, struct entry *b)
{
   if (!entry_key_equals(a->key, b->key))
      return INT64_MAX;
   return b->offset_signed - a->offset_signed;
}
static bool
may_alias(struct entry *a, struct entry *b)
{
   assert(mode_to_index(get_variable_mode(a)) ==
          mode_to_index(get_variable_mode(b)));

   /* if the resources/variables are definitively different and both have
    * ACCESS_RESTRICT, we can assume they do not alias. */
   bool res_different = a->key->var != b->key->var ||
                        resources_different(a->key->resource, b->key->resource);
   if (res_different && (a->access & ACCESS_RESTRICT) && (b->access & ACCESS_RESTRICT))
      return false;

   /* we can't compare offsets if the resources/variables might be different */
   if (a->key->var != b->key->var || a->key->resource != b->key->resource)
      return true;

   /* use adjacency information */
   /* TODO: we can look closer at the entry keys */
   int64_t diff = compare_entries(a, b);
   if (diff != INT64_MAX) {
      /* with atomics, intrin->num_components can be 0 */
      if (diff < 0)
         return llabs(diff) < MAX2(b->intrin->num_components, 1u) * (get_bit_size(b) / 8u);
      else
         return diff < MAX2(a->intrin->num_components, 1u) * (get_bit_size(a) / 8u);
   }

   /* TODO: we can use deref information */

   return true;
}
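/*
 * For example (illustrative only): two accesses with the same entry_key, one
 * a 32-bit vec4 at offset 0 and the other at offset 16, have diff == 16,
 * which is not smaller than 4 * 4 bytes, so may_alias() returns false.
 */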
static bool
check_for_aliasing(struct vectorize_ctx *ctx, struct entry *first, struct entry *second)
{
   nir_variable_mode mode = get_variable_mode(first);
   if (mode & (nir_var_uniform | nir_var_system_value |
               nir_var_mem_push_const | nir_var_mem_ubo))
      return false;

   unsigned mode_index = mode_to_index(mode);
   if (first->is_store) {
      /* find first entry that aliases "first" */
      list_for_each_entry_from(struct entry, next, first, &ctx->entries[mode_index], head) {
         if (next == first)
            continue;
         if (next == second)
            return false;
         if (may_alias(first, next))
            return true;
      }
   } else {
      /* find previous store that aliases this load */
      list_for_each_entry_from_rev(struct entry, prev, second, &ctx->entries[mode_index], head) {
         if (prev == second)
            continue;
         if (prev == first)
            break;
         if (prev->is_store && may_alias(second, prev))
            return true;
      }
   }

   return false;
}
static bool
check_for_robustness(struct vectorize_ctx *ctx, struct entry *low)
{
   nir_variable_mode mode = get_variable_mode(low);
   if (mode & ctx->robust_modes) {
      unsigned low_bit_size = get_bit_size(low);
      unsigned low_size = low->intrin->num_components * low_bit_size;

      /* don't attempt to vectorize accesses if the offset can overflow. */
      /* TODO: handle indirect accesses. */
      return low->offset_signed < 0 && low->offset_signed + low_size >= 0;
   }

   return false;
}
static bool
is_strided_vector(const struct glsl_type *type)
{
   if (glsl_type_is_vector(type)) {
      unsigned explicit_stride = glsl_get_explicit_stride(type);
      return explicit_stride != 0 && explicit_stride !=
             type_scalar_size_bytes(glsl_get_array_element(type));
   } else {
      return false;
   }
}
static bool
try_vectorize(nir_function_impl *impl, struct vectorize_ctx *ctx,
              struct entry *low, struct entry *high,
              struct entry *first, struct entry *second)
{
   if (!(get_variable_mode(first) & ctx->modes) ||
       !(get_variable_mode(second) & ctx->modes))
      return false;

   if (check_for_aliasing(ctx, first, second))
      return false;

   if (check_for_robustness(ctx, low))
      return false;

   /* we can only vectorize non-volatile loads/stores of the same type and with
    * the same access */
   if (first->info != second->info || first->access != second->access ||
       (first->access & ACCESS_VOLATILE) || first->info->is_atomic)
      return false;

   /* don't attempt to vectorize accesses of row-major matrix columns */
   if (first->deref) {
      const struct glsl_type *first_type = first->deref->type;
      const struct glsl_type *second_type = second->deref->type;
      if (is_strided_vector(first_type) || is_strided_vector(second_type))
         return false;
   }

   /* gather information */
   uint64_t diff = high->offset_signed - low->offset_signed;
   unsigned low_bit_size = get_bit_size(low);
   unsigned high_bit_size = get_bit_size(high);
   unsigned low_size = low->intrin->num_components * low_bit_size;
   unsigned high_size = high->intrin->num_components * high_bit_size;
   unsigned new_size = MAX2(diff * 8u + high_size, low_size);

   /* find a good bit size for the new load/store */
   unsigned new_bit_size = 0;
   if (new_bitsize_acceptable(ctx, low_bit_size, low, high, new_size)) {
      new_bit_size = low_bit_size;
   } else if (low_bit_size != high_bit_size &&
              new_bitsize_acceptable(ctx, high_bit_size, low, high, new_size)) {
      new_bit_size = high_bit_size;
   } else {
      new_bit_size = 64;
      for (; new_bit_size >= 8; new_bit_size /= 2) {
         /* don't repeat trying out bitsizes */
         if (new_bit_size == low_bit_size || new_bit_size == high_bit_size)
            continue;
         if (new_bitsize_acceptable(ctx, new_bit_size, low, high, new_size))
            break;
      }
      if (new_bit_size < 8)
         return false;
   }
   unsigned new_num_components = new_size / new_bit_size;

   /* vectorize the loads/stores */
   nir_builder b;
   nir_builder_init(&b, impl);

   if (first->is_store)
      vectorize_stores(&b, ctx, low, high, first, second,
                       new_bit_size, new_num_components, diff * 8u);
   else
      vectorize_loads(&b, ctx, low, high, first, second,
                      new_bit_size, new_num_components, diff * 8u);

   return true;
}
static bool
vectorize_entries(struct vectorize_ctx *ctx, nir_function_impl *impl, struct hash_table *ht)
{
   if (!ht)
      return false;

   bool progress = false;
   hash_table_foreach(ht, entry) {
      struct util_dynarray *arr = entry->data;

      qsort(util_dynarray_begin(arr),
            util_dynarray_num_elements(arr, struct entry *),
            sizeof(struct entry *), &sort_entries);

      unsigned i = 0;
      for (; i < util_dynarray_num_elements(arr, struct entry *) - 1; i++) {
         struct entry *low = *util_dynarray_element(arr, struct entry *, i);
         struct entry *high = *util_dynarray_element(arr, struct entry *, i + 1);

         uint64_t diff = high->offset_signed - low->offset_signed;
         if (diff > get_bit_size(low) / 8u * low->intrin->num_components) {
            progress |= update_align(low);
            continue;
         }

         struct entry *first = low->index < high->index ? low : high;
         struct entry *second = low->index < high->index ? high : low;

         if (try_vectorize(impl, ctx, low, high, first, second)) {
            *util_dynarray_element(arr, struct entry *, i) = NULL;
            *util_dynarray_element(arr, struct entry *, i + 1) = low->is_store ? second : first;
            progress = true;
         } else {
            progress |= update_align(low);
         }
      }

      struct entry *last = *util_dynarray_element(arr, struct entry *, i);
      progress |= update_align(last);
   }

   _mesa_hash_table_clear(ht, delete_entry_dynarray);

   return progress;
}
static bool
handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *impl, nir_instr *instr)
{
   nir_variable_mode modes = 0;
   bool acquire = true;
   bool release = true;
   if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier:
         modes = nir_var_mem_ssbo | nir_var_mem_shared | nir_var_mem_global;
         break;
      /* prevent speculative loads/stores */
      case nir_intrinsic_discard_if:
      case nir_intrinsic_discard:
         modes = nir_var_all;
         break;
      case nir_intrinsic_memory_barrier_buffer:
         modes = nir_var_mem_ssbo | nir_var_mem_global;
         break;
      case nir_intrinsic_memory_barrier_shared:
         modes = nir_var_mem_shared;
         break;
      case nir_intrinsic_scoped_barrier:
         if (nir_intrinsic_memory_scope(intrin) == NIR_SCOPE_NONE)
            break;

         modes = nir_intrinsic_memory_modes(intrin) & (nir_var_mem_ssbo |
                                                       nir_var_mem_shared |
                                                       nir_var_mem_global);
         acquire = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE;
         release = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE;
         switch (nir_intrinsic_memory_scope(intrin)) {
         case NIR_SCOPE_INVOCATION:
         case NIR_SCOPE_SUBGROUP:
            /* a barrier should never be required for correctness with these scopes */
            modes = 0;
            break;
         default:
            break;
         }
         break;
      default:
         return false;
      }
   } else if (instr->type == nir_instr_type_call) {
      modes = nir_var_all;
   } else {
      return false;
   }

   while (modes) {
      unsigned mode_index = u_bit_scan(&modes);
      if ((1 << mode_index) == nir_var_mem_global) {
         /* Global should be rolled in with SSBO */
         assert(list_is_empty(&ctx->entries[mode_index]));
         assert(ctx->loads[mode_index] == NULL);
         assert(ctx->stores[mode_index] == NULL);
         continue;
      }

      if (acquire)
         *progress |= vectorize_entries(ctx, impl, ctx->loads[mode_index]);
      if (release)
         *progress |= vectorize_entries(ctx, impl, ctx->stores[mode_index]);
   }

   return true;
}
static bool
process_block(nir_function_impl *impl, struct vectorize_ctx *ctx, nir_block *block)
{
   bool progress = false;

   for (unsigned i = 0; i < nir_num_variable_modes; i++) {
      list_inithead(&ctx->entries[i]);
      if (ctx->loads[i])
         _mesa_hash_table_clear(ctx->loads[i], delete_entry_dynarray);
      if (ctx->stores[i])
         _mesa_hash_table_clear(ctx->stores[i], delete_entry_dynarray);
   }

   /* create entries */
   unsigned next_index = 0;

   nir_foreach_instr_safe(instr, block) {
      if (handle_barrier(ctx, &progress, impl, instr))
         continue;

      /* gather information */
      if (instr->type != nir_instr_type_intrinsic)
         continue;
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      const struct intrinsic_info *info = get_info(intrin->intrinsic);
      if (!info)
         continue;

      nir_variable_mode mode = info->mode;
      if (!mode)
         mode = nir_src_as_deref(intrin->src[info->deref_src])->mode;
      if (!(mode & aliasing_modes(ctx->modes)))
         continue;
      unsigned mode_index = mode_to_index(mode);

      /* create entry */
      struct entry *entry = create_entry(ctx, info, intrin);
      entry->index = next_index++;

      list_addtail(&entry->head, &ctx->entries[mode_index]);

      /* add the entry to a hash table */

      struct hash_table *adj_ht = NULL;
      if (entry->is_store) {
         if (!ctx->stores[mode_index])
            ctx->stores[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
         adj_ht = ctx->stores[mode_index];
      } else {
         if (!ctx->loads[mode_index])
            ctx->loads[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
         adj_ht = ctx->loads[mode_index];
      }

      uint32_t key_hash = hash_entry_key(entry->key);
      struct hash_entry *adj_entry = _mesa_hash_table_search_pre_hashed(adj_ht, key_hash, entry->key);
      struct util_dynarray *arr;
      if (adj_entry && adj_entry->data) {
         arr = (struct util_dynarray *)adj_entry->data;
      } else {
         arr = ralloc(ctx, struct util_dynarray);
         util_dynarray_init(arr, arr);
         _mesa_hash_table_insert_pre_hashed(adj_ht, key_hash, entry->key, arr);
      }
      util_dynarray_append(arr, struct entry *, entry);
   }

   /* sort and combine entries */
   for (unsigned i = 0; i < nir_num_variable_modes; i++) {
      progress |= vectorize_entries(ctx, impl, ctx->loads[i]);
      progress |= vectorize_entries(ctx, impl, ctx->stores[i]);
   }

   return progress;
}
bool
nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
                             nir_should_vectorize_mem_func callback,
                             nir_variable_mode robust_modes)
{
   bool progress = false;

   struct vectorize_ctx *ctx = rzalloc(NULL, struct vectorize_ctx);
   ctx->modes = modes;
   ctx->callback = callback;
   ctx->robust_modes = robust_modes;

   nir_shader_index_vars(shader, modes);

   nir_foreach_function(function, shader) {
      if (function->impl) {
         if (modes & nir_var_function_temp)
            nir_function_impl_index_vars(function->impl);

         nir_foreach_block(block, function->impl)
            progress |= process_block(function->impl, ctx, block);

         nir_metadata_preserve(function->impl,
                               nir_metadata_block_index |
                               nir_metadata_dominance |
                               nir_metadata_live_ssa_defs);