814c5a552641edeb539dcb57f2660701b156c2a2
[mesa.git] / src / compiler / nir / nir_opt_load_store_vectorize.c
1 /*
2 * Copyright © 2019 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /**
25 * Although it's called a load/store "vectorization" pass, this also combines
26 * intersecting and identical loads/stores. It currently supports derefs, ubo,
27 * ssbo, push constant, shared and global loads/stores.
28 *
29 * This doesn't handle copy_deref intrinsics and assumes that
30 * nir_lower_alu_to_scalar() has been called and that the IR is free from ALU
31 * modifiers. It also assumes that derefs have explicitly laid out types.
32 *
33 * After vectorization, the backend may want to call nir_lower_alu_to_scalar()
34 * and nir_lower_pack(). This pass also creates cast instructions that take
35 * derefs as a source, which some parts of NIR may not be able to handle well.
36 *
37 * There are a few situations where this doesn't vectorize as well as it could:
38 * - It won't turn four consecutive vec3 loads into 3 vec4 loads.
39 * - It doesn't do global vectorization.
40 * Handling these cases probably wouldn't provide much benefit though.
41 *
42 * This probably doesn't handle big-endian GPUs correctly.
43 */
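/*
 * Illustrative driver-side usage (a sketch, not part of this pass): the
 * callback signature mirrors how ctx->callback is invoked from
 * new_bitsize_acceptable() below; "mem_vectorize_cb" and its 4-byte/128-bit
 * policy are made-up examples.
 *
 *    static bool
 *    mem_vectorize_cb(unsigned align, unsigned bit_size,
 *                     unsigned num_components, unsigned high_offset,
 *                     nir_intrinsic_instr *low, nir_intrinsic_instr *high)
 *    {
 *       return align % 4 == 0 && bit_size * num_components <= 128;
 *    }
 *
 *    nir_opt_load_store_vectorize(shader, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                                 mem_vectorize_cb, 0);
 *
 * With this in place, e.g. two 32-bit load_ssbo intrinsics reading offsets 0
 * and 4 of the same buffer are rewritten into a single vec2 32-bit load, and
 * the users of the second load read the new load's second channel.
 */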
44
45 #include "nir.h"
46 #include "nir_deref.h"
47 #include "nir_builder.h"
48 #include "nir_worklist.h"
49 #include "util/u_dynarray.h"
50
51 #include <stdlib.h>
52
53 struct intrinsic_info {
54 nir_variable_mode mode; /* 0 if the mode is obtained from the deref. */
55 nir_intrinsic_op op;
56 bool is_atomic;
57 /* Indices into nir_intrinsic_instr::src[] or -1 if not applicable. */
58 int resource_src; /* resource (e.g. from vulkan_resource_index) */
59 int base_src; /* offset which it loads/stores from */
60 int deref_src; /* deref which it loads/stores from */
61 int value_src; /* the data it is storing */
62 };
63
64 static const struct intrinsic_info *
65 get_info(nir_intrinsic_op op) {
66 switch (op) {
67 #define INFO(mode, op, atomic, res, base, deref, val) \
68 case nir_intrinsic_##op: {\
69 static const struct intrinsic_info op##_info = {mode, nir_intrinsic_##op, atomic, res, base, deref, val};\
70 return &op##_info;\
71 }
72 #define LOAD(mode, op, res, base, deref) INFO(mode, load_##op, false, res, base, deref, -1)
73 #define STORE(mode, op, res, base, deref, val) INFO(mode, store_##op, false, res, base, deref, val)
74 #define ATOMIC(mode, type, op, res, base, deref, val) INFO(mode, type##_atomic_##op, true, res, base, deref, val)
75 LOAD(nir_var_mem_push_const, push_constant, -1, 0, -1)
76 LOAD(nir_var_mem_ubo, ubo, 0, 1, -1)
77 LOAD(nir_var_mem_ssbo, ssbo, 0, 1, -1)
78 STORE(nir_var_mem_ssbo, ssbo, 1, 2, -1, 0)
79 LOAD(0, deref, -1, -1, 0)
80 STORE(0, deref, -1, -1, 0, 1)
81 LOAD(nir_var_mem_shared, shared, -1, 0, -1)
82 STORE(nir_var_mem_shared, shared, -1, 1, -1, 0)
83 LOAD(nir_var_mem_global, global, -1, 0, -1)
84 STORE(nir_var_mem_global, global, -1, 1, -1, 0)
85 ATOMIC(nir_var_mem_ssbo, ssbo, add, 0, 1, -1, 2)
86 ATOMIC(nir_var_mem_ssbo, ssbo, imin, 0, 1, -1, 2)
87 ATOMIC(nir_var_mem_ssbo, ssbo, umin, 0, 1, -1, 2)
88 ATOMIC(nir_var_mem_ssbo, ssbo, imax, 0, 1, -1, 2)
89 ATOMIC(nir_var_mem_ssbo, ssbo, umax, 0, 1, -1, 2)
90 ATOMIC(nir_var_mem_ssbo, ssbo, and, 0, 1, -1, 2)
91 ATOMIC(nir_var_mem_ssbo, ssbo, or, 0, 1, -1, 2)
92 ATOMIC(nir_var_mem_ssbo, ssbo, xor, 0, 1, -1, 2)
93 ATOMIC(nir_var_mem_ssbo, ssbo, exchange, 0, 1, -1, 2)
94 ATOMIC(nir_var_mem_ssbo, ssbo, comp_swap, 0, 1, -1, 2)
95 ATOMIC(nir_var_mem_ssbo, ssbo, fadd, 0, 1, -1, 2)
96 ATOMIC(nir_var_mem_ssbo, ssbo, fmin, 0, 1, -1, 2)
97 ATOMIC(nir_var_mem_ssbo, ssbo, fmax, 0, 1, -1, 2)
98 ATOMIC(nir_var_mem_ssbo, ssbo, fcomp_swap, 0, 1, -1, 2)
99 ATOMIC(0, deref, add, -1, -1, 0, 1)
100 ATOMIC(0, deref, imin, -1, -1, 0, 1)
101 ATOMIC(0, deref, umin, -1, -1, 0, 1)
102 ATOMIC(0, deref, imax, -1, -1, 0, 1)
103 ATOMIC(0, deref, umax, -1, -1, 0, 1)
104 ATOMIC(0, deref, and, -1, -1, 0, 1)
105 ATOMIC(0, deref, or, -1, -1, 0, 1)
106 ATOMIC(0, deref, xor, -1, -1, 0, 1)
107 ATOMIC(0, deref, exchange, -1, -1, 0, 1)
108 ATOMIC(0, deref, comp_swap, -1, -1, 0, 1)
109 ATOMIC(0, deref, fadd, -1, -1, 0, 1)
110 ATOMIC(0, deref, fmin, -1, -1, 0, 1)
111 ATOMIC(0, deref, fmax, -1, -1, 0, 1)
112 ATOMIC(0, deref, fcomp_swap, -1, -1, 0, 1)
113 ATOMIC(nir_var_mem_shared, shared, add, -1, 0, -1, 1)
114 ATOMIC(nir_var_mem_shared, shared, imin, -1, 0, -1, 1)
115 ATOMIC(nir_var_mem_shared, shared, umin, -1, 0, -1, 1)
116 ATOMIC(nir_var_mem_shared, shared, imax, -1, 0, -1, 1)
117 ATOMIC(nir_var_mem_shared, shared, umax, -1, 0, -1, 1)
118 ATOMIC(nir_var_mem_shared, shared, and, -1, 0, -1, 1)
119 ATOMIC(nir_var_mem_shared, shared, or, -1, 0, -1, 1)
120 ATOMIC(nir_var_mem_shared, shared, xor, -1, 0, -1, 1)
121 ATOMIC(nir_var_mem_shared, shared, exchange, -1, 0, -1, 1)
122 ATOMIC(nir_var_mem_shared, shared, comp_swap, -1, 0, -1, 1)
123 ATOMIC(nir_var_mem_shared, shared, fadd, -1, 0, -1, 1)
124 ATOMIC(nir_var_mem_shared, shared, fmin, -1, 0, -1, 1)
125 ATOMIC(nir_var_mem_shared, shared, fmax, -1, 0, -1, 1)
126 ATOMIC(nir_var_mem_shared, shared, fcomp_swap, -1, 0, -1, 1)
127 ATOMIC(nir_var_mem_global, global, add, -1, 0, -1, 1)
128 ATOMIC(nir_var_mem_global, global, imin, -1, 0, -1, 1)
129 ATOMIC(nir_var_mem_global, global, umin, -1, 0, -1, 1)
130 ATOMIC(nir_var_mem_global, global, imax, -1, 0, -1, 1)
131 ATOMIC(nir_var_mem_global, global, umax, -1, 0, -1, 1)
132 ATOMIC(nir_var_mem_global, global, and, -1, 0, -1, 1)
133 ATOMIC(nir_var_mem_global, global, or, -1, 0, -1, 1)
134 ATOMIC(nir_var_mem_global, global, xor, -1, 0, -1, 1)
135 ATOMIC(nir_var_mem_global, global, exchange, -1, 0, -1, 1)
136 ATOMIC(nir_var_mem_global, global, comp_swap, -1, 0, -1, 1)
137 ATOMIC(nir_var_mem_global, global, fadd, -1, 0, -1, 1)
138 ATOMIC(nir_var_mem_global, global, fmin, -1, 0, -1, 1)
139 ATOMIC(nir_var_mem_global, global, fmax, -1, 0, -1, 1)
140 ATOMIC(nir_var_mem_global, global, fcomp_swap, -1, 0, -1, 1)
141 default:
142 break;
143 #undef ATOMIC
144 #undef STORE
145 #undef LOAD
146 #undef INFO
147 }
148 return NULL;
149 }
150
151 /*
152 * Information used to compare memory operations.
153 * It canonically represents an offset as:
154 * `offset_defs[0]*offset_defs_mul[0] + offset_defs[1]*offset_defs_mul[1] + ...`
155 * "offset_defs" is sorted in ascenting order by the ssa definition's index.
156 * "resource" or "var" may be NULL.
157 */
158 struct entry_key {
159 nir_ssa_def *resource;
160 nir_variable *var;
161 unsigned offset_def_count;
162 nir_ssa_def **offset_defs;
163 uint64_t *offset_defs_mul;
164 };
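/* Worked example (illustrative): for a shared-memory deref like "arr[i + 2]"
 * where "arr" has an explicit array stride of 16 and "i" is a non-constant
 * SSA value, create_entry_key_from_deref() produces offset_defs = {i} and
 * offset_defs_mul = {16}, and folds the constant part (2 * 16 = 32) into
 * entry::offset. */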
165
166 /* Information on a single memory operation. */
167 struct entry {
168 struct list_head head;
169 unsigned index;
170
171 struct entry_key *key;
172 union {
173 uint64_t offset; /* sign-extended */
174 int64_t offset_signed;
175 };
176 uint32_t best_align;
177
178 nir_instr *instr;
179 nir_intrinsic_instr *intrin;
180 const struct intrinsic_info *info;
181 enum gl_access_qualifier access;
182 bool is_store;
183
184 nir_deref_instr *deref;
185 };
186
187 struct vectorize_ctx {
188 nir_variable_mode modes;
189 nir_should_vectorize_mem_func callback;
190 nir_variable_mode robust_modes;
191 struct list_head entries[nir_num_variable_modes];
192 struct hash_table *loads[nir_num_variable_modes];
193 struct hash_table *stores[nir_num_variable_modes];
194 };
195
196 static uint32_t hash_entry_key(const void *key_)
197 {
198 /* this is careful to not include pointers in the hash calculation so that
199 * the order of the hash table walk is deterministic */
200 struct entry_key *key = (struct entry_key*)key_;
201
202 uint32_t hash = _mesa_fnv32_1a_offset_bias;
203 if (key->resource)
204 hash = _mesa_fnv32_1a_accumulate(hash, key->resource->index);
205 if (key->var) {
206 hash = _mesa_fnv32_1a_accumulate(hash, key->var->index);
207 unsigned mode = key->var->data.mode;
208 hash = _mesa_fnv32_1a_accumulate(hash, mode);
209 }
210
211 for (unsigned i = 0; i < key->offset_def_count; i++)
212 hash = _mesa_fnv32_1a_accumulate(hash, key->offset_defs[i]->index);
213
214 hash = _mesa_fnv32_1a_accumulate_block(
215 hash, key->offset_defs_mul, key->offset_def_count * sizeof(uint64_t));
216
217 return hash;
218 }
219
220 static bool entry_key_equals(const void *a_, const void *b_)
221 {
222 struct entry_key *a = (struct entry_key*)a_;
223 struct entry_key *b = (struct entry_key*)b_;
224
225 if (a->var != b->var || a->resource != b->resource)
226 return false;
227
228 if (a->offset_def_count != b->offset_def_count)
229 return false;
230
231 size_t offset_def_size = a->offset_def_count * sizeof(nir_ssa_def *);
232 size_t offset_def_mul_size = a->offset_def_count * sizeof(uint64_t);
233 if (a->offset_def_count &&
234 (memcmp(a->offset_defs, b->offset_defs, offset_def_size) ||
235 memcmp(a->offset_defs_mul, b->offset_defs_mul, offset_def_mul_size)))
236 return false;
237
238 return true;
239 }
240
241 static void delete_entry_dynarray(struct hash_entry *entry)
242 {
243 struct util_dynarray *arr = (struct util_dynarray *)entry->data;
244 ralloc_free(arr);
245 }
246
247 static int sort_entries(const void *a_, const void *b_)
248 {
249 struct entry *a = *(struct entry*const*)a_;
250 struct entry *b = *(struct entry*const*)b_;
251
252 if (a->offset_signed > b->offset_signed)
253 return 1;
254 else if (a->offset_signed < b->offset_signed)
255 return -1;
256 else
257 return 0;
258 }
259
260 static unsigned
261 get_bit_size(struct entry *entry)
262 {
263 unsigned size = entry->is_store ?
264 entry->intrin->src[entry->info->value_src].ssa->bit_size :
265 entry->intrin->dest.ssa.bit_size;
266 return size == 1 ? 32u : size;
267 }
268
269 /* If "def" is from an alu instruction with the opcode "op" and one of it's
270 * sources is a constant, update "def" to be the non-constant source, fill "c"
271 * with the constant and return true. */
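/* For example (with a hypothetical SSA value "x"): if *def is "x * 6" and op
 * is nir_op_imul, this sets *c = 6, rewrites *def to "x" and returns true. */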
272 static bool
273 parse_alu(nir_ssa_def **def, nir_op op, uint64_t *c)
274 {
275 nir_ssa_scalar scalar;
276 scalar.def = *def;
277 scalar.comp = 0;
278
279 if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != op)
280 return false;
281
282 nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
283 nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
284 if (op != nir_op_ishl && nir_ssa_scalar_is_const(src0) && src1.comp == 0) {
285 *c = nir_ssa_scalar_as_uint(src0);
286 *def = src1.def;
287 } else if (nir_ssa_scalar_is_const(src1) && src0.comp == 0) {
288 *c = nir_ssa_scalar_as_uint(src1);
289 *def = src0.def;
290 } else {
291 return false;
292 }
293 return true;
294 }
295
296 /* Parses an offset expression such as "a * 16 + 4" and "(a * 16 + 4) * 64 + 32". */
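/* For instance (with a hypothetical non-constant SSA value "a"): "a * 16 + 4"
 * yields *base = a, *base_mul = 16, *offset = 4, and "(a * 16 + 4) * 64 + 32"
 * yields *base = a, *base_mul = 1024, *offset = 288. */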
297 static void
298 parse_offset(nir_ssa_def **base, uint64_t *base_mul, uint64_t *offset)
299 {
300 if ((*base)->parent_instr->type == nir_instr_type_load_const) {
301 *offset = nir_src_comp_as_uint(nir_src_for_ssa(*base), 0);
302 *base = NULL;
303 return;
304 }
305
306 uint64_t mul = 1;
307 uint64_t add = 0;
308 bool progress = false;
309 do {
310 uint64_t mul2 = 1, add2 = 0;
311
312 progress = parse_alu(base, nir_op_imul, &mul2);
313 mul *= mul2;
314
315 mul2 = 0;
316 progress |= parse_alu(base, nir_op_ishl, &mul2);
317 mul <<= mul2;
318
319 progress |= parse_alu(base, nir_op_iadd, &add2);
320 add += add2 * mul;
321 } while (progress);
322
323 *base_mul = mul;
324 *offset = add;
325 }
326
327 static unsigned
328 type_scalar_size_bytes(const struct glsl_type *type)
329 {
330 assert(glsl_type_is_vector_or_scalar(type) ||
331 glsl_type_is_matrix(type));
332 return glsl_type_is_boolean(type) ? 4u : glsl_get_bit_size(type) / 8u;
333 }
334
335 static int
336 get_array_stride(const struct glsl_type *type)
337 {
338 unsigned explicit_stride = glsl_get_explicit_stride(type);
339 if ((glsl_type_is_matrix(type) &&
340 glsl_matrix_type_is_row_major(type)) ||
341 (glsl_type_is_vector(type) && explicit_stride == 0))
342 return type_scalar_size_bytes(type);
343 return explicit_stride;
344 }
345
346 static uint64_t
347 mask_sign_extend(uint64_t val, unsigned bit_size)
348 {
349 return (int64_t)(val << (64 - bit_size)) >> (64 - bit_size);
350 }
351
352 static unsigned
353 add_to_entry_key(nir_ssa_def **offset_defs, uint64_t *offset_defs_mul,
354 unsigned offset_def_count, nir_ssa_def *def, uint64_t mul)
355 {
356 mul = mask_sign_extend(mul, def->bit_size);
357
358 for (unsigned i = 0; i <= offset_def_count; i++) {
359 if (i == offset_def_count || def->index > offset_defs[i]->index) {
360 /* insert before i */
361 memmove(offset_defs + i + 1, offset_defs + i,
362 (offset_def_count - i) * sizeof(nir_ssa_def *));
363 memmove(offset_defs_mul + i + 1, offset_defs_mul + i,
364 (offset_def_count - i) * sizeof(uint64_t));
365 offset_defs[i] = def;
366 offset_defs_mul[i] = mul;
367 return 1;
368 } else if (def->index == offset_defs[i]->index) {
369 /* merge with offset_def at i */
370 offset_defs_mul[i] += mul;
371 return 0;
372 }
373 }
374 unreachable("Unreachable.");
375 return 0;
376 }
377
378 static struct entry_key *
379 create_entry_key_from_deref(void *mem_ctx,
380 struct vectorize_ctx *ctx,
381 nir_deref_path *path,
382 uint64_t *offset_base)
383 {
384 unsigned path_len = 0;
385 while (path->path[path_len])
386 path_len++;
387
388 nir_ssa_def *offset_defs_stack[32];
389 uint64_t offset_defs_mul_stack[32];
390 nir_ssa_def **offset_defs = offset_defs_stack;
391 uint64_t *offset_defs_mul = offset_defs_mul_stack;
392 if (path_len > 32) {
393 offset_defs = malloc(path_len * sizeof(nir_ssa_def *));
394 offset_defs_mul = malloc(path_len * sizeof(uint64_t));
395 }
396 unsigned offset_def_count = 0;
397
398 struct entry_key* key = ralloc(mem_ctx, struct entry_key);
399 key->resource = NULL;
400 key->var = NULL;
401 *offset_base = 0;
402
403 for (unsigned i = 0; i < path_len; i++) {
404 nir_deref_instr *parent = i ? path->path[i - 1] : NULL;
405 nir_deref_instr *deref = path->path[i];
406
407 switch (deref->deref_type) {
408 case nir_deref_type_var: {
409 assert(!parent);
410 key->var = deref->var;
411 break;
412 }
413 case nir_deref_type_array:
414 case nir_deref_type_ptr_as_array: {
415 assert(parent);
416 nir_ssa_def *index = deref->arr.index.ssa;
417 uint32_t stride;
418 if (deref->deref_type == nir_deref_type_ptr_as_array)
419 stride = nir_deref_instr_ptr_as_array_stride(deref);
420 else
421 stride = get_array_stride(parent->type);
422
423 nir_ssa_def *base = index;
424 uint64_t offset = 0, base_mul = 1;
425 parse_offset(&base, &base_mul, &offset);
426 offset = mask_sign_extend(offset, index->bit_size);
427
428 *offset_base += offset * stride;
429 if (base) {
430 offset_def_count += add_to_entry_key(offset_defs, offset_defs_mul,
431 offset_def_count,
432 base, base_mul * stride);
433 }
434 break;
435 }
436 case nir_deref_type_struct: {
437 assert(parent);
438 int offset = glsl_get_struct_field_offset(parent->type, deref->strct.index);
439 *offset_base += offset;
440 break;
441 }
442 case nir_deref_type_cast: {
443 if (!parent)
444 key->resource = deref->parent.ssa;
445 break;
446 }
447 default:
448 unreachable("Unhandled deref type");
449 }
450 }
451
452 key->offset_def_count = offset_def_count;
453 key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, offset_def_count);
454 key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, offset_def_count);
455 memcpy(key->offset_defs, offset_defs, offset_def_count * sizeof(nir_ssa_def *));
456 memcpy(key->offset_defs_mul, offset_defs_mul, offset_def_count * sizeof(uint64_t));
457
458 if (offset_defs != offset_defs_stack)
459 free(offset_defs);
460 if (offset_defs_mul != offset_defs_mul_stack)
461 free(offset_defs_mul);
462
463 return key;
464 }
465
466 static unsigned
467 parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
468 nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
469 {
470 uint64_t new_mul;
471 uint64_t new_offset;
472 parse_offset(&base, &new_mul, &new_offset);
473 *offset += new_offset * base_mul;
474
475 if (!base)
476 return 0;
477
478 base_mul *= new_mul;
479
480 assert(left >= 1);
481
482 if (left >= 2) {
483 nir_ssa_scalar scalar;
484 scalar.def = base;
485 scalar.comp = 0;
486 if (nir_ssa_scalar_is_alu(scalar) && nir_ssa_scalar_alu_op(scalar) == nir_op_iadd) {
487 nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
488 nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
489 if (src0.comp == 0 && src1.comp == 0) {
490 unsigned amount = parse_entry_key_from_offset(key, size, left - 1, src0.def, base_mul, offset);
491 amount += parse_entry_key_from_offset(key, size + amount, left - amount, src1.def, base_mul, offset);
492 return amount;
493 }
494 }
495 }
496
497 return add_to_entry_key(key->offset_defs, key->offset_defs_mul, size, base, base_mul);
498 }
499
500 static struct entry_key *
501 create_entry_key_from_offset(void *mem_ctx, nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
502 {
503 struct entry_key *key = ralloc(mem_ctx, struct entry_key);
504 key->resource = NULL;
505 key->var = NULL;
506 if (base) {
507 nir_ssa_def *offset_defs[32];
508 uint64_t offset_defs_mul[32];
509 key->offset_defs = offset_defs;
510 key->offset_defs_mul = offset_defs_mul;
511
512 key->offset_def_count = parse_entry_key_from_offset(key, 0, 32, base, base_mul, offset);
513
514 key->offset_defs = ralloc_array(mem_ctx, nir_ssa_def *, key->offset_def_count);
515 key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, key->offset_def_count);
516 memcpy(key->offset_defs, offset_defs, key->offset_def_count * sizeof(nir_ssa_def *));
517 memcpy(key->offset_defs_mul, offset_defs_mul, key->offset_def_count * sizeof(uint64_t));
518 } else {
519 key->offset_def_count = 0;
520 key->offset_defs = NULL;
521 key->offset_defs_mul = NULL;
522 }
523 return key;
524 }
525
526 static nir_variable_mode
527 get_variable_mode(struct entry *entry)
528 {
529 if (entry->info->mode)
530 return entry->info->mode;
531 assert(entry->deref);
532 return entry->deref->mode;
533 }
534
535 static unsigned
536 mode_to_index(nir_variable_mode mode)
537 {
538 assert(util_bitcount(mode) == 1);
539
540 /* Globals and SSBOs should be tracked together */
541 if (mode == nir_var_mem_global)
542 mode = nir_var_mem_ssbo;
543
544 return ffs(mode) - 1;
545 }
546
547 static nir_variable_mode
548 aliasing_modes(nir_variable_mode modes)
549 {
550 /* Global and SSBO can alias */
551 if (modes & (nir_var_mem_ssbo | nir_var_mem_global))
552 modes |= nir_var_mem_ssbo | nir_var_mem_global;
553 return modes;
554 }
555
556 static struct entry *
557 create_entry(struct vectorize_ctx *ctx,
558 const struct intrinsic_info *info,
559 nir_intrinsic_instr *intrin)
560 {
561 struct entry *entry = rzalloc(ctx, struct entry);
562 entry->intrin = intrin;
563 entry->instr = &intrin->instr;
564 entry->info = info;
565 entry->best_align = UINT32_MAX;
566 entry->is_store = entry->info->value_src >= 0;
567
568 if (entry->info->deref_src >= 0) {
569 entry->deref = nir_src_as_deref(intrin->src[entry->info->deref_src]);
570 nir_deref_path path;
571 nir_deref_path_init(&path, entry->deref, NULL);
572 entry->key = create_entry_key_from_deref(entry, ctx, &path, &entry->offset);
573 nir_deref_path_finish(&path);
574 } else {
575 nir_ssa_def *base = entry->info->base_src >= 0 ?
576 intrin->src[entry->info->base_src].ssa : NULL;
577 uint64_t offset = 0;
578 if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_BASE])
579 offset += nir_intrinsic_base(intrin);
580 entry->key = create_entry_key_from_offset(entry, base, 1, &offset);
581 entry->offset = offset;
582
583 if (base)
584 entry->offset = mask_sign_extend(entry->offset, base->bit_size);
585 }
586
587 if (entry->info->resource_src >= 0)
588 entry->key->resource = intrin->src[entry->info->resource_src].ssa;
589
590 if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS])
591 entry->access = nir_intrinsic_access(intrin);
592 else if (entry->key->var)
593 entry->access = entry->key->var->data.access;
594
595 uint32_t restrict_modes = nir_var_shader_in | nir_var_shader_out;
596 restrict_modes |= nir_var_shader_temp | nir_var_function_temp;
597 restrict_modes |= nir_var_uniform | nir_var_mem_push_const;
598 restrict_modes |= nir_var_system_value | nir_var_mem_shared;
599 if (get_variable_mode(entry) & restrict_modes)
600 entry->access |= ACCESS_RESTRICT;
601
602 return entry;
603 }
604
605 static nir_deref_instr *
606 cast_deref(nir_builder *b, unsigned num_components, unsigned bit_size, nir_deref_instr *deref)
607 {
608 if (glsl_get_components(deref->type) == num_components &&
609 type_scalar_size_bytes(deref->type)*8u == bit_size)
610 return deref;
611
612 enum glsl_base_type types[] = {
613 GLSL_TYPE_UINT8, GLSL_TYPE_UINT16, GLSL_TYPE_UINT, GLSL_TYPE_UINT64};
614 enum glsl_base_type base = types[ffs(bit_size / 8u) - 1u];
615 const struct glsl_type *type = glsl_vector_type(base, num_components);
616
617 if (deref->type == type)
618 return deref;
619
620 return nir_build_deref_cast(b, &deref->dest.ssa, deref->mode, type, 0);
621 }
622
623 /* Return true if the write mask "write_mask" of a store with "old_bit_size"
624 * bits per element can be represented as a write mask for a store with
625 * "new_bit_size" bits per element. */
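/* For example, when widening a 32-bit store to 64 bits: write mask 0x3
 * (bits 0..63) is representable, but 0x2 (bits 32..63) is not because it
 * starts in the middle of a 64-bit element. */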
626 static bool
627 writemask_representable(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
628 {
629 while (write_mask) {
630 int start, count;
631 u_bit_scan_consecutive_range(&write_mask, &start, &count);
632 start *= old_bit_size;
633 count *= old_bit_size;
634 if (start % new_bit_size != 0)
635 return false;
636 if (count % new_bit_size != 0)
637 return false;
638 }
639 return true;
640 }
641
642 static uint64_t
643 gcd(uint64_t a, uint64_t b)
644 {
645 while (b) {
646 uint64_t old_b = b;
647 b = a % b;
648 a = old_b;
649 }
650 return a;
651 }
652
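/* get_best_align() below computes a conservative power-of-two alignment from
 * the entry's constant offset and offset multipliers. Example (illustrative,
 * ignoring any ALIGN_MUL index already on the intrinsic): a constant offset
 * of 12 plus one offset def with multiplier 8 gives gcd(12, 8) = 4; the final
 * gcd with 1u << 30 keeps that as 4. */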
653 static uint32_t
654 get_best_align(struct entry *entry)
655 {
656 if (entry->best_align != UINT32_MAX)
657 return entry->best_align;
658
659 uint64_t best_align = entry->offset;
660 for (unsigned i = 0; i < entry->key->offset_def_count; i++) {
661 if (!best_align)
662 best_align = entry->key->offset_defs_mul[i];
663 else if (entry->key->offset_defs_mul[i])
664 best_align = gcd(best_align, entry->key->offset_defs_mul[i]);
665 }
666
667 if (nir_intrinsic_infos[entry->intrin->intrinsic].index_map[NIR_INTRINSIC_ALIGN_MUL])
668 best_align = MAX2(best_align, nir_intrinsic_align(entry->intrin));
669
670 /* ensure the result is a power of two that fits in an int32_t */
671 entry->best_align = gcd(best_align, 1u << 30);
672
673 return entry->best_align;
674 }
675
676 /* Return true if "new_bit_size" is a usable bit size for a vectorized load/store
677 * of "low" and "high". */
678 static bool
679 new_bitsize_acceptable(struct vectorize_ctx *ctx, unsigned new_bit_size,
680 struct entry *low, struct entry *high, unsigned size)
681 {
682 if (size % new_bit_size != 0)
683 return false;
684
685 unsigned new_num_components = size / new_bit_size;
686 if (!nir_num_components_valid(new_num_components))
687 return false;
688
689 unsigned high_offset = high->offset_signed - low->offset_signed;
690
691 /* check nir_extract_bits limitations */
692 unsigned common_bit_size = MIN2(get_bit_size(low), get_bit_size(high));
693 common_bit_size = MIN2(common_bit_size, new_bit_size);
694 if (high_offset > 0)
695 common_bit_size = MIN2(common_bit_size, (1u << (ffs(high_offset * 8) - 1)));
696 if (new_bit_size / common_bit_size > NIR_MAX_VEC_COMPONENTS)
697 return false;
698
699 if (!ctx->callback(get_best_align(low), new_bit_size, new_num_components,
700 high_offset, low->intrin, high->intrin))
701 return false;
702
703 if (low->is_store) {
704 unsigned low_size = low->intrin->num_components * get_bit_size(low);
705 unsigned high_size = high->intrin->num_components * get_bit_size(high);
706
707 if (low_size % new_bit_size != 0)
708 return false;
709 if (high_size % new_bit_size != 0)
710 return false;
711
712 unsigned write_mask = nir_intrinsic_write_mask(low->intrin);
713 if (!writemask_representable(write_mask, low_size, new_bit_size))
714 return false;
715
716 write_mask = nir_intrinsic_write_mask(high->intrin);
717 if (!writemask_representable(write_mask, high_size, new_bit_size))
718 return false;
719 }
720
721 return true;
722 }
723
724 /* Updates a write mask, "write_mask", so that it can be used with a
725 * "new_bit_size"-bit store instead of a "old_bit_size"-bit store. */
726 static uint32_t
727 update_writemask(unsigned write_mask, unsigned old_bit_size, unsigned new_bit_size)
728 {
729 uint32_t res = 0;
730 while (write_mask) {
731 int start, count;
732 u_bit_scan_consecutive_range(&write_mask, &start, &count);
733 start = start * old_bit_size / new_bit_size;
734 count = count * old_bit_size / new_bit_size;
735 res |= ((1 << count) - 1) << start;
736 }
737 return res;
738 }
739
740 static nir_deref_instr *subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
741 {
742 /* avoid adding another deref to the path */
743 if (deref->deref_type == nir_deref_type_ptr_as_array &&
744 nir_src_is_const(deref->arr.index) &&
745 offset % nir_deref_instr_ptr_as_array_stride(deref) == 0) {
746 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
747 nir_ssa_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
748 deref->dest.ssa.bit_size);
749 return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
750 }
751
752 if (deref->deref_type == nir_deref_type_array &&
753 nir_src_is_const(deref->arr.index)) {
754 nir_deref_instr *parent = nir_deref_instr_parent(deref);
755 unsigned stride = glsl_get_explicit_stride(parent->type);
756 if (offset % stride == 0)
757 return nir_build_deref_array_imm(
758 b, parent, nir_src_as_int(deref->arr.index) - offset / stride);
759 }
760
761
762 deref = nir_build_deref_cast(b, &deref->dest.ssa, deref->mode,
763 glsl_scalar_type(GLSL_TYPE_UINT8), 1);
764 return nir_build_deref_ptr_as_array(
765 b, deref, nir_imm_intN_t(b, -offset, deref->dest.ssa.bit_size));
766 }
767
768 static bool update_align(struct entry *entry)
769 {
770 bool has_align_index =
771 nir_intrinsic_infos[entry->intrin->intrinsic].index_map[NIR_INTRINSIC_ALIGN_MUL];
772 if (has_align_index) {
773 unsigned align = get_best_align(entry);
774 if (align != nir_intrinsic_align(entry->intrin)) {
775 nir_intrinsic_set_align(entry->intrin, align, 0);
776 return true;
777 }
778 }
779 return false;
780 }
781
782 static void
783 vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
784 struct entry *low, struct entry *high,
785 struct entry *first, struct entry *second,
786 unsigned new_bit_size, unsigned new_num_components,
787 unsigned high_start)
788 {
789 unsigned low_bit_size = get_bit_size(low);
790 unsigned high_bit_size = get_bit_size(high);
791 bool low_bool = low->intrin->dest.ssa.bit_size == 1;
792 bool high_bool = high->intrin->dest.ssa.bit_size == 1;
793 nir_ssa_def *data = &first->intrin->dest.ssa;
794
795 b->cursor = nir_after_instr(first->instr);
796
797 /* update the load's destination size and extract data for each of the original loads */
798 data->num_components = new_num_components;
799 data->bit_size = new_bit_size;
800
801 nir_ssa_def *low_def = nir_extract_bits(
802 b, &data, 1, 0, low->intrin->num_components, low_bit_size);
803 nir_ssa_def *high_def = nir_extract_bits(
804 b, &data, 1, high_start, high->intrin->num_components, high_bit_size);
805
806 /* convert booleans */
807 low_def = low_bool ? nir_i2b(b, low_def) : nir_mov(b, low_def);
808 high_def = high_bool ? nir_i2b(b, high_def) : nir_mov(b, high_def);
809
810 /* update uses */
811 if (first == low) {
812 nir_ssa_def_rewrite_uses_after(&low->intrin->dest.ssa, nir_src_for_ssa(low_def),
813 high_def->parent_instr);
814 nir_ssa_def_rewrite_uses(&high->intrin->dest.ssa, nir_src_for_ssa(high_def));
815 } else {
816 nir_ssa_def_rewrite_uses(&low->intrin->dest.ssa, nir_src_for_ssa(low_def));
817 nir_ssa_def_rewrite_uses_after(&high->intrin->dest.ssa, nir_src_for_ssa(high_def),
818 high_def->parent_instr);
819 }
820
821 /* update the intrinsic */
822 first->intrin->num_components = new_num_components;
823
824 const struct intrinsic_info *info = first->info;
825
826 /* update the offset */
827 if (first != low && info->base_src >= 0) {
828 /* let nir_opt_algebraic() remove this addition. Subtracting, say, 16 from an
829 * expression like "(i + 1) * 16" isn't a problem because nir_opt_algebraic()
830 * turns it into "i * 16 + 16", so the constants fold away. */
831 b->cursor = nir_before_instr(first->instr);
832
833 nir_ssa_def *new_base = first->intrin->src[info->base_src].ssa;
834 new_base = nir_iadd_imm(b, new_base, -(int)(high_start / 8u));
835
836 nir_instr_rewrite_src(first->instr, &first->intrin->src[info->base_src],
837 nir_src_for_ssa(new_base));
838 }
839
840 /* update the deref */
841 if (info->deref_src >= 0) {
842 b->cursor = nir_before_instr(first->instr);
843
844 nir_deref_instr *deref = nir_src_as_deref(first->intrin->src[info->deref_src]);
845 if (first != low && high_start != 0)
846 deref = subtract_deref(b, deref, high_start / 8u);
847 first->deref = cast_deref(b, new_num_components, new_bit_size, deref);
848
849 nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
850 nir_src_for_ssa(&first->deref->dest.ssa));
851 }
852
853 /* update base/align */
854 bool has_base_index =
855 nir_intrinsic_infos[first->intrin->intrinsic].index_map[NIR_INTRINSIC_BASE];
856
857 if (first != low && has_base_index)
858 nir_intrinsic_set_base(first->intrin, nir_intrinsic_base(low->intrin));
859
860 first->key = low->key;
861 first->offset = low->offset;
862 first->best_align = get_best_align(low);
863
864 update_align(first);
865
866 nir_instr_remove(second->instr);
867 }
868
869 static void
870 vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
871 struct entry *low, struct entry *high,
872 struct entry *first, struct entry *second,
873 unsigned new_bit_size, unsigned new_num_components,
874 unsigned high_start)
875 {
876 ASSERTED unsigned low_size = low->intrin->num_components * get_bit_size(low);
877 assert(low_size % new_bit_size == 0);
878
879 b->cursor = nir_before_instr(second->instr);
880
881 /* get new writemasks */
882 uint32_t low_write_mask = nir_intrinsic_write_mask(low->intrin);
883 uint32_t high_write_mask = nir_intrinsic_write_mask(high->intrin);
884 low_write_mask = update_writemask(low_write_mask, get_bit_size(low), new_bit_size);
885 high_write_mask = update_writemask(high_write_mask, get_bit_size(high), new_bit_size);
886 high_write_mask <<= high_start / new_bit_size;
887
888 uint32_t write_mask = low_write_mask | high_write_mask;
889
890 /* convert booleans */
891 nir_ssa_def *low_val = low->intrin->src[low->info->value_src].ssa;
892 nir_ssa_def *high_val = high->intrin->src[high->info->value_src].ssa;
893 low_val = low_val->bit_size == 1 ? nir_b2i(b, low_val, 32) : low_val;
894 high_val = high_val->bit_size == 1 ? nir_b2i(b, high_val, 32) : high_val;
895
896 /* combine the data */
897 nir_ssa_def *data_channels[NIR_MAX_VEC_COMPONENTS];
898 for (unsigned i = 0; i < new_num_components; i++) {
899 bool set_low = low_write_mask & (1 << i);
900 bool set_high = high_write_mask & (1 << i);
901
902 if (set_low && (!set_high || low == second)) {
903 unsigned offset = i * new_bit_size;
904 data_channels[i] = nir_extract_bits(b, &low_val, 1, offset, 1, new_bit_size);
905 } else if (set_high) {
906 assert(!set_low || high == second);
907 unsigned offset = i * new_bit_size - high_start;
908 data_channels[i] = nir_extract_bits(b, &high_val, 1, offset, 1, new_bit_size);
909 } else {
910 data_channels[i] = nir_ssa_undef(b, 1, new_bit_size);
911 }
912 }
913 nir_ssa_def *data = nir_vec(b, data_channels, new_num_components);
914
915 /* update the intrinsic */
916 nir_intrinsic_set_write_mask(second->intrin, write_mask);
917 second->intrin->num_components = data->num_components;
918
919 const struct intrinsic_info *info = second->info;
920 assert(info->value_src >= 0);
921 nir_instr_rewrite_src(second->instr, &second->intrin->src[info->value_src],
922 nir_src_for_ssa(data));
923
924 /* update the offset */
925 if (second != low && info->base_src >= 0)
926 nir_instr_rewrite_src(second->instr, &second->intrin->src[info->base_src],
927 low->intrin->src[info->base_src]);
928
929 /* update the deref */
930 if (info->deref_src >= 0) {
931 b->cursor = nir_before_instr(second->instr);
932 second->deref = cast_deref(b, new_num_components, new_bit_size,
933 nir_src_as_deref(low->intrin->src[info->deref_src]));
934 nir_instr_rewrite_src(second->instr, &second->intrin->src[info->deref_src],
935 nir_src_for_ssa(&second->deref->dest.ssa));
936 }
937
938 /* update base/align */
939 bool has_base_index =
940 nir_intrinsic_infos[second->intrin->intrinsic].index_map[NIR_INTRINSIC_BASE];
941
942 if (second != low && has_base_index)
943 nir_intrinsic_set_base(second->intrin, nir_intrinsic_base(low->intrin));
944
945 second->key = low->key;
946 second->offset = low->offset;
947 second->best_align = get_best_align(low);
948
949 update_align(second);
950
951 list_del(&first->head);
952 nir_instr_remove(first->instr);
953 }
954
955 /* Returns true if it can prove that "a" and "b" point to different resources. */
956 static bool
957 resources_different(nir_ssa_def *a, nir_ssa_def *b)
958 {
959 if (!a || !b)
960 return false;
961
962 if (a->parent_instr->type == nir_instr_type_load_const &&
963 b->parent_instr->type == nir_instr_type_load_const) {
964 return nir_src_as_uint(nir_src_for_ssa(a)) != nir_src_as_uint(nir_src_for_ssa(b));
965 }
966
967 if (a->parent_instr->type == nir_instr_type_intrinsic &&
968 b->parent_instr->type == nir_instr_type_intrinsic) {
969 nir_intrinsic_instr *aintrin = nir_instr_as_intrinsic(a->parent_instr);
970 nir_intrinsic_instr *bintrin = nir_instr_as_intrinsic(b->parent_instr);
971 if (aintrin->intrinsic == nir_intrinsic_vulkan_resource_index &&
972 bintrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
973 return nir_intrinsic_desc_set(aintrin) != nir_intrinsic_desc_set(bintrin) ||
974 nir_intrinsic_binding(aintrin) != nir_intrinsic_binding(bintrin) ||
975 resources_different(aintrin->src[0].ssa, bintrin->src[0].ssa);
976 }
977 }
978
979 return false;
980 }
981
982 static int64_t
983 compare_entries(struct entry *a, struct entry *b)
984 {
985 if (!entry_key_equals(a->key, b->key))
986 return INT64_MAX;
987 return b->offset_signed - a->offset_signed;
988 }
989
990 static bool
991 may_alias(struct entry *a, struct entry *b)
992 {
993 assert(mode_to_index(get_variable_mode(a)) ==
994 mode_to_index(get_variable_mode(b)));
995
996 /* if the resources/variables are definitively different and both have
997 * ACCESS_RESTRICT, we can assume they do not alias. */
998 bool res_different = a->key->var != b->key->var ||
999 resources_different(a->key->resource, b->key->resource);
1000 if (res_different && (a->access & ACCESS_RESTRICT) && (b->access & ACCESS_RESTRICT))
1001 return false;
1002
1003 /* we can't compare offsets if the resources/variables might be different */
1004 if (a->key->var != b->key->var || a->key->resource != b->key->resource)
1005 return true;
1006
1007 /* use adjacency information */
1008 /* TODO: we can look closer at the entry keys */
1009 int64_t diff = compare_entries(a, b);
1010 if (diff != INT64_MAX) {
1011 /* with atomics, intrin->num_components can be 0 */
1012 if (diff < 0)
1013 return llabs(diff) < MAX2(b->intrin->num_components, 1u) * (get_bit_size(b) / 8u);
1014 else
1015 return diff < MAX2(a->intrin->num_components, 1u) * (get_bit_size(a) / 8u);
1016 }
1017
1018 /* TODO: we can use deref information */
1019
1020 return true;
1021 }
1022
1023 static bool
1024 check_for_aliasing(struct vectorize_ctx *ctx, struct entry *first, struct entry *second)
1025 {
1026 nir_variable_mode mode = get_variable_mode(first);
1027 if (mode & (nir_var_uniform | nir_var_system_value |
1028 nir_var_mem_push_const | nir_var_mem_ubo))
1029 return false;
1030
1031 unsigned mode_index = mode_to_index(mode);
1032 if (first->is_store) {
1033 /* find first entry that aliases "first" */
1034 list_for_each_entry_from(struct entry, next, first, &ctx->entries[mode_index], head) {
1035 if (next == first)
1036 continue;
1037 if (next == second)
1038 return false;
1039 if (may_alias(first, next))
1040 return true;
1041 }
1042 } else {
1043 /* find previous store that aliases this load */
1044 list_for_each_entry_from_rev(struct entry, prev, second, &ctx->entries[mode_index], head) {
1045 if (prev == second)
1046 continue;
1047 if (prev == first)
1048 return false;
1049 if (prev->is_store && may_alias(second, prev))
1050 return true;
1051 }
1052 }
1053
1054 return false;
1055 }
1056
1057 static bool
1058 check_for_robustness(struct vectorize_ctx *ctx, struct entry *low)
1059 {
1060 nir_variable_mode mode = get_variable_mode(low);
1061 if (mode & ctx->robust_modes) {
1062 unsigned low_bit_size = get_bit_size(low);
1063 unsigned low_size = low->intrin->num_components * low_bit_size;
1064
1065 /* don't attempt to vectorize accesses if the offset can overflow. */
1066 /* TODO: handle indirect accesses. */
1067 return low->offset_signed < 0 && low->offset_signed + low_size >= 0;
1068 }
1069
1070 return false;
1071 }
1072
1073 static bool
1074 is_strided_vector(const struct glsl_type *type)
1075 {
1076 if (glsl_type_is_vector(type)) {
1077 unsigned explicit_stride = glsl_get_explicit_stride(type);
1078 return explicit_stride != 0 && explicit_stride !=
1079 type_scalar_size_bytes(glsl_get_array_element(type));
1080 } else {
1081 return false;
1082 }
1083 }
1084
1085 static bool
1086 try_vectorize(nir_function_impl *impl, struct vectorize_ctx *ctx,
1087 struct entry *low, struct entry *high,
1088 struct entry *first, struct entry *second)
1089 {
1090 if (!(get_variable_mode(first) & ctx->modes) ||
1091 !(get_variable_mode(second) & ctx->modes))
1092 return false;
1093
1094 if (check_for_aliasing(ctx, first, second))
1095 return false;
1096
1097 if (check_for_robustness(ctx, low))
1098 return false;
1099
1100 /* we can only vectorize non-volatile loads/stores of the same type and with
1101 * the same access */
1102 if (first->info != second->info || first->access != second->access ||
1103 (first->access & ACCESS_VOLATILE) || first->info->is_atomic)
1104 return false;
1105
1106 /* don't attempt to vectorize accesses of row-major matrix columns */
1107 if (first->deref) {
1108 const struct glsl_type *first_type = first->deref->type;
1109 const struct glsl_type *second_type = second->deref->type;
1110 if (is_strided_vector(first_type) || is_strided_vector(second_type))
1111 return false;
1112 }
1113
1114 /* gather information */
1115 uint64_t diff = high->offset_signed - low->offset_signed;
1116 unsigned low_bit_size = get_bit_size(low);
1117 unsigned high_bit_size = get_bit_size(high);
1118 unsigned low_size = low->intrin->num_components * low_bit_size;
1119 unsigned high_size = high->intrin->num_components * high_bit_size;
1120 unsigned new_size = MAX2(diff * 8u + high_size, low_size);
1121
1122 /* find a good bit size for the new load/store */
1123 unsigned new_bit_size = 0;
1124 if (new_bitsize_acceptable(ctx, low_bit_size, low, high, new_size)) {
1125 new_bit_size = low_bit_size;
1126 } else if (low_bit_size != high_bit_size &&
1127 new_bitsize_acceptable(ctx, high_bit_size, low, high, new_size)) {
1128 new_bit_size = high_bit_size;
1129 } else {
1130 new_bit_size = 64;
1131 for (; new_bit_size >= 8; new_bit_size /= 2) {
1132 /* don't re-try bit sizes we've already tested above */
1133 if (new_bit_size == low_bit_size || new_bit_size == high_bit_size)
1134 continue;
1135 if (new_bitsize_acceptable(ctx, new_bit_size, low, high, new_size))
1136 break;
1137 }
1138 if (new_bit_size < 8)
1139 return false;
1140 }
1141 unsigned new_num_components = new_size / new_bit_size;
1142
1143 /* vectorize the loads/stores */
1144 nir_builder b;
1145 nir_builder_init(&b, impl);
1146
1147 if (first->is_store)
1148 vectorize_stores(&b, ctx, low, high, first, second,
1149 new_bit_size, new_num_components, diff * 8u);
1150 else
1151 vectorize_loads(&b, ctx, low, high, first, second,
1152 new_bit_size, new_num_components, diff * 8u);
1153
1154 return true;
1155 }
1156
1157 static bool
1158 vectorize_entries(struct vectorize_ctx *ctx, nir_function_impl *impl, struct hash_table *ht)
1159 {
1160 if (!ht)
1161 return false;
1162
1163 bool progress = false;
1164 hash_table_foreach(ht, entry) {
1165 struct util_dynarray *arr = entry->data;
1166 if (!arr->size)
1167 continue;
1168
1169 qsort(util_dynarray_begin(arr),
1170 util_dynarray_num_elements(arr, struct entry *),
1171 sizeof(struct entry *), &sort_entries);
1172
1173 unsigned i = 0;
1174 for (; i < util_dynarray_num_elements(arr, struct entry*) - 1; i++) {
1175 struct entry *low = *util_dynarray_element(arr, struct entry *, i);
1176 struct entry *high = *util_dynarray_element(arr, struct entry *, i + 1);
1177
1178 uint64_t diff = high->offset_signed - low->offset_signed;
1179 if (diff > get_bit_size(low) / 8u * low->intrin->num_components) {
1180 progress |= update_align(low);
1181 continue;
1182 }
1183
1184 struct entry *first = low->index < high->index ? low : high;
1185 struct entry *second = low->index < high->index ? high : low;
1186
1187 if (try_vectorize(impl, ctx, low, high, first, second)) {
1188 *util_dynarray_element(arr, struct entry *, i) = NULL;
1189 *util_dynarray_element(arr, struct entry *, i + 1) = low->is_store ? second : first;
1190 progress = true;
1191 } else {
1192 progress |= update_align(low);
1193 }
1194 }
1195
1196 struct entry *last = *util_dynarray_element(arr, struct entry *, i);
1197 progress |= update_align(last);
1198 }
1199
1200 _mesa_hash_table_clear(ht, delete_entry_dynarray);
1201
1202 return progress;
1203 }
1204
1205 static bool
1206 handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *impl, nir_instr *instr)
1207 {
1208 unsigned modes = 0;
1209 bool acquire = true;
1210 bool release = true;
1211 if (instr->type == nir_instr_type_intrinsic) {
1212 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1213 switch (intrin->intrinsic) {
1214 case nir_intrinsic_group_memory_barrier:
1215 case nir_intrinsic_memory_barrier:
1216 modes = nir_var_mem_ssbo | nir_var_mem_shared | nir_var_mem_global;
1217 break;
1218 /* prevent speculative loads/stores */
1219 case nir_intrinsic_discard_if:
1220 case nir_intrinsic_discard:
1221 modes = nir_var_all;
1222 break;
1223 case nir_intrinsic_memory_barrier_buffer:
1224 modes = nir_var_mem_ssbo | nir_var_mem_global;
1225 break;
1226 case nir_intrinsic_memory_barrier_shared:
1227 modes = nir_var_mem_shared;
1228 break;
1229 case nir_intrinsic_scoped_memory_barrier:
1230 modes = nir_intrinsic_memory_modes(intrin);
1231 acquire = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE;
1232 release = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE;
1233 switch (nir_intrinsic_memory_scope(intrin)) {
1234 case NIR_SCOPE_INVOCATION:
1235 case NIR_SCOPE_SUBGROUP:
1236 /* a barrier should never be required for correctness with these scopes */
1237 modes = 0;
1238 break;
1239 default:
1240 break;
1241 }
1242 break;
1243 default:
1244 return false;
1245 }
1246 } else if (instr->type == nir_instr_type_call) {
1247 modes = nir_var_all;
1248 } else {
1249 return false;
1250 }
1251
1252 while (modes) {
1253 unsigned mode_index = u_bit_scan(&modes);
1254 if ((1 << mode_index) == nir_var_mem_global) {
1255 /* Global should be rolled in with SSBO */
1256 assert(list_is_empty(&ctx->entries[mode_index]));
1257 assert(ctx->loads[mode_index] == NULL);
1258 assert(ctx->stores[mode_index] == NULL);
1259 continue;
1260 }
1261
1262 if (acquire)
1263 *progress |= vectorize_entries(ctx, impl, ctx->loads[mode_index]);
1264 if (release)
1265 *progress |= vectorize_entries(ctx, impl, ctx->stores[mode_index]);
1266 }
1267
1268 return true;
1269 }
1270
1271 static bool
1272 process_block(nir_function_impl *impl, struct vectorize_ctx *ctx, nir_block *block)
1273 {
1274 bool progress = false;
1275
1276 for (unsigned i = 0; i < nir_num_variable_modes; i++) {
1277 list_inithead(&ctx->entries[i]);
1278 if (ctx->loads[i])
1279 _mesa_hash_table_clear(ctx->loads[i], delete_entry_dynarray);
1280 if (ctx->stores[i])
1281 _mesa_hash_table_clear(ctx->stores[i], delete_entry_dynarray);
1282 }
1283
1284 /* create entries */
1285 unsigned next_index = 0;
1286
1287 nir_foreach_instr_safe(instr, block) {
1288 if (handle_barrier(ctx, &progress, impl, instr))
1289 continue;
1290
1291 /* gather information */
1292 if (instr->type != nir_instr_type_intrinsic)
1293 continue;
1294 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1295
1296 const struct intrinsic_info *info = get_info(intrin->intrinsic);
1297 if (!info)
1298 continue;
1299
1300 nir_variable_mode mode = info->mode;
1301 if (!mode)
1302 mode = nir_src_as_deref(intrin->src[info->deref_src])->mode;
1303 if (!(mode & aliasing_modes(ctx->modes)))
1304 continue;
1305 unsigned mode_index = mode_to_index(mode);
1306
1307 /* create entry */
1308 struct entry *entry = create_entry(ctx, info, intrin);
1309 entry->index = next_index++;
1310
1311 list_addtail(&entry->head, &ctx->entries[mode_index]);
1312
1313 /* add the entry to a hash table */
1314
1315 struct hash_table *adj_ht = NULL;
1316 if (entry->is_store) {
1317 if (!ctx->stores[mode_index])
1318 ctx->stores[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
1319 adj_ht = ctx->stores[mode_index];
1320 } else {
1321 if (!ctx->loads[mode_index])
1322 ctx->loads[mode_index] = _mesa_hash_table_create(ctx, &hash_entry_key, &entry_key_equals);
1323 adj_ht = ctx->loads[mode_index];
1324 }
1325
1326 uint32_t key_hash = hash_entry_key(entry->key);
1327 struct hash_entry *adj_entry = _mesa_hash_table_search_pre_hashed(adj_ht, key_hash, entry->key);
1328 struct util_dynarray *arr;
1329 if (adj_entry && adj_entry->data) {
1330 arr = (struct util_dynarray *)adj_entry->data;
1331 } else {
1332 arr = ralloc(ctx, struct util_dynarray);
1333 util_dynarray_init(arr, arr);
1334 _mesa_hash_table_insert_pre_hashed(adj_ht, key_hash, entry->key, arr);
1335 }
1336 util_dynarray_append(arr, struct entry *, entry);
1337 }
1338
1339 /* sort and combine entries */
1340 for (unsigned i = 0; i < nir_num_variable_modes; i++) {
1341 progress |= vectorize_entries(ctx, impl, ctx->loads[i]);
1342 progress |= vectorize_entries(ctx, impl, ctx->stores[i]);
1343 }
1344
1345 return progress;
1346 }
1347
1348 bool
1349 nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
1350 nir_should_vectorize_mem_func callback,
1351 nir_variable_mode robust_modes)
1352 {
1353 bool progress = false;
1354
1355 struct vectorize_ctx *ctx = rzalloc(NULL, struct vectorize_ctx);
1356 ctx->modes = modes;
1357 ctx->callback = callback;
1358 ctx->robust_modes = robust_modes;
1359
1360 nir_index_vars(shader, NULL, modes);
1361
1362 nir_foreach_function(function, shader) {
1363 if (function->impl) {
1364 if (modes & nir_var_function_temp)
1365 nir_index_vars(shader, function->impl, nir_var_function_temp);
1366
1367 nir_foreach_block(block, function->impl)
1368 progress |= process_block(function->impl, ctx, block);
1369
1370 nir_metadata_preserve(function->impl,
1371 nir_metadata_block_index |
1372 nir_metadata_dominance |
1373 nir_metadata_live_ssa_defs);
1374 }
1375 }
1376
1377 ralloc_free(ctx);
1378 return progress;
1379 }