freedreno/ir3: set array precision on creation
src/freedreno/ir3/ir3_context.c
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

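/* Builds the per-variant compile context: clones the variant's NIR (so the
 * final lowering passes below can freely mutate it), selects the
 * generation-specific callbacks, and allocates the hash tables that map NIR
 * ssa defs and blocks to their ir3 instructions.
 */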
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
      struct ir3_shader_variant *so)
{
   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gpu_id >= 400) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->astc_srgb = so->key.fastc_srgb;
      }
   } else {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gpu_id >= 600) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gpu_id >= 400) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
   ctx->def_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);

   /* TODO: maybe generate some sort of bitmask of what the key lowers
    * vs what the shader has (ie. no need to run texture clamp lowering
    * if there are no texture sample instrs)..  although this should be
    * done further up the stack to avoid creating duplicate variants..
    */

   ctx->s = nir_shader_clone(ctx, so->shader->nir);
   ir3_nir_lower_variant(so, ctx->s);

   /* this needs to be the last pass run, so do this here instead of
    * in ir3_optimize_nir():
    */
   bool progress = false;
   NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);

   /* we may need cleanup after lower_locals_to_regs */
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
   }

   /* We want to lower nir_op_imul as late as possible, to also catch
    * those generated by earlier passes (e.g., nir_lower_locals_to_regs).
    * However, we want a final swing of a few passes to have a chance
    * at optimizing the result.
    */
   progress = false;
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
   }

   /* Enable the texture pre-fetch feature only from a6xx onwards, and
    * only on generations that have been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

   NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

   /* Super crude heuristic to limit # of tex prefetch in small
    * shaders.  This completely ignores loops.. but that's really
    * not the worst of its problems.  (A frag shader that has
    * loops is probably going to be big enough to not trigger a
    * lower threshold.)
    *
    *   1) probably want to do this in terms of ir3 instructions
    *   2) probably really want to decide this after scheduling
    *      (or at least pre-RA sched) so we have a rough idea about
    *      nops, and don't count things that get cp'd away
    *   3) blob seems to use higher thresholds with a mix of more
    *      SFU instructions.  Which partly makes sense, more SFU
    *      instructions probably means you want to get the real
    *      shader started sooner, but that considers where in the
    *      shader the SFU instructions are, which blob doesn't seem
    *      to do.
    *
    * This uses more conservative thresholds assuming a more alu
    * than sfu heavy instruction mix.
    */
   if (so->type == MESA_SHADER_FRAGMENT) {
      nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

      unsigned instruction_count = 0;
      nir_foreach_block (block, fxn) {
         instruction_count += exec_list_length(&block->instr_list);
      }

      if (instruction_count < 50) {
         ctx->prefetch_limit = 2;
      } else if (instruction_count < 70) {
         ctx->prefetch_limit = 3;
      } else {
         ctx->prefetch_limit = IR3_MAX_SAMPLER_PREFETCH;
      }
   }

   if (shader_debug_enabled(so->type)) {
      fprintf(stdout, "NIR (final form) for %s shader %s:\n",
            ir3_shader_stage(so), so->shader->nir->info.name);
      nir_print_shader(ctx->s, stdout);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   return ctx;
}

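/* The NIR clone and the various hash tables are allocated with ctx as
 * their ralloc parent, so a single ralloc_free() releases them all:
 */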
void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by the caller) and
 * insert it in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
   struct ir3_instruction **value =
      ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

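/* Like ir3_get_dst_ssa(), but also handles non-ssa (register) dests; the
 * returned array is stashed in last_dst so that ir3_put_dst() can apply
 * half/high fixups and emit the array stores for the register case.
 */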
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
   struct ir3_instruction **value;

   if (dst->is_ssa) {
      value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
   } else {
      value = ralloc_array(ctx, struct ir3_instruction *, n);
   }

   /* NOTE: in the non-ssa case we don't really need to store last_dst,
    * but this helps us catch cases where a put_dst() call is forgotten
    */
   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

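/* Returns the per-component ir3 values for a NIR source: ssa sources are
 * looked up in def_ht, while register sources are loaded from the backing
 * ir3_array (going through a0.x when the access is indirect).
 */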
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   if (src->is_ssa) {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
      compile_assert(ctx, entry);
      return entry->data;
   } else {
      nir_register *reg = src->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = arr->r->num_components;
      struct ir3_instruction *addr = NULL;
      struct ir3_instruction **value =
         ralloc_array(ctx, struct ir3_instruction *, num_components);

      if (src->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = src->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         value[i] = ir3_create_array_load(ctx, arr, n, addr);
      }

      return value;
   }
}

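/* Finishes off a dst obtained from ir3_get_dst(): inserts movs out of HIGH
 * regs, fixes up 16-bit dst/src types, and for non-ssa dests stores the
 * values back into the ir3_array (via a0.x for indirect stores).
 */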
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
   unsigned bit_size = nir_dest_bit_size(*dst);

   /* add an extra mov if the dst value is a HIGH reg: some instructions
    * cannot read from HIGH regs, and in the cases where the consumer can,
    * ir3_cp will clean up the extra mov:
    */
   for (unsigned i = 0; i < ctx->last_dst_n; i++) {
      if (!ctx->last_dst[i])
         continue;
      if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
         ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
      }
   }

   /* Note: 1-bit bools are stored in 32-bit regs */
   if (bit_size == 16) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         ir3_set_dst_type(dst, true);
         ir3_fixup_src_type(dst);
         if (dst->opc == OPC_META_SPLIT) {
            ir3_set_dst_type(ssa(dst->regs[1]), true);
            ir3_fixup_src_type(ssa(dst->regs[1]));
            dst->regs[1]->flags |= IR3_REG_HALF;
         }
      }
   }

   if (!dst->is_ssa) {
      nir_register *reg = dst->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = ctx->last_dst_n;
      struct ir3_instruction *addr = NULL;

      if (dst->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = dst->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         if (!ctx->last_dst[i])
            continue;
         ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
      }

      ralloc_free(ctx->last_dst);
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

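/* Gathers arrsz scalar values into a single OPC_META_COLLECT, whose dst
 * covers arrsz consecutive scalar registers (e.g. the vector source of a
 * texture sample instruction).
 */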
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
      unsigned arrsz)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   unsigned flags = dest_flags(arr[0]);

   collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (Ie. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, that in effect do
       * something like:
       *
       *    if (foo)
       *       texcoord = ..
       *    else
       *       texcoord = ..
       *    color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->regs[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(block, elem, type);
      }

      compile_assert(ctx, dest_flags(elem) == flags);
      __ssa_src(collect, elem, flags);
   }

   collect->regs[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
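/* E.g. a sam instruction writing a vec4 result gets four OPC_META_SPLIT
 * instructions with split.off = base..base+3; only the components set in
 * the src's wrmask are recorded into dst[].
 */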
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
      struct ir3_instruction *src, unsigned base, unsigned n)
{
   struct ir3_instruction *prev = NULL;

   if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      debug_assert((base + n) < src->regs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->regs[i + base + 1]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
         ir3_instr_create(block, OPC_META_SPLIT);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (prev) {
         split->cp.left = prev;
         split->cp.left_cnt++;
         prev->cp.right = split;
         prev->cp.right_cnt++;
      }
      prev = split;

      if (src->regs[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL,
            _mesa_hash_pointer,
            _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      _debug_vprintf(format, ap);
   }
   va_end(ap);
   nir_print_shader_annotated(ctx->s, stdout, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

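/* Emits the cov/shl (or mull) sequence that turns a 32-bit index into a
 * half-size value in a0.x, pre-scaled by 'align' components (e.g. align=4
 * shifts left by 2 so the index addresses vec4-sized array slots).
 */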
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed_typed(block, 1, TYPE_S16);
      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed_typed(block, 3, TYPE_S16);
      instr = ir3_MULL_U(block, instr, 0, immed, 0);
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed_typed(block, 2, TYPE_S16);
      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr->regs[0]->flags |= IR3_REG_HALF;

   instr = ir3_MOV(block, instr, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 0);
   instr->regs[0]->flags &= ~IR3_REG_SSA;

   return instr;
}

static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
   struct ir3_instruction *immed = create_immed_typed(block, const_val, TYPE_S16);
   struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 1);
   instr->regs[0]->flags &= ~IR3_REG_SSA;
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(ctx->block, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(ctx->block, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *cond;

   /* NOTE: only cmps.*.* can write p0.x: */
   cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->regs[0]->num = regid(REG_P0, 0);
   cond->regs[0]->flags &= ~IR3_REG_SSA;

   return cond;
}

/*
 * Array helpers
 */

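/* Creates the ir3_array backing a nir_register.  The array's length is
 * components * array elems, and its precision (half vs full) is decided
 * here, at creation, from the register's bit_size.
 */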
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it were an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
   compile_assert(ctx, arr->length > 0);
   arr->r = reg;
   arr->half = reg->bit_size <= 16;
   // HACK one-bit bools still end up as 32b:
   if (reg->bit_size == 1)
      arr->half = false;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
   return NULL;
}

/* relative (indirect) if address!=NULL */
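/* The load is emitted as a mov with an IR3_REG_ARRAY source linked to the
 * array's last_write, and tagged with the array read/write barrier classes
 * so that scheduling preserves ordering against array stores.
 */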
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_instr_create(block, OPC_MOV);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV) | flags);
   src->instr = arr->last_write;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
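/* Stores update arr->last_write, which chains each access to the previous
 * write of the same array; non-relative stores of non-meta values just retag
 * the existing dst reg as IR3_REG_ARRAY instead of emitting an extra mov.
 */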
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *src, struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;

   /* if not relative store, don't create an extra mov, since that
    * ends up being difficult for cp to remove.
    *
    * Also, don't skip the mov if the src is meta (like fanout/split),
    * since that creates a situation that RA can't really handle properly.
    */
   if (!address && !is_meta(src)) {
      dst = src->regs[0];

      src->barrier_class |= IR3_BARRIER_ARRAY_W;
      src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

      dst->flags |= IR3_REG_ARRAY;
      dst->instr = arr->last_write;
      dst->size = arr->length;
      dst->array.id = arr->id;
      dst->array.offset = n;

      arr->last_write = src;

      array_insert(block, block->keeps, src);

      return;
   }

   mov = ir3_instr_create(block, OPC_MOV);
   mov->cat1.src_type = TYPE_U32;
   mov->cat1.dst_type = TYPE_U32;
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV));
   dst->instr = arr->last_write;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = mov;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}