vc4: Make qir_for_each_inst_inorder() safe against removal.
[mesa.git] / src / gallium / drivers / vc4 / vc4_qir.h
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef VC4_QIR_H
25 #define VC4_QIR_H
26
27 #include <assert.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <stdbool.h>
31 #include <stdint.h>
32 #include <string.h>
33
34 #include "util/macros.h"
35 #include "compiler/nir/nir.h"
36 #include "util/list.h"
37 #include "util/u_math.h"
38
39 #include "vc4_screen.h"
40 #include "vc4_qpu_defines.h"
41 #include "vc4_qpu.h"
42 #include "kernel/vc4_packet.h"
43 #include "pipe/p_state.h"
44
45 struct nir_builder;
46
/**
 * Register files a QIR operand or destination can live in.
 *
 * QFILE_TEMP is the general virtual-register file; most other entries name
 * fixed hardware sources/sinks (varyings, uniforms, the VPM, TLB writes,
 * TMU setup registers) or immediates carried in the qreg's index field.
 */
enum qfile {
        QFILE_NULL,
        QFILE_TEMP,
        QFILE_VARY,
        QFILE_UNIF,
        QFILE_VPM,
        QFILE_TLB_COLOR_WRITE,
        QFILE_TLB_COLOR_WRITE_MS,
        QFILE_TLB_Z_WRITE,
        QFILE_TLB_STENCIL_SETUP,

        /* If tex_s is written on its own without preceding t/r/b setup, it's
         * a direct memory access using the input value, without the sideband
         * uniform load.  We represent these in QIR as a separate write
         * destination so we can tell if the sideband uniform is present.
         */
        QFILE_TEX_S_DIRECT,

        QFILE_TEX_S,
        QFILE_TEX_T,
        QFILE_TEX_R,
        QFILE_TEX_B,

        /* Payload registers that aren't in the physical register file, so we
         * can just use the corresponding qpu_reg at qpu_emit time.
         */
        QFILE_FRAG_X,
        QFILE_FRAG_Y,
        QFILE_FRAG_REV_FLAG,
        QFILE_QPU_ELEMENT,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};
90
/** A reference to a QIR register: a file plus an index within that file. */
struct qreg {
        enum qfile file;
        /* Temp number for QFILE_TEMP, or the raw immediate value for
         * QFILE_LOAD_IMM/QFILE_SMALL_IMM (see the enum qfile docs).
         */
        uint32_t index;
        /* QPU pack/unpack modifier (QPU_PACK_* / QPU_UNPACK_*) applied to
         * this operand, or 0 for none.
         */
        int pack;
};
96
97 static inline struct qreg qir_reg(enum qfile file, uint32_t index)
98 {
99 return (struct qreg){file, index};
100 }
101
/**
 * QIR opcodes.
 *
 * Many correspond directly to QPU ALU operations; the rest are QIR-level
 * pseudo-ops (immediate loads, branches, texture/TLB access, thread
 * switching) lowered at QPU emission time.
 */
enum qop {
        QOP_UNDEF,
        QOP_MOV,
        QOP_FMOV,
        QOP_MMOV,
        QOP_FADD,
        QOP_FSUB,
        QOP_FMUL,
        QOP_V8MULD,
        QOP_V8MIN,
        QOP_V8MAX,
        QOP_V8ADDS,
        QOP_V8SUBS,
        QOP_MUL24,
        QOP_FMIN,
        QOP_FMAX,
        QOP_FMINABS,
        QOP_FMAXABS,
        QOP_ADD,
        QOP_SUB,
        QOP_SHL,
        QOP_SHR,
        QOP_ASR,
        QOP_MIN,
        QOP_MAX,
        QOP_AND,
        QOP_OR,
        QOP_XOR,
        QOP_NOT,

        QOP_FTOI,
        QOP_ITOF,
        QOP_RCP,
        QOP_RSQ,
        QOP_EXP2,
        QOP_LOG2,
        QOP_VW_SETUP,
        QOP_VR_SETUP,
        QOP_TLB_COLOR_READ,
        QOP_MS_MASK,
        QOP_VARY_ADD_C,

        QOP_FRAG_Z,
        QOP_FRAG_W,

        /**
         * Signal of texture read being necessary and then reading r4 into
         * the destination
         */
        QOP_TEX_RESULT,

        /**
         * Insert the signal for switching threads in a threaded fragment
         * shader. No value can be live in an accumulator across a thrsw.
         *
         * At the QPU level, this will have several delay slots before the
         * switch happens. Those slots are the responsibility of the
         * scheduler.
         */
        QOP_THRSW,

        /* 32-bit immediate loaded to each SIMD channel */
        QOP_LOAD_IMM,

        /* 32-bit immediate divided into 16 2-bit unsigned int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_U2,
        /* 32-bit immediate divided into 16 2-bit signed int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_I2,

        /* Rotates the multiply output across SIMD channels; see
         * qir_ROT_MUL() below.
         */
        QOP_ROT_MUL,

        /* Jumps to block->successor[0] if the qinst->cond (as a
         * QPU_COND_BRANCH_*) passes, or block->successor[1] if not.  Note
         * that block->successor[1] may be unset if the condition is ALWAYS.
         */
        QOP_BRANCH,

        /* Emits an ADD from src[0] to src[1], where src[0] must be a
         * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
         * required by the kernel as part of its branch validation.
         */
        QOP_UNIFORMS_RESET,
};
189
/** A single encoded 64-bit QPU instruction, queued on a qpu_inst_list. */
struct queued_qpu_inst {
        struct list_head link; /* entry in a qpu_inst_list */
        uint64_t inst;         /* encoded 64-bit QPU instruction word */
};
194
/** One QIR instruction. */
struct qinst {
        struct list_head link; /* entry in qblock::instructions */

        enum qop op;
        struct qreg dst;
        struct qreg src[3];
        /* Set condition flags ("sf") from this instruction's result. */
        bool sf;
        bool cond_is_exec_mask;
        /* QPU_COND_* condition for conditional execution, or a
         * QPU_COND_BRANCH_* for QOP_BRANCH (see the QOP_BRANCH docs).
         */
        uint8_t cond;
};
205
/** Shader stage being compiled. */
enum qstage {
        /**
         * Coordinate shader, runs during binning, before the VS, and just
         * outputs position.
         */
        QSTAGE_COORD,
        QSTAGE_VERT,
        QSTAGE_FRAG,
};
215
/**
 * Describes what each entry in the shader's uniform stream contains, so the
 * stream can be rebuilt per draw call at uniform-upload time.
 */
enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to relative to the viewport
         * center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0,

        /**
         * A reference to a texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture width, height, filters, and wrap modes.  It will be
         * found as a parameter to the second QOP_TEX_[STRB] instruction in a
         * sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /** A reference to a texture config parameter 2 cubemap stride uniform */
        QUNIFORM_TEXTURE_CONFIG_P2,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_MSAA_ADDR,

        QUNIFORM_UBO_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        QUNIFORM_TEXTURE_BORDER_COLOR,

        QUNIFORM_BLEND_CONST_COLOR_X,
        QUNIFORM_BLEND_CONST_COLOR_Y,
        QUNIFORM_BLEND_CONST_COLOR_Z,
        QUNIFORM_BLEND_CONST_COLOR_W,
        QUNIFORM_BLEND_CONST_COLOR_RGBA,
        QUNIFORM_BLEND_CONST_COLOR_AAAA,

        QUNIFORM_STENCIL,

        QUNIFORM_ALPHA_REF,
        QUNIFORM_SAMPLE_MASK,

        /* Placeholder uniform that will be updated by the kernel when used by
         * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
         */
        QUNIFORM_UNIFORMS_ADDRESS,
};
295
/* One scalar varying component: a VARYING_SLOT_* plus the component
 * swizzle within it (see vc4_compile::input_slots / output_slots).
 */
struct vc4_varying_slot {
        uint8_t slot;
        uint8_t swizzle;
};
300
/** A range of the gallium uniform storage that may be uploaded as a UBO. */
struct vc4_compiler_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;

        /**
         * Set if this range is used by the shader for indirect uniforms
         * access.
         */
        bool used;
};
325
/**
 * Shader variant key state shared between the FS and VS keys
 * (vc4_fs_key/vc4_vs_key embed this as their first member).
 */
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        /* Per-sampler state that affects generated texturing code. */
        struct {
                enum pipe_format format;
                uint8_t swizzle[4];
                union {
                        struct {
                                unsigned compare_mode:1;
                                unsigned compare_func:3;
                                unsigned wrap_s:3;
                                unsigned wrap_t:3;
                                bool force_first_level:1;
                        };
                        struct {
                                uint16_t msaa_width, msaa_height;
                        };
                };
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
        /* Bitmask of enabled user clip planes (cf. QUNIFORM_USER_CLIP_PLANE). */
        uint8_t ucp_enables;
};
346
/** Fragment shader variant key: FS-affecting fixed-function state. */
struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        bool msaa;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};
369
/** Vertex/coordinate shader variant key. */
struct vc4_vs_key {
        struct vc4_key base;

        /* FS inputs this VS variant must produce, in FS read order. */
        const struct vc4_fs_inputs *fs_inputs;
        enum pipe_format attr_formats[8];
        bool is_coord; /* compiling the coordinate-shader variant (QSTAGE_COORD) */
        bool per_vertex_point_size;
        bool clamp_color;
};
379
/** A basic block of QIR instructions. */
struct qblock {
        struct list_head link; /* entry in vc4_compile::blocks */

        struct list_head instructions; /* list of struct qinst */
        struct list_head qpu_inst_list; /* list of struct queued_qpu_inst */

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by vc4_qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * vc4_qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** @{ used by vc4_qir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};
411
/**
 * Per-compile state for one shader variant, carried from NIR translation
 * through QIR optimization to QPU instruction emission.
 */
struct vc4_compile {
        struct vc4_context *vc4;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[VC4_MAX_SAMPLES];
        struct qreg sample_colors[VC4_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        struct vc4_compiler_ubo_range *ubo_ranges;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas declared in ubo_ranges. */
        uint32_t num_uniform_ranges;
        /** Number of uniform areas used for indirect addressed loads. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;

        struct qreg line_x, point_x, point_y;
        /** boolean (~0 -> true) if the fragment has been discarded. */
        struct qreg discard;
        /* Cached payload reads; lazily populated by QIR_PAYLOAD() below. */
        struct qreg payload_FRAG_Z;
        struct qreg payload_FRAG_W;

        uint8_t vattr_sizes[8];

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct vc4_varying_slot *input_slots;
        uint32_t num_input_slots;
        uint32_t input_slots_array_size;

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs.
         */
        struct vc4_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct vc4_key *key;
        struct vc4_fs_key *fs_key;
        struct vc4_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t num_outputs;
        uint32_t num_texture_samples;
        uint32_t output_position_index;
        uint32_t output_color_index;
        uint32_t output_point_size_index;
        uint32_t output_sample_mask_index;

        /* Placeholder register used for unused instruction sources/dests. */
        struct qreg undef;
        enum qstage stage;
        uint32_t num_temps;

        struct list_head blocks; /* list of struct qblock */
        int next_block_index;
        /* Block that newly emitted instructions go into; see
         * qir_set_emit_block().
         */
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        struct list_head qpu_inst_list;

        /* Pre-QPU-scheduled instruction containing the last THRSW */
        uint64_t *last_thrsw;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t num_inputs;

        /**
         * Number of inputs from num_inputs remaining to be queued to the read
         * FIFO in the VS/CS.
         */
        uint32_t num_inputs_remaining;

        /* Number of inputs currently in the read FIFO for the VS/CS */
        uint32_t num_inputs_in_fifo;

        /** Next offset in the VPM to read from in the VS/CS */
        uint32_t vpm_read_offset;

        uint32_t program_id;
        uint32_t variant_id;

        /* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
         * is used to hide texturing latency at the cost of limiting ourselves
         * to the bottom half of physical reg space.
         */
        bool fs_threaded;

        bool last_thrsw_at_top_level;

        bool failed;
};
544
/* Special nir_load_input intrinsic index for loading the current TLB
 * destination color.
 */
#define VC4_NIR_TLB_COLOR_READ_INPUT 2000000000

/* Special nir_store_output index used for sample mask writes (cf.
 * QOP_MS_MASK).
 */
#define VC4_NIR_MS_MASK_OUTPUT 2000000000

/* Compile/block lifecycle. */
struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qblock *qir_new_block(struct vc4_compile *c);
void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *qir_entry_block(struct vc4_compile *c);
struct qblock *qir_exit_block(struct vc4_compile *c);

/* Instruction construction and emission. */
struct qinst *qir_inst(enum qop op, struct qreg dst,
                       struct qreg src0, struct qreg src1);
void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
struct qreg qir_uniform(struct vc4_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void qir_schedule_instructions(struct vc4_compile *c);
void qir_reorder_uniforms(struct vc4_compile *c);
void qir_emit_uniform_stream_resets(struct vc4_compile *c);

struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);

/* Instruction/register queries. */
struct qreg qir_get_temp(struct vc4_compile *c);
void qir_calculate_live_intervals(struct vc4_compile *c);
int qir_get_nsrc(struct qinst *inst);
int qir_get_non_sideband_nsrc(struct qinst *inst);
int qir_get_tex_uniform_src(struct qinst *inst);
bool qir_reg_equals(struct qreg a, struct qreg b);
bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
bool qir_is_mul(struct qinst *inst);
bool qir_is_raw_mov(struct qinst *inst);
bool qir_is_tex(struct qinst *inst);
bool qir_has_implicit_tex_uniform(struct qinst *inst);
bool qir_is_float_input(struct qinst *inst);
bool qir_depends_on_flags(struct qinst *inst);
bool qir_writes_r4(struct qinst *inst);
struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
uint8_t qir_channels_written(struct qinst *inst);

/* Debug dumping and validation. */
void qir_dump(struct vc4_compile *c);
void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
const char *qir_get_stage_name(enum qstage stage);

void qir_validate(struct vc4_compile *c);

/* Optimization passes; the bool-returning passes report whether they made
 * progress (the usual pass-loop convention -- confirm in vc4_qir.c).
 */
void qir_optimize(struct vc4_compile *c);
bool qir_opt_algebraic(struct vc4_compile *c);
bool qir_opt_coalesce_ff_writes(struct vc4_compile *c);
bool qir_opt_constant_folding(struct vc4_compile *c);
bool qir_opt_copy_propagation(struct vc4_compile *c);
bool qir_opt_dead_code(struct vc4_compile *c);
bool qir_opt_peephole_sf(struct vc4_compile *c);
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm(struct vc4_compile *c);

/* NIR lowering passes run before QIR translation. */
void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                          nir_ssa_def **srcs, int swiz);
void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
void qir_lower_uniforms(struct vc4_compile *c);

uint32_t qpu_schedule_instructions(struct vc4_compile *c);

void qir_SF(struct vc4_compile *c, struct qreg src);
615
616 static inline struct qreg
617 qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
618 {
619 return qir_uniform(c, QUNIFORM_CONSTANT, ui);
620 }
621
622 static inline struct qreg
623 qir_uniform_f(struct vc4_compile *c, float f)
624 {
625 return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
626 }
627
/* Emitters for 0-source ALU ops: qir_NAME() allocates and returns a new
 * temp; qir_NAME_dest() writes to a caller-provided destination and returns
 * the emitted instruction.
 */
#define QIR_ALU0(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        c->undef, c->undef));            \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest)               \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest,             \
                                           c->undef, c->undef));         \
}

/* Same as QIR_ALU0, for 1-source ops. */
#define QIR_ALU1(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        a, c->undef));                   \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a,          \
                                           c->undef));                   \
}

/* Same as QIR_ALU0, for 2-source ops. */
#define QIR_ALU2(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b));    \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a, struct qreg b)                          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b));     \
}

/* Emitters for ops that produce no SSA-style def: only the _dest-less
 * form exists and it returns the emitted instruction.
 */
#define QIR_NODST_1(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, c->undef));                \
}

#define QIR_NODST_2(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, b));                       \
}

/* Lazily emits the payload-register read into the entry block on first
 * use and caches the resulting temp in c->payload_<name>, so the read
 * only happens once per program.
 */
#define QIR_PAYLOAD(name)                                                \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        struct qreg *payload = &c->payload_##name;                       \
        if (payload->file != QFILE_NULL)                                 \
                return *payload;                                         \
        *payload = qir_get_temp(c);                                      \
        struct qinst *inst = qir_inst(QOP_##name, *payload,              \
                                      c->undef, c->undef);               \
        struct qblock *entry = qir_entry_block(c);                       \
        list_add(&inst->link, &entry->instructions);                     \
        c->defs[payload->index] = inst;                                  \
        return *payload;                                                 \
}
701
/* Instantiate the qir_<OP>() / qir_<OP>_dest() emitter helpers for each
 * opcode (see the QIR_ALU*/QIR_NODST*/QIR_PAYLOAD macros above).
 */
QIR_ALU1(MOV)
QIR_ALU1(FMOV)
QIR_ALU1(MMOV)
QIR_ALU2(FADD)
QIR_ALU2(FSUB)
QIR_ALU2(FMUL)
QIR_ALU2(V8MULD)
QIR_ALU2(V8MIN)
QIR_ALU2(V8MAX)
QIR_ALU2(V8ADDS)
QIR_ALU2(V8SUBS)
QIR_ALU2(MUL24)
QIR_ALU2(FMIN)
QIR_ALU2(FMAX)
QIR_ALU2(FMINABS)
QIR_ALU2(FMAXABS)
QIR_ALU1(FTOI)
QIR_ALU1(ITOF)

QIR_ALU2(ADD)
QIR_ALU2(SUB)
QIR_ALU2(SHL)
QIR_ALU2(SHR)
QIR_ALU2(ASR)
QIR_ALU2(MIN)
QIR_ALU2(MAX)
QIR_ALU2(AND)
QIR_ALU2(OR)
QIR_ALU2(XOR)
QIR_ALU1(NOT)

QIR_ALU1(RCP)
QIR_ALU1(RSQ)
QIR_ALU1(EXP2)
QIR_ALU1(LOG2)
QIR_ALU1(VARY_ADD_C)
QIR_PAYLOAD(FRAG_Z)
QIR_PAYLOAD(FRAG_W)
QIR_ALU0(TEX_RESULT)
QIR_ALU0(TLB_COLOR_READ)
QIR_NODST_1(MS_MASK)
743
744 static inline struct qreg
745 qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
746 {
747 struct qreg t = qir_get_temp(c);
748 qir_MOV_dest(c, t, src1);
749 qir_MOV_dest(c, t, src0)->cond = cond;
750 return t;
751 }
752
753 static inline struct qreg
754 qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
755 {
756 struct qreg t = qir_FMOV(c, src);
757 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
758 return t;
759 }
760
761 static inline struct qreg
762 qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
763 {
764 struct qreg t = qir_MOV(c, src);
765 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
766 return t;
767 }
768
769 static inline struct qreg
770 qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
771 {
772 struct qreg t = qir_FMOV(c, src);
773 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
774 return t;
775 }
776
777 static inline struct qreg
778 qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
779 {
780 struct qreg t = qir_MOV(c, src);
781 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
782 return t;
783 }
784
/**
 * Emits a MUL-pipeline MOV of val into 8-bit channel chan (8A..8D) of dest.
 *
 * dest is taken by value, so setting pack here doesn't modify the caller's
 * copy; it only annotates the emitted instruction's destination.
 */
static inline void
qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
{
        assert(!dest.pack);
        dest.pack = QPU_PACK_MUL_8A + chan;
        qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
}
792
793 static inline struct qreg
794 qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
795 {
796 struct qreg dest = qir_MMOV(c, val);
797 c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
798 return dest;
799 }
800
801 static inline struct qreg
802 qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
803 {
804 return qir_EXP2(c, qir_FMUL(c,
805 y,
806 qir_LOG2(c, x)));
807 }
808
809 static inline void
810 qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
811 {
812 qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
813 }
814
/** Loads a 32-bit immediate into each SIMD channel of a new temp. */
static inline struct qreg
qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val), c->undef));
}
821
/**
 * Loads 16 packed 2-bit unsigned values (one per SIMD channel) from a
 * 32-bit immediate (see QOP_LOAD_IMM_U2).
 */
static inline struct qreg
qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}
829
/**
 * Loads 16 packed 2-bit signed values (one per SIMD channel) from a
 * 32-bit immediate (see QOP_LOAD_IMM_I2).
 */
static inline struct qreg
qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}
837
/** Shifts the multiply output to the right by rot channels */
static inline struct qreg
qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
{
        /* The rotate count rides in src[1] as a QPU small immediate
         * (QPU_SMALL_IMM_MUL_ROT + rot).
         */
        return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
                                        val,
                                        qir_reg(QFILE_LOAD_IMM,
                                                QPU_SMALL_IMM_MUL_ROT + rot)));
}
847
848 static inline struct qinst *
849 qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
850 struct qreg dest, struct qreg src)
851 {
852 struct qinst *mov = qir_MOV_dest(c, dest, src);
853 mov->cond = cond;
854 return mov;
855 }
856
857 static inline struct qinst *
858 qir_BRANCH(struct vc4_compile *c, uint8_t cond)
859 {
860 struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
861 inst->cond = cond;
862 qir_emit_nondef(c, inst);
863 return inst;
864 }
865
/* Iterators over the CFG's blocks, in list order. */
#define qir_for_each_block(block, c)                                     \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

#define qir_for_each_block_rev(block, c)                                 \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define qir_for_each_successor(succ, block)                              \
        for (struct qblock *succ = block->successors[0];                 \
             succ != NULL;                                               \
             succ = (succ == block->successors[1] ? NULL :               \
                     block->successors[1]))

/* Iterators over one block's instructions.  The plain and _rev forms must
 * not remove the current instruction; use the _safe form for that.
 */
#define qir_for_each_inst(inst, block)                                   \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_rev(inst, block)                               \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_safe(inst, block)                              \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

/* Iterates over every instruction of the program in block order.  Built on
 * the _safe per-block iterator, so the current instruction may be removed
 * during the walk.
 */
#define qir_for_each_inst_inorder(inst, c)                               \
        qir_for_each_block(_block, c)                                    \
                qir_for_each_inst_safe(inst, _block)
891
892 #endif /* VC4_QIR_H */