ba3fbd7b283e205bb2a05228bef45cee55e8e80f
[mesa.git] / src / gallium / drivers / vc4 / vc4_qir.h
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef VC4_QIR_H
25 #define VC4_QIR_H
26
27 #include <assert.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <stdbool.h>
31 #include <stdint.h>
32 #include <string.h>
33
34 #include "util/macros.h"
35 #include "compiler/nir/nir.h"
36 #include "util/list.h"
37 #include "util/u_math.h"
38
39 #include "vc4_screen.h"
40 #include "vc4_qpu_defines.h"
41 #include "vc4_qpu.h"
42 #include "kernel/vc4_packet.h"
43 #include "pipe/p_state.h"
44
45 struct nir_builder;
46
/**
 * Register files that a struct qreg can live in.
 *
 * The meaning of qreg.index depends on the file: a temp number for
 * QFILE_TEMP, a varying/uniform slot, or a raw immediate for the *_IMM
 * files.
 */
enum qfile {
        QFILE_NULL,
        QFILE_TEMP,
        QFILE_VARY,
        QFILE_UNIF,
        QFILE_VPM,
        QFILE_TLB_COLOR_WRITE,
        QFILE_TLB_COLOR_WRITE_MS,
        QFILE_TLB_Z_WRITE,
        QFILE_TLB_STENCIL_SETUP,

        /* Payload registers that aren't in the physical register file, so we
         * can just use the corresponding qpu_reg at qpu_emit time.
         */
        QFILE_FRAG_X,
        QFILE_FRAG_Y,
        QFILE_FRAG_REV_FLAG,
        QFILE_QPU_ELEMENT,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};
78
/**
 * A reference to a register: a (file, index) pair, plus an optional
 * QPU pack/unpack mode applied when the register is read or written
 * (see the qir_UNPACK_* / qir_PACK_* helpers below).
 */
struct qreg {
        enum qfile file;
        uint32_t index;
        int pack;
};
84
85 static inline struct qreg qir_reg(enum qfile file, uint32_t index)
86 {
87 return (struct qreg){file, index};
88 }
89
/**
 * QIR opcodes.
 *
 * Many mirror QPU ALU operations directly; the rest are virtual
 * operations (texturing, payload reads, immediates, control flow) that
 * get turned into QPU instructions at emit time.
 */
enum qop {
        QOP_UNDEF,
        QOP_MOV,
        QOP_FMOV,
        QOP_MMOV,
        QOP_FADD,
        QOP_FSUB,
        QOP_FMUL,
        QOP_V8MULD,
        QOP_V8MIN,
        QOP_V8MAX,
        QOP_V8ADDS,
        QOP_V8SUBS,
        QOP_MUL24,
        QOP_FMIN,
        QOP_FMAX,
        QOP_FMINABS,
        QOP_FMAXABS,
        QOP_ADD,
        QOP_SUB,
        QOP_SHL,
        QOP_SHR,
        QOP_ASR,
        QOP_MIN,
        QOP_MAX,
        QOP_AND,
        QOP_OR,
        QOP_XOR,
        QOP_NOT,

        QOP_FTOI,
        QOP_ITOF,
        QOP_RCP,
        QOP_RSQ,
        QOP_EXP2,
        QOP_LOG2,
        QOP_VW_SETUP,
        QOP_VR_SETUP,
        QOP_TLB_COLOR_READ,
        QOP_MS_MASK,
        QOP_VARY_ADD_C,

        QOP_FRAG_Z,
        QOP_FRAG_W,

        /** Texture x coordinate parameter write */
        QOP_TEX_S,
        /** Texture y coordinate parameter write */
        QOP_TEX_T,
        /** Texture border color parameter or cube map z coordinate write */
        QOP_TEX_R,
        /** Texture LOD bias parameter write */
        QOP_TEX_B,

        /**
         * Texture-unit 4-byte read with address provided direct in S
         * coordinate.
         *
         * The first operand is the offset from the start of the UBO, and the
         * second is the uniform that has the UBO's base pointer.
         */
        QOP_TEX_DIRECT,

        /**
         * Signal of texture read being necessary and then reading r4 into
         * the destination
         */
        QOP_TEX_RESULT,

        /**
         * Insert the signal for switching threads in a threaded fragment
         * shader.  No value can be live in an accumulator across a thrsw.
         *
         * At the QPU level, this will have several delay slots before the
         * switch happens.  Those slots are the responsibility of the
         * scheduler.
         */
        QOP_THRSW,

        /* 32-bit immediate loaded to each SIMD channel */
        QOP_LOAD_IMM,

        /* 32-bit immediate divided into 16 2-bit unsigned int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_U2,
        /* 32-bit immediate divided into 16 2-bit signed int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_I2,

        QOP_ROT_MUL,

        /* Jumps to block->successor[0] if the qinst->cond (as a
         * QPU_COND_BRANCH_*) passes, or block->successor[1] if not.  Note
         * that block->successor[1] may be unset if the condition is ALWAYS.
         */
        QOP_BRANCH,

        /* Emits an ADD from src[0] to src[1], where src[0] must be a
         * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
         * required by the kernel as part of its branch validation.
         */
        QOP_UNIFORMS_RESET,
};
195
/** A single encoded 64-bit QPU instruction, queued on a list for emit. */
struct queued_qpu_inst {
        struct list_head link;
        uint64_t inst;
};
200
/** One QIR instruction, linked into its basic block's instruction list. */
struct qinst {
        struct list_head link;

        enum qop op;
        struct qreg dst;
        /* Source operands; count depends on op (see qir_get_op_nsrc()). */
        struct qreg *src;
        /* Set if the instruction should update the condition flags. */
        bool sf;
        bool cond_is_exec_mask;
        /* Condition code under which this instruction executes (for
         * QOP_BRANCH, a QPU_COND_BRANCH_* value).
         */
        uint8_t cond;
};
211
/** Shader stage being compiled. */
enum qstage {
        /**
         * Coordinate shader, runs during binning, before the VS, and just
         * outputs position.
         */
        QSTAGE_COORD,
        QSTAGE_VERT,
        QSTAGE_FRAG,
};
221
/**
 * Describes what each entry of the shader's uniform stream contains, so
 * the uniform stream can be rebuilt per-draw from current state.
 */
enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to relative to the viewport
         * center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0,

        /**
         * A reference to a texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture width, height, filters, and wrap modes.  It will be
         * found as a parameter to the second QOP_TEX_[STRB] instruction in a
         * sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /** A reference to a texture config parameter 2 cubemap stride uniform */
        QUNIFORM_TEXTURE_CONFIG_P2,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_MSAA_ADDR,

        QUNIFORM_UBO_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        QUNIFORM_TEXTURE_BORDER_COLOR,

        QUNIFORM_BLEND_CONST_COLOR_X,
        QUNIFORM_BLEND_CONST_COLOR_Y,
        QUNIFORM_BLEND_CONST_COLOR_Z,
        QUNIFORM_BLEND_CONST_COLOR_W,
        QUNIFORM_BLEND_CONST_COLOR_RGBA,
        QUNIFORM_BLEND_CONST_COLOR_AAAA,

        QUNIFORM_STENCIL,

        QUNIFORM_ALPHA_REF,
        QUNIFORM_SAMPLE_MASK,

        /* Placeholder uniform that will be updated by the kernel when used by
         * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
         */
        QUNIFORM_UNIFORMS_ADDRESS,
};
301
/* A varying slot plus the component (swizzle) read within it. */
struct vc4_varying_slot {
        uint8_t slot;
        uint8_t swizzle;
};
306
/** A range of gallium uniform storage to be uploaded as part of the UBO. */
struct vc4_compiler_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;

        /**
         * Set if this range is used by the shader for indirect uniforms
         * access.
         */
        bool used;
};
331
/**
 * Shader-variant key state shared by FS and VS (embedded as the "base"
 * of vc4_fs_key / vc4_vs_key below).
 */
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                uint8_t swizzle[4];
                /* Sampler state when used as a normal texture, or MSAA
                 * surface size when used for an MSAA resolve.
                 */
                union {
                        struct {
                                unsigned compare_mode:1;
                                unsigned compare_func:3;
                                unsigned wrap_s:3;
                                unsigned wrap_t:3;
                                bool force_first_level:1;
                        };
                        struct {
                                uint16_t msaa_width, msaa_height;
                        };
                };
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
        /* Bitmask of enabled user clip planes. */
        uint8_t ucp_enables;
};
352
/** Fragment shader variant key: the non-shader state compiled into the FS. */
struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        bool msaa;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};
375
/** Vertex shader variant key: the non-shader state compiled into the VS. */
struct vc4_vs_key {
        struct vc4_key base;

        /* FS inputs the VS must produce outputs for, in FS read order. */
        const struct vc4_fs_inputs *fs_inputs;
        enum pipe_format attr_formats[8];
        bool is_coord;
        bool per_vertex_point_size;
        bool clamp_color;
};
385
/** A basic block of QIR instructions. */
struct qblock {
        struct list_head link;

        struct list_head instructions;
        struct list_head qpu_inst_list;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by vc4_qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * vc4_qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** @{ used by vc4_qir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};
417
/**
 * Per-shader compilation state, created by qir_compile_init() and freed
 * by qir_compile_destroy().
 */
struct vc4_compile {
        struct vc4_context *vc4;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[VC4_MAX_SAMPLES];
        struct qreg sample_colors[VC4_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        struct vc4_compiler_ubo_range *ubo_ranges;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas declared in ubo_ranges. */
        uint32_t num_uniform_ranges;
        /** Number of uniform areas used for indirect addressed loads. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;

        struct qreg line_x, point_x, point_y;
        /** boolean (~0 -> true) if the fragment has been discarded. */
        struct qreg discard;
        struct qreg payload_FRAG_Z;
        struct qreg payload_FRAG_W;

        uint8_t vattr_sizes[8];

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct vc4_varying_slot *input_slots;
        uint32_t num_input_slots;
        uint32_t input_slots_array_size;

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs.
         */
        struct vc4_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct vc4_key *key;
        struct vc4_fs_key *fs_key;
        struct vc4_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t num_outputs;
        uint32_t num_texture_samples;
        uint32_t output_position_index;
        uint32_t output_color_index;
        uint32_t output_point_size_index;
        uint32_t output_sample_mask_index;

        struct qreg undef;
        enum qstage stage;
        uint32_t num_temps;

        /* List of struct qblock making up this shader. */
        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        struct list_head qpu_inst_list;

        /* Pre-QPU-scheduled instruction containing the last THRSW */
        uint64_t *last_thrsw;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t num_inputs;

        /**
         * Number of inputs from num_inputs remaining to be queued to the read
         * FIFO in the VS/CS.
         */
        uint32_t num_inputs_remaining;

        /* Number of inputs currently in the read FIFO for the VS/CS */
        uint32_t num_inputs_in_fifo;

        /** Next offset in the VPM to read from in the VS/CS */
        uint32_t vpm_read_offset;

        uint32_t program_id;
        uint32_t variant_id;

        /* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
         * is used to hide texturing latency at the cost of limiting ourselves
         * to the bottom half of physical reg space.
         */
        bool fs_threaded;

        bool last_thrsw_at_top_level;

        bool failed;
};
550
/* Special nir_load_input intrinsic index for loading the current TLB
 * destination color.
 */
#define VC4_NIR_TLB_COLOR_READ_INPUT		2000000000

/* Special nir_store_output intrinsic index for the sample mask. */
#define VC4_NIR_MS_MASK_OUTPUT			2000000000

/* Compile-context and basic-block management. */
struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qblock *qir_new_block(struct vc4_compile *c);
void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *qir_entry_block(struct vc4_compile *c);
struct qblock *qir_exit_block(struct vc4_compile *c);

/* Instruction construction and emission. */
struct qinst *qir_inst(enum qop op, struct qreg dst,
                       struct qreg src0, struct qreg src1);
struct qinst *qir_inst4(enum qop op, struct qreg dst,
                        struct qreg a,
                        struct qreg b,
                        struct qreg c,
                        struct qreg d);
void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
struct qreg qir_uniform(struct vc4_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void qir_schedule_instructions(struct vc4_compile *c);
void qir_reorder_uniforms(struct vc4_compile *c);
void qir_emit_uniform_stream_resets(struct vc4_compile *c);

struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);

/* Analysis helpers over qinsts. */
struct qreg qir_get_temp(struct vc4_compile *c);
void qir_calculate_live_intervals(struct vc4_compile *c);
int qir_get_op_nsrc(enum qop qop);
bool qir_reg_equals(struct qreg a, struct qreg b);
bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
bool qir_is_mul(struct qinst *inst);
bool qir_is_raw_mov(struct qinst *inst);
bool qir_is_tex(struct qinst *inst);
bool qir_is_float_input(struct qinst *inst);
bool qir_depends_on_flags(struct qinst *inst);
bool qir_writes_r4(struct qinst *inst);
struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
uint8_t qir_channels_written(struct qinst *inst);

/* Debug dumping. */
void qir_dump(struct vc4_compile *c);
void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
const char *qir_get_stage_name(enum qstage stage);

void qir_validate(struct vc4_compile *c);

/* Optimization passes (bool returns indicate whether progress was made). */
void qir_optimize(struct vc4_compile *c);
bool qir_opt_algebraic(struct vc4_compile *c);
bool qir_opt_constant_folding(struct vc4_compile *c);
bool qir_opt_copy_propagation(struct vc4_compile *c);
bool qir_opt_dead_code(struct vc4_compile *c);
bool qir_opt_peephole_sf(struct vc4_compile *c);
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm(struct vc4_compile *c);

/* NIR lowering passes run before QIR generation. */
void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                          nir_ssa_def **srcs, int swiz);
void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
void qir_lower_uniforms(struct vc4_compile *c);

uint32_t qpu_schedule_instructions(struct vc4_compile *c);

void qir_SF(struct vc4_compile *c, struct qreg src);
622
623 static inline struct qreg
624 qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
625 {
626 return qir_uniform(c, QUNIFORM_CONSTANT, ui);
627 }
628
629 static inline struct qreg
630 qir_uniform_f(struct vc4_compile *c, float f)
631 {
632 return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
633 }
634
/* Generates qir_<name>() (allocates a temp dest) and qir_<name>_dest()
 * (writes a caller-provided dest) for a 0-source op.
 */
#define QIR_ALU0(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        c->undef, c->undef));            \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest)               \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest,             \
                                           c->undef, c->undef));         \
}

/* Same as QIR_ALU0, but for a 1-source op. */
#define QIR_ALU1(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        a, c->undef));                   \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a,          \
                                           c->undef));                   \
}

/* Same as QIR_ALU0, but for a 2-source op. */
#define QIR_ALU2(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b));    \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a, struct qreg b)                          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b));     \
}

/* Generates qir_<name>() for a 1-source op with no destination value. */
#define QIR_NODST_1(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, c->undef));                \
}

/* Generates qir_<name>() for a 2-source op with no destination value. */
#define QIR_NODST_2(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, b));                       \
}

/* Generates qir_<name>() returning the payload register's temp, lazily
 * emitting the payload-reading instruction at the top of the entry block
 * on first use and caching it in c->payload_<name>.
 */
#define QIR_PAYLOAD(name)                                                \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        struct qreg *payload = &c->payload_##name;                       \
        if (payload->file != QFILE_NULL)                                 \
                return *payload;                                         \
        *payload = qir_get_temp(c);                                      \
        struct qinst *inst = qir_inst(QOP_##name, *payload,              \
                                      c->undef, c->undef);               \
        struct qblock *entry = qir_entry_block(c);                       \
        list_add(&inst->link, &entry->instructions);                     \
        c->defs[payload->index] = inst;                                  \
        return *payload;                                                 \
}
708
/* Instantiate the typed emitter helpers (qir_FADD(), qir_FADD_dest(), ...)
 * for each op.
 */
QIR_ALU1(MOV)
QIR_ALU1(FMOV)
QIR_ALU1(MMOV)
QIR_ALU2(FADD)
QIR_ALU2(FSUB)
QIR_ALU2(FMUL)
QIR_ALU2(V8MULD)
QIR_ALU2(V8MIN)
QIR_ALU2(V8MAX)
QIR_ALU2(V8ADDS)
QIR_ALU2(V8SUBS)
QIR_ALU2(MUL24)
QIR_ALU2(FMIN)
QIR_ALU2(FMAX)
QIR_ALU2(FMINABS)
QIR_ALU2(FMAXABS)
QIR_ALU1(FTOI)
QIR_ALU1(ITOF)

QIR_ALU2(ADD)
QIR_ALU2(SUB)
QIR_ALU2(SHL)
QIR_ALU2(SHR)
QIR_ALU2(ASR)
QIR_ALU2(MIN)
QIR_ALU2(MAX)
QIR_ALU2(AND)
QIR_ALU2(OR)
QIR_ALU2(XOR)
QIR_ALU1(NOT)

QIR_ALU1(RCP)
QIR_ALU1(RSQ)
QIR_ALU1(EXP2)
QIR_ALU1(LOG2)
QIR_ALU1(VARY_ADD_C)
QIR_NODST_2(TEX_S)
QIR_NODST_2(TEX_T)
QIR_NODST_2(TEX_R)
QIR_NODST_2(TEX_B)
QIR_NODST_2(TEX_DIRECT)
QIR_PAYLOAD(FRAG_Z)
QIR_PAYLOAD(FRAG_W)
QIR_ALU0(TEX_RESULT)
QIR_ALU0(TLB_COLOR_READ)
QIR_NODST_1(MS_MASK)
755
756 static inline struct qreg
757 qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
758 {
759 struct qreg t = qir_get_temp(c);
760 qir_MOV_dest(c, t, src1);
761 qir_MOV_dest(c, t, src0)->cond = cond;
762 return t;
763 }
764
765 static inline struct qreg
766 qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
767 {
768 struct qreg t = qir_FMOV(c, src);
769 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
770 return t;
771 }
772
773 static inline struct qreg
774 qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
775 {
776 struct qreg t = qir_MOV(c, src);
777 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
778 return t;
779 }
780
781 static inline struct qreg
782 qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
783 {
784 struct qreg t = qir_FMOV(c, src);
785 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
786 return t;
787 }
788
789 static inline struct qreg
790 qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
791 {
792 struct qreg t = qir_MOV(c, src);
793 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
794 return t;
795 }
796
797 static inline void
798 qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
799 {
800 assert(!dest.pack);
801 dest.pack = QPU_PACK_MUL_8A + chan;
802 qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
803 }
804
805 static inline struct qreg
806 qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
807 {
808 struct qreg dest = qir_MMOV(c, val);
809 c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
810 return dest;
811 }
812
813 static inline struct qreg
814 qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
815 {
816 return qir_EXP2(c, qir_FMUL(c,
817 y,
818 qir_LOG2(c, x)));
819 }
820
821 static inline void
822 qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
823 {
824 qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
825 }
826
827 static inline struct qreg
828 qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
829 {
830 return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
831 qir_reg(QFILE_LOAD_IMM, val), c->undef));
832 }
833
834 static inline struct qreg
835 qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
836 {
837 return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
838 qir_reg(QFILE_LOAD_IMM, val),
839 c->undef));
840 }
841
842 static inline struct qreg
843 qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
844 {
845 return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
846 qir_reg(QFILE_LOAD_IMM, val),
847 c->undef));
848 }
849
850 /** Shifts the multiply output to the right by rot channels */
851 static inline struct qreg
852 qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
853 {
854 return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
855 val,
856 qir_reg(QFILE_LOAD_IMM,
857 QPU_SMALL_IMM_MUL_ROT + rot)));
858 }
859
860 static inline struct qinst *
861 qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
862 struct qreg dest, struct qreg src)
863 {
864 struct qinst *mov = qir_MOV_dest(c, dest, src);
865 mov->cond = cond;
866 return mov;
867 }
868
869 static inline struct qinst *
870 qir_BRANCH(struct vc4_compile *c, uint8_t cond)
871 {
872 struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
873 inst->cond = cond;
874 qir_emit_nondef(c, inst);
875 return inst;
876 }
877
/* Iterates over every basic block of the shader, in emission order. */
#define qir_for_each_block(block, c)                                    \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

/* Same, in reverse order. */
#define qir_for_each_block_rev(block, c)                                \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define qir_for_each_successor(succ, block)                             \
        for (struct qblock *succ = block->successors[0];                \
             succ != NULL;                                              \
             succ = (succ == block->successors[1] ? NULL :              \
                     block->successors[1]))

/* Iterates over the instructions of one block. */
#define qir_for_each_inst(inst, block)                                  \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_rev(inst, block)                              \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

/* Safe against removal of the current instruction. */
#define qir_for_each_inst_safe(inst, block)                             \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

/* Iterates over every instruction of the shader, block by block. */
#define qir_for_each_inst_inorder(inst, c)                              \
        qir_for_each_block(_block, c)                                   \
                qir_for_each_inst(inst, _block)
903
904 #endif /* VC4_QIR_H */