vc4: Avoid generating a custom shader per level in glGenerateMipmaps().
[mesa.git] / src / gallium / drivers / vc4 / vc4_qir.h
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef VC4_QIR_H
25 #define VC4_QIR_H
26
27 #include <assert.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <stdbool.h>
31 #include <stdint.h>
32 #include <string.h>
33
34 #include "util/macros.h"
35 #include "compiler/nir/nir.h"
36 #include "util/list.h"
37 #include "util/u_math.h"
38
39 #include "vc4_screen.h"
40 #include "vc4_qpu_defines.h"
41 #include "vc4_qpu.h"
42 #include "kernel/vc4_packet.h"
43 #include "pipe/p_state.h"
44
45 struct nir_builder;
46
/** Register files that a struct qreg's (file, index) pair can refer to. */
enum qfile {
        QFILE_NULL,
        QFILE_TEMP,
        QFILE_VARY,
        QFILE_UNIF,
        QFILE_VPM,
        QFILE_TLB_COLOR_WRITE,
        QFILE_TLB_COLOR_WRITE_MS,
        QFILE_TLB_Z_WRITE,
        QFILE_TLB_STENCIL_SETUP,

        /* Payload registers that aren't in the physical register file, so we
         * can just use the corresponding qpu_reg at qpu_emit time.
         */
        QFILE_FRAG_X,
        QFILE_FRAG_Y,
        QFILE_FRAG_REV_FLAG,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};
77
/** A QIR register reference: a register file plus an index within it. */
struct qreg {
        enum qfile file;
        uint32_t index;
        int pack;       /* QPU pack/unpack mode applied to this operand (0 = none) */
};
83
84 static inline struct qreg qir_reg(enum qfile file, uint32_t index)
85 {
86 return (struct qreg){file, index};
87 }
88
/** QIR opcodes, most of which correspond to QPU ALU or signal operations. */
enum qop {
        QOP_UNDEF,
        QOP_MOV,
        QOP_FMOV,
        QOP_MMOV,
        QOP_FADD,
        QOP_FSUB,
        QOP_FMUL,
        QOP_V8MULD,
        QOP_V8MIN,
        QOP_V8MAX,
        QOP_V8ADDS,
        QOP_V8SUBS,
        QOP_MUL24,
        QOP_FMIN,
        QOP_FMAX,
        QOP_FMINABS,
        QOP_FMAXABS,
        QOP_ADD,
        QOP_SUB,
        QOP_SHL,
        QOP_SHR,
        QOP_ASR,
        QOP_MIN,
        QOP_MAX,
        QOP_AND,
        QOP_OR,
        QOP_XOR,
        QOP_NOT,

        QOP_FTOI,
        QOP_ITOF,
        QOP_RCP,
        QOP_RSQ,
        QOP_EXP2,
        QOP_LOG2,
        QOP_VW_SETUP,
        QOP_VR_SETUP,
        QOP_TLB_COLOR_READ,
        QOP_MS_MASK,
        QOP_VARY_ADD_C,

        QOP_FRAG_Z,
        QOP_FRAG_W,

        /** Texture x coordinate parameter write */
        QOP_TEX_S,
        /** Texture y coordinate parameter write */
        QOP_TEX_T,
        /** Texture border color parameter or cube map z coordinate write */
        QOP_TEX_R,
        /** Texture LOD bias parameter write */
        QOP_TEX_B,

        /**
         * Texture-unit 4-byte read with address provided direct in S
         * coordinate.
         *
         * The first operand is the offset from the start of the UBO, and the
         * second is the uniform that has the UBO's base pointer.
         */
        QOP_TEX_DIRECT,

        /**
         * Signal of texture read being necessary and then reading r4 into
         * the destination
         */
        QOP_TEX_RESULT,

        QOP_LOAD_IMM,

        /* Jumps to block->successor[0] if the qinst->cond (as a
         * QPU_COND_BRANCH_*) passes, or block->successor[1] if not.  Note
         * that block->successor[1] may be unset if the condition is ALWAYS.
         */
        QOP_BRANCH,

        /* Emits an ADD from src[0] to src[1], where src[0] must be a
         * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
         * required by the kernel as part of its branch validation.
         */
        QOP_UNIFORMS_RESET,
};
172
/** One encoded 64-bit QPU instruction held on a list for scheduling/emit. */
struct queued_qpu_inst {
        struct list_head link;
        uint64_t inst;
};
177
/** A single QIR instruction, linked into its block's instruction list. */
struct qinst {
        struct list_head link;

        enum qop op;
        struct qreg dst;
        struct qreg *src;       /* source operands; count given by qir_get_op_nsrc(op) */
        bool sf;                /* whether this instruction updates the condition flags */
        uint8_t cond;           /* QPU_COND_* predicating execution (or the branch condition for QOP_BRANCH) */
};
187
/** Shader stages a QIR compile can target. */
enum qstage {
        /**
         * Coordinate shader, runs during binning, before the VS, and just
         * outputs position.
         */
        QSTAGE_COORD,
        QSTAGE_VERT,
        QSTAGE_FRAG,
};
197
/**
 * Describes how each entry of the shader's uniform stream is filled in at
 * draw time from current GL state.
 */
enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to relative to the viewport
         * center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0,

        /**
         * A reference to a texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture width, height, filters, and wrap modes.  It will be
         * found as a parameter to the second QOP_TEX_[STRB] instruction in a
         * sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /** A reference to a texture config parameter 2 cubemap stride uniform */
        QUNIFORM_TEXTURE_CONFIG_P2,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_MSAA_ADDR,

        QUNIFORM_UBO_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        QUNIFORM_TEXTURE_BORDER_COLOR,

        QUNIFORM_BLEND_CONST_COLOR_X,
        QUNIFORM_BLEND_CONST_COLOR_Y,
        QUNIFORM_BLEND_CONST_COLOR_Z,
        QUNIFORM_BLEND_CONST_COLOR_W,
        QUNIFORM_BLEND_CONST_COLOR_RGBA,
        QUNIFORM_BLEND_CONST_COLOR_AAAA,

        QUNIFORM_STENCIL,

        QUNIFORM_ALPHA_REF,
        QUNIFORM_SAMPLE_MASK,

        /* Placeholder uniform that will be updated by the kernel when used by
         * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
         */
        QUNIFORM_UNIFORMS_ADDRESS,
};
277
/** A VARYING_SLOT_* plus which component of it a FS varying read uses. */
struct vc4_varying_slot {
        uint8_t slot;
        uint8_t swizzle;
};
282
/**
 * A contiguous range of gallium uniform storage that gets uploaded into the
 * shader's UBO for indirect access.
 */
struct vc4_compiler_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;

        /**
         * Set if this range is used by the shader for indirect uniforms
         * access.
         */
        bool used;
};
307
/**
 * State key common to FS and VS variants: the shader itself plus the
 * texture/sampler state that gets baked into the compiled code.
 */
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                uint8_t swizzle[4];
                /* Either sampler state (normal sampling) or the MSAA
                 * dimensions (MSAA resolve lowering); which one applies
                 * depends on the texture setup for this unit.
                 */
                union {
                        struct {
                                unsigned compare_mode:1;
                                unsigned compare_func:3;
                                unsigned wrap_s:3;
                                unsigned wrap_t:3;
                                bool force_first_level:1;
                        };
                        struct {
                                uint16_t msaa_width, msaa_height;
                        };
                };
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
        uint8_t ucp_enables;    /* bitmask of enabled user clip planes */
};
328
/** Compile-time state key identifying a fragment shader variant. */
struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        bool msaa;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        /* Blending is lowered into the FS on vc4, so it's part of the key. */
        struct pipe_rt_blend_state blend;
};
351
/** Compile-time state key identifying a vertex/coordinate shader variant. */
struct vc4_vs_key {
        struct vc4_key base;

        /**
         * This is a proxy for the array of FS input semantics, which is
         * larger than we would want to put in the key.
         */
        uint64_t compiled_fs_id;

        enum pipe_format attr_formats[8];
        bool is_coord;                  /* compiling the coordinate-shader (binning) variant */
        bool per_vertex_point_size;
        bool clamp_color;
};
366
/** A basic block of QIR instructions. */
struct qblock {
        struct list_head link;

        struct list_head instructions;
        struct list_head qpu_inst_list;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by vc4_qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * vc4_qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** @{ used by vc4_qir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};
398
/** Per-compile state for translating one shader variant from NIR to QPU. */
struct vc4_compile {
        struct vc4_context *vc4;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[VC4_MAX_SAMPLES];
        struct qreg sample_colors[VC4_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        struct vc4_compiler_ubo_range *ubo_ranges;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas declared in ubo_ranges. */
        uint32_t num_uniform_ranges;
        /** Number of uniform areas used for indirect addressed loads. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;

        struct qreg line_x, point_x, point_y;
        struct qreg discard;
        struct qreg payload_FRAG_Z;     /* lazily created by qir_FRAG_Z() */
        struct qreg payload_FRAG_W;     /* lazily created by qir_FRAG_W() */

        uint8_t vattr_sizes[8];

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct vc4_varying_slot *input_slots;
        uint32_t num_input_slots;
        uint32_t input_slots_array_size;

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs.
         */
        struct vc4_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct vc4_key *key;
        struct vc4_fs_key *fs_key;
        struct vc4_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;

        /* Uniform stream: uniform_contents[i] tells how uniform_data[i] is
         * interpreted at draw time (see enum quniform_contents).
         */
        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t num_outputs;
        uint32_t num_texture_samples;
        uint32_t output_position_index;
        uint32_t output_color_index;
        uint32_t output_point_size_index;
        uint32_t output_sample_mask_index;

        struct qreg undef;
        enum qstage stage;
        uint32_t num_temps;

        /* Control flow graph: list of struct qblock. */
        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        struct list_head qpu_inst_list;

        /* Final scheduled QPU instruction stream. */
        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t num_inputs;

        uint32_t program_id;
        uint32_t variant_id;
};
505
/* Special nir_load_input intrinsic index for loading the current TLB
 * destination color.
 */
#define VC4_NIR_TLB_COLOR_READ_INPUT		2000000000

/* Special output index for writing the sample mask.  NOTE(review): shares
 * the value of VC4_NIR_TLB_COLOR_READ_INPUT, but inputs and outputs are
 * separate namespaces, so the values don't collide.
 */
#define VC4_NIR_MS_MASK_OUTPUT			2000000000

/* Special offset for nir_load_uniform values to get a QUNIFORM_*
 * state-dependent value.
 */
#define VC4_NIR_STATE_UNIFORM_OFFSET	1000000000
517
/* Compile lifecycle and CFG construction. */
struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qblock *qir_new_block(struct vc4_compile *c);
void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *qir_entry_block(struct vc4_compile *c);
struct qblock *qir_exit_block(struct vc4_compile *c);

/* Instruction construction and emission. */
struct qinst *qir_inst(enum qop op, struct qreg dst,
                       struct qreg src0, struct qreg src1);
struct qinst *qir_inst4(enum qop op, struct qreg dst,
                        struct qreg a,
                        struct qreg b,
                        struct qreg c,
                        struct qreg d);
void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
struct qreg qir_uniform(struct vc4_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void qir_schedule_instructions(struct vc4_compile *c);
void qir_reorder_uniforms(struct vc4_compile *c);
void qir_emit_uniform_stream_resets(struct vc4_compile *c);

struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);

/* Instruction/register queries. */
struct qreg qir_get_temp(struct vc4_compile *c);
void qir_calculate_live_intervals(struct vc4_compile *c);
int qir_get_op_nsrc(enum qop qop);
bool qir_reg_equals(struct qreg a, struct qreg b);
bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
bool qir_is_mul(struct qinst *inst);
bool qir_is_raw_mov(struct qinst *inst);
bool qir_is_tex(struct qinst *inst);
bool qir_is_float_input(struct qinst *inst);
bool qir_depends_on_flags(struct qinst *inst);
bool qir_writes_r4(struct qinst *inst);
struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
uint8_t qir_channels_written(struct qinst *inst);

/* Debug dumping. */
void qir_dump(struct vc4_compile *c);
void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
const char *qir_get_stage_name(enum qstage stage);

void qir_validate(struct vc4_compile *c);

/* Optimization passes; the bool-returning passes report whether they made
 * progress.
 */
void qir_optimize(struct vc4_compile *c);
bool qir_opt_algebraic(struct vc4_compile *c);
bool qir_opt_constant_folding(struct vc4_compile *c);
bool qir_opt_copy_propagation(struct vc4_compile *c);
bool qir_opt_dead_code(struct vc4_compile *c);
bool qir_opt_peephole_sf(struct vc4_compile *c);
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm(struct vc4_compile *c);

/* NIR lowering passes run before QIR translation. */
void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents);
nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                          nir_ssa_def **srcs, int swiz);
void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
void qir_lower_uniforms(struct vc4_compile *c);

uint32_t qpu_schedule_instructions(struct vc4_compile *c);

void qir_SF(struct vc4_compile *c, struct qreg src);
584
585 static inline struct qreg
586 qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
587 {
588 return qir_uniform(c, QUNIFORM_CONSTANT, ui);
589 }
590
591 static inline struct qreg
592 qir_uniform_f(struct vc4_compile *c, float f)
593 {
594 return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
595 }
596
/* Defines qir_NAME() (result goes to a fresh temp via qir_emit_def()) and
 * qir_NAME_dest() (result goes to a caller-provided dest via
 * qir_emit_nondef()) for a zero-source op.
 */
#define QIR_ALU0(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        c->undef, c->undef));            \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest)               \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest,             \
                                           c->undef, c->undef));         \
}

/* Same as QIR_ALU0, but for a one-source op. */
#define QIR_ALU1(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        a, c->undef));                   \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a,          \
                                           c->undef));                   \
}

/* Same as QIR_ALU0, but for a two-source op. */
#define QIR_ALU2(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b));    \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a, struct qreg b)                          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b));     \
}

/* Defines qir_NAME() for a one-source op that produces no SSA result. */
#define QIR_NODST_1(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, c->undef));                \
}

/* Defines qir_NAME() for a two-source op that produces no SSA result. */
#define QIR_NODST_2(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, b));                       \
}

/* Defines qir_NAME() returning the payload temp for c->payload_NAME,
 * creating it on first use by inserting the payload-reading instruction at
 * the head of the entry block.
 */
#define QIR_PAYLOAD(name)                                                \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        struct qreg *payload = &c->payload_##name;                       \
        if (payload->file != QFILE_NULL)                                 \
                return *payload;                                         \
        *payload = qir_get_temp(c);                                      \
        struct qinst *inst = qir_inst(QOP_##name, *payload,              \
                                      c->undef, c->undef);               \
        struct qblock *entry = qir_entry_block(c);                       \
        list_add(&inst->link, &entry->instructions);                     \
        c->defs[payload->index] = inst;                                  \
        return *payload;                                                 \
}
670
/* Moves and float ALU ops. */
QIR_ALU1(MOV)
QIR_ALU1(FMOV)
QIR_ALU1(MMOV)
QIR_ALU2(FADD)
QIR_ALU2(FSUB)
QIR_ALU2(FMUL)
QIR_ALU2(V8MULD)
QIR_ALU2(V8MIN)
QIR_ALU2(V8MAX)
QIR_ALU2(V8ADDS)
QIR_ALU2(V8SUBS)
QIR_ALU2(MUL24)
QIR_ALU2(FMIN)
QIR_ALU2(FMAX)
QIR_ALU2(FMINABS)
QIR_ALU2(FMAXABS)
QIR_ALU1(FTOI)
QIR_ALU1(ITOF)

/* Integer ALU ops. */
QIR_ALU2(ADD)
QIR_ALU2(SUB)
QIR_ALU2(SHL)
QIR_ALU2(SHR)
QIR_ALU2(ASR)
QIR_ALU2(MIN)
QIR_ALU2(MAX)
QIR_ALU2(AND)
QIR_ALU2(OR)
QIR_ALU2(XOR)
QIR_ALU1(NOT)

/* SFU and varying ops. */
QIR_ALU1(RCP)
QIR_ALU1(RSQ)
QIR_ALU1(EXP2)
QIR_ALU1(LOG2)
QIR_ALU1(VARY_ADD_C)

/* Texture parameter writes and reads, payload fetches, TLB access. */
QIR_NODST_2(TEX_S)
QIR_NODST_2(TEX_T)
QIR_NODST_2(TEX_R)
QIR_NODST_2(TEX_B)
QIR_NODST_2(TEX_DIRECT)
QIR_PAYLOAD(FRAG_Z)
QIR_PAYLOAD(FRAG_W)
QIR_ALU0(TEX_RESULT)
QIR_ALU0(TLB_COLOR_READ)
QIR_NODST_1(MS_MASK)
717
718 static inline struct qreg
719 qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
720 {
721 struct qreg t = qir_get_temp(c);
722 struct qinst *a = qir_MOV_dest(c, t, src0);
723 struct qinst *b = qir_MOV_dest(c, t, src1);
724 a->cond = cond;
725 b->cond = qpu_cond_complement(cond);
726 return t;
727 }
728
729 static inline struct qreg
730 qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
731 {
732 struct qreg t = qir_FMOV(c, src);
733 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
734 return t;
735 }
736
737 static inline struct qreg
738 qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
739 {
740 struct qreg t = qir_MOV(c, src);
741 c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
742 return t;
743 }
744
745 static inline struct qreg
746 qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
747 {
748 struct qreg t = qir_FMOV(c, src);
749 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
750 return t;
751 }
752
753 static inline struct qreg
754 qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
755 {
756 struct qreg t = qir_MOV(c, src);
757 c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
758 return t;
759 }
760
761 static inline void
762 qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
763 {
764 assert(!dest.pack);
765 dest.pack = QPU_PACK_MUL_8A + chan;
766 qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
767 }
768
769 static inline struct qreg
770 qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
771 {
772 struct qreg dest = qir_MMOV(c, val);
773 c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
774 return dest;
775 }
776
777 static inline struct qreg
778 qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
779 {
780 return qir_EXP2(c, qir_FMUL(c,
781 y,
782 qir_LOG2(c, x)));
783 }
784
785 static inline void
786 qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
787 {
788 qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
789 }
790
791 static inline struct qreg
792 qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
793 {
794 return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
795 qir_reg(QFILE_LOAD_IMM, val), c->undef));
796 }
797
798 static inline void
799 qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
800 struct qreg dest, struct qreg src)
801 {
802 qir_MOV_dest(c, dest, src)->cond = cond;
803 }
804
805 static inline struct qinst *
806 qir_BRANCH(struct vc4_compile *c, uint8_t cond)
807 {
808 struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
809 inst->cond = cond;
810 qir_emit_nondef(c, inst);
811 return inst;
812 }
813
/* Iterates over the compile's basic blocks in CFG list order. */
#define qir_for_each_block(block, c)                                    \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

/* Same, but in reverse order. */
#define qir_for_each_block_rev(block, c)                                \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define qir_for_each_successor(succ, block)                             \
        for (struct qblock *succ = block->successors[0];                \
             succ != NULL;                                              \
             succ = (succ == block->successors[1] ? NULL :              \
                     block->successors[1]))

/* Iterates over a block's instructions in order. */
#define qir_for_each_inst(inst, block)                                  \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

/* Same, but in reverse order. */
#define qir_for_each_inst_rev(inst, block)                              \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

/* Iteration that is safe against removal of the current instruction. */
#define qir_for_each_inst_safe(inst, block)                             \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

/* Iterates over every instruction of every block, in program order. */
#define qir_for_each_inst_inorder(inst, c)                              \
        qir_for_each_block(_block, c)                                   \
                qir_for_each_inst(inst, _block)
839
840 #endif /* VC4_QIR_H */