2 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
24 * Rob Clark <robclark@freedesktop.org>
27 #ifndef IR3_CONTEXT_H_
28 #define IR3_CONTEXT_H_
30 #include "ir3_compiler.h"
/* for conditionally setting boolean flag(s): returns 'val' if 'cond' is
 * true, else 0, so results can be OR'd together.  (Parameter renamed from
 * 'bool' to 'cond' to avoid clashing with the <stdbool.h> 'bool' name.)
 */
#define COND(cond, val) ((cond) ? (val) : 0)
/* Debug trace helper: prefixes the message with the current function name
 * and line number.  Uses C99 __func__ rather than the non-standard
 * __FUNCTION__ alias.  (##__VA_ARGS__ is a GNU extension, already relied
 * on by this codebase.)
 */
#define DBG(fmt, ...) \
		do { debug_printf("%s:%d: "fmt "\n", \
				__func__, __LINE__, ##__VA_ARGS__); } while (0)
42 * The context for compilation of a single shader.
45 struct ir3_compiler
*compiler
;
46 const struct ir3_context_funcs
*funcs
;
50 struct nir_instr
*cur_instr
; /* current instruction, just for debug */
53 struct ir3_shader_variant
*so
;
55 struct ir3_block
*block
; /* the current block */
56 struct ir3_block
*in_block
; /* block created for shader inputs */
58 nir_function_impl
*impl
;
60 /* For fragment shaders, varyings are not actual shader inputs,
61 * instead the hw passes a ij coord which is used with
64 * But NIR doesn't know that, it still declares varyings as
65 * inputs. So we do all the input tracking normally and fix
66 * things up after compile_instructions()
68 struct ir3_instruction
*ij_pixel
, *ij_sample
, *ij_centroid
, *ij_size
;
70 /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
71 struct ir3_instruction
*frag_face
, *frag_coord
;
73 /* For vertex shaders, keep track of the system values sources */
74 struct ir3_instruction
*vertex_id
, *basevertex
, *instance_id
;
76 /* For fragment shaders: */
77 struct ir3_instruction
*samp_id
, *samp_mask_in
;
79 /* Compute shader inputs: */
80 struct ir3_instruction
*local_invocation_id
, *work_group_id
;
82 /* mapping from nir_register to defining instruction: */
83 struct hash_table
*def_ht
;
87 /* Tracking for max level of flowcontrol (branchstack) needed
90 unsigned stack
, max_stack
;
92 /* a common pattern for indirect addressing is to request the
93 * same address register multiple times. To avoid generating
94 * duplicate instruction sequences (which our backend does not
95 * try to clean up, since that should be done as the NIR stage)
96 * we cache the address value generated for a given src value:
98 * Note that we have to cache these per alignment, since same
99 * src used for an array of vec1 cannot be also used for an
102 struct hash_table
*addr_ht
[4];
104 /* last dst array, for indirect we need to insert a var-store.
106 struct ir3_instruction
**last_dst
;
109 /* maps nir_block to ir3_block, mostly for the purposes of
110 * figuring out the blocks successors
112 struct hash_table
*block_ht
;
114 /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
117 unsigned samples
; /* bitmask of x,y sample shifts */
119 unsigned max_texture_index
;
121 /* set if we encounter something we can't handle yet, so we
122 * can bail cleanly and fallback to TGSI compiler f/e
127 struct ir3_context_funcs
{
128 void (*emit_intrinsic_load_ssbo
)(struct ir3_context
*ctx
, nir_intrinsic_instr
*intr
,
129 struct ir3_instruction
**dst
);
130 void (*emit_intrinsic_store_ssbo
)(struct ir3_context
*ctx
, nir_intrinsic_instr
*intr
);
131 struct ir3_instruction
* (*emit_intrinsic_atomic_ssbo
)(struct ir3_context
*ctx
, nir_intrinsic_instr
*intr
);
132 void (*emit_intrinsic_store_image
)(struct ir3_context
*ctx
, nir_intrinsic_instr
*intr
);
133 struct ir3_instruction
* (*emit_intrinsic_atomic_image
)(struct ir3_context
*ctx
, nir_intrinsic_instr
*intr
);
/* Generation-specific implementations of the ir3_context_funcs callbacks: */
extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;

/* Allocate and initialize a compile context for the given shader variant. */
struct ir3_context * ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so);
/* Free a context created with ir3_context_init(). */
void ir3_context_free(struct ir3_context *ctx);
143 /* gpu pointer size in units of 32bit registers/slots */
145 unsigned ir3_pointer_size(struct ir3_context
*ctx
)
147 return (ctx
->compiler
->gpu_id
>= 500) ? 2 : 1;
/* Get/create the array of n per-component instructions that will receive
 * the value written to the given SSA def / dest: */
struct ir3_instruction ** ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n);
struct ir3_instruction ** ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n);
/* Resolve a nir_src to its per-component defining instructions: */
struct ir3_instruction * const * ir3_get_src(struct ir3_context *ctx, nir_src *src);
/* Finish a dst obtained from ir3_get_dst()/ir3_get_dst_ssa(): */
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
/* Gather 'arrsz' scalar components into a single instruction: */
struct ir3_instruction * ir3_create_collect(struct ir3_context *ctx,
		struct ir3_instruction *const *arr, unsigned arrsz);
/* Split 'src' into n scalar components, starting at 'base', writing the
 * resulting instructions into dst[]: */
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n);

/* Report a fatal compile error; does not return. */
NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format, ...);
/* Assert that bails out through ir3_context_error() (which does not
 * return) instead of aborting the whole process.
 */
#define compile_assert(ctx, cond) do { \
		if (!(cond)) ir3_context_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
/* Get (possibly cached, per 'align' — see addr_ht) address-register value
 * for indirect addressing from 'src': */
struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
		struct ir3_instruction *src, int align);
/* Get predicate register value derived from 'src': */
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
		struct ir3_instruction *src);

/* Handling of (indirectly addressable) nir_register arrays: */
void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
/* 'address' is the (optional) indirect offset for the load/store: */
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
		struct ir3_array *arr, int n, struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address);
177 static inline type_t
utype_for_size(unsigned bit_size
)
180 case 32: return TYPE_U32
;
181 case 16: return TYPE_U16
;
182 case 8: return TYPE_U8
;
183 default: unreachable("bad bitsize"); return ~0;
187 static inline type_t
utype_src(nir_src src
)
188 { return utype_for_size(nir_src_bit_size(src
)); }
190 static inline type_t
utype_dst(nir_dest dst
)
191 { return utype_for_size(nir_dest_bit_size(dst
)); }
193 #endif /* IR3_CONTEXT_H_ */