freedreno/ir3: re-work shader inputs/outputs
src/freedreno/ir3/ir3_context.h
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
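
/* Illustrative use: OR-ing a flag into a bitmask only when a condition
 * holds.  A sketch, not a quote from the compiler; IR3_REG_HALF is
 * assumed to come from ir3.h:
 *
 *    flags |= COND(is_half, IR3_REG_HALF);
 */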

#define DBG(fmt, ...) \
		do { debug_printf("%s:%d: " fmt "\n", \
				__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
	struct ir3_compiler *compiler;
	const struct ir3_context_funcs *funcs;

	struct nir_shader *s;

	struct nir_instr *cur_instr;  /* current instruction, just for debug */

	struct ir3 *ir;
	struct ir3_shader_variant *so;

	/* Tables of scalar inputs/outputs.  Because of the way varying packing
	 * works, we could have inputs w/ fractional location, which is a bit
	 * awkward to deal with unless we keep track of the split scalar in/
	 * out components.
	 *
	 * These tables *only* contain inputs/outputs that are touched by
	 * load_*input and store_output.
	 */
	unsigned ninputs, noutputs;
	struct ir3_instruction **inputs;
	struct ir3_instruction **outputs;
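
	/* For example, a scalar input at a given (slot, component) pair might
	 * be looked up roughly like this (a sketch; the real indexing lives in
	 * the compiler proper, and whether these exact NIR accessors are used
	 * there is an assumption):
	 *
	 *    unsigned n = nir_intrinsic_base(intr) * 4 +
	 *          nir_intrinsic_component(intr);
	 *    compile_assert(ctx, n < ctx->ninputs);
	 *    struct ir3_instruction *in = ctx->inputs[n];
	 */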

	struct ir3_block *block;      /* the current block */
	struct ir3_block *in_block;   /* block created for shader inputs */

	nir_function_impl *impl;

	/* For fragment shaders, varyings are not actual shader inputs;
	 * instead the hw passes an ij coord which is used with bary.f.
	 *
	 * But NIR doesn't know that; it still declares varyings as inputs.
	 * So we do all the input tracking normally and fix things up after
	 * compile_instructions().
	 */
	struct ir3_instruction *ij_pixel, *ij_sample, *ij_centroid, *ij_size;
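
	/* So a fragment-shader varying load ends up as a bary.f fed by one of
	 * the ij coords above, along the lines of (a sketch, assuming the
	 * generated ir3_BARY_F() builder and create_immed() from ir3.h, with
	 * `n` a hypothetical input location):
	 *
	 *    struct ir3_instruction *inloc = create_immed(ctx->block, n);
	 *    struct ir3_instruction *bary =
	 *          ir3_BARY_F(ctx->block, inloc, 0, ctx->ij_pixel, 0);
	 */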

	/* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
	struct ir3_instruction *frag_face, *frag_coord;

	/* For vertex shaders, keep track of the system value sources: */
	struct ir3_instruction *vertex_id, *basevertex, *instance_id;

	/* For fragment shaders: */
	struct ir3_instruction *samp_id, *samp_mask_in;

	/* For geometry shaders: */
	struct ir3_instruction *primitive_id;
	struct ir3_instruction *gs_header;

	/* For tessellation shaders: */
	struct ir3_instruction *patch_vertices_in;
	struct ir3_instruction *tcs_header;
	struct ir3_instruction *tess_coord;

	/* Compute shader inputs: */
	struct ir3_instruction *local_invocation_id, *work_group_id;

	/* mapping from nir_register to defining instruction: */
	struct hash_table *def_ht;

	unsigned num_arrays;

	/* Tracking for max level of flow control (branchstack) needed
	 * by a5xx+:
	 */
	unsigned stack, max_stack;
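
	/* Flow-control emit paths are expected to bump these counters on the
	 * way in and drop `stack` on the way out, roughly (a sketch of the
	 * pattern, assuming MAX2() from util/macros.h):
	 *
	 *    ctx->stack++;
	 *    ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
	 *    ... emit the nested block(s) ...
	 *    ctx->stack--;
	 */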

	/* A common pattern for indirect addressing is to request the
	 * same address register multiple times.  To avoid generating
	 * duplicate instruction sequences (which our backend does not
	 * try to clean up, since that should be done at the NIR stage)
	 * we cache the address value generated for a given src value.
	 *
	 * Note that we have to cache these per alignment, since the same
	 * src used for an array of vec1 cannot also be used for an
	 * array of vec4.
	 */
	struct hash_table *addr_ht[4];
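
	/* The cache lookup in ir3_get_addr() is keyed on the src instruction,
	 * with one table per alignment, along the lines of (a sketch, assuming
	 * _mesa_hash_table_search() from util/hash_table.h and align in 1..4):
	 *
	 *    struct hash_table *ht = ctx->addr_ht[align - 1];
	 *    struct hash_entry *entry = _mesa_hash_table_search(ht, src);
	 *    if (entry)
	 *          return entry->data;
	 */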

	/* last dst array; for indirect writes we need to insert a var-store.
	 */
	struct ir3_instruction **last_dst;
	unsigned last_dst_n;

	/* maps nir_block to ir3_block, mostly for the purposes of
	 * figuring out the block's successors
	 */
	struct hash_table *block_ht;

	/* on a4xx, bitmask of samplers which need astc+srgb workaround: */
	unsigned astc_srgb;

	unsigned samples;             /* bitmask of x,y sample shifts */

	unsigned max_texture_index;

	/* set if we encounter something we can't handle yet, so we
	 * can bail cleanly and fall back to the TGSI compiler f/e
	 */
	bool error;
};

struct ir3_context_funcs {
	void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr,
			struct ir3_instruction **dst);
	void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	void (*emit_intrinsic_store_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
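
/* Callers dispatch through the per-generation table rather than calling a
 * hw-specific implementation directly, e.g. (illustrative):
 *
 *    ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
 */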

struct ir3_context * ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);
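
/* Typical lifecycle (a sketch of how a driver front-end would use this,
 * assuming a variant `so` whose shader is already in NIR form):
 *
 *    struct ir3_context *ctx = ir3_context_init(compiler, so);
 *    ... walk ctx->s, emitting ir3 into ctx->ir ...
 *    ir3_context_free(ctx);
 */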

struct ir3_instruction ** ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n);
struct ir3_instruction ** ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n);
struct ir3_instruction * const * ir3_get_src(struct ir3_context *ctx, nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
struct ir3_instruction * ir3_create_collect(struct ir3_context *ctx,
		struct ir3_instruction *const *arr, unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n);
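
/* A typical intrinsic emit pairs these up: fetch the dst/src tables, write
 * one instruction per scalar component, then flush with ir3_put_dst().  A
 * sketch of a two-component move (illustrative only; ir3_MOV() is assumed
 * from ir3.h):
 *
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &intr->dest, 2);
 *    struct ir3_instruction * const *src = ir3_get_src(ctx, &intr->src[0]);
 *    dst[0] = ir3_MOV(ctx->block, src[0], TYPE_U32);
 *    dst[1] = ir3_MOV(ctx->block, src[1], TYPE_U32);
 *    ir3_put_dst(ctx, &intr->dest);
 */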

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format, ...);

#define compile_assert(ctx, cond) do { \
		if (!(cond)) ir3_context_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
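
/* Used to guard assumptions about the incoming NIR, e.g. (illustrative,
 * assuming an SSA dest):
 *
 *    compile_assert(ctx, intr->dest.ssa.num_components <= 4);
 */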

struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
		struct ir3_instruction *src, int align);
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
		struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction * ir3_create_array_load(struct ir3_context *ctx,
		struct ir3_array *arr, int n, struct ir3_instruction *address,
		unsigned bitsize);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address);
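
/* Reading and writing one element of a nir_register-backed array looks
 * roughly like the following (a sketch; `address` may be NULL for direct
 * access, and the element index 0 and 32-bit size are arbitrary):
 *
 *    struct ir3_array *arr = ir3_get_array(ctx, reg);
 *    struct ir3_instruction *val =
 *          ir3_create_array_load(ctx, arr, 0, NULL, 32);
 *    ir3_create_array_store(ctx, arr, 0, val, NULL);
 */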

static inline type_t utype_for_size(unsigned bit_size)
{
	switch (bit_size) {
	case 32: return TYPE_U32;
	case 16: return TYPE_U16;
	case 8:  return TYPE_U8;
	default: unreachable("bad bitsize"); return ~0;
	}
}

static inline type_t utype_src(nir_src src)
{ return utype_for_size(nir_src_bit_size(src)); }

static inline type_t utype_dst(nir_dest dst)
{ return utype_for_size(nir_dest_bit_size(dst)); }

#endif /* IR3_CONTEXT_H_ */