glsl: Silence unused parameter warning
[mesa.git] src/compiler/glsl/lower_shared_reference.cpp
/*
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_shared_reference.cpp
 *
 * IR lower pass to replace dereferences of compute shader shared variables
 * with intrinsic function calls.
 *
 * This relieves drivers of the responsibility of allocating space for the
 * shared variables in the shared memory region.
 */
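
/* Illustrative sketch (not literal compiler output): a GLSL assignment such
 * as
 *
 *    shared uint counter;
 *    ...
 *    counter = value;
 *
 * is rewritten into a store through a synthesized intrinsic, roughly
 *
 *    __intrinsic_store_shared(<byte offset of counter>, value, <write mask>);
 *
 * with loads and atomics handled analogously via __intrinsic_load_shared and
 * __intrinsic_atomic_*_shared, as built by the functions below.
 */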

#include "lower_buffer_access.h"
#include "ir_builder.h"
#include "main/macros.h"
#include "util/list.h"
#include "glsl_parser_extras.h"

using namespace ir_builder;

namespace {

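/* Bookkeeping entry pairing a shared variable with the byte offset assigned
 * to it within the shared memory region.
 */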
struct var_offset {
   struct list_head node;
   const ir_variable *var;
   unsigned offset;
};

class lower_shared_reference_visitor :
      public lower_buffer_access::lower_buffer_access {
public:

   lower_shared_reference_visitor(struct gl_linked_shader *shader)
      : list_ctx(ralloc_context(NULL)), shader(shader), shared_size(0u)
   {
      list_inithead(&var_offsets);
   }

   ~lower_shared_reference_visitor()
   {
      ralloc_free(list_ctx);
   }

   enum {
      shared_load_access,
      shared_store_access,
      shared_atomic_access,
   } buffer_access_type;

   void insert_buffer_access(void *mem_ctx, ir_dereference *deref,
                             const glsl_type *type, ir_rvalue *offset,
                             unsigned mask, int channel);

   void handle_rvalue(ir_rvalue **rvalue);
   ir_visitor_status visit_enter(ir_assignment *ir);
   void handle_assignment(ir_assignment *ir);

   ir_call *lower_shared_atomic_intrinsic(ir_call *ir);
   ir_call *check_for_shared_atomic_intrinsic(ir_call *ir);
   ir_visitor_status visit_enter(ir_call *ir);

   unsigned get_shared_offset(const ir_variable *);

   ir_call *shared_load(void *mem_ctx, const struct glsl_type *type,
                        ir_rvalue *offset);
   ir_call *shared_store(void *mem_ctx, ir_rvalue *deref, ir_rvalue *offset,
                         unsigned write_mask);

   void *list_ctx;
   struct gl_linked_shader *shader;
   struct list_head var_offsets;
   unsigned shared_size;
   bool progress;
};

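/* Returns the byte offset of \p var within the shared memory region,
 * assigning a new std430-aligned slot (and growing shared_size) the first
 * time a given variable is seen.
 */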
unsigned
lower_shared_reference_visitor::get_shared_offset(const ir_variable *var)
{
   list_for_each_entry(var_offset, var_entry, &var_offsets, node) {
      if (var_entry->var == var)
         return var_entry->offset;
   }

   struct var_offset *new_entry = rzalloc(list_ctx, struct var_offset);
   list_add(&new_entry->node, &var_offsets);
   new_entry->var = var;

   unsigned var_align = var->type->std430_base_alignment(false);
   new_entry->offset = glsl_align(shared_size, var_align);

   unsigned var_size = var->type->std430_size(false);
   shared_size = new_entry->offset + var_size;

   return new_entry->offset;
}

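/* Rewrites a read of a shared variable: the dereference is replaced by a
 * temporary, and emit_access() generates __intrinsic_load_shared calls that
 * fill the temporary from the computed offset.
 */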
void
lower_shared_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   if (!*rvalue)
      return;

   ir_dereference *deref = (*rvalue)->as_dereference();
   if (!deref)
      return;

   ir_variable *var = deref->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return;

   buffer_access_type = shared_load_access;

   void *mem_ctx = ralloc_parent(shader->ir);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   int matrix_columns;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_columns, NULL, packing);

   /* Now that we've calculated the offset to the start of the
    * dereference, walk over the type and emit loads into a temporary.
    */
   const glsl_type *type = (*rvalue)->type;
   ir_variable *load_var = new(mem_ctx) ir_variable(type,
                                                    "shared_load_temp",
                                                    ir_var_temporary);
   base_ir->insert_before(load_var);

   ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                       "shared_load_temp_offset",
                                                       ir_var_temporary);
   base_ir->insert_before(load_offset);
   base_ir->insert_before(assign(load_offset, offset));

   deref = new(mem_ctx) ir_dereference_variable(load_var);

   emit_access(mem_ctx, false, deref, load_offset, const_offset, row_major,
               matrix_columns, packing, 0);

   *rvalue = deref;

   progress = true;
}

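/* Rewrites a write to a shared variable: the assignment's LHS is redirected
 * to a temporary, and emit_access() then stores the temporary to shared
 * memory through __intrinsic_store_shared.
 */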
void
lower_shared_reference_visitor::handle_assignment(ir_assignment *ir)
{
   if (!ir || !ir->lhs)
      return;

   ir_rvalue *rvalue = ir->lhs->as_rvalue();
   if (!rvalue)
      return;

   ir_dereference *deref = ir->lhs->as_dereference();
   if (!deref)
      return;

   ir_variable *var = ir->lhs->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return;

   buffer_access_type = shared_store_access;

   /* We have a write to a shared variable, so declare a temporary and rewrite
    * the assignment so that the temporary is the LHS.
    */
   void *mem_ctx = ralloc_parent(shader->ir);

   const glsl_type *type = rvalue->type;
   ir_variable *store_var = new(mem_ctx) ir_variable(type,
                                                     "shared_store_temp",
                                                     ir_var_temporary);
   base_ir->insert_before(store_var);
   ir->lhs = new(mem_ctx) ir_dereference_variable(store_var);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   int matrix_columns;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_columns, NULL, packing);

   deref = new(mem_ctx) ir_dereference_variable(store_var);

   ir_variable *store_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                        "shared_store_temp_offset",
                                                        ir_var_temporary);
   base_ir->insert_before(store_offset);
   base_ir->insert_before(assign(store_offset, offset));

   /* Now we have to write the value assigned to the temporary back to memory */
   emit_access(mem_ctx, true, deref, store_offset, const_offset, row_major,
               matrix_columns, packing, ir->write_mask);

   progress = true;
}

ir_visitor_status
lower_shared_reference_visitor::visit_enter(ir_assignment *ir)
{
   handle_assignment(ir);
   return rvalue_visit(ir);
}

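/* Callback invoked by emit_access() for each access it generates: emits the
 * actual __intrinsic_store_shared or __intrinsic_load_shared call for one
 * slot.
 */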
void
lower_shared_reference_visitor::insert_buffer_access(void *mem_ctx,
                                                     ir_dereference *deref,
                                                     const glsl_type *type,
                                                     ir_rvalue *offset,
                                                     unsigned mask,
                                                     int /* channel */)
{
   if (buffer_access_type == shared_store_access) {
      ir_call *store = shared_store(mem_ctx, deref, offset, mask);
      base_ir->insert_after(store);
   } else {
      ir_call *load = shared_load(mem_ctx, type, offset);
      base_ir->insert_before(load);
      ir_rvalue *value = load->return_deref->as_rvalue()->clone(mem_ctx, NULL);
      base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                    value));
   }
}

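/* Availability predicate for the synthesized intrinsic signatures: shared
 * variables only exist in compute shaders.
 */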
static bool
compute_shader_enabled(const _mesa_glsl_parse_state *state)
{
   return state->stage == MESA_SHADER_COMPUTE;
}

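/* Builds a call to __intrinsic_store_shared(offset, value, write_mask),
 * synthesizing the intrinsic's function signature on the fly.
 */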
ir_call *
lower_shared_reference_visitor::shared_store(void *mem_ctx,
                                             ir_rvalue *deref,
                                             ir_rvalue *offset,
                                             unsigned write_mask)
{
   exec_list sig_params;

   ir_variable *offset_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset", ir_var_function_in);
   sig_params.push_tail(offset_ref);

   ir_variable *val_ref = new(mem_ctx)
      ir_variable(deref->type, "value", ir_var_function_in);
   sig_params.push_tail(val_ref);

   ir_variable *writemask_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "write_mask", ir_var_function_in);
   sig_params.push_tail(writemask_ref);

   ir_function_signature *sig = new(mem_ctx)
      ir_function_signature(glsl_type::void_type, compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);
   sig->is_intrinsic = true;

   ir_function *f = new(mem_ctx) ir_function("__intrinsic_store_shared");
   f->add_signature(sig);

   exec_list call_params;
   call_params.push_tail(offset->clone(mem_ctx, NULL));
   call_params.push_tail(deref->clone(mem_ctx, NULL));
   call_params.push_tail(new(mem_ctx) ir_constant(write_mask));
   return new(mem_ctx) ir_call(sig, NULL, &call_params);
}

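/* Builds a call to __intrinsic_load_shared(offset), returning the result in
 * a fresh "shared_load_result" temporary.
 */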
ir_call *
lower_shared_reference_visitor::shared_load(void *mem_ctx,
                                            const struct glsl_type *type,
                                            ir_rvalue *offset)
{
   exec_list sig_params;

   ir_variable *offset_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset_ref", ir_var_function_in);
   sig_params.push_tail(offset_ref);

   ir_function_signature *sig =
      new(mem_ctx) ir_function_signature(type, compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);
   sig->is_intrinsic = true;

   ir_function *f = new(mem_ctx) ir_function("__intrinsic_load_shared");
   f->add_signature(sig);

   ir_variable *result = new(mem_ctx)
      ir_variable(type, "shared_load_result", ir_var_temporary);
   base_ir->insert_before(result);
   ir_dereference_variable *deref_result = new(mem_ctx)
      ir_dereference_variable(result);

   exec_list call_params;
   call_params.push_tail(offset->clone(mem_ctx, NULL));

   return new(mem_ctx) ir_call(sig, deref_result, &call_params);
}

/* Lowers the intrinsic call to a new internal intrinsic that replaces the
 * access to the shared variable in the first parameter with an offset. This
 * involves creating the new internal intrinsic (i.e. the new function
 * signature).
 */
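/* Illustrative sketch (not literal compiler output): a call such as
 *
 *    __intrinsic_atomic_add(sharedvar, data)
 *
 * becomes
 *
 *    __intrinsic_atomic_add_shared(offset, data)
 *
 * where offset is the computed byte offset of sharedvar within the shared
 * memory region.
 */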
ir_call *
lower_shared_reference_visitor::lower_shared_atomic_intrinsic(ir_call *ir)
{
   /* Shared atomics usually have 2 parameters, the shared variable and an
    * integer argument. The exception is CompSwap, which has an additional
    * integer parameter.
    */
   int param_count = ir->actual_parameters.length();
   assert(param_count == 2 || param_count == 3);

   /* First argument must be a scalar integer shared variable */
   exec_node *param = ir->actual_parameters.get_head();
   ir_instruction *inst = (ir_instruction *) param;
   assert(inst->ir_type == ir_type_dereference_variable ||
          inst->ir_type == ir_type_dereference_array ||
          inst->ir_type == ir_type_dereference_record ||
          inst->ir_type == ir_type_swizzle);

   ir_rvalue *deref = (ir_rvalue *) inst;
   assert(deref->type->is_scalar() && deref->type->is_integer());

   ir_variable *var = deref->variable_referenced();
   assert(var);

   /* Compute the offset to the start of the dereference. */
   void *mem_ctx = ralloc_parent(shader->ir);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   int matrix_columns;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;
   buffer_access_type = shared_atomic_access;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_columns, NULL, packing);

   assert(offset);
   assert(!row_major);
   assert(matrix_columns == 1);

   ir_rvalue *deref_offset =
      add(offset, new(mem_ctx) ir_constant(const_offset));

   /* Create the new internal function signature that will take an offset
    * instead of a shared variable
    */
   exec_list sig_params;
   ir_variable *sig_param = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset", ir_var_function_in);
   sig_params.push_tail(sig_param);

   const glsl_type *type = deref->type->base_type == GLSL_TYPE_INT ?
      glsl_type::int_type : glsl_type::uint_type;
   sig_param = new(mem_ctx)
      ir_variable(type, "data1", ir_var_function_in);
   sig_params.push_tail(sig_param);

   if (param_count == 3) {
      sig_param = new(mem_ctx)
         ir_variable(type, "data2", ir_var_function_in);
      sig_params.push_tail(sig_param);
   }

   ir_function_signature *sig =
      new(mem_ctx) ir_function_signature(deref->type,
                                         compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);
   sig->is_intrinsic = true;

   char func_name[64];
   sprintf(func_name, "%s_shared", ir->callee_name());
   ir_function *f = new(mem_ctx) ir_function(func_name);
   f->add_signature(sig);

   /* Now, create the call to the internal intrinsic */
   exec_list call_params;
   call_params.push_tail(deref_offset);
   param = ir->actual_parameters.get_head()->get_next();
   ir_rvalue *param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
   call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
   if (param_count == 3) {
      param = param->get_next();
      param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
      call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
   }
   ir_dereference_variable *return_deref =
      ir->return_deref->clone(mem_ctx, NULL);
   return new(mem_ctx) ir_call(sig, return_deref, &call_params);
}

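/* Checks whether the call is one of the known atomic intrinsics operating on
 * a shared variable and, if so, lowers it; otherwise returns the call
 * unchanged.
 */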
ir_call *
lower_shared_reference_visitor::check_for_shared_atomic_intrinsic(ir_call *ir)
{
   exec_list& params = ir->actual_parameters;

   if (params.length() < 2 || params.length() > 3)
      return ir;

   ir_rvalue *rvalue =
      ((ir_instruction *) params.get_head())->as_rvalue();
   if (!rvalue)
      return ir;

   ir_variable *var = rvalue->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return ir;

   const char *callee = ir->callee_name();
   if (!strcmp("__intrinsic_atomic_add", callee) ||
       !strcmp("__intrinsic_atomic_min", callee) ||
       !strcmp("__intrinsic_atomic_max", callee) ||
       !strcmp("__intrinsic_atomic_and", callee) ||
       !strcmp("__intrinsic_atomic_or", callee) ||
       !strcmp("__intrinsic_atomic_xor", callee) ||
       !strcmp("__intrinsic_atomic_exchange", callee) ||
       !strcmp("__intrinsic_atomic_comp_swap", callee)) {
      return lower_shared_atomic_intrinsic(ir);
   }

   return ir;
}

ir_visitor_status
lower_shared_reference_visitor::visit_enter(ir_call *ir)
{
   ir_call *new_ir = check_for_shared_atomic_intrinsic(ir);
   if (new_ir != ir) {
      progress = true;
      base_ir->replace_with(new_ir);
      return visit_continue_with_parent;
   }

   return rvalue_visit(ir);
}

} /* unnamed namespace */

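/* Entry point: lowers all shared-variable references in a compute shader and
 * reports the total std430-layout size of the shared memory region via
 * \p shared_size.
 */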
void
lower_shared_reference(struct gl_linked_shader *shader, unsigned *shared_size)
{
   if (shader->Stage != MESA_SHADER_COMPUTE)
      return;

   lower_shared_reference_visitor v(shader);

   /* Loop over the instructions lowering references, because taking a deref
    * of a shared-variable array using a shared-variable dereference as the
    * index produces a collection of instructions, all of which have cloned
    * shared-variable dereferences for that array index.
    */
   do {
      v.progress = false;
      visit_list_elements(&v, shader->ir);
   } while (v.progress);

   *shared_size = v.shared_size;
}