llvmpipe: add grid launch
[mesa.git] / src/gallium/drivers/llvmpipe/lp_state_cs.c
1 /**************************************************************************
2 *
3 * Copyright 2019 Red Hat.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **************************************************************************/
25 #include "util/u_memory.h"
26 #include "util/simple_list.h"
27 #include "util/os_time.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "gallivm/lp_bld_const.h"
31 #include "gallivm/lp_bld_debug.h"
32 #include "gallivm/lp_bld_intr.h"
33 #include "gallivm/lp_bld_flow.h"
34 #include "gallivm/lp_bld_gather.h"
35 #include "gallivm/lp_bld_coro.h"
36 #include "lp_state_cs.h"
37 #include "lp_context.h"
38 #include "lp_debug.h"
39 #include "lp_state.h"
40 #include "lp_perf.h"
41 #include "lp_screen.h"
42 #include "lp_cs_tpool.h"
43
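/* Per-dispatch job description shared by all worker tasks: each task derives
 * its own work-group coordinates from its iteration index (see cs_exec_fn).
 */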
44 struct lp_cs_job_info {
45 unsigned grid_size[3];
46 unsigned block_size[3];
47 struct lp_cs_exec *current;
48 };
49
50 static void
51 generate_compute(struct llvmpipe_context *lp,
52 struct lp_compute_shader *shader,
53 struct lp_compute_shader_variant *variant)
54 {
55 struct gallivm_state *gallivm = variant->gallivm;
56 char func_name[64], func_name_coro[64];
57 LLVMTypeRef arg_types[13];
58 LLVMTypeRef func_type, coro_func_type;
59 LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
60 LLVMValueRef context_ptr;
61 LLVMValueRef x_size_arg, y_size_arg, z_size_arg;
62 LLVMValueRef grid_x_arg, grid_y_arg, grid_z_arg;
63 LLVMValueRef grid_size_x_arg, grid_size_y_arg, grid_size_z_arg;
64 LLVMValueRef thread_data_ptr;
65 LLVMBasicBlockRef block;
66 LLVMBuilderRef builder;
67 LLVMValueRef function, coro;
68 struct lp_type cs_type;
69 unsigned i;
70
71 /*
72 * This function has two parts
73 * a) setup the coroutine execution environment loop.
74     * b) build the compute shader LLVM IR for use inside the coroutine.
75 */
76 assert(lp_native_vector_width / 32 >= 4);
77
78 memset(&cs_type, 0, sizeof cs_type);
79 cs_type.floating = TRUE; /* floating point values */
80 cs_type.sign = TRUE; /* values are signed */
81 cs_type.norm = FALSE; /* values are not limited to [0,1] or [-1,1] */
82 cs_type.width = 32; /* 32-bit float */
83 cs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
84 snprintf(func_name, sizeof(func_name), "cs%u_variant%u",
85 shader->no, variant->no);
86
87    snprintf(func_name_coro, sizeof(func_name_coro), "cs_co_%u_variant%u",
88 shader->no, variant->no);
89
90 arg_types[0] = variant->jit_cs_context_ptr_type; /* context */
91 arg_types[1] = int32_type; /* block_x_size */
92 arg_types[2] = int32_type; /* block_y_size */
93 arg_types[3] = int32_type; /* block_z_size */
94 arg_types[4] = int32_type; /* grid_x */
95 arg_types[5] = int32_type; /* grid_y */
96 arg_types[6] = int32_type; /* grid_z */
97 arg_types[7] = int32_type; /* grid_size_x */
98 arg_types[8] = int32_type; /* grid_size_y */
99 arg_types[9] = int32_type; /* grid_size_z */
100 arg_types[10] = variant->jit_cs_thread_data_ptr_type; /* per thread data */
101    arg_types[11] = int32_type;                          /* coro only: num_x_loop */
102    arg_types[12] = int32_type;                          /* coro only: partials */
103 func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
104 arg_types, ARRAY_SIZE(arg_types) - 2, 0);
105
106 coro_func_type = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0),
107 arg_types, ARRAY_SIZE(arg_types), 0);
108
109 function = LLVMAddFunction(gallivm->module, func_name, func_type);
110 LLVMSetFunctionCallConv(function, LLVMCCallConv);
111
112 coro = LLVMAddFunction(gallivm->module, func_name_coro, coro_func_type);
113 LLVMSetFunctionCallConv(coro, LLVMCCallConv);
114
115 variant->function = function;
116
117 for(i = 0; i < ARRAY_SIZE(arg_types); ++i) {
118 if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
119 lp_add_function_attr(coro, i + 1, LP_FUNC_ATTR_NOALIAS);
120 lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
121 }
122 }
123
124 context_ptr = LLVMGetParam(function, 0);
125 x_size_arg = LLVMGetParam(function, 1);
126 y_size_arg = LLVMGetParam(function, 2);
127 z_size_arg = LLVMGetParam(function, 3);
128 grid_x_arg = LLVMGetParam(function, 4);
129 grid_y_arg = LLVMGetParam(function, 5);
130 grid_z_arg = LLVMGetParam(function, 6);
131 grid_size_x_arg = LLVMGetParam(function, 7);
132 grid_size_y_arg = LLVMGetParam(function, 8);
133 grid_size_z_arg = LLVMGetParam(function, 9);
134 thread_data_ptr = LLVMGetParam(function, 10);
135
136 lp_build_name(context_ptr, "context");
137 lp_build_name(x_size_arg, "x_size");
138 lp_build_name(y_size_arg, "y_size");
139 lp_build_name(z_size_arg, "z_size");
140 lp_build_name(grid_x_arg, "grid_x");
141 lp_build_name(grid_y_arg, "grid_y");
142 lp_build_name(grid_z_arg, "grid_z");
143 lp_build_name(grid_size_x_arg, "grid_size_x");
144 lp_build_name(grid_size_y_arg, "grid_size_y");
145 lp_build_name(grid_size_z_arg, "grid_size_z");
146 lp_build_name(thread_data_ptr, "thread_data");
147
148 block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
149 builder = gallivm->builder;
150 assert(builder);
151 LLVMPositionBuilderAtEnd(builder, block);
152
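   /* Round the block x dimension up to whole SIMD vectors:
    * num_x_loop = ceil(x_size / vec_length); partials is the number of lanes
    * used in the final, partially filled vector (x_size % vec_length).
    */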
153 struct lp_build_loop_state loop_state[4];
154 LLVMValueRef num_x_loop;
155 LLVMValueRef vec_length = lp_build_const_int32(gallivm, cs_type.length);
156 num_x_loop = LLVMBuildAdd(gallivm->builder, x_size_arg, vec_length, "");
157 num_x_loop = LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), "");
158 num_x_loop = LLVMBuildUDiv(gallivm->builder, num_x_loop, vec_length, "");
159 LLVMValueRef partials = LLVMBuildURem(gallivm->builder, x_size_arg, vec_length, "");
160
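   /* Keep one coroutine handle per (x-vector, y, z) invocation group of the
    * work group in a stack-allocated array.
    */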
161 LLVMValueRef coro_num_hdls = LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, "");
162 coro_num_hdls = LLVMBuildMul(gallivm->builder, coro_num_hdls, z_size_arg, "");
163
164 LLVMTypeRef hdl_ptr_type = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
165 LLVMValueRef coro_hdls = LLVMBuildArrayAlloca(gallivm->builder, hdl_ptr_type, coro_num_hdls, "coro_hdls");
166
167    unsigned end_coroutine = INT_MAX;   /* sentinel: terminates the coroutine reentry loop */
168
169 /*
170 * This is the main coroutine execution loop. It iterates over the dimensions
171 * and calls the coroutine main entrypoint on the first pass, but in subsequent
172 * passes it checks if the coroutine has completed and resumes it if not.
173 */
174 /* take x_width - round up to type.length width */
175 lp_build_loop_begin(&loop_state[3], gallivm,
176 lp_build_const_int32(gallivm, 0)); /* coroutine reentry loop */
177 lp_build_loop_begin(&loop_state[2], gallivm,
178 lp_build_const_int32(gallivm, 0)); /* z loop */
179 lp_build_loop_begin(&loop_state[1], gallivm,
180 lp_build_const_int32(gallivm, 0)); /* y loop */
181 lp_build_loop_begin(&loop_state[0], gallivm,
182 lp_build_const_int32(gallivm, 0)); /* x loop */
183 {
184 LLVMValueRef args[13];
185 args[0] = context_ptr;
186 args[1] = loop_state[0].counter;
187 args[2] = loop_state[1].counter;
188 args[3] = loop_state[2].counter;
189 args[4] = grid_x_arg;
190 args[5] = grid_y_arg;
191 args[6] = grid_z_arg;
192 args[7] = grid_size_x_arg;
193 args[8] = grid_size_y_arg;
194 args[9] = grid_size_z_arg;
195 args[10] = thread_data_ptr;
196 args[11] = num_x_loop;
197 args[12] = partials;
198
199       /* idx = z * (num_x_loop * size_y) + y * num_x_loop + x */
200 LLVMValueRef coro_hdl_idx = LLVMBuildMul(gallivm->builder, loop_state[2].counter,
201 LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, ""), "");
202 coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
203 LLVMBuildMul(gallivm->builder, loop_state[1].counter,
204 num_x_loop, ""), "");
205 coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
206 loop_state[0].counter, "");
207
208 LLVMValueRef coro_entry = LLVMBuildGEP(gallivm->builder, coro_hdls, &coro_hdl_idx, 1, "");
209
210 LLVMValueRef coro_hdl = LLVMBuildLoad(gallivm->builder, coro_entry, "coro_hdl");
211
212 struct lp_build_if_state ifstate;
213 LLVMValueRef cmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, loop_state[3].counter,
214 lp_build_const_int32(gallivm, 0), "");
215 /* first time here - call the coroutine function entry point */
216 lp_build_if(&ifstate, gallivm, cmp);
217 LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 13, "");
218 LLVMBuildStore(gallivm->builder, coro_ret, coro_entry);
219 lp_build_else(&ifstate);
220 /* subsequent calls for this invocation - check if done. */
221 LLVMValueRef coro_done = lp_build_coro_done(gallivm, coro_hdl);
222 struct lp_build_if_state ifstate2;
223 lp_build_if(&ifstate2, gallivm, coro_done);
224 /* if done destroy and force loop exit */
225 lp_build_coro_destroy(gallivm, coro_hdl);
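      /* Setting the counter to end_coroutine - 1 makes the increment at the
       * bottom of the reentry loop reach end_coroutine and terminate it once
       * this pass over the block completes.
       */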
226 lp_build_loop_force_set_counter(&loop_state[3], lp_build_const_int32(gallivm, end_coroutine - 1));
227 lp_build_else(&ifstate2);
228 /* otherwise resume the coroutine */
229 lp_build_coro_resume(gallivm, coro_hdl);
230 lp_build_endif(&ifstate2);
231 lp_build_endif(&ifstate);
232 lp_build_loop_force_reload_counter(&loop_state[3]);
233 }
234 lp_build_loop_end_cond(&loop_state[0],
235 num_x_loop,
236 NULL, LLVMIntUGE);
237 lp_build_loop_end_cond(&loop_state[1],
238 y_size_arg,
239 NULL, LLVMIntUGE);
240 lp_build_loop_end_cond(&loop_state[2],
241 z_size_arg,
242 NULL, LLVMIntUGE);
243 lp_build_loop_end_cond(&loop_state[3],
244 lp_build_const_int32(gallivm, end_coroutine),
245 NULL, LLVMIntEQ);
246 LLVMBuildRetVoid(builder);
247
248 /* This is stage (b) - generate the compute shader code inside the coroutine. */
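   /* Note: inside the coroutine, parameters 1-3 carry the launcher's loop
    * counters (x vector-group index, y and z invocation indices); parameters
    * 11 and 12 carry num_x_loop and partials computed above.
    */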
249 context_ptr = LLVMGetParam(coro, 0);
250 x_size_arg = LLVMGetParam(coro, 1);
251 y_size_arg = LLVMGetParam(coro, 2);
252 z_size_arg = LLVMGetParam(coro, 3);
253 grid_x_arg = LLVMGetParam(coro, 4);
254 grid_y_arg = LLVMGetParam(coro, 5);
255 grid_z_arg = LLVMGetParam(coro, 6);
256 grid_size_x_arg = LLVMGetParam(coro, 7);
257 grid_size_y_arg = LLVMGetParam(coro, 8);
258 grid_size_z_arg = LLVMGetParam(coro, 9);
259 thread_data_ptr = LLVMGetParam(coro, 10);
260 num_x_loop = LLVMGetParam(coro, 11);
261 partials = LLVMGetParam(coro, 12);
262 block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "entry");
263 LLVMPositionBuilderAtEnd(builder, block);
264 {
265 const struct tgsi_token *tokens = shader->base.tokens;
266 LLVMValueRef consts_ptr, num_consts_ptr;
267 LLVMValueRef ssbo_ptr, num_ssbo_ptr;
268 LLVMValueRef shared_ptr;
269 struct lp_build_mask_context mask;
270 struct lp_bld_tgsi_system_values system_values;
271
272 memset(&system_values, 0, sizeof(system_values));
273 consts_ptr = lp_jit_cs_context_constants(gallivm, context_ptr);
274 num_consts_ptr = lp_jit_cs_context_num_constants(gallivm, context_ptr);
275 ssbo_ptr = lp_jit_cs_context_ssbos(gallivm, context_ptr);
276 num_ssbo_ptr = lp_jit_cs_context_num_ssbos(gallivm, context_ptr);
277 shared_ptr = lp_jit_cs_thread_data_shared(gallivm, thread_data_ptr);
278
279 /* these are coroutine entrypoint necessities */
280 LLVMValueRef coro_id = lp_build_coro_id(gallivm);
281 LLVMValueRef coro_hdl = lp_build_coro_begin_alloc_mem(gallivm, coro_id);
282
283 LLVMValueRef has_partials = LLVMBuildICmp(gallivm->builder, LLVMIntNE, partials, lp_build_const_int32(gallivm, 0), "");
284 LLVMValueRef tid_vals[3];
285 LLVMValueRef tids_x[LP_MAX_VECTOR_LENGTH], tids_y[LP_MAX_VECTOR_LENGTH], tids_z[LP_MAX_VECTOR_LENGTH];
286 LLVMValueRef base_val = LLVMBuildMul(gallivm->builder, x_size_arg, vec_length, "");
287 for (i = 0; i < cs_type.length; i++) {
288 tids_x[i] = LLVMBuildAdd(gallivm->builder, base_val, lp_build_const_int32(gallivm, i), "");
289 tids_y[i] = y_size_arg;
290 tids_z[i] = z_size_arg;
291 }
292 tid_vals[0] = lp_build_gather_values(gallivm, tids_x, cs_type.length);
293 tid_vals[1] = lp_build_gather_values(gallivm, tids_y, cs_type.length);
294 tid_vals[2] = lp_build_gather_values(gallivm, tids_z, cs_type.length);
295 system_values.thread_id = LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type, cs_type.length), 3));
296 for (i = 0; i < 3; i++)
297 system_values.thread_id = LLVMBuildInsertValue(builder, system_values.thread_id, tid_vals[i], i, "");
298
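      /* block_id holds this work group's coordinates within the grid and
       * grid_size the grid dimensions in work groups, each as a <3 x i32>.
       */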
299 LLVMValueRef gtids[3] = { grid_x_arg, grid_y_arg, grid_z_arg };
300 system_values.block_id = LLVMGetUndef(LLVMVectorType(int32_type, 3));
301 for (i = 0; i < 3; i++)
302 system_values.block_id = LLVMBuildInsertElement(builder, system_values.block_id, gtids[i], lp_build_const_int32(gallivm, i), "");
303
304 LLVMValueRef gstids[3] = { grid_size_x_arg, grid_size_y_arg, grid_size_z_arg };
305 system_values.grid_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
306 for (i = 0; i < 3; i++)
307 system_values.grid_size = LLVMBuildInsertElement(builder, system_values.grid_size, gstids[i], lp_build_const_int32(gallivm, i), "");
308
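      /* Build the execution mask: start fully enabled, then for the last
       * x vector-group of a block whose width is not a multiple of the
       * vector length, zero the lanes beyond 'partials'.
       */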
309 LLVMValueRef last_x_loop = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, x_size_arg, LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), ""), "");
310 LLVMValueRef use_partial_mask = LLVMBuildAnd(gallivm->builder, last_x_loop, has_partials, "");
311 struct lp_build_if_state if_state;
312 LLVMValueRef mask_val = lp_build_alloca(gallivm, LLVMVectorType(int32_type, cs_type.length), "mask");
313 LLVMValueRef full_mask_val = lp_build_const_int_vec(gallivm, cs_type, ~0);
314 LLVMBuildStore(gallivm->builder, full_mask_val, mask_val);
315
316 lp_build_if(&if_state, gallivm, use_partial_mask);
317 struct lp_build_loop_state mask_loop_state;
318 lp_build_loop_begin(&mask_loop_state, gallivm, partials);
319 LLVMValueRef tmask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
320 tmask_val = LLVMBuildInsertElement(gallivm->builder, tmask_val, lp_build_const_int32(gallivm, 0), mask_loop_state.counter, "");
321 LLVMBuildStore(gallivm->builder, tmask_val, mask_val);
322 lp_build_loop_end_cond(&mask_loop_state, vec_length, NULL, LLVMIntUGE);
323 lp_build_endif(&if_state);
324
325 mask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
326 lp_build_mask_begin(&mask, gallivm, cs_type, mask_val);
327
328 struct lp_build_coro_suspend_info coro_info;
329
330 LLVMBasicBlockRef sus_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "suspend");
331 LLVMBasicBlockRef clean_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "cleanup");
332
333 coro_info.suspend = sus_block;
334 coro_info.cleanup = clean_block;
335
336 struct lp_build_tgsi_params params;
337 memset(&params, 0, sizeof(params));
338
339 params.type = cs_type;
340 params.mask = &mask;
341 params.consts_ptr = consts_ptr;
342 params.const_sizes_ptr = num_consts_ptr;
343 params.system_values = &system_values;
344 params.context_ptr = context_ptr;
345 params.info = &shader->info.base;
346 params.ssbo_ptr = ssbo_ptr;
347 params.ssbo_sizes_ptr = num_ssbo_ptr;
348 params.shared_ptr = shared_ptr;
349 params.coro = &coro_info;
350
351 lp_build_tgsi_soa(gallivm, tokens, &params, NULL);
352
353 mask_val = lp_build_mask_end(&mask);
354
355 lp_build_coro_suspend_switch(gallivm, &coro_info, NULL, true);
356 LLVMPositionBuilderAtEnd(builder, clean_block);
357
358 lp_build_coro_free_mem(gallivm, coro_id, coro_hdl);
359
360 LLVMBuildBr(builder, sus_block);
361 LLVMPositionBuilderAtEnd(builder, sus_block);
362
363 lp_build_coro_end(gallivm, coro_hdl);
364 LLVMBuildRet(builder, coro_hdl);
365 }
366
367 gallivm_verify_function(gallivm, coro);
368 gallivm_verify_function(gallivm, function);
369 }
370
371 static void *
372 llvmpipe_create_compute_state(struct pipe_context *pipe,
373 const struct pipe_compute_state *templ)
374 {
375 struct lp_compute_shader *shader;
376
377 shader = CALLOC_STRUCT(lp_compute_shader);
378 if (!shader)
379 return NULL;
380
381 assert(templ->ir_type == PIPE_SHADER_IR_TGSI);
382 shader->base.tokens = tgsi_dup_tokens(templ->prog);
383
384 lp_build_tgsi_info(shader->base.tokens, &shader->info);
385 make_empty_list(&shader->variants);
386
387 return shader;
388 }
389
390 static void
391 llvmpipe_bind_compute_state(struct pipe_context *pipe,
392 void *cs)
393 {
394 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
395
396 if (llvmpipe->cs == cs)
397 return;
398
399 llvmpipe->cs = (struct lp_compute_shader *)cs;
400 llvmpipe->cs_dirty |= LP_CSNEW_CS;
401 }
402
403 /**
404 * Remove shader variant from two lists: the shader's variant list
405 * and the context's variant list.
406 */
407 static void
408 llvmpipe_remove_cs_shader_variant(struct llvmpipe_context *lp,
409 struct lp_compute_shader_variant *variant)
410 {
411 if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
412 debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
413 "v total cached %u inst %u total inst %u\n",
414 variant->shader->no, variant->no,
415 variant->shader->variants_created,
416 variant->shader->variants_cached,
417 lp->nr_cs_variants, variant->nr_instrs, lp->nr_cs_instrs);
418 }
419
420 gallivm_destroy(variant->gallivm);
421
422 /* remove from shader's list */
423 remove_from_list(&variant->list_item_local);
424 variant->shader->variants_cached--;
425
426 /* remove from context's list */
427 remove_from_list(&variant->list_item_global);
428    lp->nr_cs_variants--;
429    lp->nr_cs_instrs -= variant->nr_instrs;
430
431 FREE(variant);
432 }
433
434 static void
435 llvmpipe_delete_compute_state(struct pipe_context *pipe,
436 void *cs)
437 {
438 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
439 struct lp_compute_shader *shader = cs;
440 struct lp_cs_variant_list_item *li;
441
442 /* Delete all the variants */
443 li = first_elem(&shader->variants);
444 while(!at_end(&shader->variants, li)) {
445 struct lp_cs_variant_list_item *next = next_elem(li);
446 llvmpipe_remove_cs_shader_variant(llvmpipe, li->base);
447 li = next;
448 }
449 tgsi_free_tokens(shader->base.tokens);
450 FREE(shader);
451 }
452
453 static void
454 make_variant_key(struct llvmpipe_context *lp,
455 struct lp_compute_shader *shader,
456 struct lp_compute_shader_variant_key *key)
457 {
458 memset(key, 0, shader->variant_key_size);
459 }
460
461 static void
462 dump_cs_variant_key(const struct lp_compute_shader_variant_key *key)
463 {
464 debug_printf("cs variant %p:\n", (void *) key);
465 }
466
467 static void
468 lp_debug_cs_variant(const struct lp_compute_shader_variant *variant)
469 {
470 debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
471 variant->shader->no, variant->no);
472 tgsi_dump(variant->shader->base.tokens, 0);
473 dump_cs_variant_key(&variant->key);
474 debug_printf("\n");
475 }
476
477 static struct lp_compute_shader_variant *
478 generate_variant(struct llvmpipe_context *lp,
479 struct lp_compute_shader *shader,
480 const struct lp_compute_shader_variant_key *key)
481 {
482 struct lp_compute_shader_variant *variant;
483 char module_name[64];
484
485 variant = CALLOC_STRUCT(lp_compute_shader_variant);
486 if (!variant)
487 return NULL;
488
489 snprintf(module_name, sizeof(module_name), "cs%u_variant%u",
490 shader->no, shader->variants_created);
491
492 variant->gallivm = gallivm_create(module_name, lp->context);
493 if (!variant->gallivm) {
494 FREE(variant);
495 return NULL;
496 }
497
498 variant->shader = shader;
499 variant->list_item_global.base = variant;
500 variant->list_item_local.base = variant;
501 variant->no = shader->variants_created++;
502
503 memcpy(&variant->key, key, shader->variant_key_size);
504
505 if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
506 lp_debug_cs_variant(variant);
507 }
508
509 lp_jit_init_cs_types(variant);
510
511 generate_compute(lp, shader, variant);
512
513 gallivm_compile_module(variant->gallivm);
514
515 variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);
516
517 variant->jit_function = (lp_jit_cs_func)gallivm_jit_function(variant->gallivm, variant->function);
518
519 gallivm_free_ir(variant->gallivm);
520 return variant;
521 }
522
523 static void
524 lp_cs_ctx_set_cs_variant( struct lp_cs_context *csctx,
525 struct lp_compute_shader_variant *variant)
526 {
527 csctx->cs.current.variant = variant;
528 }
529
530 static void
531 llvmpipe_update_cs(struct llvmpipe_context *lp)
532 {
533 struct lp_compute_shader *shader = lp->cs;
534
535 struct lp_compute_shader_variant_key key;
536 struct lp_compute_shader_variant *variant = NULL;
537 struct lp_cs_variant_list_item *li;
538
539 make_variant_key(lp, shader, &key);
540
541 /* Search the variants for one which matches the key */
542 li = first_elem(&shader->variants);
543 while(!at_end(&shader->variants, li)) {
544 if(memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
545 variant = li->base;
546 break;
547 }
548 li = next_elem(li);
549 }
550
551 if (variant) {
552 /* Move this variant to the head of the list to implement LRU
553        * deletion of shaders when we have too many.
554 */
555 move_to_head(&lp->cs_variants_list, &variant->list_item_global);
556 }
557 else {
558 /* variant not found, create it now */
559 int64_t t0, t1, dt;
560 unsigned i;
561 unsigned variants_to_cull;
562
563 if (LP_DEBUG & DEBUG_CS) {
564 debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
565 lp->nr_cs_variants,
566 lp->nr_cs_instrs,
567 lp->nr_cs_variants ? lp->nr_cs_instrs / lp->nr_cs_variants : 0);
568 }
569
570 /* First, check if we've exceeded the max number of shader variants.
571 * If so, free 6.25% of them (the least recently used ones).
572 */
573 variants_to_cull = lp->nr_cs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;
574
575 if (variants_to_cull ||
576 lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
577 if (gallivm_debug & GALLIVM_DEBUG_PERF) {
578 debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
579 "\t%u instrs,\t%u instrs/variant\n",
580 shader->variants_cached,
581 lp->nr_cs_variants, lp->nr_cs_instrs,
582 lp->nr_cs_instrs / lp->nr_cs_variants);
583 }
584
585 /*
586        * We need to re-check lp->nr_cs_variants because an arbitrarily large
587 * number of shader variants (potentially all of them) could be
588 * pending for destruction on flush.
589 */
590
591 for (i = 0; i < variants_to_cull || lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
592 struct lp_cs_variant_list_item *item;
593 if (is_empty_list(&lp->cs_variants_list)) {
594 break;
595 }
596 item = last_elem(&lp->cs_variants_list);
597 assert(item);
598 assert(item->base);
599 llvmpipe_remove_cs_shader_variant(lp, item->base);
600 }
601 }
602 /*
603 * Generate the new variant.
604 */
605 t0 = os_time_get();
606 variant = generate_variant(lp, shader, &key);
607 t1 = os_time_get();
608 dt = t1 - t0;
609 LP_COUNT_ADD(llvm_compile_time, dt);
610 LP_COUNT_ADD(nr_llvm_compiles, 2); /* emit vs. omit in/out test */
611
612 /* Put the new variant into the list */
613 if (variant) {
614 insert_at_head(&shader->variants, &variant->list_item_local);
615 insert_at_head(&lp->cs_variants_list, &variant->list_item_global);
616 lp->nr_cs_variants++;
617 lp->nr_cs_instrs += variant->nr_instrs;
618 shader->variants_cached++;
619 }
620 }
621 /* Bind this variant */
622 lp_cs_ctx_set_cs_variant(lp->csctx, variant);
623 }
624
625 static void
626 llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe)
627 {
628 if (llvmpipe->cs_dirty & (LP_CSNEW_CS))
629 llvmpipe_update_cs(llvmpipe);
630
631 llvmpipe->cs_dirty = 0;
632 }
633
634 static void
635 cs_exec_fn(void *init_data, int iter_idx, struct lp_cs_local_mem *lmem)
636 {
637 struct lp_cs_job_info *job_info = init_data;
638 struct lp_jit_cs_thread_data thread_data;
639
640 memset(&thread_data, 0, sizeof(thread_data));
641
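   /* Recover the work-group coordinates from the linear task index:
    * iter_idx = (grid_z * grid_size[1] + grid_y) * grid_size[0] + grid_x
    */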
642 unsigned grid_z = iter_idx / (job_info->grid_size[0] * job_info->grid_size[1]);
643 unsigned grid_y = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1]))) / job_info->grid_size[0];
644 unsigned grid_x = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1])) - (grid_y * job_info->grid_size[0]));
645 struct lp_compute_shader_variant *variant = job_info->current->variant;
646 variant->jit_function(&job_info->current->jit_context,
647 job_info->block_size[0], job_info->block_size[1], job_info->block_size[2],
648 grid_x, grid_y, grid_z,
649 job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2],
650 &thread_data);
651 }
652
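/* Resolve the grid dimensions for a launch: copy them from the launch info,
 * or for indirect dispatch map the indirect buffer and read the three counts.
 */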
653 static void
654 fill_grid_size(struct pipe_context *pipe,
655 const struct pipe_grid_info *info,
656 uint32_t grid_size[3])
657 {
658 struct pipe_transfer *transfer;
659 uint32_t *params;
660 if (!info->indirect) {
661 grid_size[0] = info->grid[0];
662 grid_size[1] = info->grid[1];
663 grid_size[2] = info->grid[2];
664 return;
665 }
666 params = pipe_buffer_map_range(pipe, info->indirect,
667 info->indirect_offset,
668 3 * sizeof(uint32_t),
669 PIPE_TRANSFER_READ,
670 &transfer);
671
672 if (!transfer)
673 return;
674
675 grid_size[0] = params[0];
676 grid_size[1] = params[1];
677 grid_size[2] = params[2];
678 pipe_buffer_unmap(pipe, transfer);
679 }
680
681 static void llvmpipe_launch_grid(struct pipe_context *pipe,
682 const struct pipe_grid_info *info)
683 {
684 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
685 struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
686 struct lp_cs_job_info job_info;
687
688 memset(&job_info, 0, sizeof(job_info));
689
690 llvmpipe_cs_update_derived(llvmpipe);
691
692 fill_grid_size(pipe, info, job_info.grid_size);
693
694 job_info.block_size[0] = info->block[0];
695 job_info.block_size[1] = info->block[1];
696 job_info.block_size[2] = info->block[2];
697 job_info.current = &llvmpipe->csctx->cs.current;
698
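   /* Queue one task per work group on the screen's compute thread pool and
    * wait for the whole grid to complete before returning.
    */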
699 int num_tasks = job_info.grid_size[2] * job_info.grid_size[1] * job_info.grid_size[0];
700 if (num_tasks) {
701 struct lp_cs_tpool_task *task;
702 mtx_lock(&screen->cs_mutex);
703 task = lp_cs_tpool_queue_task(screen->cs_tpool, cs_exec_fn, &job_info, num_tasks);
704
705 lp_cs_tpool_wait_for_task(screen->cs_tpool, &task);
706 mtx_unlock(&screen->cs_mutex);
707 }
708 }
709
710 void
711 llvmpipe_init_compute_funcs(struct llvmpipe_context *llvmpipe)
712 {
713 llvmpipe->pipe.create_compute_state = llvmpipe_create_compute_state;
714 llvmpipe->pipe.bind_compute_state = llvmpipe_bind_compute_state;
715 llvmpipe->pipe.delete_compute_state = llvmpipe_delete_compute_state;
716 llvmpipe->pipe.launch_grid = llvmpipe_launch_grid;
717 }
718
719 void
720 lp_csctx_destroy(struct lp_cs_context *csctx)
721 {
722 FREE(csctx);
723 }
724
725 struct lp_cs_context *lp_csctx_create(struct pipe_context *pipe)
726 {
727 struct lp_cs_context *csctx;
728
729 csctx = CALLOC_STRUCT(lp_cs_context);
730 if (!csctx)
731 return NULL;
732
733 csctx->pipe = pipe;
734 return csctx;
735 }