llvmpipe/gallivm: add kernel inputs
[mesa.git] / src/gallium/drivers/llvmpipe/lp_state_cs.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "util/os_time.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_debug.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_coro.h"
#include "gallivm/lp_bld_nir.h"
#include "lp_state_cs.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_state.h"
#include "lp_perf.h"
#include "lp_screen.h"
#include "lp_memory.h"
#include "lp_cs_tpool.h"
#include "state_tracker/sw_winsys.h"
#include "nir/nir_to_tgsi_info.h"

struct lp_cs_job_info {
   unsigned grid_size[3];
   unsigned block_size[3];
   unsigned req_local_mem;
   struct lp_cs_exec *current;
};
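
/*
 * Worked example (hypothetical numbers): a launch_grid of grid {4, 2, 3}
 * with block {8, 8, 1} fills this struct as grid_size = {4, 2, 3} and
 * block_size = {8, 8, 1}, and queues 4 * 2 * 3 = 24 thread-pool tasks,
 * one per workgroup. req_local_mem is the shared-memory size each worker
 * thread must provide before running a task.
 */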

static void
generate_compute(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 struct lp_compute_shader_variant *variant)
{
   struct gallivm_state *gallivm = variant->gallivm;
   const struct lp_compute_shader_variant_key *key = &variant->key;
   char func_name[64], func_name_coro[64];
   LLVMTypeRef arg_types[13];
   LLVMTypeRef func_type, coro_func_type;
   LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
   LLVMValueRef context_ptr;
   LLVMValueRef x_size_arg, y_size_arg, z_size_arg;
   LLVMValueRef grid_x_arg, grid_y_arg, grid_z_arg;
   LLVMValueRef grid_size_x_arg, grid_size_y_arg, grid_size_z_arg;
   LLVMValueRef thread_data_ptr;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_image_soa *image;
   LLVMValueRef function, coro;
   struct lp_type cs_type;
   unsigned i;

   /*
    * This function has two parts:
    * a) set up the coroutine execution environment loop.
    * b) build the compute shader LLVM IR for use inside the coroutine.
    */
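
   /*
    * Illustrative, simplified sketch of the code emitted for part (a);
    * names are made up, not literal IR:
    *
    *    void cs_variant(ctx, xs, ys, zs, gx, gy, gz, gsx, gsy, gsz, tdata) {
    *       for (reentry = 0; ; reentry++)
    *          for (z = 0; z < zs; z++)
    *             for (y = 0; y < ys; y++)
    *                for (x = 0; x < ceil(xs / vec_len); x++)
    *                   if (reentry == 0)
    *                      hdl[idx] = cs_co_variant(...);  // start coroutine
    *                   else if (done(hdl[idx]))
    *                      destroy(hdl[idx]);              // finished: exit
    *                   else
    *                      resume(hdl[idx]);               // past a barrier
    *    }
    */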
   assert(lp_native_vector_width / 32 >= 4);

   memset(&cs_type, 0, sizeof cs_type);
   cs_type.floating = TRUE;      /* floating point values */
   cs_type.sign = TRUE;          /* values are signed */
   cs_type.norm = FALSE;         /* values are not limited to [0,1] or [-1,1] */
   cs_type.width = 32;           /* 32-bit float */
   cs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
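   /*
    * E.g. with lp_native_vector_width == 256 (a hypothetical AVX2 host),
    * cs_type.length is MIN2(256 / 32, 16) == 8 invocations per SoA vector.
    */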
   snprintf(func_name, sizeof(func_name), "cs%u_variant%u",
            shader->no, variant->no);

   snprintf(func_name_coro, sizeof(func_name_coro), "cs_co_%u_variant%u",
            shader->no, variant->no);

   arg_types[0] = variant->jit_cs_context_ptr_type;        /* context */
   arg_types[1] = int32_type;                              /* block_x_size */
   arg_types[2] = int32_type;                              /* block_y_size */
   arg_types[3] = int32_type;                              /* block_z_size */
   arg_types[4] = int32_type;                              /* grid_x */
   arg_types[5] = int32_type;                              /* grid_y */
   arg_types[6] = int32_type;                              /* grid_z */
   arg_types[7] = int32_type;                              /* grid_size_x */
   arg_types[8] = int32_type;                              /* grid_size_y */
   arg_types[9] = int32_type;                              /* grid_size_z */
   arg_types[10] = variant->jit_cs_thread_data_ptr_type;   /* per thread data */
   arg_types[11] = int32_type;                             /* coro only - num x loops */
   arg_types[12] = int32_type;                             /* coro only - partials */
   func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
                                arg_types, ARRAY_SIZE(arg_types) - 2, 0);

   coro_func_type = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0),
                                     arg_types, ARRAY_SIZE(arg_types), 0);
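
   /*
    * Note the outer function is declared with ARRAY_SIZE(arg_types) - 2
    * parameters, i.e. 11 of the 13: only the coroutine receives the
    * trailing num_x_loop and partials values computed below.
    */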

   function = LLVMAddFunction(gallivm->module, func_name, func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   coro = LLVMAddFunction(gallivm->module, func_name_coro, coro_func_type);
   LLVMSetFunctionCallConv(coro, LLVMCCallConv);

   variant->function = function;

   for (i = 0; i < ARRAY_SIZE(arg_types); ++i) {
      if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
         lp_add_function_attr(coro, i + 1, LP_FUNC_ATTR_NOALIAS);
         lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
      }
   }

   context_ptr = LLVMGetParam(function, 0);
   x_size_arg = LLVMGetParam(function, 1);
   y_size_arg = LLVMGetParam(function, 2);
   z_size_arg = LLVMGetParam(function, 3);
   grid_x_arg = LLVMGetParam(function, 4);
   grid_y_arg = LLVMGetParam(function, 5);
   grid_z_arg = LLVMGetParam(function, 6);
   grid_size_x_arg = LLVMGetParam(function, 7);
   grid_size_y_arg = LLVMGetParam(function, 8);
   grid_size_z_arg = LLVMGetParam(function, 9);
   thread_data_ptr = LLVMGetParam(function, 10);

   lp_build_name(context_ptr, "context");
   lp_build_name(x_size_arg, "x_size");
   lp_build_name(y_size_arg, "y_size");
   lp_build_name(z_size_arg, "z_size");
   lp_build_name(grid_x_arg, "grid_x");
   lp_build_name(grid_y_arg, "grid_y");
   lp_build_name(grid_z_arg, "grid_z");
   lp_build_name(grid_size_x_arg, "grid_size_x");
   lp_build_name(grid_size_y_arg, "grid_size_y");
   lp_build_name(grid_size_z_arg, "grid_size_z");
   lp_build_name(thread_data_ptr, "thread_data");

   block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
   builder = gallivm->builder;
   assert(builder);
   LLVMPositionBuilderAtEnd(builder, block);
   sampler = lp_llvm_sampler_soa_create(key->state);
   image = lp_llvm_image_soa_create(key->image_state);

   struct lp_build_loop_state loop_state[4];
   LLVMValueRef num_x_loop;
   LLVMValueRef vec_length = lp_build_const_int32(gallivm, cs_type.length);
   num_x_loop = LLVMBuildAdd(gallivm->builder, x_size_arg, vec_length, "");
   num_x_loop = LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), "");
   num_x_loop = LLVMBuildUDiv(gallivm->builder, num_x_loop, vec_length, "");
   LLVMValueRef partials = LLVMBuildURem(gallivm->builder, x_size_arg, vec_length, "");
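   /*
    * E.g. a block x size of 18 with an 8-wide vector (hypothetical values)
    * gives num_x_loop = (18 + 8 - 1) / 8 = 3 coroutines per x row, with
    * partials = 18 % 8 = 2 lanes live in the last one.
    */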

   LLVMValueRef coro_num_hdls = LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, "");
   coro_num_hdls = LLVMBuildMul(gallivm->builder, coro_num_hdls, z_size_arg, "");

   LLVMTypeRef hdl_ptr_type = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
   LLVMValueRef coro_hdls = LLVMBuildArrayAlloca(gallivm->builder, hdl_ptr_type, coro_num_hdls, "coro_hdls");

   unsigned end_coroutine = INT_MAX;

   /*
    * This is the main coroutine execution loop. It iterates over the dimensions
    * and calls the coroutine main entrypoint on the first pass; in subsequent
    * passes it checks whether each coroutine has completed and resumes it if not.
    */
   /* the x dimension is walked in num_x_loop steps: x_width rounded up to a multiple of type.length */
   lp_build_loop_begin(&loop_state[3], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* coroutine reentry loop */
   lp_build_loop_begin(&loop_state[2], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* z loop */
   lp_build_loop_begin(&loop_state[1], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* y loop */
   lp_build_loop_begin(&loop_state[0], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* x loop */
   {
      LLVMValueRef args[13];
      args[0] = context_ptr;
      args[1] = loop_state[0].counter;
      args[2] = loop_state[1].counter;
      args[3] = loop_state[2].counter;
      args[4] = grid_x_arg;
      args[5] = grid_y_arg;
      args[6] = grid_z_arg;
      args[7] = grid_size_x_arg;
      args[8] = grid_size_y_arg;
      args[9] = grid_size_z_arg;
      args[10] = thread_data_ptr;
      args[11] = num_x_loop;
      args[12] = partials;

      /* idx = z * (num_x_loop * size_y) + y * num_x_loop + x */
      LLVMValueRef coro_hdl_idx = LLVMBuildMul(gallivm->builder, loop_state[2].counter,
                                               LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, ""), "");
      coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
                                  LLVMBuildMul(gallivm->builder, loop_state[1].counter,
                                               num_x_loop, ""), "");
      coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
                                  loop_state[0].counter, "");
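
      /*
       * E.g. with num_x_loop = 3 and y_size = 4 (hypothetical), the handle
       * for (x, y, z) = (1, 2, 0) lives at idx = 0 * (3 * 4) + 2 * 3 + 1 = 7.
       */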

      LLVMValueRef coro_entry = LLVMBuildGEP(gallivm->builder, coro_hdls, &coro_hdl_idx, 1, "");

      LLVMValueRef coro_hdl = LLVMBuildLoad(gallivm->builder, coro_entry, "coro_hdl");

      struct lp_build_if_state ifstate;
      LLVMValueRef cmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, loop_state[3].counter,
                                       lp_build_const_int32(gallivm, 0), "");
      /* first time here - call the coroutine function entry point */
      lp_build_if(&ifstate, gallivm, cmp);
      LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 13, "");
      LLVMBuildStore(gallivm->builder, coro_ret, coro_entry);
      lp_build_else(&ifstate);
      /* subsequent calls for this invocation - check if done. */
      LLVMValueRef coro_done = lp_build_coro_done(gallivm, coro_hdl);
      struct lp_build_if_state ifstate2;
      lp_build_if(&ifstate2, gallivm, coro_done);
      /* if done destroy and force loop exit */
      lp_build_coro_destroy(gallivm, coro_hdl);
      lp_build_loop_force_set_counter(&loop_state[3], lp_build_const_int32(gallivm, end_coroutine - 1));
      lp_build_else(&ifstate2);
      /* otherwise resume the coroutine */
      lp_build_coro_resume(gallivm, coro_hdl);
      lp_build_endif(&ifstate2);
      lp_build_endif(&ifstate);
      lp_build_loop_force_reload_counter(&loop_state[3]);
   }
   lp_build_loop_end_cond(&loop_state[0],
                          num_x_loop,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[1],
                          y_size_arg,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[2],
                          z_size_arg,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[3],
                          lp_build_const_int32(gallivm, end_coroutine),
                          NULL, LLVMIntEQ);
   LLVMBuildRetVoid(builder);

   /* This is stage (b) - generate the compute shader code inside the coroutine. */
   context_ptr = LLVMGetParam(coro, 0);
   x_size_arg = LLVMGetParam(coro, 1);
   y_size_arg = LLVMGetParam(coro, 2);
   z_size_arg = LLVMGetParam(coro, 3);
   grid_x_arg = LLVMGetParam(coro, 4);
   grid_y_arg = LLVMGetParam(coro, 5);
   grid_z_arg = LLVMGetParam(coro, 6);
   grid_size_x_arg = LLVMGetParam(coro, 7);
   grid_size_y_arg = LLVMGetParam(coro, 8);
   grid_size_z_arg = LLVMGetParam(coro, 9);
   thread_data_ptr = LLVMGetParam(coro, 10);
   num_x_loop = LLVMGetParam(coro, 11);
   partials = LLVMGetParam(coro, 12);
   block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "entry");
   LLVMPositionBuilderAtEnd(builder, block);
   {
      LLVMValueRef consts_ptr, num_consts_ptr;
      LLVMValueRef ssbo_ptr, num_ssbo_ptr;
      LLVMValueRef shared_ptr;
      LLVMValueRef kernel_args_ptr;
      struct lp_build_mask_context mask;
      struct lp_bld_tgsi_system_values system_values;

      memset(&system_values, 0, sizeof(system_values));
      consts_ptr = lp_jit_cs_context_constants(gallivm, context_ptr);
      num_consts_ptr = lp_jit_cs_context_num_constants(gallivm, context_ptr);
      ssbo_ptr = lp_jit_cs_context_ssbos(gallivm, context_ptr);
      num_ssbo_ptr = lp_jit_cs_context_num_ssbos(gallivm, context_ptr);
      kernel_args_ptr = lp_jit_cs_context_kernel_args(gallivm, context_ptr);

      shared_ptr = lp_jit_cs_thread_data_shared(gallivm, thread_data_ptr);

      /* these are coroutine entrypoint necessities */
      LLVMValueRef coro_id = lp_build_coro_id(gallivm);
      LLVMValueRef coro_hdl = lp_build_coro_begin_alloc_mem(gallivm, coro_id);

      LLVMValueRef has_partials = LLVMBuildICmp(gallivm->builder, LLVMIntNE, partials, lp_build_const_int32(gallivm, 0), "");
      LLVMValueRef tid_vals[3];
      LLVMValueRef tids_x[LP_MAX_VECTOR_LENGTH], tids_y[LP_MAX_VECTOR_LENGTH], tids_z[LP_MAX_VECTOR_LENGTH];
      LLVMValueRef base_val = LLVMBuildMul(gallivm->builder, x_size_arg, vec_length, "");
      for (i = 0; i < cs_type.length; i++) {
         tids_x[i] = LLVMBuildAdd(gallivm->builder, base_val, lp_build_const_int32(gallivm, i), "");
         tids_y[i] = y_size_arg;
         tids_z[i] = z_size_arg;
      }
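      /*
       * E.g. for coroutine x index 2 with an 8-wide vector (hypothetical),
       * base_val = 2 * 8 = 16 and the per-lane local ids are
       * tids_x = {16, 17, ..., 23}; tids_y/tids_z hold the same y/z index
       * in every lane.
       */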
      tid_vals[0] = lp_build_gather_values(gallivm, tids_x, cs_type.length);
      tid_vals[1] = lp_build_gather_values(gallivm, tids_y, cs_type.length);
      tid_vals[2] = lp_build_gather_values(gallivm, tids_z, cs_type.length);
      system_values.thread_id = LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type, cs_type.length), 3));
      for (i = 0; i < 3; i++)
         system_values.thread_id = LLVMBuildInsertValue(builder, system_values.thread_id, tid_vals[i], i, "");

      LLVMValueRef gtids[3] = { grid_x_arg, grid_y_arg, grid_z_arg };
      system_values.block_id = LLVMGetUndef(LLVMVectorType(int32_type, 3));
      for (i = 0; i < 3; i++)
         system_values.block_id = LLVMBuildInsertElement(builder, system_values.block_id, gtids[i], lp_build_const_int32(gallivm, i), "");

      LLVMValueRef gstids[3] = { grid_size_x_arg, grid_size_y_arg, grid_size_z_arg };
      system_values.grid_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
      for (i = 0; i < 3; i++)
         system_values.grid_size = LLVMBuildInsertElement(builder, system_values.grid_size, gstids[i], lp_build_const_int32(gallivm, i), "");

      LLVMValueRef last_x_loop = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, x_size_arg, LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), ""), "");
      LLVMValueRef use_partial_mask = LLVMBuildAnd(gallivm->builder, last_x_loop, has_partials, "");
      struct lp_build_if_state if_state;
      LLVMValueRef mask_val = lp_build_alloca(gallivm, LLVMVectorType(int32_type, cs_type.length), "mask");
      LLVMValueRef full_mask_val = lp_build_const_int_vec(gallivm, cs_type, ~0);
      LLVMBuildStore(gallivm->builder, full_mask_val, mask_val);

      lp_build_if(&if_state, gallivm, use_partial_mask);
      struct lp_build_loop_state mask_loop_state;
      lp_build_loop_begin(&mask_loop_state, gallivm, partials);
      LLVMValueRef tmask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
      tmask_val = LLVMBuildInsertElement(gallivm->builder, tmask_val, lp_build_const_int32(gallivm, 0), mask_loop_state.counter, "");
      LLVMBuildStore(gallivm->builder, tmask_val, mask_val);
      lp_build_loop_end_cond(&mask_loop_state, vec_length, NULL, LLVMIntUGE);
      lp_build_endif(&if_state);
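
      /*
       * E.g. with partials = 2 and an 8-wide vector (hypothetical), the
       * last x coroutine now runs with mask = {~0, ~0, 0, 0, 0, 0, 0, 0}:
       * only the first two lanes execute the shader body.
       */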

      mask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
      lp_build_mask_begin(&mask, gallivm, cs_type, mask_val);

      struct lp_build_coro_suspend_info coro_info;

      LLVMBasicBlockRef sus_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "suspend");
      LLVMBasicBlockRef clean_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "cleanup");

      coro_info.suspend = sus_block;
      coro_info.cleanup = clean_block;

      struct lp_build_tgsi_params params;
      memset(&params, 0, sizeof(params));

      params.type = cs_type;
      params.mask = &mask;
      params.consts_ptr = consts_ptr;
      params.const_sizes_ptr = num_consts_ptr;
      params.system_values = &system_values;
      params.context_ptr = context_ptr;
      params.sampler = sampler;
      params.info = &shader->info.base;
      params.ssbo_ptr = ssbo_ptr;
      params.ssbo_sizes_ptr = num_ssbo_ptr;
      params.image = image;
      params.shared_ptr = shared_ptr;
      params.coro = &coro_info;
      params.kernel_args = kernel_args_ptr;

      if (shader->base.type == PIPE_SHADER_IR_TGSI)
         lp_build_tgsi_soa(gallivm, shader->base.tokens, &params, NULL);
      else
         lp_build_nir_soa(gallivm, shader->base.ir.nir, &params,
                          NULL);

      mask_val = lp_build_mask_end(&mask);

      lp_build_coro_suspend_switch(gallivm, &coro_info, NULL, true);
      LLVMPositionBuilderAtEnd(builder, clean_block);

      lp_build_coro_free_mem(gallivm, coro_id, coro_hdl);

      LLVMBuildBr(builder, sus_block);
      LLVMPositionBuilderAtEnd(builder, sus_block);

      lp_build_coro_end(gallivm, coro_hdl);
      LLVMBuildRet(builder, coro_hdl);
   }

   sampler->destroy(sampler);
   image->destroy(image);

   gallivm_verify_function(gallivm, coro);
   gallivm_verify_function(gallivm, function);
}

static void *
llvmpipe_create_compute_state(struct pipe_context *pipe,
                              const struct pipe_compute_state *templ)
{
   struct lp_compute_shader *shader;
   int nr_samplers, nr_sampler_views;

   shader = CALLOC_STRUCT(lp_compute_shader);
   if (!shader)
      return NULL;

   shader->base.type = templ->ir_type;
   if (templ->ir_type == PIPE_SHADER_IR_TGSI) {
      /* get/save the summary info for this shader */
      lp_build_tgsi_info(templ->prog, &shader->info);

      /* we need to keep a local copy of the tokens */
      shader->base.tokens = tgsi_dup_tokens(templ->prog);
   } else {
      shader->base.ir.nir = (struct nir_shader *)templ->prog;
      nir_tgsi_scan_shader(templ->prog, &shader->info.base, false);
   }

   shader->req_local_mem = templ->req_local_mem;
   make_empty_list(&shader->variants);

   nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
   nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
   shader->variant_key_size = Offset(struct lp_compute_shader_variant_key,
                                     state[MAX2(nr_samplers, nr_sampler_views)]);
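
   /*
    * The key is sized only up to the last used texture slot; e.g. a shader
    * with 2 samplers and 3 sampler views (hypothetical) ends the key at
    * state[3], so unused slots never bloat the memcmp done when searching
    * for a matching variant in llvmpipe_update_cs.
    */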
   return shader;
}

static void
llvmpipe_bind_compute_state(struct pipe_context *pipe,
                            void *cs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->cs == cs)
      return;

   llvmpipe->cs = (struct lp_compute_shader *)cs;
   llvmpipe->cs_dirty |= LP_CSNEW_CS;
}

/**
 * Remove shader variant from two lists: the shader's variant list
 * and the context's variant list.
 */
static void
llvmpipe_remove_cs_shader_variant(struct llvmpipe_context *lp,
                                  struct lp_compute_shader_variant *variant)
{
   if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
                   "v total cached %u inst %u total inst %u\n",
                   variant->shader->no, variant->no,
                   variant->shader->variants_created,
                   variant->shader->variants_cached,
                   lp->nr_cs_variants, variant->nr_instrs, lp->nr_cs_instrs);
   }

   gallivm_destroy(variant->gallivm);

   /* remove from shader's list */
   remove_from_list(&variant->list_item_local);
   variant->shader->variants_cached--;

   /* remove from context's list */
   remove_from_list(&variant->list_item_global);
   lp->nr_cs_variants--;
   lp->nr_cs_instrs -= variant->nr_instrs;

   FREE(variant);
}

static void
llvmpipe_delete_compute_state(struct pipe_context *pipe,
                              void *cs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_compute_shader *shader = cs;
   struct lp_cs_variant_list_item *li;

   /* Delete all the variants */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      struct lp_cs_variant_list_item *next = next_elem(li);
      llvmpipe_remove_cs_shader_variant(llvmpipe, li->base);
      li = next;
   }
   tgsi_free_tokens(shader->base.tokens);
   FREE(shader);
}

static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 struct lp_compute_shader_variant_key *key)
{
   int i;

   memset(key, 0, shader->variant_key_size);

   /* This value will be the same for all the variants of a given shader:
    */
   key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;

   for (i = 0; i < key->nr_samplers; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
         lp_sampler_static_sampler_state(&key->state[i].sampler_state,
                                         lp->samplers[PIPE_SHADER_COMPUTE][i]);
      }
   }

   /*
    * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
    * are dx10-style? Can't really have mixed opcodes, at least not
    * if we want to skip the holes here (without rescanning tgsi).
    */
   if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
      key->nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         /*
          * Note sview may exceed what's representable by file_mask.
          * This will still work, the only downside is that not actually
          * used views may be included in the shader key.
          */
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
            lp_sampler_static_texture_state(&key->state[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
         }
      }
   }
   else {
      key->nr_sampler_views = key->nr_samplers;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
            lp_sampler_static_texture_state(&key->state[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
         }
      }
   }

   key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
   for (i = 0; i < key->nr_images; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
         lp_sampler_static_texture_state_image(&key->image_state[i].image_state,
                                               &lp->images[PIPE_SHADER_COMPUTE][i]);
      }
   }
}

static void
dump_cs_variant_key(const struct lp_compute_shader_variant_key *key)
{
   int i;
   debug_printf("cs variant %p:\n", (void *) key);

   for (i = 0; i < key->nr_samplers; ++i) {
      const struct lp_static_sampler_state *sampler = &key->state[i].sampler_state;
      debug_printf("sampler[%u] = \n", i);
      debug_printf("  .wrap = %s %s %s\n",
                   util_str_tex_wrap(sampler->wrap_s, TRUE),
                   util_str_tex_wrap(sampler->wrap_t, TRUE),
                   util_str_tex_wrap(sampler->wrap_r, TRUE));
      debug_printf("  .min_img_filter = %s\n",
                   util_str_tex_filter(sampler->min_img_filter, TRUE));
      debug_printf("  .min_mip_filter = %s\n",
                   util_str_tex_mipfilter(sampler->min_mip_filter, TRUE));
      debug_printf("  .mag_img_filter = %s\n",
                   util_str_tex_filter(sampler->mag_img_filter, TRUE));
      if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
         debug_printf("  .compare_func = %s\n", util_str_func(sampler->compare_func, TRUE));
      debug_printf("  .normalized_coords = %u\n", sampler->normalized_coords);
      debug_printf("  .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
      debug_printf("  .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
      debug_printf("  .apply_min_lod = %u\n", sampler->apply_min_lod);
      debug_printf("  .apply_max_lod = %u\n", sampler->apply_max_lod);
   }
   for (i = 0; i < key->nr_sampler_views; ++i) {
      const struct lp_static_texture_state *texture = &key->state[i].texture_state;
      debug_printf("texture[%u] = \n", i);
      debug_printf("  .format = %s\n",
                   util_format_name(texture->format));
      debug_printf("  .target = %s\n",
                   util_str_tex_target(texture->target, TRUE));
      debug_printf("  .level_zero_only = %u\n",
                   texture->level_zero_only);
      debug_printf("  .pot = %u %u %u\n",
                   texture->pot_width,
                   texture->pot_height,
                   texture->pot_depth);
   }
   for (i = 0; i < key->nr_images; ++i) {
      const struct lp_static_texture_state *image = &key->image_state[i].image_state;
      debug_printf("image[%u] = \n", i);
      debug_printf("  .format = %s\n",
                   util_format_name(image->format));
      debug_printf("  .target = %s\n",
                   util_str_tex_target(image->target, TRUE));
      debug_printf("  .level_zero_only = %u\n",
                   image->level_zero_only);
      debug_printf("  .pot = %u %u %u\n",
                   image->pot_width,
                   image->pot_height,
                   image->pot_depth);
   }
}

static void
lp_debug_cs_variant(const struct lp_compute_shader_variant *variant)
{
   debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
                variant->shader->no, variant->no);
   if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
      tgsi_dump(variant->shader->base.tokens, 0);
   else
      nir_print_shader(variant->shader->base.ir.nir, stderr);
   dump_cs_variant_key(&variant->key);
   debug_printf("\n");
}

static struct lp_compute_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 const struct lp_compute_shader_variant_key *key)
{
   struct lp_compute_shader_variant *variant;
   char module_name[64];

   variant = CALLOC_STRUCT(lp_compute_shader_variant);
   if (!variant)
      return NULL;

   snprintf(module_name, sizeof(module_name), "cs%u_variant%u",
            shader->no, shader->variants_created);

   variant->gallivm = gallivm_create(module_name, lp->context);
   if (!variant->gallivm) {
      FREE(variant);
      return NULL;
   }

   variant->shader = shader;
   variant->list_item_global.base = variant;
   variant->list_item_local.base = variant;
   variant->no = shader->variants_created++;

   memcpy(&variant->key, key, shader->variant_key_size);

   if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      lp_debug_cs_variant(variant);
   }

   lp_jit_init_cs_types(variant);

   generate_compute(lp, shader, variant);

   gallivm_compile_module(variant->gallivm);

   variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);

   variant->jit_function = (lp_jit_cs_func)gallivm_jit_function(variant->gallivm, variant->function);

   gallivm_free_ir(variant->gallivm);
   return variant;
}

static void
lp_cs_ctx_set_cs_variant(struct lp_cs_context *csctx,
                         struct lp_compute_shader_variant *variant)
{
   csctx->cs.current.variant = variant;
}

static void
llvmpipe_update_cs(struct llvmpipe_context *lp)
{
   struct lp_compute_shader *shader = lp->cs;

   struct lp_compute_shader_variant_key key;
   struct lp_compute_shader_variant *variant = NULL;
   struct lp_cs_variant_list_item *li;

   make_variant_key(lp, shader, &key);

   /* Search the variants for one which matches the key */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* Move this variant to the head of the list to implement LRU
       * deletion of shaders when we have too many.
       */
      move_to_head(&lp->cs_variants_list, &variant->list_item_global);
   }
   else {
      /* variant not found, create it now */
      int64_t t0, t1, dt;
      unsigned i;
      unsigned variants_to_cull;

      if (LP_DEBUG & DEBUG_CS) {
         debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
                      lp->nr_cs_variants,
                      lp->nr_cs_instrs,
                      lp->nr_cs_variants ? lp->nr_cs_instrs / lp->nr_cs_variants : 0);
      }

      /* First, check if we've exceeded the max number of shader variants.
       * If so, free 6.25% of them (the least recently used ones).
       */
      variants_to_cull = lp->nr_cs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;
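
      /*
       * E.g. if LP_MAX_SHADER_VARIANTS were 1024 (hypothetical value),
       * hitting the cap culls 1024 / 16 = 64 variants - the 6.25%
       * mentioned above - starting from the LRU tail of the global list.
       */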

      if (variants_to_cull ||
          lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
         if (gallivm_debug & GALLIVM_DEBUG_PERF) {
            debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
                         "\t%u instrs,\t%u instrs/variant\n",
                         shader->variants_cached,
                         lp->nr_cs_variants, lp->nr_cs_instrs,
                         lp->nr_cs_instrs / lp->nr_cs_variants);
         }

         /*
          * We need to re-check lp->nr_cs_variants because an arbitrarily large
          * number of shader variants (potentially all of them) could be
          * pending for destruction on flush.
          */

         for (i = 0; i < variants_to_cull || lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
            struct lp_cs_variant_list_item *item;
            if (is_empty_list(&lp->cs_variants_list)) {
               break;
            }
            item = last_elem(&lp->cs_variants_list);
            assert(item);
            assert(item->base);
            llvmpipe_remove_cs_shader_variant(lp, item->base);
         }
      }
      /*
       * Generate the new variant.
       */
      t0 = os_time_get();
      variant = generate_variant(lp, shader, &key);
      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      /* Put the new variant into the list */
      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->cs_variants_list, &variant->list_item_global);
         lp->nr_cs_variants++;
         lp->nr_cs_instrs += variant->nr_instrs;
         shader->variants_cached++;
      }
   }
   /* Bind this variant */
   lp_cs_ctx_set_cs_variant(lp->csctx, variant);
}

/**
 * Called during state validation when LP_CSNEW_SAMPLER_VIEW is set.
 */
static void
lp_csctx_set_sampler_views(struct lp_cs_context *csctx,
                           unsigned num,
                           struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   max_tex_num = MAX2(num, csctx->cs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &csctx->cs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&csctx->cs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - csctx array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }
            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures we don't have first_layer; instead
                      * we adjust last_layer (stored as depth) plus the mip
                      * level offsets (since the layout is mip-first, we can't
                      * just adjust the base pointer).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
      else {
         pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
      }
   }
   csctx->cs.current_tex_num = num;
}


/**
 * Called during state validation when LP_CSNEW_SAMPLER is set.
 */
static void
lp_csctx_set_sampler_state(struct lp_cs_context *csctx,
                           unsigned num,
                           struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         struct lp_jit_sampler *jit_sam;
         jit_sam = &csctx->cs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }
}

static void
lp_csctx_set_cs_constants(struct lp_cs_context *csctx,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(csctx->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(csctx->constants); i++) {
      util_copy_constant_buffer(&csctx->constants[i].current, NULL);
   }
}

static void
lp_csctx_set_cs_ssbos(struct lp_cs_context *csctx,
                      unsigned num,
                      struct pipe_shader_buffer *buffers)
{
   int i;
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *)buffers);

   assert(num <= ARRAY_SIZE(csctx->ssbos));

   for (i = 0; i < num; ++i) {
      util_copy_shader_buffer(&csctx->ssbos[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(csctx->ssbos); i++) {
      util_copy_shader_buffer(&csctx->ssbos[i].current, NULL);
   }
}

static void
lp_csctx_set_cs_images(struct lp_cs_context *csctx,
                       unsigned num,
                       struct pipe_image_view *images)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);

   assert(num <= ARRAY_SIZE(csctx->images));

   for (i = 0; i < num; ++i) {
      struct pipe_image_view *image = &images[i];
      util_copy_image_view(&csctx->images[i].current, &images[i]);

      struct pipe_resource *res = image->resource;
      struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
      struct lp_jit_image *jit_image;

      jit_image = &csctx->cs.current.jit_context.images[i];
      if (!lp_res)
         continue;
      if (!lp_res->dt) {
         /* regular texture - csctx array of mipmap level offsets */
         if (llvmpipe_resource_is_texture(res)) {
            jit_image->base = lp_res->tex_data;
         } else
            jit_image->base = lp_res->data;

         jit_image->width = res->width0;
         jit_image->height = res->height0;
         jit_image->depth = res->depth0;

         if (llvmpipe_resource_is_texture(res)) {
            uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];

            jit_image->width = u_minify(jit_image->width, image->u.tex.level);
            jit_image->height = u_minify(jit_image->height, image->u.tex.level);

            if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                res->target == PIPE_TEXTURE_2D_ARRAY ||
                res->target == PIPE_TEXTURE_3D ||
                res->target == PIPE_TEXTURE_CUBE ||
                res->target == PIPE_TEXTURE_CUBE_ARRAY) {
               /*
                * For array textures we don't have first_layer; instead
                * we adjust last_layer (stored as depth) plus the mip
                * level offsets (since the layout is mip-first, we can't
                * just adjust the base pointer).
                * XXX For mip levels, could do something similar.
                */
               jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
               mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
            } else
               jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);

            jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
            jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
            jit_image->base = (uint8_t *)jit_image->base + mip_offset;
         } else {
            unsigned view_blocksize = util_format_get_blocksize(image->format);
            jit_image->width = image->u.buf.size / view_blocksize;
            jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
         }
      }
   }
   for (; i < ARRAY_SIZE(csctx->images); i++) {
      util_copy_image_view(&csctx->images[i].current, NULL);
   }
}

static void
update_csctx_consts(struct llvmpipe_context *llvmpipe)
{
   struct lp_cs_context *csctx = llvmpipe->csctx;
   int i;

   for (i = 0; i < ARRAY_SIZE(csctx->constants); ++i) {
      struct pipe_resource *buffer = csctx->constants[i].current.buffer;
      const ubyte *current_data = NULL;

      if (buffer) {
         /* resource buffer */
         current_data = (ubyte *) llvmpipe_resource_data(buffer);
      }
      else if (csctx->constants[i].current.user_buffer) {
         /* user-space buffer */
         current_data = (ubyte *) csctx->constants[i].current.user_buffer;
      }

      if (current_data) {
         current_data += csctx->constants[i].current.buffer_offset;

         csctx->cs.current.jit_context.constants[i] = (const float *)current_data;
         csctx->cs.current.jit_context.num_constants[i] = csctx->constants[i].current.buffer_size;
      } else {
         csctx->cs.current.jit_context.constants[i] = NULL;
         csctx->cs.current.jit_context.num_constants[i] = 0;
      }
   }
}

static void
update_csctx_ssbo(struct llvmpipe_context *llvmpipe)
{
   struct lp_cs_context *csctx = llvmpipe->csctx;
   int i;
   for (i = 0; i < ARRAY_SIZE(csctx->ssbos); ++i) {
      struct pipe_resource *buffer = csctx->ssbos[i].current.buffer;
      const ubyte *current_data = NULL;

      if (!buffer)
         continue;
      /* resource buffer */
      current_data = (ubyte *) llvmpipe_resource_data(buffer);
      if (current_data) {
         current_data += csctx->ssbos[i].current.buffer_offset;

         csctx->cs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
         csctx->cs.current.jit_context.num_ssbos[i] = csctx->ssbos[i].current.buffer_size;
      } else {
         csctx->cs.current.jit_context.ssbos[i] = NULL;
         csctx->cs.current.jit_context.num_ssbos[i] = 0;
      }
   }
}

static void
llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe, void *input)
{
   if (llvmpipe->cs_dirty & (LP_CSNEW_CS))
      llvmpipe_update_cs(llvmpipe);

   if (llvmpipe->cs_dirty & LP_CSNEW_CONSTANTS) {
      lp_csctx_set_cs_constants(llvmpipe->csctx,
                                ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_COMPUTE]),
                                llvmpipe->constants[PIPE_SHADER_COMPUTE]);
      update_csctx_consts(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SSBOS) {
      lp_csctx_set_cs_ssbos(llvmpipe->csctx,
                            ARRAY_SIZE(llvmpipe->ssbos[PIPE_SHADER_COMPUTE]),
                            llvmpipe->ssbos[PIPE_SHADER_COMPUTE]);
      update_csctx_ssbo(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER_VIEW)
      lp_csctx_set_sampler_views(llvmpipe->csctx,
                                 llvmpipe->num_sampler_views[PIPE_SHADER_COMPUTE],
                                 llvmpipe->sampler_views[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER)
      lp_csctx_set_sampler_state(llvmpipe->csctx,
                                 llvmpipe->num_samplers[PIPE_SHADER_COMPUTE],
                                 llvmpipe->samplers[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_IMAGES)
      lp_csctx_set_cs_images(llvmpipe->csctx,
                             ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_COMPUTE]),
                             llvmpipe->images[PIPE_SHADER_COMPUTE]);

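   /*
    * Kernel inputs (e.g. OpenCL kernel arguments) arrive as an opaque blob
    * in pipe_grid_info::input; stash the pointer so the JIT-compiled shader
    * can read it back through lp_jit_cs_context_kernel_args().
    */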
   if (input) {
      struct lp_cs_context *csctx = llvmpipe->csctx;
      csctx->input = input;
      csctx->cs.current.jit_context.kernel_args = input;
   }

   llvmpipe->cs_dirty = 0;
}

static void
cs_exec_fn(void *init_data, int iter_idx, struct lp_cs_local_mem *lmem)
{
   struct lp_cs_job_info *job_info = init_data;
   struct lp_jit_cs_thread_data thread_data;

   memset(&thread_data, 0, sizeof(thread_data));

   if (lmem->local_size < job_info->req_local_mem) {
      lmem->local_mem_ptr = REALLOC(lmem->local_mem_ptr, lmem->local_size,
                                    job_info->req_local_mem);
      lmem->local_size = job_info->req_local_mem;
   }
   thread_data.shared = lmem->local_mem_ptr;

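   /*
    * Decompose the flat task index back into a 3D workgroup id; e.g. for a
    * hypothetical grid of {4, 2, 3}, iter_idx 9 maps to grid_z = 9 / 8 = 1,
    * grid_y = (9 - 8) / 4 = 0, grid_x = 9 - 8 - 0 = 1.
    */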
   unsigned grid_z = iter_idx / (job_info->grid_size[0] * job_info->grid_size[1]);
   unsigned grid_y = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1]))) / job_info->grid_size[0];
   unsigned grid_x = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1])) - (grid_y * job_info->grid_size[0]));
   struct lp_compute_shader_variant *variant = job_info->current->variant;
   variant->jit_function(&job_info->current->jit_context,
                         job_info->block_size[0], job_info->block_size[1], job_info->block_size[2],
                         grid_x, grid_y, grid_z,
                         job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2],
                         &thread_data);
}

static void
fill_grid_size(struct pipe_context *pipe,
               const struct pipe_grid_info *info,
               uint32_t grid_size[3])
{
   struct pipe_transfer *transfer;
   uint32_t *params;
   if (!info->indirect) {
      grid_size[0] = info->grid[0];
      grid_size[1] = info->grid[1];
      grid_size[2] = info->grid[2];
      return;
   }
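
   /*
    * Indirect dispatch: the grid size lives in a buffer as three
    * consecutive uint32 values at indirect_offset, i.e. the layout
    * written by e.g. glDispatchComputeIndirect:
    * { num_groups_x, num_groups_y, num_groups_z }.
    */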
   params = pipe_buffer_map_range(pipe, info->indirect,
                                  info->indirect_offset,
                                  3 * sizeof(uint32_t),
                                  PIPE_TRANSFER_READ,
                                  &transfer);

   if (!transfer)
      return;

   grid_size[0] = params[0];
   grid_size[1] = params[1];
   grid_size[2] = params[2];
   pipe_buffer_unmap(pipe, transfer);
}

static void llvmpipe_launch_grid(struct pipe_context *pipe,
                                 const struct pipe_grid_info *info)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_cs_job_info job_info;

   memset(&job_info, 0, sizeof(job_info));

   llvmpipe_cs_update_derived(llvmpipe, info->input);

   fill_grid_size(pipe, info, job_info.grid_size);

   job_info.block_size[0] = info->block[0];
   job_info.block_size[1] = info->block[1];
   job_info.block_size[2] = info->block[2];
   job_info.req_local_mem = llvmpipe->cs->req_local_mem;
   job_info.current = &llvmpipe->csctx->cs.current;

   int num_tasks = job_info.grid_size[2] * job_info.grid_size[1] * job_info.grid_size[0];
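
   /*
    * One task per workgroup; e.g. the hypothetical {4, 2, 3} grid with
    * {8, 8, 1} blocks queues 24 tasks and later accounts 24 * 64 = 1536
    * shader invocations in the pipeline statistics below.
    */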
   if (num_tasks) {
      struct lp_cs_tpool_task *task;
      mtx_lock(&screen->cs_mutex);
      task = lp_cs_tpool_queue_task(screen->cs_tpool, cs_exec_fn, &job_info, num_tasks);

      lp_cs_tpool_wait_for_task(screen->cs_tpool, &task);
      mtx_unlock(&screen->cs_mutex);
   }
   llvmpipe->pipeline_statistics.cs_invocations += num_tasks * info->block[0] * info->block[1] * info->block[2];
}

void
llvmpipe_init_compute_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_compute_state = llvmpipe_create_compute_state;
   llvmpipe->pipe.bind_compute_state = llvmpipe_bind_compute_state;
   llvmpipe->pipe.delete_compute_state = llvmpipe_delete_compute_state;
   llvmpipe->pipe.launch_grid = llvmpipe_launch_grid;
}

void
lp_csctx_destroy(struct lp_cs_context *csctx)
{
   unsigned i;
   for (i = 0; i < ARRAY_SIZE(csctx->cs.current_tex); i++) {
      pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
   }
   for (i = 0; i < ARRAY_SIZE(csctx->constants); i++) {
      pipe_resource_reference(&csctx->constants[i].current.buffer, NULL);
   }
   for (i = 0; i < ARRAY_SIZE(csctx->ssbos); i++) {
      pipe_resource_reference(&csctx->ssbos[i].current.buffer, NULL);
   }
   FREE(csctx);
}

struct lp_cs_context *lp_csctx_create(struct pipe_context *pipe)
{
   struct lp_cs_context *csctx;

   csctx = CALLOC_STRUCT(lp_cs_context);
   if (!csctx)
      return NULL;

   csctx->pipe = pipe;
   return csctx;
}