Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] src/gallium/drivers/llvmpipe/lp_state_cs.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "util/os_time.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_debug.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_coro.h"
#include "gallivm/lp_bld_nir.h"
#include "lp_state_cs.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_state.h"
#include "lp_perf.h"
#include "lp_screen.h"
#include "lp_memory.h"
#include "lp_query.h"
#include "lp_cs_tpool.h"
#include "frontend/sw_winsys.h"
#include "nir/nir_to_tgsi_info.h"
#include "util/mesa-sha1.h"
#include "nir_serialize.h"

/** Compute shader number (for debugging) */
static unsigned cs_no = 0;

struct lp_cs_job_info {
   unsigned grid_size[3];
   unsigned block_size[3];
   unsigned req_local_mem;
   unsigned work_dim;
   struct lp_cs_exec *current;
};

static void
generate_compute(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 struct lp_compute_shader_variant *variant)
{
   struct gallivm_state *gallivm = variant->gallivm;
   const struct lp_compute_shader_variant_key *key = &variant->key;
   char func_name[64], func_name_coro[64];
   LLVMTypeRef arg_types[17];
   LLVMTypeRef func_type, coro_func_type;
   LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
   LLVMValueRef context_ptr;
   LLVMValueRef x_size_arg, y_size_arg, z_size_arg;
   LLVMValueRef grid_x_arg, grid_y_arg, grid_z_arg;
   LLVMValueRef grid_size_x_arg, grid_size_y_arg, grid_size_z_arg;
   LLVMValueRef work_dim_arg, thread_data_ptr;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_image_soa *image;
   LLVMValueRef function, coro;
   struct lp_type cs_type;
   unsigned i;

   /*
    * This function has two parts:
    * a) set up the coroutine execution environment loop.
    * b) build the compute shader LLVM IR for use inside the coroutine.
    */
   assert(lp_native_vector_width / 32 >= 4);

   memset(&cs_type, 0, sizeof cs_type);
   cs_type.floating = TRUE; /* floating point values */
   cs_type.sign = TRUE;     /* values are signed */
   cs_type.norm = FALSE;    /* values are not limited to [0,1] or [-1,1] */
   cs_type.width = 32;      /* 32-bit float */
   cs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
   snprintf(func_name, sizeof(func_name), "cs_variant");

   snprintf(func_name_coro, sizeof(func_name_coro), "cs_co_variant");

   arg_types[0] = variant->jit_cs_context_ptr_type;      /* context */
   arg_types[1] = int32_type;                             /* block_x_size */
   arg_types[2] = int32_type;                             /* block_y_size */
   arg_types[3] = int32_type;                             /* block_z_size */
   arg_types[4] = int32_type;                             /* grid_x */
   arg_types[5] = int32_type;                             /* grid_y */
   arg_types[6] = int32_type;                             /* grid_z */
   arg_types[7] = int32_type;                             /* grid_size_x */
   arg_types[8] = int32_type;                             /* grid_size_y */
   arg_types[9] = int32_type;                             /* grid_size_z */
   arg_types[10] = int32_type;                            /* work dim */
   arg_types[11] = variant->jit_cs_thread_data_ptr_type;  /* per thread data */
   arg_types[12] = int32_type;                            /* coro only - num X loops */
   arg_types[13] = int32_type;                            /* coro only - partials */
   arg_types[14] = int32_type;                            /* coro block_x_size */
   arg_types[15] = int32_type;                            /* coro block_y_size */
   arg_types[16] = int32_type;                            /* coro block_z_size */
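   /*
    * The outer entry point takes only the first 12 arguments; the coroutine
    * variant additionally receives the X-loop count, the partial-vector
    * count and the block sizes (the trailing 5 entries).
    */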
   func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
                                arg_types, ARRAY_SIZE(arg_types) - 5, 0);

   coro_func_type = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0),
                                     arg_types, ARRAY_SIZE(arg_types), 0);

   function = LLVMAddFunction(gallivm->module, func_name, func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   coro = LLVMAddFunction(gallivm->module, func_name_coro, coro_func_type);
   LLVMSetFunctionCallConv(coro, LLVMCCallConv);

   variant->function = function;

   for (i = 0; i < ARRAY_SIZE(arg_types); ++i) {
      if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
         lp_add_function_attr(coro, i + 1, LP_FUNC_ATTR_NOALIAS);
         lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
      }
   }

   lp_build_coro_declare_malloc_hooks(gallivm);

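   /*
    * On a disk-cache hit the compiled machine code is loaded from the
    * cached blob, so there is no need to build any IR here.
    */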
   if (variant->gallivm->cache->data_size)
      return;

   context_ptr = LLVMGetParam(function, 0);
   x_size_arg = LLVMGetParam(function, 1);
   y_size_arg = LLVMGetParam(function, 2);
   z_size_arg = LLVMGetParam(function, 3);
   grid_x_arg = LLVMGetParam(function, 4);
   grid_y_arg = LLVMGetParam(function, 5);
   grid_z_arg = LLVMGetParam(function, 6);
   grid_size_x_arg = LLVMGetParam(function, 7);
   grid_size_y_arg = LLVMGetParam(function, 8);
   grid_size_z_arg = LLVMGetParam(function, 9);
   work_dim_arg = LLVMGetParam(function, 10);
   thread_data_ptr = LLVMGetParam(function, 11);

   lp_build_name(context_ptr, "context");
   lp_build_name(x_size_arg, "x_size");
   lp_build_name(y_size_arg, "y_size");
   lp_build_name(z_size_arg, "z_size");
   lp_build_name(grid_x_arg, "grid_x");
   lp_build_name(grid_y_arg, "grid_y");
   lp_build_name(grid_z_arg, "grid_z");
   lp_build_name(grid_size_x_arg, "grid_size_x");
   lp_build_name(grid_size_y_arg, "grid_size_y");
   lp_build_name(grid_size_z_arg, "grid_size_z");
   lp_build_name(work_dim_arg, "work_dim");
   lp_build_name(thread_data_ptr, "thread_data");

   block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
   builder = gallivm->builder;
   assert(builder);
   LLVMPositionBuilderAtEnd(builder, block);
   sampler = lp_llvm_sampler_soa_create(key->samplers, key->nr_samplers);
   image = lp_llvm_image_soa_create(lp_cs_variant_key_images(key), key->nr_images);

   struct lp_build_loop_state loop_state[4];
   LLVMValueRef num_x_loop;
   LLVMValueRef vec_length = lp_build_const_int32(gallivm, cs_type.length);
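   /*
    * num_x_loop = ceil(block_x_size / vector_length): the number of
    * vector-wide iterations that cover the block in X. partials is the
    * number of live lanes in the last iteration (0 when the block width
    * divides evenly).
    */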
   num_x_loop = LLVMBuildAdd(gallivm->builder, x_size_arg, vec_length, "");
   num_x_loop = LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), "");
   num_x_loop = LLVMBuildUDiv(gallivm->builder, num_x_loop, vec_length, "");
   LLVMValueRef partials = LLVMBuildURem(gallivm->builder, x_size_arg, vec_length, "");

   LLVMValueRef coro_num_hdls = LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, "");
   coro_num_hdls = LLVMBuildMul(gallivm->builder, coro_num_hdls, z_size_arg, "");

   LLVMTypeRef hdl_ptr_type = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
   LLVMValueRef coro_hdls = LLVMBuildArrayAlloca(gallivm->builder, hdl_ptr_type, coro_num_hdls, "coro_hdls");

   unsigned end_coroutine = INT_MAX;
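   /*
    * end_coroutine is a sentinel for the reentry loop counter: once the
    * coroutines have completed, the counter is forced to end_coroutine - 1
    * so the loop-end comparison against end_coroutine terminates the loop.
    */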

   /*
    * This is the main coroutine execution loop. It iterates over the dimensions
    * and calls the coroutine main entrypoint on the first pass, but in subsequent
    * passes it checks if the coroutine has completed and resumes it if not.
    */
   /* take x_width - round up to type.length width */
   lp_build_loop_begin(&loop_state[3], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* coroutine reentry loop */
   lp_build_loop_begin(&loop_state[2], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* z loop */
   lp_build_loop_begin(&loop_state[1], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* y loop */
   lp_build_loop_begin(&loop_state[0], gallivm,
                       lp_build_const_int32(gallivm, 0)); /* x loop */
   {
      LLVMValueRef args[17];
      args[0] = context_ptr;
      args[1] = loop_state[0].counter;
      args[2] = loop_state[1].counter;
      args[3] = loop_state[2].counter;
      args[4] = grid_x_arg;
      args[5] = grid_y_arg;
      args[6] = grid_z_arg;
      args[7] = grid_size_x_arg;
      args[8] = grid_size_y_arg;
      args[9] = grid_size_z_arg;
      args[10] = work_dim_arg;
      args[11] = thread_data_ptr;
      args[12] = num_x_loop;
      args[13] = partials;
      args[14] = x_size_arg;
      args[15] = y_size_arg;
      args[16] = z_size_arg;

      /* idx = z * (num_x_loop * size_y) + y * num_x_loop + x */
      LLVMValueRef coro_hdl_idx = LLVMBuildMul(gallivm->builder, loop_state[2].counter,
                                               LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, ""), "");
      coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
                                  LLVMBuildMul(gallivm->builder, loop_state[1].counter,
                                               num_x_loop, ""), "");
      coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
                                  loop_state[0].counter, "");

      LLVMValueRef coro_entry = LLVMBuildGEP(gallivm->builder, coro_hdls, &coro_hdl_idx, 1, "");

      LLVMValueRef coro_hdl = LLVMBuildLoad(gallivm->builder, coro_entry, "coro_hdl");

      struct lp_build_if_state ifstate;
      LLVMValueRef cmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, loop_state[3].counter,
                                       lp_build_const_int32(gallivm, 0), "");
      /* first time here - call the coroutine function entry point */
      lp_build_if(&ifstate, gallivm, cmp);
      LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 17, "");
      LLVMBuildStore(gallivm->builder, coro_ret, coro_entry);
      lp_build_else(&ifstate);
      /* subsequent calls for this invocation - check if done. */
      LLVMValueRef coro_done = lp_build_coro_done(gallivm, coro_hdl);
      struct lp_build_if_state ifstate2;
      lp_build_if(&ifstate2, gallivm, coro_done);
      /* if done destroy and force loop exit */
      lp_build_coro_destroy(gallivm, coro_hdl);
      lp_build_loop_force_set_counter(&loop_state[3], lp_build_const_int32(gallivm, end_coroutine - 1));
      lp_build_else(&ifstate2);
      /* otherwise resume the coroutine */
      lp_build_coro_resume(gallivm, coro_hdl);
      lp_build_endif(&ifstate2);
      lp_build_endif(&ifstate);
      lp_build_loop_force_reload_counter(&loop_state[3]);
   }
   lp_build_loop_end_cond(&loop_state[0],
                          num_x_loop,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[1],
                          y_size_arg,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[2],
                          z_size_arg,
                          NULL, LLVMIntUGE);
   lp_build_loop_end_cond(&loop_state[3],
                          lp_build_const_int32(gallivm, end_coroutine),
                          NULL, LLVMIntEQ);
   LLVMBuildRetVoid(builder);

   /* This is stage (b) - generate the compute shader code inside the coroutine. */
   LLVMValueRef block_x_size_arg, block_y_size_arg, block_z_size_arg;
   context_ptr = LLVMGetParam(coro, 0);
   x_size_arg = LLVMGetParam(coro, 1);
   y_size_arg = LLVMGetParam(coro, 2);
   z_size_arg = LLVMGetParam(coro, 3);
   grid_x_arg = LLVMGetParam(coro, 4);
   grid_y_arg = LLVMGetParam(coro, 5);
   grid_z_arg = LLVMGetParam(coro, 6);
   grid_size_x_arg = LLVMGetParam(coro, 7);
   grid_size_y_arg = LLVMGetParam(coro, 8);
   grid_size_z_arg = LLVMGetParam(coro, 9);
   work_dim_arg = LLVMGetParam(coro, 10);
   thread_data_ptr = LLVMGetParam(coro, 11);
   num_x_loop = LLVMGetParam(coro, 12);
   partials = LLVMGetParam(coro, 13);
   block_x_size_arg = LLVMGetParam(coro, 14);
   block_y_size_arg = LLVMGetParam(coro, 15);
   block_z_size_arg = LLVMGetParam(coro, 16);
   block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "entry");
   LLVMPositionBuilderAtEnd(builder, block);
   {
      LLVMValueRef consts_ptr, num_consts_ptr;
      LLVMValueRef ssbo_ptr, num_ssbo_ptr;
      LLVMValueRef shared_ptr;
      LLVMValueRef kernel_args_ptr;
      struct lp_build_mask_context mask;
      struct lp_bld_tgsi_system_values system_values;

      memset(&system_values, 0, sizeof(system_values));
      consts_ptr = lp_jit_cs_context_constants(gallivm, context_ptr);
      num_consts_ptr = lp_jit_cs_context_num_constants(gallivm, context_ptr);
      ssbo_ptr = lp_jit_cs_context_ssbos(gallivm, context_ptr);
      num_ssbo_ptr = lp_jit_cs_context_num_ssbos(gallivm, context_ptr);
      kernel_args_ptr = lp_jit_cs_context_kernel_args(gallivm, context_ptr);

      shared_ptr = lp_jit_cs_thread_data_shared(gallivm, thread_data_ptr);

      /* these are coroutine entrypoint necessities */
      LLVMValueRef coro_id = lp_build_coro_id(gallivm);
      LLVMValueRef coro_hdl = lp_build_coro_begin_alloc_mem(gallivm, coro_id);

      LLVMValueRef has_partials = LLVMBuildICmp(gallivm->builder, LLVMIntNE, partials, lp_build_const_int32(gallivm, 0), "");
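      /*
       * Each coroutine covers one vector's worth of invocations in X:
       * lane i of this iteration gets thread id
       * (x_iter * vector_length + i, y, z).
       */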
      LLVMValueRef tid_vals[3];
      LLVMValueRef tids_x[LP_MAX_VECTOR_LENGTH], tids_y[LP_MAX_VECTOR_LENGTH], tids_z[LP_MAX_VECTOR_LENGTH];
      LLVMValueRef base_val = LLVMBuildMul(gallivm->builder, x_size_arg, vec_length, "");
      for (i = 0; i < cs_type.length; i++) {
         tids_x[i] = LLVMBuildAdd(gallivm->builder, base_val, lp_build_const_int32(gallivm, i), "");
         tids_y[i] = y_size_arg;
         tids_z[i] = z_size_arg;
      }
      tid_vals[0] = lp_build_gather_values(gallivm, tids_x, cs_type.length);
      tid_vals[1] = lp_build_gather_values(gallivm, tids_y, cs_type.length);
      tid_vals[2] = lp_build_gather_values(gallivm, tids_z, cs_type.length);
      system_values.thread_id = LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type, cs_type.length), 3));
      for (i = 0; i < 3; i++)
         system_values.thread_id = LLVMBuildInsertValue(builder, system_values.thread_id, tid_vals[i], i, "");

      LLVMValueRef gtids[3] = { grid_x_arg, grid_y_arg, grid_z_arg };
      system_values.block_id = LLVMGetUndef(LLVMVectorType(int32_type, 3));
      for (i = 0; i < 3; i++)
         system_values.block_id = LLVMBuildInsertElement(builder, system_values.block_id, gtids[i], lp_build_const_int32(gallivm, i), "");

      LLVMValueRef gstids[3] = { grid_size_x_arg, grid_size_y_arg, grid_size_z_arg };
      system_values.grid_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
      for (i = 0; i < 3; i++)
         system_values.grid_size = LLVMBuildInsertElement(builder, system_values.grid_size, gstids[i], lp_build_const_int32(gallivm, i), "");

      system_values.work_dim = work_dim_arg;

      LLVMValueRef bsize[3] = { block_x_size_arg, block_y_size_arg, block_z_size_arg };
      system_values.block_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
      for (i = 0; i < 3; i++)
         system_values.block_size = LLVMBuildInsertElement(builder, system_values.block_size, bsize[i], lp_build_const_int32(gallivm, i), "");

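      /*
       * Build the execution mask: all lanes start enabled; if this is the
       * last X iteration and the block width is not a multiple of the
       * vector length, zero out the lanes in [partials, vector_length).
       */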
      LLVMValueRef last_x_loop = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, x_size_arg, LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), ""), "");
      LLVMValueRef use_partial_mask = LLVMBuildAnd(gallivm->builder, last_x_loop, has_partials, "");
      struct lp_build_if_state if_state;
      LLVMValueRef mask_val = lp_build_alloca(gallivm, LLVMVectorType(int32_type, cs_type.length), "mask");
      LLVMValueRef full_mask_val = lp_build_const_int_vec(gallivm, cs_type, ~0);
      LLVMBuildStore(gallivm->builder, full_mask_val, mask_val);

      lp_build_if(&if_state, gallivm, use_partial_mask);
      struct lp_build_loop_state mask_loop_state;
      lp_build_loop_begin(&mask_loop_state, gallivm, partials);
      LLVMValueRef tmask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
      tmask_val = LLVMBuildInsertElement(gallivm->builder, tmask_val, lp_build_const_int32(gallivm, 0), mask_loop_state.counter, "");
      LLVMBuildStore(gallivm->builder, tmask_val, mask_val);
      lp_build_loop_end_cond(&mask_loop_state, vec_length, NULL, LLVMIntUGE);
      lp_build_endif(&if_state);

      mask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
      lp_build_mask_begin(&mask, gallivm, cs_type, mask_val);

      struct lp_build_coro_suspend_info coro_info;

      LLVMBasicBlockRef sus_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "suspend");
      LLVMBasicBlockRef clean_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "cleanup");

      coro_info.suspend = sus_block;
      coro_info.cleanup = clean_block;

      struct lp_build_tgsi_params params;
      memset(&params, 0, sizeof(params));

      params.type = cs_type;
      params.mask = &mask;
      params.consts_ptr = consts_ptr;
      params.const_sizes_ptr = num_consts_ptr;
      params.system_values = &system_values;
      params.context_ptr = context_ptr;
      params.sampler = sampler;
      params.info = &shader->info.base;
      params.ssbo_ptr = ssbo_ptr;
      params.ssbo_sizes_ptr = num_ssbo_ptr;
      params.image = image;
      params.shared_ptr = shared_ptr;
      params.coro = &coro_info;
      params.kernel_args = kernel_args_ptr;

      if (shader->base.type == PIPE_SHADER_IR_TGSI)
         lp_build_tgsi_soa(gallivm, shader->base.tokens, &params, NULL);
      else
         lp_build_nir_soa(gallivm, shader->base.ir.nir, &params,
                          NULL);

      mask_val = lp_build_mask_end(&mask);

      lp_build_coro_suspend_switch(gallivm, &coro_info, NULL, true);
      LLVMPositionBuilderAtEnd(builder, clean_block);

      lp_build_coro_free_mem(gallivm, coro_id, coro_hdl);

      LLVMBuildBr(builder, sus_block);
      LLVMPositionBuilderAtEnd(builder, sus_block);

      lp_build_coro_end(gallivm, coro_hdl);
      LLVMBuildRet(builder, coro_hdl);
   }

   sampler->destroy(sampler);
   image->destroy(image);

   gallivm_verify_function(gallivm, coro);
   gallivm_verify_function(gallivm, function);
}

static void *
llvmpipe_create_compute_state(struct pipe_context *pipe,
                              const struct pipe_compute_state *templ)
{
   struct lp_compute_shader *shader;
   int nr_samplers, nr_sampler_views;

   shader = CALLOC_STRUCT(lp_compute_shader);
   if (!shader)
      return NULL;

   shader->no = cs_no++;

   shader->base.type = templ->ir_type;
   if (templ->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
      struct blob_reader reader;
      const struct pipe_binary_program_header *hdr = templ->prog;

      blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
      shader->base.ir.nir = nir_deserialize(NULL, pipe->screen->get_compiler_options(pipe->screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE), &reader);
      shader->base.type = PIPE_SHADER_IR_NIR;

      pipe->screen->finalize_nir(pipe->screen, shader->base.ir.nir, false);
   } else if (templ->ir_type == PIPE_SHADER_IR_NIR)
      shader->base.ir.nir = (struct nir_shader *)templ->prog;

   if (shader->base.type == PIPE_SHADER_IR_TGSI) {
      /* get/save the summary info for this shader */
      lp_build_tgsi_info(templ->prog, &shader->info);

      /* we need to keep a local copy of the tokens */
      shader->base.tokens = tgsi_dup_tokens(templ->prog);
   } else {
      nir_tgsi_scan_shader(shader->base.ir.nir, &shader->info.base, false);
   }

   shader->req_local_mem = templ->req_local_mem;
   make_empty_list(&shader->variants);

   nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
   nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
   int nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
   shader->variant_key_size = lp_cs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);

   return shader;
}

static void
llvmpipe_bind_compute_state(struct pipe_context *pipe,
                            void *cs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->cs == cs)
      return;

   llvmpipe->cs = (struct lp_compute_shader *)cs;
   llvmpipe->cs_dirty |= LP_CSNEW_CS;
}

/**
 * Remove shader variant from two lists: the shader's variant list
 * and the context's variant list.
 */
static void
llvmpipe_remove_cs_shader_variant(struct llvmpipe_context *lp,
                                  struct lp_compute_shader_variant *variant)
{
   if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
                   "v total cached %u inst %u total inst %u\n",
                   variant->shader->no, variant->no,
                   variant->shader->variants_created,
                   variant->shader->variants_cached,
                   lp->nr_cs_variants, variant->nr_instrs, lp->nr_cs_instrs);
   }

   gallivm_destroy(variant->gallivm);

   /* remove from shader's list */
   remove_from_list(&variant->list_item_local);
   variant->shader->variants_cached--;

   /* remove from context's list */
   remove_from_list(&variant->list_item_global);
   lp->nr_cs_variants--;
   lp->nr_cs_instrs -= variant->nr_instrs;

   FREE(variant);
}

static void
llvmpipe_delete_compute_state(struct pipe_context *pipe,
                              void *cs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_compute_shader *shader = cs;
   struct lp_cs_variant_list_item *li;

   if (llvmpipe->cs == cs)
      llvmpipe->cs = NULL;
   for (unsigned i = 0; i < shader->max_global_buffers; i++)
      pipe_resource_reference(&shader->global_buffers[i], NULL);
   FREE(shader->global_buffers);

   /* Delete all the variants */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      struct lp_cs_variant_list_item *next = next_elem(li);
      llvmpipe_remove_cs_shader_variant(llvmpipe, li->base);
      li = next;
   }
   if (shader->base.ir.nir)
      ralloc_free(shader->base.ir.nir);
   tgsi_free_tokens(shader->base.tokens);
   FREE(shader);
}

static struct lp_compute_shader_variant_key *
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 char *store)
{
   int i;
   struct lp_compute_shader_variant_key *key;
   key = (struct lp_compute_shader_variant_key *)store;
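   /*
    * Zero the fixed-size head of the key up to and including the first
    * sampler slot; used sampler and image entries beyond that are filled
    * in below.
    */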
   memset(key, 0, offsetof(struct lp_compute_shader_variant_key, samplers[1]));

   /* This value will be the same for all the variants of a given shader:
    */
   key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;

   struct lp_sampler_static_state *cs_sampler;

   cs_sampler = key->samplers;
   for (i = 0; i < key->nr_samplers; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
         lp_sampler_static_sampler_state(&cs_sampler[i].sampler_state,
                                         lp->samplers[PIPE_SHADER_COMPUTE][i]);
      }
   }

   /*
    * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
    * are dx10-style? Can't really have mixed opcodes, at least not
    * if we want to skip the holes here (without rescanning tgsi).
    */
   if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
      key->nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         /*
          * Note sview may exceed what's representable by file_mask.
          * This will still work, the only downside is that not actually
          * used views may be included in the shader key.
          */
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
            lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
         }
      }
   }
   else {
      key->nr_sampler_views = key->nr_samplers;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
            lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
         }
      }
   }

   struct lp_image_static_state *lp_image;
   lp_image = lp_cs_variant_key_images(key);
   key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
   for (i = 0; i < key->nr_images; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
         lp_sampler_static_texture_state_image(&lp_image[i].image_state,
                                               &lp->images[PIPE_SHADER_COMPUTE][i]);
      }
   }
   return key;
}

static void
dump_cs_variant_key(const struct lp_compute_shader_variant_key *key)
{
   int i;
   debug_printf("cs variant %p:\n", (void *) key);

   for (i = 0; i < key->nr_samplers; ++i) {
      const struct lp_static_sampler_state *sampler = &key->samplers[i].sampler_state;
      debug_printf("sampler[%u] = \n", i);
      debug_printf("  .wrap = %s %s %s\n",
                   util_str_tex_wrap(sampler->wrap_s, TRUE),
                   util_str_tex_wrap(sampler->wrap_t, TRUE),
                   util_str_tex_wrap(sampler->wrap_r, TRUE));
      debug_printf("  .min_img_filter = %s\n",
                   util_str_tex_filter(sampler->min_img_filter, TRUE));
      debug_printf("  .min_mip_filter = %s\n",
                   util_str_tex_mipfilter(sampler->min_mip_filter, TRUE));
      debug_printf("  .mag_img_filter = %s\n",
                   util_str_tex_filter(sampler->mag_img_filter, TRUE));
      if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
         debug_printf("  .compare_func = %s\n", util_str_func(sampler->compare_func, TRUE));
      debug_printf("  .normalized_coords = %u\n", sampler->normalized_coords);
      debug_printf("  .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
      debug_printf("  .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
      debug_printf("  .apply_min_lod = %u\n", sampler->apply_min_lod);
      debug_printf("  .apply_max_lod = %u\n", sampler->apply_max_lod);
   }
   for (i = 0; i < key->nr_sampler_views; ++i) {
      const struct lp_static_texture_state *texture = &key->samplers[i].texture_state;
      debug_printf("texture[%u] = \n", i);
      debug_printf("  .format = %s\n",
                   util_format_name(texture->format));
      debug_printf("  .target = %s\n",
                   util_str_tex_target(texture->target, TRUE));
      debug_printf("  .level_zero_only = %u\n",
                   texture->level_zero_only);
      debug_printf("  .pot = %u %u %u\n",
                   texture->pot_width,
                   texture->pot_height,
                   texture->pot_depth);
   }
   struct lp_image_static_state *images = lp_cs_variant_key_images(key);
   for (i = 0; i < key->nr_images; ++i) {
      const struct lp_static_texture_state *image = &images[i].image_state;
      debug_printf("image[%u] = \n", i);
      debug_printf("  .format = %s\n",
                   util_format_name(image->format));
      debug_printf("  .target = %s\n",
                   util_str_tex_target(image->target, TRUE));
      debug_printf("  .level_zero_only = %u\n",
                   image->level_zero_only);
      debug_printf("  .pot = %u %u %u\n",
                   image->pot_width,
                   image->pot_height,
                   image->pot_depth);
   }
}

static void
lp_debug_cs_variant(const struct lp_compute_shader_variant *variant)
{
   debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
                variant->shader->no, variant->no);
   if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
      tgsi_dump(variant->shader->base.tokens, 0);
   else
      nir_print_shader(variant->shader->base.ir.nir, stderr);
   dump_cs_variant_key(&variant->key);
   debug_printf("\n");
}

static void
lp_cs_get_ir_cache_key(struct lp_compute_shader_variant *variant,
                       unsigned char ir_sha1_cache_key[20])
{
   struct blob blob = { 0 };
   unsigned ir_size;
   void *ir_binary;

   blob_init(&blob);
   nir_serialize(&blob, variant->shader->base.ir.nir, true);
   ir_binary = blob.data;
   ir_size = blob.size;

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, &variant->key, variant->shader->variant_key_size);
   _mesa_sha1_update(&ctx, ir_binary, ir_size);
   _mesa_sha1_final(&ctx, ir_sha1_cache_key);

   blob_finish(&blob);
}

static struct lp_compute_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 const struct lp_compute_shader_variant_key *key)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   struct lp_compute_shader_variant *variant;
   char module_name[64];
   unsigned char ir_sha1_cache_key[20];
   struct lp_cached_code cached = { 0 };
   bool needs_caching = false;
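   /*
    * The variant is allocated with extra space so the runtime-sized key
    * (samplers plus images) can be stored inline in place of the
    * fixed-size key member.
    */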
   variant = MALLOC(sizeof *variant + shader->variant_key_size - sizeof variant->key);
   if (!variant)
      return NULL;

   memset(variant, 0, sizeof(*variant));
   snprintf(module_name, sizeof(module_name), "cs%u_variant%u",
            shader->no, shader->variants_created);

   variant->shader = shader;
   memcpy(&variant->key, key, shader->variant_key_size);

   if (shader->base.ir.nir) {
      lp_cs_get_ir_cache_key(variant, ir_sha1_cache_key);

      lp_disk_cache_find_shader(screen, &cached, ir_sha1_cache_key);
      if (!cached.data_size)
         needs_caching = true;
   }
   variant->gallivm = gallivm_create(module_name, lp->context, &cached);
   if (!variant->gallivm) {
      FREE(variant);
      return NULL;
   }

   variant->list_item_global.base = variant;
   variant->list_item_local.base = variant;
   variant->no = shader->variants_created++;

   if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      lp_debug_cs_variant(variant);
   }

   lp_jit_init_cs_types(variant);

   generate_compute(lp, shader, variant);

   gallivm_compile_module(variant->gallivm);

   lp_build_coro_add_malloc_hooks(variant->gallivm);
   variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);

   variant->jit_function = (lp_jit_cs_func)gallivm_jit_function(variant->gallivm, variant->function);

   if (needs_caching) {
      lp_disk_cache_insert_shader(screen, &cached, ir_sha1_cache_key);
   }
   gallivm_free_ir(variant->gallivm);
   return variant;
}

static void
lp_cs_ctx_set_cs_variant(struct lp_cs_context *csctx,
                         struct lp_compute_shader_variant *variant)
{
   csctx->cs.current.variant = variant;
}

static void
llvmpipe_update_cs(struct llvmpipe_context *lp)
{
   struct lp_compute_shader *shader = lp->cs;

   struct lp_compute_shader_variant_key *key;
   struct lp_compute_shader_variant *variant = NULL;
   struct lp_cs_variant_list_item *li;
   char store[LP_CS_MAX_VARIANT_KEY_SIZE];

   key = make_variant_key(lp, shader, store);

   /* Search the variants for one which matches the key */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* Move this variant to the head of the list to implement LRU
       * deletion of shaders when we have too many.
       */
      move_to_head(&lp->cs_variants_list, &variant->list_item_global);
   }
   else {
      /* variant not found, create it now */
      int64_t t0, t1, dt;
      unsigned i;
      unsigned variants_to_cull;

      if (LP_DEBUG & DEBUG_CS) {
         debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
                      lp->nr_cs_variants,
                      lp->nr_cs_instrs,
                      lp->nr_cs_variants ? lp->nr_cs_instrs / lp->nr_cs_variants : 0);
      }

      /* First, check if we've exceeded the max number of shader variants.
       * If so, free 6.25% of them (the least recently used ones).
       */
      variants_to_cull = lp->nr_cs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;

      if (variants_to_cull ||
          lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
         if (gallivm_debug & GALLIVM_DEBUG_PERF) {
            debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
                         "\t%u instrs,\t%u instrs/variant\n",
                         shader->variants_cached,
                         lp->nr_cs_variants, lp->nr_cs_instrs,
                         lp->nr_cs_instrs / lp->nr_cs_variants);
         }

         /*
          * We need to re-check lp->nr_cs_variants because an arbitrarily large
          * number of shader variants (potentially all of them) could be
          * pending for destruction on flush.
          */

         for (i = 0; i < variants_to_cull || lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
            struct lp_cs_variant_list_item *item;
            if (is_empty_list(&lp->cs_variants_list)) {
               break;
            }
            item = last_elem(&lp->cs_variants_list);
            assert(item);
            assert(item->base);
            llvmpipe_remove_cs_shader_variant(lp, item->base);
         }
      }
      /*
       * Generate the new variant.
       */
      t0 = os_time_get();
      variant = generate_variant(lp, shader, key);
      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2); /* emit vs. omit in/out test */

      /* Put the new variant into the list */
      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->cs_variants_list, &variant->list_item_global);
         lp->nr_cs_variants++;
         lp->nr_cs_instrs += variant->nr_instrs;
         shader->variants_cached++;
      }
   }
   /* Bind this variant */
   lp_cs_ctx_set_cs_variant(lp->csctx, variant);
}

/**
 * Called during state validation when LP_CSNEW_SAMPLER_VIEW is set.
 */
static void
lp_csctx_set_sampler_views(struct lp_cs_context *csctx,
                           unsigned num,
                           struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   max_tex_num = MAX2(num, csctx->cs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &csctx->cs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&csctx->cs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - csctx array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }
            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
               jit_tex->num_samples = 0;
               jit_tex->sample_stride = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;
               jit_tex->num_samples = res->nr_samples;
               jit_tex->sample_stride = 0;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }
                  jit_tex->sample_stride = lp_tex->sample_stride;

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have a mip-first layout we can't just adjust the base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            jit_tex->num_samples = res->nr_samples;
            jit_tex->sample_stride = 0;
            assert(jit_tex->base);
         }
      }
      else {
         pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
      }
   }
   csctx->cs.current_tex_num = num;
}


/**
 * Called during state validation when LP_NEW_SAMPLER is set.
 */
static void
lp_csctx_set_sampler_state(struct lp_cs_context *csctx,
                           unsigned num,
                           struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         struct lp_jit_sampler *jit_sam;
         jit_sam = &csctx->cs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }
}

static void
lp_csctx_set_cs_constants(struct lp_cs_context *csctx,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(csctx->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(csctx->constants); i++) {
      util_copy_constant_buffer(&csctx->constants[i].current, NULL);
   }
}

static void
lp_csctx_set_cs_ssbos(struct lp_cs_context *csctx,
                      unsigned num,
                      struct pipe_shader_buffer *buffers)
{
   int i;
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *)buffers);

   assert(num <= ARRAY_SIZE(csctx->ssbos));

   for (i = 0; i < num; ++i) {
      util_copy_shader_buffer(&csctx->ssbos[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(csctx->ssbos); i++) {
      util_copy_shader_buffer(&csctx->ssbos[i].current, NULL);
   }
}

static void
lp_csctx_set_cs_images(struct lp_cs_context *csctx,
                       unsigned num,
                       struct pipe_image_view *images)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);

   assert(num <= ARRAY_SIZE(csctx->images));

   for (i = 0; i < num; ++i) {
      struct pipe_image_view *image = &images[i];
      util_copy_image_view(&csctx->images[i].current, &images[i]);

      struct pipe_resource *res = image->resource;
      struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
      struct lp_jit_image *jit_image;

      jit_image = &csctx->cs.current.jit_context.images[i];
      if (!lp_res)
         continue;
      if (!lp_res->dt) {
         /* regular texture - csctx array of mipmap level offsets */
         if (llvmpipe_resource_is_texture(res)) {
            jit_image->base = lp_res->tex_data;
         } else
            jit_image->base = lp_res->data;

         jit_image->width = res->width0;
         jit_image->height = res->height0;
         jit_image->depth = res->depth0;
         jit_image->num_samples = res->nr_samples;

         if (llvmpipe_resource_is_texture(res)) {
            uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];

            jit_image->width = u_minify(jit_image->width, image->u.tex.level);
            jit_image->height = u_minify(jit_image->height, image->u.tex.level);

            if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                res->target == PIPE_TEXTURE_2D_ARRAY ||
                res->target == PIPE_TEXTURE_3D ||
                res->target == PIPE_TEXTURE_CUBE ||
                res->target == PIPE_TEXTURE_CUBE_ARRAY) {
               /*
                * For array textures, we don't have first_layer, instead
                * adjust last_layer (stored as depth) plus the mip level offsets
                * (as we have a mip-first layout we can't just adjust the base ptr).
                * XXX For mip levels, could do something similar.
                */
               jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
               mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
            } else
               jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);

            jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
            jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
            jit_image->sample_stride = lp_res->sample_stride;
            jit_image->base = (uint8_t *)jit_image->base + mip_offset;
         } else {
            unsigned view_blocksize = util_format_get_blocksize(image->format);
            jit_image->width = image->u.buf.size / view_blocksize;
            jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
         }
      }
   }
   for (; i < ARRAY_SIZE(csctx->images); i++) {
      util_copy_image_view(&csctx->images[i].current, NULL);
   }
}

static void
update_csctx_consts(struct llvmpipe_context *llvmpipe)
{
   struct lp_cs_context *csctx = llvmpipe->csctx;
   int i;

   for (i = 0; i < ARRAY_SIZE(csctx->constants); ++i) {
      struct pipe_resource *buffer = csctx->constants[i].current.buffer;
      const ubyte *current_data = NULL;
      unsigned current_size = csctx->constants[i].current.buffer_size;
      if (buffer) {
         /* resource buffer */
         current_data = (ubyte *) llvmpipe_resource_data(buffer);
      }
      else if (csctx->constants[i].current.user_buffer) {
         /* user-space buffer */
         current_data = (ubyte *) csctx->constants[i].current.user_buffer;
      }

      if (current_data && current_size >= sizeof(float)) {
         current_data += csctx->constants[i].current.buffer_offset;
         csctx->cs.current.jit_context.constants[i] = (const float *)current_data;
         csctx->cs.current.jit_context.num_constants[i] =
            DIV_ROUND_UP(csctx->constants[i].current.buffer_size,
                         lp_get_constant_buffer_stride(llvmpipe->pipe.screen));
      } else {
         static const float fake_const_buf[4];
         csctx->cs.current.jit_context.constants[i] = fake_const_buf;
         csctx->cs.current.jit_context.num_constants[i] = 0;
      }
   }
}

static void
update_csctx_ssbo(struct llvmpipe_context *llvmpipe)
{
   struct lp_cs_context *csctx = llvmpipe->csctx;
   int i;
   for (i = 0; i < ARRAY_SIZE(csctx->ssbos); ++i) {
      struct pipe_resource *buffer = csctx->ssbos[i].current.buffer;
      const ubyte *current_data = NULL;

      if (!buffer)
         continue;
      /* resource buffer */
      current_data = (ubyte *) llvmpipe_resource_data(buffer);
      if (current_data) {
         current_data += csctx->ssbos[i].current.buffer_offset;

         csctx->cs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
         csctx->cs.current.jit_context.num_ssbos[i] = csctx->ssbos[i].current.buffer_size;
      } else {
         csctx->cs.current.jit_context.ssbos[i] = NULL;
         csctx->cs.current.jit_context.num_ssbos[i] = 0;
      }
   }
}

static void
llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe, void *input)
{
   if (llvmpipe->cs_dirty & LP_CSNEW_CONSTANTS) {
      lp_csctx_set_cs_constants(llvmpipe->csctx,
                                ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_COMPUTE]),
                                llvmpipe->constants[PIPE_SHADER_COMPUTE]);
      update_csctx_consts(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SSBOS) {
      lp_csctx_set_cs_ssbos(llvmpipe->csctx,
                            ARRAY_SIZE(llvmpipe->ssbos[PIPE_SHADER_COMPUTE]),
                            llvmpipe->ssbos[PIPE_SHADER_COMPUTE]);
      update_csctx_ssbo(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER_VIEW)
      lp_csctx_set_sampler_views(llvmpipe->csctx,
                                 llvmpipe->num_sampler_views[PIPE_SHADER_COMPUTE],
                                 llvmpipe->sampler_views[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER)
      lp_csctx_set_sampler_state(llvmpipe->csctx,
                                 llvmpipe->num_samplers[PIPE_SHADER_COMPUTE],
                                 llvmpipe->samplers[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_IMAGES)
      lp_csctx_set_cs_images(llvmpipe->csctx,
                             ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_COMPUTE]),
                             llvmpipe->images[PIPE_SHADER_COMPUTE]);

   if (input) {
      struct lp_cs_context *csctx = llvmpipe->csctx;
      csctx->input = input;
      csctx->cs.current.jit_context.kernel_args = input;
   }

   if (llvmpipe->cs_dirty & (LP_CSNEW_CS |
                             LP_CSNEW_IMAGES |
                             LP_CSNEW_SAMPLER_VIEW |
                             LP_CSNEW_SAMPLER))
      llvmpipe_update_cs(llvmpipe);

   llvmpipe->cs_dirty = 0;
}

static void
cs_exec_fn(void *init_data, int iter_idx, struct lp_cs_local_mem *lmem)
{
   struct lp_cs_job_info *job_info = init_data;
   struct lp_jit_cs_thread_data thread_data;

   memset(&thread_data, 0, sizeof(thread_data));

   if (lmem->local_size < job_info->req_local_mem) {
      lmem->local_mem_ptr = REALLOC(lmem->local_mem_ptr, lmem->local_size,
                                    job_info->req_local_mem);
      lmem->local_size = job_info->req_local_mem;
   }
   thread_data.shared = lmem->local_mem_ptr;

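   /* Decompose the linear task index: iter_idx = z * (gx * gy) + y * gx + x. */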
   unsigned grid_z = iter_idx / (job_info->grid_size[0] * job_info->grid_size[1]);
   unsigned grid_y = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1]))) / job_info->grid_size[0];
   unsigned grid_x = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1])) - (grid_y * job_info->grid_size[0]));
   struct lp_compute_shader_variant *variant = job_info->current->variant;
   variant->jit_function(&job_info->current->jit_context,
                         job_info->block_size[0], job_info->block_size[1], job_info->block_size[2],
                         grid_x, grid_y, grid_z,
                         job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2], job_info->work_dim,
                         &thread_data);
}

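/*
 * Resolve the dispatch dimensions: copy them from the grid info, or, for
 * an indirect dispatch, read three uint32s from the indirect buffer at
 * indirect_offset.
 */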
static void
fill_grid_size(struct pipe_context *pipe,
               const struct pipe_grid_info *info,
               uint32_t grid_size[3])
{
   struct pipe_transfer *transfer;
   uint32_t *params;
   if (!info->indirect) {
      grid_size[0] = info->grid[0];
      grid_size[1] = info->grid[1];
      grid_size[2] = info->grid[2];
      return;
   }
   params = pipe_buffer_map_range(pipe, info->indirect,
                                  info->indirect_offset,
                                  3 * sizeof(uint32_t),
                                  PIPE_TRANSFER_READ,
                                  &transfer);

   if (!transfer)
      return;

   grid_size[0] = params[0];
   grid_size[1] = params[1];
   grid_size[2] = params[2];
   pipe_buffer_unmap(pipe, transfer);
}

static void llvmpipe_launch_grid(struct pipe_context *pipe,
                                 const struct pipe_grid_info *info)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_cs_job_info job_info;

   if (!llvmpipe_check_render_cond(llvmpipe))
      return;

   memset(&job_info, 0, sizeof(job_info));

   llvmpipe_cs_update_derived(llvmpipe, info->input);

   fill_grid_size(pipe, info, job_info.grid_size);

   job_info.block_size[0] = info->block[0];
   job_info.block_size[1] = info->block[1];
   job_info.block_size[2] = info->block[2];
   job_info.work_dim = info->work_dim;
   job_info.req_local_mem = llvmpipe->cs->req_local_mem;
   job_info.current = &llvmpipe->csctx->cs.current;

   int num_tasks = job_info.grid_size[2] * job_info.grid_size[1] * job_info.grid_size[0];
   if (num_tasks) {
      struct lp_cs_tpool_task *task;
      mtx_lock(&screen->cs_mutex);
      task = lp_cs_tpool_queue_task(screen->cs_tpool, cs_exec_fn, &job_info, num_tasks);

      lp_cs_tpool_wait_for_task(screen->cs_tpool, &task);
      mtx_unlock(&screen->cs_mutex);
   }
   llvmpipe->pipeline_statistics.cs_invocations += num_tasks * info->block[0] * info->block[1] * info->block[2];
}

static void
llvmpipe_set_compute_resources(struct pipe_context *pipe,
                               unsigned start, unsigned count,
                               struct pipe_surface **resources)
{
}

static void
llvmpipe_set_global_binding(struct pipe_context *pipe,
                            unsigned first, unsigned count,
                            struct pipe_resource **resources,
                            uint32_t **handles)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_compute_shader *cs = llvmpipe->cs;
   unsigned i;

   if (first + count > cs->max_global_buffers) {
      unsigned old_max = cs->max_global_buffers;
      cs->max_global_buffers = first + count;
      cs->global_buffers = realloc(cs->global_buffers,
                                   cs->max_global_buffers * sizeof(cs->global_buffers[0]));
      if (!cs->global_buffers) {
         return;
      }

      memset(&cs->global_buffers[old_max], 0, (cs->max_global_buffers - old_max) * sizeof(cs->global_buffers[0]));
   }

   if (!resources) {
      for (i = 0; i < count; i++)
         pipe_resource_reference(&cs->global_buffers[first + i], NULL);
      return;
   }

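   /*
    * Each handle holds a byte offset into its buffer on input; rewrite it
    * with the buffer's actual CPU address at that offset, since llvmpipe
    * runs kernels directly on the host.
    */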
   for (i = 0; i < count; i++) {
      uintptr_t va;
      uint32_t offset;
      pipe_resource_reference(&cs->global_buffers[first + i], resources[i]);
      struct llvmpipe_resource *lp_res = llvmpipe_resource(resources[i]);
      offset = *handles[i];
      va = (uintptr_t)((char *)lp_res->data + offset);
      memcpy(handles[i], &va, sizeof(va));
   }
}

void
llvmpipe_init_compute_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_compute_state = llvmpipe_create_compute_state;
   llvmpipe->pipe.bind_compute_state = llvmpipe_bind_compute_state;
   llvmpipe->pipe.delete_compute_state = llvmpipe_delete_compute_state;
   llvmpipe->pipe.set_compute_resources = llvmpipe_set_compute_resources;
   llvmpipe->pipe.set_global_binding = llvmpipe_set_global_binding;
   llvmpipe->pipe.launch_grid = llvmpipe_launch_grid;
}

void
lp_csctx_destroy(struct lp_cs_context *csctx)
{
   unsigned i;
   for (i = 0; i < ARRAY_SIZE(csctx->cs.current_tex); i++) {
      pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
   }
   for (i = 0; i < ARRAY_SIZE(csctx->constants); i++) {
      pipe_resource_reference(&csctx->constants[i].current.buffer, NULL);
   }
   for (i = 0; i < ARRAY_SIZE(csctx->ssbos); i++) {
      pipe_resource_reference(&csctx->ssbos[i].current.buffer, NULL);
   }
   FREE(csctx);
}

struct lp_cs_context *lp_csctx_create(struct pipe_context *pipe)
{
   struct lp_cs_context *csctx;

   csctx = CALLOC_STRUCT(lp_cs_context);
   if (!csctx)
      return NULL;

   csctx->pipe = pipe;
   return csctx;
}