1 /**************************************************************************
3 * Copyright 2019 Red Hat.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **************************************************************************/
25 #include "util/u_memory.h"
26 #include "util/simple_list.h"
27 #include "util/os_time.h"
28 #include "util/u_dump.h"
29 #include "util/u_string.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "gallivm/lp_bld_const.h"
33 #include "gallivm/lp_bld_debug.h"
34 #include "gallivm/lp_bld_intr.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "gallivm/lp_bld_gather.h"
37 #include "gallivm/lp_bld_coro.h"
38 #include "gallivm/lp_bld_nir.h"
39 #include "lp_state_cs.h"
40 #include "lp_context.h"
44 #include "lp_screen.h"
45 #include "lp_memory.h"
47 #include "lp_cs_tpool.h"
48 #include "frontend/sw_winsys.h"
49 #include "nir/nir_to_tgsi_info.h"
50 #include "util/mesa-sha1.h"
51 #include "nir_serialize.h"
53 /** Compute shader number (for debugging) */
/* Monotonically increasing counter used to assign each compute shader a
 * debug id (copied into shader->no on creation). */
54 static unsigned cs_no
= 0;
56 struct lp_cs_job_info
{
57 unsigned grid_size
[3];
58 unsigned block_size
[3];
59 unsigned req_local_mem
;
61 struct lp_cs_exec
*current
;
65 generate_compute(struct llvmpipe_context
*lp
,
66 struct lp_compute_shader
*shader
,
67 struct lp_compute_shader_variant
*variant
)
69 struct gallivm_state
*gallivm
= variant
->gallivm
;
70 const struct lp_compute_shader_variant_key
*key
= &variant
->key
;
71 char func_name
[64], func_name_coro
[64];
72 LLVMTypeRef arg_types
[17];
73 LLVMTypeRef func_type
, coro_func_type
;
74 LLVMTypeRef int32_type
= LLVMInt32TypeInContext(gallivm
->context
);
75 LLVMValueRef context_ptr
;
76 LLVMValueRef x_size_arg
, y_size_arg
, z_size_arg
;
77 LLVMValueRef grid_x_arg
, grid_y_arg
, grid_z_arg
;
78 LLVMValueRef grid_size_x_arg
, grid_size_y_arg
, grid_size_z_arg
;
79 LLVMValueRef work_dim_arg
, thread_data_ptr
;
80 LLVMBasicBlockRef block
;
81 LLVMBuilderRef builder
;
82 struct lp_build_sampler_soa
*sampler
;
83 struct lp_build_image_soa
*image
;
84 LLVMValueRef function
, coro
;
85 struct lp_type cs_type
;
89 * This function has two parts
90 * a) setup the coroutine execution environment loop.
91 * b) build the compute shader llvm for use inside the coroutine.
93 assert(lp_native_vector_width
/ 32 >= 4);
95 memset(&cs_type
, 0, sizeof cs_type
);
96 cs_type
.floating
= TRUE
; /* floating point values */
97 cs_type
.sign
= TRUE
; /* values are signed */
98 cs_type
.norm
= FALSE
; /* values are not limited to [0,1] or [-1,1] */
99 cs_type
.width
= 32; /* 32-bit float */
100 cs_type
.length
= MIN2(lp_native_vector_width
/ 32, 16); /* n*4 elements per vector */
101 snprintf(func_name
, sizeof(func_name
), "cs_variant");
103 snprintf(func_name_coro
, sizeof(func_name
), "cs_co_variant");
105 arg_types
[0] = variant
->jit_cs_context_ptr_type
; /* context */
106 arg_types
[1] = int32_type
; /* block_x_size */
107 arg_types
[2] = int32_type
; /* block_y_size */
108 arg_types
[3] = int32_type
; /* block_z_size */
109 arg_types
[4] = int32_type
; /* grid_x */
110 arg_types
[5] = int32_type
; /* grid_y */
111 arg_types
[6] = int32_type
; /* grid_z */
112 arg_types
[7] = int32_type
; /* grid_size_x */
113 arg_types
[8] = int32_type
; /* grid_size_y */
114 arg_types
[9] = int32_type
; /* grid_size_z */
115 arg_types
[10] = int32_type
; /* work dim */
116 arg_types
[11] = variant
->jit_cs_thread_data_ptr_type
; /* per thread data */
117 arg_types
[12] = int32_type
; /* coro only - num X loops */
118 arg_types
[13] = int32_type
; /* coro only - partials */
119 arg_types
[14] = int32_type
; /* coro block_x_size */
120 arg_types
[15] = int32_type
; /* coro block_y_size */
121 arg_types
[16] = int32_type
; /* coro block_z_size */
122 func_type
= LLVMFunctionType(LLVMVoidTypeInContext(gallivm
->context
),
123 arg_types
, ARRAY_SIZE(arg_types
) - 5, 0);
125 coro_func_type
= LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm
->context
), 0),
126 arg_types
, ARRAY_SIZE(arg_types
), 0);
128 function
= LLVMAddFunction(gallivm
->module
, func_name
, func_type
);
129 LLVMSetFunctionCallConv(function
, LLVMCCallConv
);
131 coro
= LLVMAddFunction(gallivm
->module
, func_name_coro
, coro_func_type
);
132 LLVMSetFunctionCallConv(coro
, LLVMCCallConv
);
134 variant
->function
= function
;
136 for(i
= 0; i
< ARRAY_SIZE(arg_types
); ++i
) {
137 if(LLVMGetTypeKind(arg_types
[i
]) == LLVMPointerTypeKind
) {
138 lp_add_function_attr(coro
, i
+ 1, LP_FUNC_ATTR_NOALIAS
);
139 lp_add_function_attr(function
, i
+ 1, LP_FUNC_ATTR_NOALIAS
);
143 lp_build_coro_declare_malloc_hooks(gallivm
);
145 if (variant
->gallivm
->cache
->data_size
)
148 context_ptr
= LLVMGetParam(function
, 0);
149 x_size_arg
= LLVMGetParam(function
, 1);
150 y_size_arg
= LLVMGetParam(function
, 2);
151 z_size_arg
= LLVMGetParam(function
, 3);
152 grid_x_arg
= LLVMGetParam(function
, 4);
153 grid_y_arg
= LLVMGetParam(function
, 5);
154 grid_z_arg
= LLVMGetParam(function
, 6);
155 grid_size_x_arg
= LLVMGetParam(function
, 7);
156 grid_size_y_arg
= LLVMGetParam(function
, 8);
157 grid_size_z_arg
= LLVMGetParam(function
, 9);
158 work_dim_arg
= LLVMGetParam(function
, 10);
159 thread_data_ptr
= LLVMGetParam(function
, 11);
161 lp_build_name(context_ptr
, "context");
162 lp_build_name(x_size_arg
, "x_size");
163 lp_build_name(y_size_arg
, "y_size");
164 lp_build_name(z_size_arg
, "z_size");
165 lp_build_name(grid_x_arg
, "grid_x");
166 lp_build_name(grid_y_arg
, "grid_y");
167 lp_build_name(grid_z_arg
, "grid_z");
168 lp_build_name(grid_size_x_arg
, "grid_size_x");
169 lp_build_name(grid_size_y_arg
, "grid_size_y");
170 lp_build_name(grid_size_z_arg
, "grid_size_z");
171 lp_build_name(work_dim_arg
, "work_dim");
172 lp_build_name(thread_data_ptr
, "thread_data");
174 block
= LLVMAppendBasicBlockInContext(gallivm
->context
, function
, "entry");
175 builder
= gallivm
->builder
;
177 LLVMPositionBuilderAtEnd(builder
, block
);
178 sampler
= lp_llvm_sampler_soa_create(key
->samplers
, key
->nr_samplers
);
179 image
= lp_llvm_image_soa_create(lp_cs_variant_key_images(key
), key
->nr_images
);
181 struct lp_build_loop_state loop_state
[4];
182 LLVMValueRef num_x_loop
;
183 LLVMValueRef vec_length
= lp_build_const_int32(gallivm
, cs_type
.length
);
184 num_x_loop
= LLVMBuildAdd(gallivm
->builder
, x_size_arg
, vec_length
, "");
185 num_x_loop
= LLVMBuildSub(gallivm
->builder
, num_x_loop
, lp_build_const_int32(gallivm
, 1), "");
186 num_x_loop
= LLVMBuildUDiv(gallivm
->builder
, num_x_loop
, vec_length
, "");
187 LLVMValueRef partials
= LLVMBuildURem(gallivm
->builder
, x_size_arg
, vec_length
, "");
189 LLVMValueRef coro_num_hdls
= LLVMBuildMul(gallivm
->builder
, num_x_loop
, y_size_arg
, "");
190 coro_num_hdls
= LLVMBuildMul(gallivm
->builder
, coro_num_hdls
, z_size_arg
, "");
192 LLVMTypeRef hdl_ptr_type
= LLVMPointerType(LLVMInt8TypeInContext(gallivm
->context
), 0);
193 LLVMValueRef coro_hdls
= LLVMBuildArrayAlloca(gallivm
->builder
, hdl_ptr_type
, coro_num_hdls
, "coro_hdls");
195 unsigned end_coroutine
= INT_MAX
;
198 * This is the main coroutine execution loop. It iterates over the dimensions
199 * and calls the coroutine main entrypoint on the first pass, but in subsequent
200 * passes it checks if the coroutine has completed and resumes it if not.
202 /* take x_width - round up to type.length width */
203 lp_build_loop_begin(&loop_state
[3], gallivm
,
204 lp_build_const_int32(gallivm
, 0)); /* coroutine reentry loop */
205 lp_build_loop_begin(&loop_state
[2], gallivm
,
206 lp_build_const_int32(gallivm
, 0)); /* z loop */
207 lp_build_loop_begin(&loop_state
[1], gallivm
,
208 lp_build_const_int32(gallivm
, 0)); /* y loop */
209 lp_build_loop_begin(&loop_state
[0], gallivm
,
210 lp_build_const_int32(gallivm
, 0)); /* x loop */
212 LLVMValueRef args
[17];
213 args
[0] = context_ptr
;
214 args
[1] = loop_state
[0].counter
;
215 args
[2] = loop_state
[1].counter
;
216 args
[3] = loop_state
[2].counter
;
217 args
[4] = grid_x_arg
;
218 args
[5] = grid_y_arg
;
219 args
[6] = grid_z_arg
;
220 args
[7] = grid_size_x_arg
;
221 args
[8] = grid_size_y_arg
;
222 args
[9] = grid_size_z_arg
;
223 args
[10] = work_dim_arg
;
224 args
[11] = thread_data_ptr
;
225 args
[12] = num_x_loop
;
227 args
[14] = x_size_arg
;
228 args
[15] = y_size_arg
;
229 args
[16] = z_size_arg
;
231 /* idx = (z * (size_x * size_y) + y * size_x + x) */
232 LLVMValueRef coro_hdl_idx
= LLVMBuildMul(gallivm
->builder
, loop_state
[2].counter
,
233 LLVMBuildMul(gallivm
->builder
, num_x_loop
, y_size_arg
, ""), "");
234 coro_hdl_idx
= LLVMBuildAdd(gallivm
->builder
, coro_hdl_idx
,
235 LLVMBuildMul(gallivm
->builder
, loop_state
[1].counter
,
236 num_x_loop
, ""), "");
237 coro_hdl_idx
= LLVMBuildAdd(gallivm
->builder
, coro_hdl_idx
,
238 loop_state
[0].counter
, "");
240 LLVMValueRef coro_entry
= LLVMBuildGEP(gallivm
->builder
, coro_hdls
, &coro_hdl_idx
, 1, "");
242 LLVMValueRef coro_hdl
= LLVMBuildLoad(gallivm
->builder
, coro_entry
, "coro_hdl");
244 struct lp_build_if_state ifstate
;
245 LLVMValueRef cmp
= LLVMBuildICmp(gallivm
->builder
, LLVMIntEQ
, loop_state
[3].counter
,
246 lp_build_const_int32(gallivm
, 0), "");
247 /* first time here - call the coroutine function entry point */
248 lp_build_if(&ifstate
, gallivm
, cmp
);
249 LLVMValueRef coro_ret
= LLVMBuildCall(gallivm
->builder
, coro
, args
, 17, "");
250 LLVMBuildStore(gallivm
->builder
, coro_ret
, coro_entry
);
251 lp_build_else(&ifstate
);
252 /* subsequent calls for this invocation - check if done. */
253 LLVMValueRef coro_done
= lp_build_coro_done(gallivm
, coro_hdl
);
254 struct lp_build_if_state ifstate2
;
255 lp_build_if(&ifstate2
, gallivm
, coro_done
);
256 /* if done destroy and force loop exit */
257 lp_build_coro_destroy(gallivm
, coro_hdl
);
258 lp_build_loop_force_set_counter(&loop_state
[3], lp_build_const_int32(gallivm
, end_coroutine
- 1));
259 lp_build_else(&ifstate2
);
260 /* otherwise resume the coroutine */
261 lp_build_coro_resume(gallivm
, coro_hdl
);
262 lp_build_endif(&ifstate2
);
263 lp_build_endif(&ifstate
);
264 lp_build_loop_force_reload_counter(&loop_state
[3]);
266 lp_build_loop_end_cond(&loop_state
[0],
269 lp_build_loop_end_cond(&loop_state
[1],
272 lp_build_loop_end_cond(&loop_state
[2],
275 lp_build_loop_end_cond(&loop_state
[3],
276 lp_build_const_int32(gallivm
, end_coroutine
),
278 LLVMBuildRetVoid(builder
);
280 /* This is stage (b) - generate the compute shader code inside the coroutine. */
281 LLVMValueRef block_x_size_arg
, block_y_size_arg
, block_z_size_arg
;
282 context_ptr
= LLVMGetParam(coro
, 0);
283 x_size_arg
= LLVMGetParam(coro
, 1);
284 y_size_arg
= LLVMGetParam(coro
, 2);
285 z_size_arg
= LLVMGetParam(coro
, 3);
286 grid_x_arg
= LLVMGetParam(coro
, 4);
287 grid_y_arg
= LLVMGetParam(coro
, 5);
288 grid_z_arg
= LLVMGetParam(coro
, 6);
289 grid_size_x_arg
= LLVMGetParam(coro
, 7);
290 grid_size_y_arg
= LLVMGetParam(coro
, 8);
291 grid_size_z_arg
= LLVMGetParam(coro
, 9);
292 work_dim_arg
= LLVMGetParam(coro
, 10);
293 thread_data_ptr
= LLVMGetParam(coro
, 11);
294 num_x_loop
= LLVMGetParam(coro
, 12);
295 partials
= LLVMGetParam(coro
, 13);
296 block_x_size_arg
= LLVMGetParam(coro
, 14);
297 block_y_size_arg
= LLVMGetParam(coro
, 15);
298 block_z_size_arg
= LLVMGetParam(coro
, 16);
299 block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "entry");
300 LLVMPositionBuilderAtEnd(builder
, block
);
302 LLVMValueRef consts_ptr
, num_consts_ptr
;
303 LLVMValueRef ssbo_ptr
, num_ssbo_ptr
;
304 LLVMValueRef shared_ptr
;
305 LLVMValueRef kernel_args_ptr
;
306 struct lp_build_mask_context mask
;
307 struct lp_bld_tgsi_system_values system_values
;
309 memset(&system_values
, 0, sizeof(system_values
));
310 consts_ptr
= lp_jit_cs_context_constants(gallivm
, context_ptr
);
311 num_consts_ptr
= lp_jit_cs_context_num_constants(gallivm
, context_ptr
);
312 ssbo_ptr
= lp_jit_cs_context_ssbos(gallivm
, context_ptr
);
313 num_ssbo_ptr
= lp_jit_cs_context_num_ssbos(gallivm
, context_ptr
);
314 kernel_args_ptr
= lp_jit_cs_context_kernel_args(gallivm
, context_ptr
);
316 shared_ptr
= lp_jit_cs_thread_data_shared(gallivm
, thread_data_ptr
);
318 /* these are coroutine entrypoint necessities */
319 LLVMValueRef coro_id
= lp_build_coro_id(gallivm
);
320 LLVMValueRef coro_hdl
= lp_build_coro_begin_alloc_mem(gallivm
, coro_id
);
322 LLVMValueRef has_partials
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
, partials
, lp_build_const_int32(gallivm
, 0), "");
323 LLVMValueRef tid_vals
[3];
324 LLVMValueRef tids_x
[LP_MAX_VECTOR_LENGTH
], tids_y
[LP_MAX_VECTOR_LENGTH
], tids_z
[LP_MAX_VECTOR_LENGTH
];
325 LLVMValueRef base_val
= LLVMBuildMul(gallivm
->builder
, x_size_arg
, vec_length
, "");
326 for (i
= 0; i
< cs_type
.length
; i
++) {
327 tids_x
[i
] = LLVMBuildAdd(gallivm
->builder
, base_val
, lp_build_const_int32(gallivm
, i
), "");
328 tids_y
[i
] = y_size_arg
;
329 tids_z
[i
] = z_size_arg
;
331 tid_vals
[0] = lp_build_gather_values(gallivm
, tids_x
, cs_type
.length
);
332 tid_vals
[1] = lp_build_gather_values(gallivm
, tids_y
, cs_type
.length
);
333 tid_vals
[2] = lp_build_gather_values(gallivm
, tids_z
, cs_type
.length
);
334 system_values
.thread_id
= LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type
, cs_type
.length
), 3));
335 for (i
= 0; i
< 3; i
++)
336 system_values
.thread_id
= LLVMBuildInsertValue(builder
, system_values
.thread_id
, tid_vals
[i
], i
, "");
338 LLVMValueRef gtids
[3] = { grid_x_arg
, grid_y_arg
, grid_z_arg
};
339 system_values
.block_id
= LLVMGetUndef(LLVMVectorType(int32_type
, 3));
340 for (i
= 0; i
< 3; i
++)
341 system_values
.block_id
= LLVMBuildInsertElement(builder
, system_values
.block_id
, gtids
[i
], lp_build_const_int32(gallivm
, i
), "");
343 LLVMValueRef gstids
[3] = { grid_size_x_arg
, grid_size_y_arg
, grid_size_z_arg
};
344 system_values
.grid_size
= LLVMGetUndef(LLVMVectorType(int32_type
, 3));
345 for (i
= 0; i
< 3; i
++)
346 system_values
.grid_size
= LLVMBuildInsertElement(builder
, system_values
.grid_size
, gstids
[i
], lp_build_const_int32(gallivm
, i
), "");
348 system_values
.work_dim
= work_dim_arg
;
350 LLVMValueRef bsize
[3] = { block_x_size_arg
, block_y_size_arg
, block_z_size_arg
};
351 system_values
.block_size
= LLVMGetUndef(LLVMVectorType(int32_type
, 3));
352 for (i
= 0; i
< 3; i
++)
353 system_values
.block_size
= LLVMBuildInsertElement(builder
, system_values
.block_size
, bsize
[i
], lp_build_const_int32(gallivm
, i
), "");
355 LLVMValueRef last_x_loop
= LLVMBuildICmp(gallivm
->builder
, LLVMIntEQ
, x_size_arg
, LLVMBuildSub(gallivm
->builder
, num_x_loop
, lp_build_const_int32(gallivm
, 1), ""), "");
356 LLVMValueRef use_partial_mask
= LLVMBuildAnd(gallivm
->builder
, last_x_loop
, has_partials
, "");
357 struct lp_build_if_state if_state
;
358 LLVMValueRef mask_val
= lp_build_alloca(gallivm
, LLVMVectorType(int32_type
, cs_type
.length
), "mask");
359 LLVMValueRef full_mask_val
= lp_build_const_int_vec(gallivm
, cs_type
, ~0);
360 LLVMBuildStore(gallivm
->builder
, full_mask_val
, mask_val
);
362 lp_build_if(&if_state
, gallivm
, use_partial_mask
);
363 struct lp_build_loop_state mask_loop_state
;
364 lp_build_loop_begin(&mask_loop_state
, gallivm
, partials
);
365 LLVMValueRef tmask_val
= LLVMBuildLoad(gallivm
->builder
, mask_val
, "");
366 tmask_val
= LLVMBuildInsertElement(gallivm
->builder
, tmask_val
, lp_build_const_int32(gallivm
, 0), mask_loop_state
.counter
, "");
367 LLVMBuildStore(gallivm
->builder
, tmask_val
, mask_val
);
368 lp_build_loop_end_cond(&mask_loop_state
, vec_length
, NULL
, LLVMIntUGE
);
369 lp_build_endif(&if_state
);
371 mask_val
= LLVMBuildLoad(gallivm
->builder
, mask_val
, "");
372 lp_build_mask_begin(&mask
, gallivm
, cs_type
, mask_val
);
374 struct lp_build_coro_suspend_info coro_info
;
376 LLVMBasicBlockRef sus_block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "suspend");
377 LLVMBasicBlockRef clean_block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "cleanup");
379 coro_info
.suspend
= sus_block
;
380 coro_info
.cleanup
= clean_block
;
382 struct lp_build_tgsi_params params
;
383 memset(¶ms
, 0, sizeof(params
));
385 params
.type
= cs_type
;
387 params
.consts_ptr
= consts_ptr
;
388 params
.const_sizes_ptr
= num_consts_ptr
;
389 params
.system_values
= &system_values
;
390 params
.context_ptr
= context_ptr
;
391 params
.sampler
= sampler
;
392 params
.info
= &shader
->info
.base
;
393 params
.ssbo_ptr
= ssbo_ptr
;
394 params
.ssbo_sizes_ptr
= num_ssbo_ptr
;
395 params
.image
= image
;
396 params
.shared_ptr
= shared_ptr
;
397 params
.coro
= &coro_info
;
398 params
.kernel_args
= kernel_args_ptr
;
400 if (shader
->base
.type
== PIPE_SHADER_IR_TGSI
)
401 lp_build_tgsi_soa(gallivm
, shader
->base
.tokens
, ¶ms
, NULL
);
403 lp_build_nir_soa(gallivm
, shader
->base
.ir
.nir
, ¶ms
,
406 mask_val
= lp_build_mask_end(&mask
);
408 lp_build_coro_suspend_switch(gallivm
, &coro_info
, NULL
, true);
409 LLVMPositionBuilderAtEnd(builder
, clean_block
);
411 lp_build_coro_free_mem(gallivm
, coro_id
, coro_hdl
);
413 LLVMBuildBr(builder
, sus_block
);
414 LLVMPositionBuilderAtEnd(builder
, sus_block
);
416 lp_build_coro_end(gallivm
, coro_hdl
);
417 LLVMBuildRet(builder
, coro_hdl
);
420 sampler
->destroy(sampler
);
421 image
->destroy(image
);
423 gallivm_verify_function(gallivm
, coro
);
424 gallivm_verify_function(gallivm
, function
);
428 llvmpipe_create_compute_state(struct pipe_context
*pipe
,
429 const struct pipe_compute_state
*templ
)
431 struct lp_compute_shader
*shader
;
432 int nr_samplers
, nr_sampler_views
;
434 shader
= CALLOC_STRUCT(lp_compute_shader
);
438 shader
->no
= cs_no
++;
440 shader
->base
.type
= templ
->ir_type
;
441 if (templ
->ir_type
== PIPE_SHADER_IR_NIR_SERIALIZED
) {
442 struct blob_reader reader
;
443 const struct pipe_binary_program_header
*hdr
= templ
->prog
;
445 blob_reader_init(&reader
, hdr
->blob
, hdr
->num_bytes
);
446 shader
->base
.ir
.nir
= nir_deserialize(NULL
, pipe
->screen
->get_compiler_options(pipe
->screen
, PIPE_SHADER_IR_NIR
, PIPE_SHADER_COMPUTE
), &reader
);
447 shader
->base
.type
= PIPE_SHADER_IR_NIR
;
449 pipe
->screen
->finalize_nir(pipe
->screen
, shader
->base
.ir
.nir
, false);
450 } else if (templ
->ir_type
== PIPE_SHADER_IR_NIR
)
451 shader
->base
.ir
.nir
= (struct nir_shader
*)templ
->prog
;
453 if (shader
->base
.type
== PIPE_SHADER_IR_TGSI
) {
454 /* get/save the summary info for this shader */
455 lp_build_tgsi_info(templ
->prog
, &shader
->info
);
457 /* we need to keep a local copy of the tokens */
458 shader
->base
.tokens
= tgsi_dup_tokens(templ
->prog
);
460 nir_tgsi_scan_shader(shader
->base
.ir
.nir
, &shader
->info
.base
, false);
463 shader
->req_local_mem
= templ
->req_local_mem
;
464 make_empty_list(&shader
->variants
);
466 nr_samplers
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER
] + 1;
467 nr_sampler_views
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
468 int nr_images
= shader
->info
.base
.file_max
[TGSI_FILE_IMAGE
] + 1;
469 shader
->variant_key_size
= lp_cs_variant_key_size(MAX2(nr_samplers
, nr_sampler_views
), nr_images
);
475 llvmpipe_bind_compute_state(struct pipe_context
*pipe
,
478 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
480 if (llvmpipe
->cs
== cs
)
483 llvmpipe
->cs
= (struct lp_compute_shader
*)cs
;
484 llvmpipe
->cs_dirty
|= LP_CSNEW_CS
;
488 * Remove shader variant from two lists: the shader's variant list
489 * and the context's variant list.
492 llvmpipe_remove_cs_shader_variant(struct llvmpipe_context
*lp
,
493 struct lp_compute_shader_variant
*variant
)
495 if ((LP_DEBUG
& DEBUG_CS
) || (gallivm_debug
& GALLIVM_DEBUG_IR
)) {
496 debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
497 "v total cached %u inst %u total inst %u\n",
498 variant
->shader
->no
, variant
->no
,
499 variant
->shader
->variants_created
,
500 variant
->shader
->variants_cached
,
501 lp
->nr_cs_variants
, variant
->nr_instrs
, lp
->nr_cs_instrs
);
504 gallivm_destroy(variant
->gallivm
);
506 /* remove from shader's list */
507 remove_from_list(&variant
->list_item_local
);
508 variant
->shader
->variants_cached
--;
510 /* remove from context's list */
511 remove_from_list(&variant
->list_item_global
);
512 lp
->nr_fs_variants
--;
513 lp
->nr_fs_instrs
-= variant
->nr_instrs
;
519 llvmpipe_delete_compute_state(struct pipe_context
*pipe
,
522 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
523 struct lp_compute_shader
*shader
= cs
;
524 struct lp_cs_variant_list_item
*li
;
526 if (llvmpipe
->cs
== cs
)
528 for (unsigned i
= 0; i
< shader
->max_global_buffers
; i
++)
529 pipe_resource_reference(&shader
->global_buffers
[i
], NULL
);
530 FREE(shader
->global_buffers
);
532 /* Delete all the variants */
533 li
= first_elem(&shader
->variants
);
534 while(!at_end(&shader
->variants
, li
)) {
535 struct lp_cs_variant_list_item
*next
= next_elem(li
);
536 llvmpipe_remove_cs_shader_variant(llvmpipe
, li
->base
);
539 if (shader
->base
.ir
.nir
)
540 ralloc_free(shader
->base
.ir
.nir
);
541 tgsi_free_tokens(shader
->base
.tokens
);
545 static struct lp_compute_shader_variant_key
*
546 make_variant_key(struct llvmpipe_context
*lp
,
547 struct lp_compute_shader
*shader
,
551 struct lp_compute_shader_variant_key
*key
;
552 key
= (struct lp_compute_shader_variant_key
*)store
;
553 memset(key
, 0, offsetof(struct lp_compute_shader_variant_key
, samplers
[1]));
555 /* This value will be the same for all the variants of a given shader:
557 key
->nr_samplers
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER
] + 1;
559 struct lp_sampler_static_state
*cs_sampler
;
561 cs_sampler
= key
->samplers
;
562 for(i
= 0; i
< key
->nr_samplers
; ++i
) {
563 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER
] & (1 << i
)) {
564 lp_sampler_static_sampler_state(&cs_sampler
[i
].sampler_state
,
565 lp
->samplers
[PIPE_SHADER_COMPUTE
][i
]);
570 * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
571 * are dx10-style? Can't really have mixed opcodes, at least not
572 * if we want to skip the holes here (without rescanning tgsi).
574 if (shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] != -1) {
575 key
->nr_sampler_views
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
576 for(i
= 0; i
< key
->nr_sampler_views
; ++i
) {
578 * Note sview may exceed what's representable by file_mask.
579 * This will still work, the only downside is that not actually
580 * used views may be included in the shader key.
582 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER_VIEW
] & (1u << (i
& 31))) {
583 lp_sampler_static_texture_state(&cs_sampler
[i
].texture_state
,
584 lp
->sampler_views
[PIPE_SHADER_COMPUTE
][i
]);
589 key
->nr_sampler_views
= key
->nr_samplers
;
590 for(i
= 0; i
< key
->nr_sampler_views
; ++i
) {
591 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER
] & (1 << i
)) {
592 lp_sampler_static_texture_state(&cs_sampler
[i
].texture_state
,
593 lp
->sampler_views
[PIPE_SHADER_COMPUTE
][i
]);
598 struct lp_image_static_state
*lp_image
;
599 lp_image
= lp_cs_variant_key_images(key
);
600 key
->nr_images
= shader
->info
.base
.file_max
[TGSI_FILE_IMAGE
] + 1;
601 for (i
= 0; i
< key
->nr_images
; ++i
) {
602 if (shader
->info
.base
.file_mask
[TGSI_FILE_IMAGE
] & (1 << i
)) {
603 lp_sampler_static_texture_state_image(&lp_image
[i
].image_state
,
604 &lp
->images
[PIPE_SHADER_COMPUTE
][i
]);
611 dump_cs_variant_key(const struct lp_compute_shader_variant_key
*key
)
614 debug_printf("cs variant %p:\n", (void *) key
);
616 for (i
= 0; i
< key
->nr_samplers
; ++i
) {
617 const struct lp_static_sampler_state
*sampler
= &key
->samplers
[i
].sampler_state
;
618 debug_printf("sampler[%u] = \n", i
);
619 debug_printf(" .wrap = %s %s %s\n",
620 util_str_tex_wrap(sampler
->wrap_s
, TRUE
),
621 util_str_tex_wrap(sampler
->wrap_t
, TRUE
),
622 util_str_tex_wrap(sampler
->wrap_r
, TRUE
));
623 debug_printf(" .min_img_filter = %s\n",
624 util_str_tex_filter(sampler
->min_img_filter
, TRUE
));
625 debug_printf(" .min_mip_filter = %s\n",
626 util_str_tex_mipfilter(sampler
->min_mip_filter
, TRUE
));
627 debug_printf(" .mag_img_filter = %s\n",
628 util_str_tex_filter(sampler
->mag_img_filter
, TRUE
));
629 if (sampler
->compare_mode
!= PIPE_TEX_COMPARE_NONE
)
630 debug_printf(" .compare_func = %s\n", util_str_func(sampler
->compare_func
, TRUE
));
631 debug_printf(" .normalized_coords = %u\n", sampler
->normalized_coords
);
632 debug_printf(" .min_max_lod_equal = %u\n", sampler
->min_max_lod_equal
);
633 debug_printf(" .lod_bias_non_zero = %u\n", sampler
->lod_bias_non_zero
);
634 debug_printf(" .apply_min_lod = %u\n", sampler
->apply_min_lod
);
635 debug_printf(" .apply_max_lod = %u\n", sampler
->apply_max_lod
);
637 for (i
= 0; i
< key
->nr_sampler_views
; ++i
) {
638 const struct lp_static_texture_state
*texture
= &key
->samplers
[i
].texture_state
;
639 debug_printf("texture[%u] = \n", i
);
640 debug_printf(" .format = %s\n",
641 util_format_name(texture
->format
));
642 debug_printf(" .target = %s\n",
643 util_str_tex_target(texture
->target
, TRUE
));
644 debug_printf(" .level_zero_only = %u\n",
645 texture
->level_zero_only
);
646 debug_printf(" .pot = %u %u %u\n",
651 struct lp_image_static_state
*images
= lp_cs_variant_key_images(key
);
652 for (i
= 0; i
< key
->nr_images
; ++i
) {
653 const struct lp_static_texture_state
*image
= &images
[i
].image_state
;
654 debug_printf("image[%u] = \n", i
);
655 debug_printf(" .format = %s\n",
656 util_format_name(image
->format
));
657 debug_printf(" .target = %s\n",
658 util_str_tex_target(image
->target
, TRUE
));
659 debug_printf(" .level_zero_only = %u\n",
660 image
->level_zero_only
);
661 debug_printf(" .pot = %u %u %u\n",
669 lp_debug_cs_variant(const struct lp_compute_shader_variant
*variant
)
671 debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
672 variant
->shader
->no
, variant
->no
);
673 if (variant
->shader
->base
.type
== PIPE_SHADER_IR_TGSI
)
674 tgsi_dump(variant
->shader
->base
.tokens
, 0);
676 nir_print_shader(variant
->shader
->base
.ir
.nir
, stderr
);
677 dump_cs_variant_key(&variant
->key
);
682 lp_cs_get_ir_cache_key(struct lp_compute_shader_variant
*variant
,
683 unsigned char ir_sha1_cache_key
[20])
685 struct blob blob
= { 0 };
690 nir_serialize(&blob
, variant
->shader
->base
.ir
.nir
, true);
691 ir_binary
= blob
.data
;
694 struct mesa_sha1 ctx
;
695 _mesa_sha1_init(&ctx
);
696 _mesa_sha1_update(&ctx
, &variant
->key
, variant
->shader
->variant_key_size
);
697 _mesa_sha1_update(&ctx
, ir_binary
, ir_size
);
698 _mesa_sha1_final(&ctx
, ir_sha1_cache_key
);
703 static struct lp_compute_shader_variant
*
704 generate_variant(struct llvmpipe_context
*lp
,
705 struct lp_compute_shader
*shader
,
706 const struct lp_compute_shader_variant_key
*key
)
708 struct llvmpipe_screen
*screen
= llvmpipe_screen(lp
->pipe
.screen
);
709 struct lp_compute_shader_variant
*variant
;
710 char module_name
[64];
711 unsigned char ir_sha1_cache_key
[20];
712 struct lp_cached_code cached
= { 0 };
713 bool needs_caching
= false;
714 variant
= MALLOC(sizeof *variant
+ shader
->variant_key_size
- sizeof variant
->key
);
718 memset(variant
, 0, sizeof(*variant
));
719 snprintf(module_name
, sizeof(module_name
), "cs%u_variant%u",
720 shader
->no
, shader
->variants_created
);
722 variant
->shader
= shader
;
723 memcpy(&variant
->key
, key
, shader
->variant_key_size
);
725 if (shader
->base
.ir
.nir
) {
726 lp_cs_get_ir_cache_key(variant
, ir_sha1_cache_key
);
728 lp_disk_cache_find_shader(screen
, &cached
, ir_sha1_cache_key
);
729 if (!cached
.data_size
)
730 needs_caching
= true;
732 variant
->gallivm
= gallivm_create(module_name
, lp
->context
, &cached
);
733 if (!variant
->gallivm
) {
738 variant
->list_item_global
.base
= variant
;
739 variant
->list_item_local
.base
= variant
;
740 variant
->no
= shader
->variants_created
++;
744 if ((LP_DEBUG
& DEBUG_CS
) || (gallivm_debug
& GALLIVM_DEBUG_IR
)) {
745 lp_debug_cs_variant(variant
);
748 lp_jit_init_cs_types(variant
);
750 generate_compute(lp
, shader
, variant
);
752 gallivm_compile_module(variant
->gallivm
);
754 lp_build_coro_add_malloc_hooks(variant
->gallivm
);
755 variant
->nr_instrs
+= lp_build_count_ir_module(variant
->gallivm
->module
);
757 variant
->jit_function
= (lp_jit_cs_func
)gallivm_jit_function(variant
->gallivm
, variant
->function
);
760 lp_disk_cache_insert_shader(screen
, &cached
, ir_sha1_cache_key
);
762 gallivm_free_ir(variant
->gallivm
);
767 lp_cs_ctx_set_cs_variant( struct lp_cs_context
*csctx
,
768 struct lp_compute_shader_variant
*variant
)
770 csctx
->cs
.current
.variant
= variant
;
774 llvmpipe_update_cs(struct llvmpipe_context
*lp
)
776 struct lp_compute_shader
*shader
= lp
->cs
;
778 struct lp_compute_shader_variant_key
*key
;
779 struct lp_compute_shader_variant
*variant
= NULL
;
780 struct lp_cs_variant_list_item
*li
;
781 char store
[LP_CS_MAX_VARIANT_KEY_SIZE
];
783 key
= make_variant_key(lp
, shader
, store
);
785 /* Search the variants for one which matches the key */
786 li
= first_elem(&shader
->variants
);
787 while(!at_end(&shader
->variants
, li
)) {
788 if(memcmp(&li
->base
->key
, key
, shader
->variant_key_size
) == 0) {
796 /* Move this variant to the head of the list to implement LRU
797 * deletion of shader's when we have too many.
799 move_to_head(&lp
->cs_variants_list
, &variant
->list_item_global
);
802 /* variant not found, create it now */
805 unsigned variants_to_cull
;
807 if (LP_DEBUG
& DEBUG_CS
) {
808 debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
811 lp
->nr_cs_variants
? lp
->nr_cs_instrs
/ lp
->nr_cs_variants
: 0);
814 /* First, check if we've exceeded the max number of shader variants.
815 * If so, free 6.25% of them (the least recently used ones).
817 variants_to_cull
= lp
->nr_cs_variants
>= LP_MAX_SHADER_VARIANTS
? LP_MAX_SHADER_VARIANTS
/ 16 : 0;
819 if (variants_to_cull
||
820 lp
->nr_cs_instrs
>= LP_MAX_SHADER_INSTRUCTIONS
) {
821 if (gallivm_debug
& GALLIVM_DEBUG_PERF
) {
822 debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
823 "\t%u instrs,\t%u instrs/variant\n",
824 shader
->variants_cached
,
825 lp
->nr_cs_variants
, lp
->nr_cs_instrs
,
826 lp
->nr_cs_instrs
/ lp
->nr_cs_variants
);
830 * We need to re-check lp->nr_cs_variants because an arbitrarily large
831 * number of shader variants (potentially all of them) could be
832 * pending for destruction on flush.
835 for (i
= 0; i
< variants_to_cull
|| lp
->nr_cs_instrs
>= LP_MAX_SHADER_INSTRUCTIONS
; i
++) {
836 struct lp_cs_variant_list_item
*item
;
837 if (is_empty_list(&lp
->cs_variants_list
)) {
840 item
= last_elem(&lp
->cs_variants_list
);
843 llvmpipe_remove_cs_shader_variant(lp
, item
->base
);
847 * Generate the new variant.
850 variant
= generate_variant(lp
, shader
, key
);
853 LP_COUNT_ADD(llvm_compile_time
, dt
);
854 LP_COUNT_ADD(nr_llvm_compiles
, 2); /* emit vs. omit in/out test */
856 /* Put the new variant into the list */
858 insert_at_head(&shader
->variants
, &variant
->list_item_local
);
859 insert_at_head(&lp
->cs_variants_list
, &variant
->list_item_global
);
860 lp
->nr_cs_variants
++;
861 lp
->nr_cs_instrs
+= variant
->nr_instrs
;
862 shader
->variants_cached
++;
865 /* Bind this variant */
866 lp_cs_ctx_set_cs_variant(lp
->csctx
, variant
);
870 * Called during state validation when LP_CSNEW_SAMPLER_VIEW is set.
873 lp_csctx_set_sampler_views(struct lp_cs_context
*csctx
,
875 struct pipe_sampler_view
**views
)
877 unsigned i
, max_tex_num
;
879 LP_DBG(DEBUG_SETUP
, "%s\n", __FUNCTION__
);
881 assert(num
<= PIPE_MAX_SHADER_SAMPLER_VIEWS
);
883 max_tex_num
= MAX2(num
, csctx
->cs
.current_tex_num
);
885 for (i
= 0; i
< max_tex_num
; i
++) {
886 struct pipe_sampler_view
*view
= i
< num
? views
[i
] : NULL
;
889 struct pipe_resource
*res
= view
->texture
;
890 struct llvmpipe_resource
*lp_tex
= llvmpipe_resource(res
);
891 struct lp_jit_texture
*jit_tex
;
892 jit_tex
= &csctx
->cs
.current
.jit_context
.textures
[i
];
894 /* We're referencing the texture's internal data, so save a
897 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], res
);
900 /* regular texture - csctx array of mipmap level offsets */
902 unsigned first_level
= 0;
903 unsigned last_level
= 0;
905 if (llvmpipe_resource_is_texture(res
)) {
906 first_level
= view
->u
.tex
.first_level
;
907 last_level
= view
->u
.tex
.last_level
;
908 assert(first_level
<= last_level
);
909 assert(last_level
<= res
->last_level
);
910 jit_tex
->base
= lp_tex
->tex_data
;
913 jit_tex
->base
= lp_tex
->data
;
915 if (LP_PERF
& PERF_TEX_MEM
) {
916 /* use dummy tile memory */
917 jit_tex
->base
= lp_dummy_tile
;
918 jit_tex
->width
= TILE_SIZE
/8;
919 jit_tex
->height
= TILE_SIZE
/8;
921 jit_tex
->first_level
= 0;
922 jit_tex
->last_level
= 0;
923 jit_tex
->mip_offsets
[0] = 0;
924 jit_tex
->row_stride
[0] = 0;
925 jit_tex
->img_stride
[0] = 0;
926 jit_tex
->num_samples
= 0;
927 jit_tex
->sample_stride
= 0;
930 jit_tex
->width
= res
->width0
;
931 jit_tex
->height
= res
->height0
;
932 jit_tex
->depth
= res
->depth0
;
933 jit_tex
->first_level
= first_level
;
934 jit_tex
->last_level
= last_level
;
935 jit_tex
->num_samples
= res
->nr_samples
;
936 jit_tex
->sample_stride
= 0;
938 if (llvmpipe_resource_is_texture(res
)) {
939 for (j
= first_level
; j
<= last_level
; j
++) {
940 jit_tex
->mip_offsets
[j
] = lp_tex
->mip_offsets
[j
];
941 jit_tex
->row_stride
[j
] = lp_tex
->row_stride
[j
];
942 jit_tex
->img_stride
[j
] = lp_tex
->img_stride
[j
];
944 jit_tex
->sample_stride
= lp_tex
->sample_stride
;
946 if (res
->target
== PIPE_TEXTURE_1D_ARRAY
||
947 res
->target
== PIPE_TEXTURE_2D_ARRAY
||
948 res
->target
== PIPE_TEXTURE_CUBE
||
949 res
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
951 * For array textures, we don't have first_layer, instead
952 * adjust last_layer (stored as depth) plus the mip level offsets
953 * (as we have mip-first layout can't just adjust base ptr).
954 * XXX For mip levels, could do something similar.
956 jit_tex
->depth
= view
->u
.tex
.last_layer
- view
->u
.tex
.first_layer
+ 1;
957 for (j
= first_level
; j
<= last_level
; j
++) {
958 jit_tex
->mip_offsets
[j
] += view
->u
.tex
.first_layer
*
959 lp_tex
->img_stride
[j
];
961 if (view
->target
== PIPE_TEXTURE_CUBE
||
962 view
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
963 assert(jit_tex
->depth
% 6 == 0);
965 assert(view
->u
.tex
.first_layer
<= view
->u
.tex
.last_layer
);
966 assert(view
->u
.tex
.last_layer
< res
->array_size
);
971 * For buffers, we don't have "offset", instead adjust
972 * the size (stored as width) plus the base pointer.
974 unsigned view_blocksize
= util_format_get_blocksize(view
->format
);
975 /* probably don't really need to fill that out */
976 jit_tex
->mip_offsets
[0] = 0;
977 jit_tex
->row_stride
[0] = 0;
978 jit_tex
->img_stride
[0] = 0;
980 /* everything specified in number of elements here. */
981 jit_tex
->width
= view
->u
.buf
.size
/ view_blocksize
;
982 jit_tex
->base
= (uint8_t *)jit_tex
->base
+ view
->u
.buf
.offset
;
983 /* XXX Unsure if we need to sanitize parameters? */
984 assert(view
->u
.buf
.offset
+ view
->u
.buf
.size
<= res
->width0
);
989 /* display target texture/surface */
991 * XXX: Where should this be unmapped?
993 struct llvmpipe_screen
*screen
= llvmpipe_screen(res
->screen
);
994 struct sw_winsys
*winsys
= screen
->winsys
;
995 jit_tex
->base
= winsys
->displaytarget_map(winsys
, lp_tex
->dt
,
997 jit_tex
->row_stride
[0] = lp_tex
->row_stride
[0];
998 jit_tex
->img_stride
[0] = lp_tex
->img_stride
[0];
999 jit_tex
->mip_offsets
[0] = 0;
1000 jit_tex
->width
= res
->width0
;
1001 jit_tex
->height
= res
->height0
;
1002 jit_tex
->depth
= res
->depth0
;
1003 jit_tex
->first_level
= jit_tex
->last_level
= 0;
1004 jit_tex
->num_samples
= res
->nr_samples
;
1005 jit_tex
->sample_stride
= 0;
1006 assert(jit_tex
->base
);
1010 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], NULL
);
1013 csctx
->cs
.current_tex_num
= num
;
1018 * Called during state validation when LP_NEW_SAMPLER is set.
1021 lp_csctx_set_sampler_state(struct lp_cs_context
*csctx
,
1023 struct pipe_sampler_state
**samplers
)
1027 LP_DBG(DEBUG_SETUP
, "%s\n", __FUNCTION__
);
1029 assert(num
<= PIPE_MAX_SAMPLERS
);
1031 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++) {
1032 const struct pipe_sampler_state
*sampler
= i
< num
? samplers
[i
] : NULL
;
1035 struct lp_jit_sampler
*jit_sam
;
1036 jit_sam
= &csctx
->cs
.current
.jit_context
.samplers
[i
];
1038 jit_sam
->min_lod
= sampler
->min_lod
;
1039 jit_sam
->max_lod
= sampler
->max_lod
;
1040 jit_sam
->lod_bias
= sampler
->lod_bias
;
1041 COPY_4V(jit_sam
->border_color
, sampler
->border_color
.f
);
1047 lp_csctx_set_cs_constants(struct lp_cs_context
*csctx
,
1049 struct pipe_constant_buffer
*buffers
)
1053 LP_DBG(DEBUG_SETUP
, "%s %p\n", __FUNCTION__
, (void *) buffers
);
1055 assert(num
<= ARRAY_SIZE(csctx
->constants
));
1057 for (i
= 0; i
< num
; ++i
) {
1058 util_copy_constant_buffer(&csctx
->constants
[i
].current
, &buffers
[i
]);
1060 for (; i
< ARRAY_SIZE(csctx
->constants
); i
++) {
1061 util_copy_constant_buffer(&csctx
->constants
[i
].current
, NULL
);
1066 lp_csctx_set_cs_ssbos(struct lp_cs_context
*csctx
,
1068 struct pipe_shader_buffer
*buffers
)
1071 LP_DBG(DEBUG_SETUP
, "%s %p\n", __FUNCTION__
, (void *)buffers
);
1073 assert (num
<= ARRAY_SIZE(csctx
->ssbos
));
1075 for (i
= 0; i
< num
; ++i
) {
1076 util_copy_shader_buffer(&csctx
->ssbos
[i
].current
, &buffers
[i
]);
1078 for (; i
< ARRAY_SIZE(csctx
->ssbos
); i
++) {
1079 util_copy_shader_buffer(&csctx
->ssbos
[i
].current
, NULL
);
1084 lp_csctx_set_cs_images(struct lp_cs_context
*csctx
,
1086 struct pipe_image_view
*images
)
1090 LP_DBG(DEBUG_SETUP
, "%s %p\n", __FUNCTION__
, (void *) images
);
1092 assert(num
<= ARRAY_SIZE(csctx
->images
));
1094 for (i
= 0; i
< num
; ++i
) {
1095 struct pipe_image_view
*image
= &images
[i
];
1096 util_copy_image_view(&csctx
->images
[i
].current
, &images
[i
]);
1098 struct pipe_resource
*res
= image
->resource
;
1099 struct llvmpipe_resource
*lp_res
= llvmpipe_resource(res
);
1100 struct lp_jit_image
*jit_image
;
1102 jit_image
= &csctx
->cs
.current
.jit_context
.images
[i
];
1106 /* regular texture - csctx array of mipmap level offsets */
1107 if (llvmpipe_resource_is_texture(res
)) {
1108 jit_image
->base
= lp_res
->tex_data
;
1110 jit_image
->base
= lp_res
->data
;
1112 jit_image
->width
= res
->width0
;
1113 jit_image
->height
= res
->height0
;
1114 jit_image
->depth
= res
->depth0
;
1115 jit_image
->num_samples
= res
->nr_samples
;
1117 if (llvmpipe_resource_is_texture(res
)) {
1118 uint32_t mip_offset
= lp_res
->mip_offsets
[image
->u
.tex
.level
];
1120 jit_image
->width
= u_minify(jit_image
->width
, image
->u
.tex
.level
);
1121 jit_image
->height
= u_minify(jit_image
->height
, image
->u
.tex
.level
);
1123 if (res
->target
== PIPE_TEXTURE_1D_ARRAY
||
1124 res
->target
== PIPE_TEXTURE_2D_ARRAY
||
1125 res
->target
== PIPE_TEXTURE_3D
||
1126 res
->target
== PIPE_TEXTURE_CUBE
||
1127 res
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
1129 * For array textures, we don't have first_layer, instead
1130 * adjust last_layer (stored as depth) plus the mip level offsets
1131 * (as we have mip-first layout can't just adjust base ptr).
1132 * XXX For mip levels, could do something similar.
1134 jit_image
->depth
= image
->u
.tex
.last_layer
- image
->u
.tex
.first_layer
+ 1;
1135 mip_offset
+= image
->u
.tex
.first_layer
* lp_res
->img_stride
[image
->u
.tex
.level
];
1137 jit_image
->depth
= u_minify(jit_image
->depth
, image
->u
.tex
.level
);
1139 jit_image
->row_stride
= lp_res
->row_stride
[image
->u
.tex
.level
];
1140 jit_image
->img_stride
= lp_res
->img_stride
[image
->u
.tex
.level
];
1141 jit_image
->sample_stride
= lp_res
->sample_stride
;
1142 jit_image
->base
= (uint8_t *)jit_image
->base
+ mip_offset
;
1144 unsigned view_blocksize
= util_format_get_blocksize(image
->format
);
1145 jit_image
->width
= image
->u
.buf
.size
/ view_blocksize
;
1146 jit_image
->base
= (uint8_t *)jit_image
->base
+ image
->u
.buf
.offset
;
1150 for (; i
< ARRAY_SIZE(csctx
->images
); i
++) {
1151 util_copy_image_view(&csctx
->images
[i
].current
, NULL
);
1156 update_csctx_consts(struct llvmpipe_context
*llvmpipe
)
1158 struct lp_cs_context
*csctx
= llvmpipe
->csctx
;
1161 for (i
= 0; i
< ARRAY_SIZE(csctx
->constants
); ++i
) {
1162 struct pipe_resource
*buffer
= csctx
->constants
[i
].current
.buffer
;
1163 const ubyte
*current_data
= NULL
;
1166 /* resource buffer */
1167 current_data
= (ubyte
*) llvmpipe_resource_data(buffer
);
1169 else if (csctx
->constants
[i
].current
.user_buffer
) {
1170 /* user-space buffer */
1171 current_data
= (ubyte
*) csctx
->constants
[i
].current
.user_buffer
;
1175 current_data
+= csctx
->constants
[i
].current
.buffer_offset
;
1177 csctx
->cs
.current
.jit_context
.constants
[i
] = (const float *)current_data
;
1178 csctx
->cs
.current
.jit_context
.num_constants
[i
] = csctx
->constants
[i
].current
.buffer_size
;
1180 csctx
->cs
.current
.jit_context
.constants
[i
] = NULL
;
1181 csctx
->cs
.current
.jit_context
.num_constants
[i
] = 0;
1187 update_csctx_ssbo(struct llvmpipe_context
*llvmpipe
)
1189 struct lp_cs_context
*csctx
= llvmpipe
->csctx
;
1191 for (i
= 0; i
< ARRAY_SIZE(csctx
->ssbos
); ++i
) {
1192 struct pipe_resource
*buffer
= csctx
->ssbos
[i
].current
.buffer
;
1193 const ubyte
*current_data
= NULL
;
1197 /* resource buffer */
1198 current_data
= (ubyte
*) llvmpipe_resource_data(buffer
);
1200 current_data
+= csctx
->ssbos
[i
].current
.buffer_offset
;
1202 csctx
->cs
.current
.jit_context
.ssbos
[i
] = (const uint32_t *)current_data
;
1203 csctx
->cs
.current
.jit_context
.num_ssbos
[i
] = csctx
->ssbos
[i
].current
.buffer_size
;
1205 csctx
->cs
.current
.jit_context
.ssbos
[i
] = NULL
;
1206 csctx
->cs
.current
.jit_context
.num_ssbos
[i
] = 0;
1212 llvmpipe_cs_update_derived(struct llvmpipe_context
*llvmpipe
, void *input
)
1214 if (llvmpipe
->cs_dirty
& LP_CSNEW_CONSTANTS
) {
1215 lp_csctx_set_cs_constants(llvmpipe
->csctx
,
1216 ARRAY_SIZE(llvmpipe
->constants
[PIPE_SHADER_COMPUTE
]),
1217 llvmpipe
->constants
[PIPE_SHADER_COMPUTE
]);
1218 update_csctx_consts(llvmpipe
);
1221 if (llvmpipe
->cs_dirty
& LP_CSNEW_SSBOS
) {
1222 lp_csctx_set_cs_ssbos(llvmpipe
->csctx
,
1223 ARRAY_SIZE(llvmpipe
->ssbos
[PIPE_SHADER_COMPUTE
]),
1224 llvmpipe
->ssbos
[PIPE_SHADER_COMPUTE
]);
1225 update_csctx_ssbo(llvmpipe
);
1228 if (llvmpipe
->cs_dirty
& LP_CSNEW_SAMPLER_VIEW
)
1229 lp_csctx_set_sampler_views(llvmpipe
->csctx
,
1230 llvmpipe
->num_sampler_views
[PIPE_SHADER_COMPUTE
],
1231 llvmpipe
->sampler_views
[PIPE_SHADER_COMPUTE
]);
1233 if (llvmpipe
->cs_dirty
& LP_CSNEW_SAMPLER
)
1234 lp_csctx_set_sampler_state(llvmpipe
->csctx
,
1235 llvmpipe
->num_samplers
[PIPE_SHADER_COMPUTE
],
1236 llvmpipe
->samplers
[PIPE_SHADER_COMPUTE
]);
1238 if (llvmpipe
->cs_dirty
& LP_CSNEW_IMAGES
)
1239 lp_csctx_set_cs_images(llvmpipe
->csctx
,
1240 ARRAY_SIZE(llvmpipe
->images
[PIPE_SHADER_COMPUTE
]),
1241 llvmpipe
->images
[PIPE_SHADER_COMPUTE
]);
1244 struct lp_cs_context
*csctx
= llvmpipe
->csctx
;
1245 csctx
->input
= input
;
1246 csctx
->cs
.current
.jit_context
.kernel_args
= input
;
1249 if (llvmpipe
->cs_dirty
& (LP_CSNEW_CS
|
1251 LP_CSNEW_SAMPLER_VIEW
|
1253 llvmpipe_update_cs(llvmpipe
);
1256 llvmpipe
->cs_dirty
= 0;
1260 cs_exec_fn(void *init_data
, int iter_idx
, struct lp_cs_local_mem
*lmem
)
1262 struct lp_cs_job_info
*job_info
= init_data
;
1263 struct lp_jit_cs_thread_data thread_data
;
1265 memset(&thread_data
, 0, sizeof(thread_data
));
1267 if (lmem
->local_size
< job_info
->req_local_mem
) {
1268 lmem
->local_mem_ptr
= REALLOC(lmem
->local_mem_ptr
, lmem
->local_size
,
1269 job_info
->req_local_mem
);
1270 lmem
->local_size
= job_info
->req_local_mem
;
1272 thread_data
.shared
= lmem
->local_mem_ptr
;
1274 unsigned grid_z
= iter_idx
/ (job_info
->grid_size
[0] * job_info
->grid_size
[1]);
1275 unsigned grid_y
= (iter_idx
- (grid_z
* (job_info
->grid_size
[0] * job_info
->grid_size
[1]))) / job_info
->grid_size
[0];
1276 unsigned grid_x
= (iter_idx
- (grid_z
* (job_info
->grid_size
[0] * job_info
->grid_size
[1])) - (grid_y
* job_info
->grid_size
[0]));
1277 struct lp_compute_shader_variant
*variant
= job_info
->current
->variant
;
1278 variant
->jit_function(&job_info
->current
->jit_context
,
1279 job_info
->block_size
[0], job_info
->block_size
[1], job_info
->block_size
[2],
1280 grid_x
, grid_y
, grid_z
,
1281 job_info
->grid_size
[0], job_info
->grid_size
[1], job_info
->grid_size
[2], job_info
->work_dim
,
1286 fill_grid_size(struct pipe_context
*pipe
,
1287 const struct pipe_grid_info
*info
,
1288 uint32_t grid_size
[3])
1290 struct pipe_transfer
*transfer
;
1292 if (!info
->indirect
) {
1293 grid_size
[0] = info
->grid
[0];
1294 grid_size
[1] = info
->grid
[1];
1295 grid_size
[2] = info
->grid
[2];
1298 params
= pipe_buffer_map_range(pipe
, info
->indirect
,
1299 info
->indirect_offset
,
1300 3 * sizeof(uint32_t),
1307 grid_size
[0] = params
[0];
1308 grid_size
[1] = params
[1];
1309 grid_size
[2] = params
[2];
1310 pipe_buffer_unmap(pipe
, transfer
);
1313 static void llvmpipe_launch_grid(struct pipe_context
*pipe
,
1314 const struct pipe_grid_info
*info
)
1316 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
1317 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
1318 struct lp_cs_job_info job_info
;
1320 if (!llvmpipe_check_render_cond(llvmpipe
))
1323 memset(&job_info
, 0, sizeof(job_info
));
1325 llvmpipe_cs_update_derived(llvmpipe
, info
->input
);
1327 fill_grid_size(pipe
, info
, job_info
.grid_size
);
1329 job_info
.block_size
[0] = info
->block
[0];
1330 job_info
.block_size
[1] = info
->block
[1];
1331 job_info
.block_size
[2] = info
->block
[2];
1332 job_info
.work_dim
= info
->work_dim
;
1333 job_info
.req_local_mem
= llvmpipe
->cs
->req_local_mem
;
1334 job_info
.current
= &llvmpipe
->csctx
->cs
.current
;
1336 int num_tasks
= job_info
.grid_size
[2] * job_info
.grid_size
[1] * job_info
.grid_size
[0];
1338 struct lp_cs_tpool_task
*task
;
1339 mtx_lock(&screen
->cs_mutex
);
1340 task
= lp_cs_tpool_queue_task(screen
->cs_tpool
, cs_exec_fn
, &job_info
, num_tasks
);
1342 lp_cs_tpool_wait_for_task(screen
->cs_tpool
, &task
);
1343 mtx_unlock(&screen
->cs_mutex
);
1345 llvmpipe
->pipeline_statistics
.cs_invocations
+= num_tasks
* info
->block
[0] * info
->block
[1] * info
->block
[2];
/*
 * pipe_context::set_compute_resources hook.
 * NOTE(review): only the signature is visible in this chunk; the function
 * body (original lines 1352-1356) is not shown here, so no claim is made
 * about its behavior — confirm against the full file.
 */
1349 llvmpipe_set_compute_resources(struct pipe_context
*pipe
,
1350 unsigned start
, unsigned count
,
1351 struct pipe_surface
**resources
)
1358 llvmpipe_set_global_binding(struct pipe_context
*pipe
,
1359 unsigned first
, unsigned count
,
1360 struct pipe_resource
**resources
,
1363 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
1364 struct lp_compute_shader
*cs
= llvmpipe
->cs
;
1367 if (first
+ count
> cs
->max_global_buffers
) {
1368 unsigned old_max
= cs
->max_global_buffers
;
1369 cs
->max_global_buffers
= first
+ count
;
1370 cs
->global_buffers
= realloc(cs
->global_buffers
,
1371 cs
->max_global_buffers
* sizeof(cs
->global_buffers
[0]));
1372 if (!cs
->global_buffers
) {
1376 memset(&cs
->global_buffers
[old_max
], 0, (cs
->max_global_buffers
- old_max
) * sizeof(cs
->global_buffers
[0]));
1380 for (i
= 0; i
< count
; i
++)
1381 pipe_resource_reference(&cs
->global_buffers
[first
+ i
], NULL
);
1385 for (i
= 0; i
< count
; i
++) {
1388 pipe_resource_reference(&cs
->global_buffers
[first
+ i
], resources
[i
]);
1389 struct llvmpipe_resource
*lp_res
= llvmpipe_resource(resources
[i
]);
1390 offset
= *handles
[i
];
1391 va
= (uintptr_t)((char *)lp_res
->data
+ offset
);
1392 memcpy(handles
[i
], &va
, sizeof(va
));
1397 llvmpipe_init_compute_funcs(struct llvmpipe_context
*llvmpipe
)
1399 llvmpipe
->pipe
.create_compute_state
= llvmpipe_create_compute_state
;
1400 llvmpipe
->pipe
.bind_compute_state
= llvmpipe_bind_compute_state
;
1401 llvmpipe
->pipe
.delete_compute_state
= llvmpipe_delete_compute_state
;
1402 llvmpipe
->pipe
.set_compute_resources
= llvmpipe_set_compute_resources
;
1403 llvmpipe
->pipe
.set_global_binding
= llvmpipe_set_global_binding
;
1404 llvmpipe
->pipe
.launch_grid
= llvmpipe_launch_grid
;
1408 lp_csctx_destroy(struct lp_cs_context
*csctx
)
1411 for (i
= 0; i
< ARRAY_SIZE(csctx
->cs
.current_tex
); i
++) {
1412 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], NULL
);
1414 for (i
= 0; i
< ARRAY_SIZE(csctx
->constants
); i
++) {
1415 pipe_resource_reference(&csctx
->constants
[i
].current
.buffer
, NULL
);
1417 for (i
= 0; i
< ARRAY_SIZE(csctx
->ssbos
); i
++) {
1418 pipe_resource_reference(&csctx
->ssbos
[i
].current
.buffer
, NULL
);
1423 struct lp_cs_context
*lp_csctx_create(struct pipe_context
*pipe
)
1425 struct lp_cs_context
*csctx
;
1427 csctx
= CALLOC_STRUCT(lp_cs_context
);