1 /**************************************************************************
3 * Copyright 2019 Red Hat.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/
25 #include "util/u_memory.h"
26 #include "util/simple_list.h"
27 #include "util/os_time.h"
28 #include "util/u_dump.h"
29 #include "util/u_string.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "gallivm/lp_bld_const.h"
33 #include "gallivm/lp_bld_debug.h"
34 #include "gallivm/lp_bld_intr.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "gallivm/lp_bld_gather.h"
37 #include "gallivm/lp_bld_coro.h"
38 #include "lp_state_cs.h"
39 #include "lp_context.h"
43 #include "lp_screen.h"
44 #include "lp_memory.h"
45 #include "lp_cs_tpool.h"
46 #include "state_tracker/sw_winsys.h"
/**
 * Per-dispatch job description handed to the compute thread pool.
 * Holds the grid/block dimensions of the launch plus the execution
 * state (variant + jit context) the worker threads run with.
 */
struct lp_cs_job_info {
   unsigned grid_size[3];        /* number of blocks in x/y/z */
   unsigned block_size[3];       /* threads per block in x/y/z */
   struct lp_cs_exec *current;   /* bound variant + jit context for this launch */
};
55 generate_compute(struct llvmpipe_context
*lp
,
56 struct lp_compute_shader
*shader
,
57 struct lp_compute_shader_variant
*variant
)
59 struct gallivm_state
*gallivm
= variant
->gallivm
;
60 const struct lp_compute_shader_variant_key
*key
= &variant
->key
;
61 char func_name
[64], func_name_coro
[64];
62 LLVMTypeRef arg_types
[13];
63 LLVMTypeRef func_type
, coro_func_type
;
64 LLVMTypeRef int32_type
= LLVMInt32TypeInContext(gallivm
->context
);
65 LLVMValueRef context_ptr
;
66 LLVMValueRef x_size_arg
, y_size_arg
, z_size_arg
;
67 LLVMValueRef grid_x_arg
, grid_y_arg
, grid_z_arg
;
68 LLVMValueRef grid_size_x_arg
, grid_size_y_arg
, grid_size_z_arg
;
69 LLVMValueRef thread_data_ptr
;
70 LLVMBasicBlockRef block
;
71 LLVMBuilderRef builder
;
72 struct lp_build_sampler_soa
*sampler
;
73 LLVMValueRef function
, coro
;
74 struct lp_type cs_type
;
78 * This function has two parts
79 * a) setup the coroutine execution environment loop.
80 * b) build the compute shader llvm for use inside the coroutine.
82 assert(lp_native_vector_width
/ 32 >= 4);
84 memset(&cs_type
, 0, sizeof cs_type
);
85 cs_type
.floating
= TRUE
; /* floating point values */
86 cs_type
.sign
= TRUE
; /* values are signed */
87 cs_type
.norm
= FALSE
; /* values are not limited to [0,1] or [-1,1] */
88 cs_type
.width
= 32; /* 32-bit float */
89 cs_type
.length
= MIN2(lp_native_vector_width
/ 32, 16); /* n*4 elements per vector */
90 snprintf(func_name
, sizeof(func_name
), "cs%u_variant%u",
91 shader
->no
, variant
->no
);
93 snprintf(func_name_coro
, sizeof(func_name
), "cs_co_%u_variant%u",
94 shader
->no
, variant
->no
);
96 arg_types
[0] = variant
->jit_cs_context_ptr_type
; /* context */
97 arg_types
[1] = int32_type
; /* block_x_size */
98 arg_types
[2] = int32_type
; /* block_y_size */
99 arg_types
[3] = int32_type
; /* block_z_size */
100 arg_types
[4] = int32_type
; /* grid_x */
101 arg_types
[5] = int32_type
; /* grid_y */
102 arg_types
[6] = int32_type
; /* grid_z */
103 arg_types
[7] = int32_type
; /* grid_size_x */
104 arg_types
[8] = int32_type
; /* grid_size_y */
105 arg_types
[9] = int32_type
; /* grid_size_z */
106 arg_types
[10] = variant
->jit_cs_thread_data_ptr_type
; /* per thread data */
107 arg_types
[11] = int32_type
;
108 arg_types
[12] = int32_type
;
109 func_type
= LLVMFunctionType(LLVMVoidTypeInContext(gallivm
->context
),
110 arg_types
, ARRAY_SIZE(arg_types
) - 2, 0);
112 coro_func_type
= LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm
->context
), 0),
113 arg_types
, ARRAY_SIZE(arg_types
), 0);
115 function
= LLVMAddFunction(gallivm
->module
, func_name
, func_type
);
116 LLVMSetFunctionCallConv(function
, LLVMCCallConv
);
118 coro
= LLVMAddFunction(gallivm
->module
, func_name_coro
, coro_func_type
);
119 LLVMSetFunctionCallConv(coro
, LLVMCCallConv
);
121 variant
->function
= function
;
123 for(i
= 0; i
< ARRAY_SIZE(arg_types
); ++i
) {
124 if(LLVMGetTypeKind(arg_types
[i
]) == LLVMPointerTypeKind
) {
125 lp_add_function_attr(coro
, i
+ 1, LP_FUNC_ATTR_NOALIAS
);
126 lp_add_function_attr(function
, i
+ 1, LP_FUNC_ATTR_NOALIAS
);
130 context_ptr
= LLVMGetParam(function
, 0);
131 x_size_arg
= LLVMGetParam(function
, 1);
132 y_size_arg
= LLVMGetParam(function
, 2);
133 z_size_arg
= LLVMGetParam(function
, 3);
134 grid_x_arg
= LLVMGetParam(function
, 4);
135 grid_y_arg
= LLVMGetParam(function
, 5);
136 grid_z_arg
= LLVMGetParam(function
, 6);
137 grid_size_x_arg
= LLVMGetParam(function
, 7);
138 grid_size_y_arg
= LLVMGetParam(function
, 8);
139 grid_size_z_arg
= LLVMGetParam(function
, 9);
140 thread_data_ptr
= LLVMGetParam(function
, 10);
142 lp_build_name(context_ptr
, "context");
143 lp_build_name(x_size_arg
, "x_size");
144 lp_build_name(y_size_arg
, "y_size");
145 lp_build_name(z_size_arg
, "z_size");
146 lp_build_name(grid_x_arg
, "grid_x");
147 lp_build_name(grid_y_arg
, "grid_y");
148 lp_build_name(grid_z_arg
, "grid_z");
149 lp_build_name(grid_size_x_arg
, "grid_size_x");
150 lp_build_name(grid_size_y_arg
, "grid_size_y");
151 lp_build_name(grid_size_z_arg
, "grid_size_z");
152 lp_build_name(thread_data_ptr
, "thread_data");
154 block
= LLVMAppendBasicBlockInContext(gallivm
->context
, function
, "entry");
155 builder
= gallivm
->builder
;
157 LLVMPositionBuilderAtEnd(builder
, block
);
158 sampler
= lp_llvm_sampler_soa_create(key
->state
);
160 struct lp_build_loop_state loop_state
[4];
161 LLVMValueRef num_x_loop
;
162 LLVMValueRef vec_length
= lp_build_const_int32(gallivm
, cs_type
.length
);
163 num_x_loop
= LLVMBuildAdd(gallivm
->builder
, x_size_arg
, vec_length
, "");
164 num_x_loop
= LLVMBuildSub(gallivm
->builder
, num_x_loop
, lp_build_const_int32(gallivm
, 1), "");
165 num_x_loop
= LLVMBuildUDiv(gallivm
->builder
, num_x_loop
, vec_length
, "");
166 LLVMValueRef partials
= LLVMBuildURem(gallivm
->builder
, x_size_arg
, vec_length
, "");
168 LLVMValueRef coro_num_hdls
= LLVMBuildMul(gallivm
->builder
, num_x_loop
, y_size_arg
, "");
169 coro_num_hdls
= LLVMBuildMul(gallivm
->builder
, coro_num_hdls
, z_size_arg
, "");
171 LLVMTypeRef hdl_ptr_type
= LLVMPointerType(LLVMInt8TypeInContext(gallivm
->context
), 0);
172 LLVMValueRef coro_hdls
= LLVMBuildArrayAlloca(gallivm
->builder
, hdl_ptr_type
, coro_num_hdls
, "coro_hdls");
174 unsigned end_coroutine
= INT_MAX
;
177 * This is the main coroutine execution loop. It iterates over the dimensions
178 * and calls the coroutine main entrypoint on the first pass, but in subsequent
179 * passes it checks if the coroutine has completed and resumes it if not.
181 /* take x_width - round up to type.length width */
182 lp_build_loop_begin(&loop_state
[3], gallivm
,
183 lp_build_const_int32(gallivm
, 0)); /* coroutine reentry loop */
184 lp_build_loop_begin(&loop_state
[2], gallivm
,
185 lp_build_const_int32(gallivm
, 0)); /* z loop */
186 lp_build_loop_begin(&loop_state
[1], gallivm
,
187 lp_build_const_int32(gallivm
, 0)); /* y loop */
188 lp_build_loop_begin(&loop_state
[0], gallivm
,
189 lp_build_const_int32(gallivm
, 0)); /* x loop */
191 LLVMValueRef args
[13];
192 args
[0] = context_ptr
;
193 args
[1] = loop_state
[0].counter
;
194 args
[2] = loop_state
[1].counter
;
195 args
[3] = loop_state
[2].counter
;
196 args
[4] = grid_x_arg
;
197 args
[5] = grid_y_arg
;
198 args
[6] = grid_z_arg
;
199 args
[7] = grid_size_x_arg
;
200 args
[8] = grid_size_y_arg
;
201 args
[9] = grid_size_z_arg
;
202 args
[10] = thread_data_ptr
;
203 args
[11] = num_x_loop
;
206 /* idx = (z * (size_x * size_y) + y * size_x + x */
207 LLVMValueRef coro_hdl_idx
= LLVMBuildMul(gallivm
->builder
, loop_state
[2].counter
,
208 LLVMBuildMul(gallivm
->builder
, num_x_loop
, y_size_arg
, ""), "");
209 coro_hdl_idx
= LLVMBuildAdd(gallivm
->builder
, coro_hdl_idx
,
210 LLVMBuildMul(gallivm
->builder
, loop_state
[1].counter
,
211 num_x_loop
, ""), "");
212 coro_hdl_idx
= LLVMBuildAdd(gallivm
->builder
, coro_hdl_idx
,
213 loop_state
[0].counter
, "");
215 LLVMValueRef coro_entry
= LLVMBuildGEP(gallivm
->builder
, coro_hdls
, &coro_hdl_idx
, 1, "");
217 LLVMValueRef coro_hdl
= LLVMBuildLoad(gallivm
->builder
, coro_entry
, "coro_hdl");
219 struct lp_build_if_state ifstate
;
220 LLVMValueRef cmp
= LLVMBuildICmp(gallivm
->builder
, LLVMIntEQ
, loop_state
[3].counter
,
221 lp_build_const_int32(gallivm
, 0), "");
222 /* first time here - call the coroutine function entry point */
223 lp_build_if(&ifstate
, gallivm
, cmp
);
224 LLVMValueRef coro_ret
= LLVMBuildCall(gallivm
->builder
, coro
, args
, 13, "");
225 LLVMBuildStore(gallivm
->builder
, coro_ret
, coro_entry
);
226 lp_build_else(&ifstate
);
227 /* subsequent calls for this invocation - check if done. */
228 LLVMValueRef coro_done
= lp_build_coro_done(gallivm
, coro_hdl
);
229 struct lp_build_if_state ifstate2
;
230 lp_build_if(&ifstate2
, gallivm
, coro_done
);
231 /* if done destroy and force loop exit */
232 lp_build_coro_destroy(gallivm
, coro_hdl
);
233 lp_build_loop_force_set_counter(&loop_state
[3], lp_build_const_int32(gallivm
, end_coroutine
- 1));
234 lp_build_else(&ifstate2
);
235 /* otherwise resume the coroutine */
236 lp_build_coro_resume(gallivm
, coro_hdl
);
237 lp_build_endif(&ifstate2
);
238 lp_build_endif(&ifstate
);
239 lp_build_loop_force_reload_counter(&loop_state
[3]);
241 lp_build_loop_end_cond(&loop_state
[0],
244 lp_build_loop_end_cond(&loop_state
[1],
247 lp_build_loop_end_cond(&loop_state
[2],
250 lp_build_loop_end_cond(&loop_state
[3],
251 lp_build_const_int32(gallivm
, end_coroutine
),
253 LLVMBuildRetVoid(builder
);
255 /* This is stage (b) - generate the compute shader code inside the coroutine. */
256 context_ptr
= LLVMGetParam(coro
, 0);
257 x_size_arg
= LLVMGetParam(coro
, 1);
258 y_size_arg
= LLVMGetParam(coro
, 2);
259 z_size_arg
= LLVMGetParam(coro
, 3);
260 grid_x_arg
= LLVMGetParam(coro
, 4);
261 grid_y_arg
= LLVMGetParam(coro
, 5);
262 grid_z_arg
= LLVMGetParam(coro
, 6);
263 grid_size_x_arg
= LLVMGetParam(coro
, 7);
264 grid_size_y_arg
= LLVMGetParam(coro
, 8);
265 grid_size_z_arg
= LLVMGetParam(coro
, 9);
266 thread_data_ptr
= LLVMGetParam(coro
, 10);
267 num_x_loop
= LLVMGetParam(coro
, 11);
268 partials
= LLVMGetParam(coro
, 12);
269 block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "entry");
270 LLVMPositionBuilderAtEnd(builder
, block
);
272 const struct tgsi_token
*tokens
= shader
->base
.tokens
;
273 LLVMValueRef consts_ptr
, num_consts_ptr
;
274 LLVMValueRef ssbo_ptr
, num_ssbo_ptr
;
275 LLVMValueRef shared_ptr
;
276 struct lp_build_mask_context mask
;
277 struct lp_bld_tgsi_system_values system_values
;
279 memset(&system_values
, 0, sizeof(system_values
));
280 consts_ptr
= lp_jit_cs_context_constants(gallivm
, context_ptr
);
281 num_consts_ptr
= lp_jit_cs_context_num_constants(gallivm
, context_ptr
);
282 ssbo_ptr
= lp_jit_cs_context_ssbos(gallivm
, context_ptr
);
283 num_ssbo_ptr
= lp_jit_cs_context_num_ssbos(gallivm
, context_ptr
);
284 shared_ptr
= lp_jit_cs_thread_data_shared(gallivm
, thread_data_ptr
);
286 /* these are coroutine entrypoint necessities */
287 LLVMValueRef coro_id
= lp_build_coro_id(gallivm
);
288 LLVMValueRef coro_hdl
= lp_build_coro_begin_alloc_mem(gallivm
, coro_id
);
290 LLVMValueRef has_partials
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
, partials
, lp_build_const_int32(gallivm
, 0), "");
291 LLVMValueRef tid_vals
[3];
292 LLVMValueRef tids_x
[LP_MAX_VECTOR_LENGTH
], tids_y
[LP_MAX_VECTOR_LENGTH
], tids_z
[LP_MAX_VECTOR_LENGTH
];
293 LLVMValueRef base_val
= LLVMBuildMul(gallivm
->builder
, x_size_arg
, vec_length
, "");
294 for (i
= 0; i
< cs_type
.length
; i
++) {
295 tids_x
[i
] = LLVMBuildAdd(gallivm
->builder
, base_val
, lp_build_const_int32(gallivm
, i
), "");
296 tids_y
[i
] = y_size_arg
;
297 tids_z
[i
] = z_size_arg
;
299 tid_vals
[0] = lp_build_gather_values(gallivm
, tids_x
, cs_type
.length
);
300 tid_vals
[1] = lp_build_gather_values(gallivm
, tids_y
, cs_type
.length
);
301 tid_vals
[2] = lp_build_gather_values(gallivm
, tids_z
, cs_type
.length
);
302 system_values
.thread_id
= LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type
, cs_type
.length
), 3));
303 for (i
= 0; i
< 3; i
++)
304 system_values
.thread_id
= LLVMBuildInsertValue(builder
, system_values
.thread_id
, tid_vals
[i
], i
, "");
306 LLVMValueRef gtids
[3] = { grid_x_arg
, grid_y_arg
, grid_z_arg
};
307 system_values
.block_id
= LLVMGetUndef(LLVMVectorType(int32_type
, 3));
308 for (i
= 0; i
< 3; i
++)
309 system_values
.block_id
= LLVMBuildInsertElement(builder
, system_values
.block_id
, gtids
[i
], lp_build_const_int32(gallivm
, i
), "");
311 LLVMValueRef gstids
[3] = { grid_size_x_arg
, grid_size_y_arg
, grid_size_z_arg
};
312 system_values
.grid_size
= LLVMGetUndef(LLVMVectorType(int32_type
, 3));
313 for (i
= 0; i
< 3; i
++)
314 system_values
.grid_size
= LLVMBuildInsertElement(builder
, system_values
.grid_size
, gstids
[i
], lp_build_const_int32(gallivm
, i
), "");
316 LLVMValueRef last_x_loop
= LLVMBuildICmp(gallivm
->builder
, LLVMIntEQ
, x_size_arg
, LLVMBuildSub(gallivm
->builder
, num_x_loop
, lp_build_const_int32(gallivm
, 1), ""), "");
317 LLVMValueRef use_partial_mask
= LLVMBuildAnd(gallivm
->builder
, last_x_loop
, has_partials
, "");
318 struct lp_build_if_state if_state
;
319 LLVMValueRef mask_val
= lp_build_alloca(gallivm
, LLVMVectorType(int32_type
, cs_type
.length
), "mask");
320 LLVMValueRef full_mask_val
= lp_build_const_int_vec(gallivm
, cs_type
, ~0);
321 LLVMBuildStore(gallivm
->builder
, full_mask_val
, mask_val
);
323 lp_build_if(&if_state
, gallivm
, use_partial_mask
);
324 struct lp_build_loop_state mask_loop_state
;
325 lp_build_loop_begin(&mask_loop_state
, gallivm
, partials
);
326 LLVMValueRef tmask_val
= LLVMBuildLoad(gallivm
->builder
, mask_val
, "");
327 tmask_val
= LLVMBuildInsertElement(gallivm
->builder
, tmask_val
, lp_build_const_int32(gallivm
, 0), mask_loop_state
.counter
, "");
328 LLVMBuildStore(gallivm
->builder
, tmask_val
, mask_val
);
329 lp_build_loop_end_cond(&mask_loop_state
, vec_length
, NULL
, LLVMIntUGE
);
330 lp_build_endif(&if_state
);
332 mask_val
= LLVMBuildLoad(gallivm
->builder
, mask_val
, "");
333 lp_build_mask_begin(&mask
, gallivm
, cs_type
, mask_val
);
335 struct lp_build_coro_suspend_info coro_info
;
337 LLVMBasicBlockRef sus_block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "suspend");
338 LLVMBasicBlockRef clean_block
= LLVMAppendBasicBlockInContext(gallivm
->context
, coro
, "cleanup");
340 coro_info
.suspend
= sus_block
;
341 coro_info
.cleanup
= clean_block
;
343 struct lp_build_tgsi_params params
;
344 memset(¶ms
, 0, sizeof(params
));
346 params
.type
= cs_type
;
348 params
.consts_ptr
= consts_ptr
;
349 params
.const_sizes_ptr
= num_consts_ptr
;
350 params
.system_values
= &system_values
;
351 params
.context_ptr
= context_ptr
;
352 params
.sampler
= sampler
;
353 params
.info
= &shader
->info
.base
;
354 params
.ssbo_ptr
= ssbo_ptr
;
355 params
.ssbo_sizes_ptr
= num_ssbo_ptr
;
356 params
.shared_ptr
= shared_ptr
;
357 params
.coro
= &coro_info
;
359 lp_build_tgsi_soa(gallivm
, tokens
, ¶ms
, NULL
);
361 mask_val
= lp_build_mask_end(&mask
);
363 lp_build_coro_suspend_switch(gallivm
, &coro_info
, NULL
, true);
364 LLVMPositionBuilderAtEnd(builder
, clean_block
);
366 lp_build_coro_free_mem(gallivm
, coro_id
, coro_hdl
);
368 LLVMBuildBr(builder
, sus_block
);
369 LLVMPositionBuilderAtEnd(builder
, sus_block
);
371 lp_build_coro_end(gallivm
, coro_hdl
);
372 LLVMBuildRet(builder
, coro_hdl
);
375 sampler
->destroy(sampler
);
377 gallivm_verify_function(gallivm
, coro
);
378 gallivm_verify_function(gallivm
, function
);
382 llvmpipe_create_compute_state(struct pipe_context
*pipe
,
383 const struct pipe_compute_state
*templ
)
385 struct lp_compute_shader
*shader
;
386 int nr_samplers
, nr_sampler_views
;
387 shader
= CALLOC_STRUCT(lp_compute_shader
);
391 assert(templ
->ir_type
== PIPE_SHADER_IR_TGSI
);
392 shader
->base
.tokens
= tgsi_dup_tokens(templ
->prog
);
394 lp_build_tgsi_info(shader
->base
.tokens
, &shader
->info
);
395 make_empty_list(&shader
->variants
);
397 nr_samplers
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER
] + 1;
398 nr_sampler_views
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
399 shader
->variant_key_size
= Offset(struct lp_compute_shader_variant_key
,
400 state
[MAX2(nr_samplers
, nr_sampler_views
)]);
405 llvmpipe_bind_compute_state(struct pipe_context
*pipe
,
408 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
410 if (llvmpipe
->cs
== cs
)
413 llvmpipe
->cs
= (struct lp_compute_shader
*)cs
;
414 llvmpipe
->cs_dirty
|= LP_CSNEW_CS
;
418 * Remove shader variant from two lists: the shader's variant list
419 * and the context's variant list.
422 llvmpipe_remove_cs_shader_variant(struct llvmpipe_context
*lp
,
423 struct lp_compute_shader_variant
*variant
)
425 if ((LP_DEBUG
& DEBUG_CS
) || (gallivm_debug
& GALLIVM_DEBUG_IR
)) {
426 debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
427 "v total cached %u inst %u total inst %u\n",
428 variant
->shader
->no
, variant
->no
,
429 variant
->shader
->variants_created
,
430 variant
->shader
->variants_cached
,
431 lp
->nr_cs_variants
, variant
->nr_instrs
, lp
->nr_cs_instrs
);
434 gallivm_destroy(variant
->gallivm
);
436 /* remove from shader's list */
437 remove_from_list(&variant
->list_item_local
);
438 variant
->shader
->variants_cached
--;
440 /* remove from context's list */
441 remove_from_list(&variant
->list_item_global
);
442 lp
->nr_fs_variants
--;
443 lp
->nr_fs_instrs
-= variant
->nr_instrs
;
449 llvmpipe_delete_compute_state(struct pipe_context
*pipe
,
452 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
453 struct lp_compute_shader
*shader
= cs
;
454 struct lp_cs_variant_list_item
*li
;
456 /* Delete all the variants */
457 li
= first_elem(&shader
->variants
);
458 while(!at_end(&shader
->variants
, li
)) {
459 struct lp_cs_variant_list_item
*next
= next_elem(li
);
460 llvmpipe_remove_cs_shader_variant(llvmpipe
, li
->base
);
463 tgsi_free_tokens(shader
->base
.tokens
);
468 make_variant_key(struct llvmpipe_context
*lp
,
469 struct lp_compute_shader
*shader
,
470 struct lp_compute_shader_variant_key
*key
)
474 memset(key
, 0, shader
->variant_key_size
);
476 /* This value will be the same for all the variants of a given shader:
478 key
->nr_samplers
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER
] + 1;
480 for(i
= 0; i
< key
->nr_samplers
; ++i
) {
481 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER
] & (1 << i
)) {
482 lp_sampler_static_sampler_state(&key
->state
[i
].sampler_state
,
483 lp
->samplers
[PIPE_SHADER_COMPUTE
][i
]);
488 * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
489 * are dx10-style? Can't really have mixed opcodes, at least not
490 * if we want to skip the holes here (without rescanning tgsi).
492 if (shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] != -1) {
493 key
->nr_sampler_views
= shader
->info
.base
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
494 for(i
= 0; i
< key
->nr_sampler_views
; ++i
) {
496 * Note sview may exceed what's representable by file_mask.
497 * This will still work, the only downside is that not actually
498 * used views may be included in the shader key.
500 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER_VIEW
] & (1u << (i
& 31))) {
501 lp_sampler_static_texture_state(&key
->state
[i
].texture_state
,
502 lp
->sampler_views
[PIPE_SHADER_COMPUTE
][i
]);
507 key
->nr_sampler_views
= key
->nr_samplers
;
508 for(i
= 0; i
< key
->nr_sampler_views
; ++i
) {
509 if(shader
->info
.base
.file_mask
[TGSI_FILE_SAMPLER
] & (1 << i
)) {
510 lp_sampler_static_texture_state(&key
->state
[i
].texture_state
,
511 lp
->sampler_views
[PIPE_SHADER_COMPUTE
][i
]);
519 dump_cs_variant_key(const struct lp_compute_shader_variant_key
*key
)
522 debug_printf("cs variant %p:\n", (void *) key
);
524 for (i
= 0; i
< key
->nr_samplers
; ++i
) {
525 const struct lp_static_sampler_state
*sampler
= &key
->state
[i
].sampler_state
;
526 debug_printf("sampler[%u] = \n", i
);
527 debug_printf(" .wrap = %s %s %s\n",
528 util_str_tex_wrap(sampler
->wrap_s
, TRUE
),
529 util_str_tex_wrap(sampler
->wrap_t
, TRUE
),
530 util_str_tex_wrap(sampler
->wrap_r
, TRUE
));
531 debug_printf(" .min_img_filter = %s\n",
532 util_str_tex_filter(sampler
->min_img_filter
, TRUE
));
533 debug_printf(" .min_mip_filter = %s\n",
534 util_str_tex_mipfilter(sampler
->min_mip_filter
, TRUE
));
535 debug_printf(" .mag_img_filter = %s\n",
536 util_str_tex_filter(sampler
->mag_img_filter
, TRUE
));
537 if (sampler
->compare_mode
!= PIPE_TEX_COMPARE_NONE
)
538 debug_printf(" .compare_func = %s\n", util_str_func(sampler
->compare_func
, TRUE
));
539 debug_printf(" .normalized_coords = %u\n", sampler
->normalized_coords
);
540 debug_printf(" .min_max_lod_equal = %u\n", sampler
->min_max_lod_equal
);
541 debug_printf(" .lod_bias_non_zero = %u\n", sampler
->lod_bias_non_zero
);
542 debug_printf(" .apply_min_lod = %u\n", sampler
->apply_min_lod
);
543 debug_printf(" .apply_max_lod = %u\n", sampler
->apply_max_lod
);
545 for (i
= 0; i
< key
->nr_sampler_views
; ++i
) {
546 const struct lp_static_texture_state
*texture
= &key
->state
[i
].texture_state
;
547 debug_printf("texture[%u] = \n", i
);
548 debug_printf(" .format = %s\n",
549 util_format_name(texture
->format
));
550 debug_printf(" .target = %s\n",
551 util_str_tex_target(texture
->target
, TRUE
));
552 debug_printf(" .level_zero_only = %u\n",
553 texture
->level_zero_only
);
554 debug_printf(" .pot = %u %u %u\n",
562 lp_debug_cs_variant(const struct lp_compute_shader_variant
*variant
)
564 debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
565 variant
->shader
->no
, variant
->no
);
566 tgsi_dump(variant
->shader
->base
.tokens
, 0);
567 dump_cs_variant_key(&variant
->key
);
571 static struct lp_compute_shader_variant
*
572 generate_variant(struct llvmpipe_context
*lp
,
573 struct lp_compute_shader
*shader
,
574 const struct lp_compute_shader_variant_key
*key
)
576 struct lp_compute_shader_variant
*variant
;
577 char module_name
[64];
579 variant
= CALLOC_STRUCT(lp_compute_shader_variant
);
583 snprintf(module_name
, sizeof(module_name
), "cs%u_variant%u",
584 shader
->no
, shader
->variants_created
);
586 variant
->gallivm
= gallivm_create(module_name
, lp
->context
);
587 if (!variant
->gallivm
) {
592 variant
->shader
= shader
;
593 variant
->list_item_global
.base
= variant
;
594 variant
->list_item_local
.base
= variant
;
595 variant
->no
= shader
->variants_created
++;
597 memcpy(&variant
->key
, key
, shader
->variant_key_size
);
599 if ((LP_DEBUG
& DEBUG_CS
) || (gallivm_debug
& GALLIVM_DEBUG_IR
)) {
600 lp_debug_cs_variant(variant
);
603 lp_jit_init_cs_types(variant
);
605 generate_compute(lp
, shader
, variant
);
607 gallivm_compile_module(variant
->gallivm
);
609 variant
->nr_instrs
+= lp_build_count_ir_module(variant
->gallivm
->module
);
611 variant
->jit_function
= (lp_jit_cs_func
)gallivm_jit_function(variant
->gallivm
, variant
->function
);
613 gallivm_free_ir(variant
->gallivm
);
618 lp_cs_ctx_set_cs_variant( struct lp_cs_context
*csctx
,
619 struct lp_compute_shader_variant
*variant
)
621 csctx
->cs
.current
.variant
= variant
;
625 llvmpipe_update_cs(struct llvmpipe_context
*lp
)
627 struct lp_compute_shader
*shader
= lp
->cs
;
629 struct lp_compute_shader_variant_key key
;
630 struct lp_compute_shader_variant
*variant
= NULL
;
631 struct lp_cs_variant_list_item
*li
;
633 make_variant_key(lp
, shader
, &key
);
635 /* Search the variants for one which matches the key */
636 li
= first_elem(&shader
->variants
);
637 while(!at_end(&shader
->variants
, li
)) {
638 if(memcmp(&li
->base
->key
, &key
, shader
->variant_key_size
) == 0) {
646 /* Move this variant to the head of the list to implement LRU
647 * deletion of shader's when we have too many.
649 move_to_head(&lp
->cs_variants_list
, &variant
->list_item_global
);
652 /* variant not found, create it now */
655 unsigned variants_to_cull
;
657 if (LP_DEBUG
& DEBUG_CS
) {
658 debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
661 lp
->nr_cs_variants
? lp
->nr_cs_instrs
/ lp
->nr_cs_variants
: 0);
664 /* First, check if we've exceeded the max number of shader variants.
665 * If so, free 6.25% of them (the least recently used ones).
667 variants_to_cull
= lp
->nr_cs_variants
>= LP_MAX_SHADER_VARIANTS
? LP_MAX_SHADER_VARIANTS
/ 16 : 0;
669 if (variants_to_cull
||
670 lp
->nr_cs_instrs
>= LP_MAX_SHADER_INSTRUCTIONS
) {
671 if (gallivm_debug
& GALLIVM_DEBUG_PERF
) {
672 debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
673 "\t%u instrs,\t%u instrs/variant\n",
674 shader
->variants_cached
,
675 lp
->nr_cs_variants
, lp
->nr_cs_instrs
,
676 lp
->nr_cs_instrs
/ lp
->nr_cs_variants
);
680 * We need to re-check lp->nr_cs_variants because an arbitrarliy large
681 * number of shader variants (potentially all of them) could be
682 * pending for destruction on flush.
685 for (i
= 0; i
< variants_to_cull
|| lp
->nr_cs_instrs
>= LP_MAX_SHADER_INSTRUCTIONS
; i
++) {
686 struct lp_cs_variant_list_item
*item
;
687 if (is_empty_list(&lp
->cs_variants_list
)) {
690 item
= last_elem(&lp
->cs_variants_list
);
693 llvmpipe_remove_cs_shader_variant(lp
, item
->base
);
697 * Generate the new variant.
700 variant
= generate_variant(lp
, shader
, &key
);
703 LP_COUNT_ADD(llvm_compile_time
, dt
);
704 LP_COUNT_ADD(nr_llvm_compiles
, 2); /* emit vs. omit in/out test */
706 /* Put the new variant into the list */
708 insert_at_head(&shader
->variants
, &variant
->list_item_local
);
709 insert_at_head(&lp
->cs_variants_list
, &variant
->list_item_global
);
710 lp
->nr_cs_variants
++;
711 lp
->nr_cs_instrs
+= variant
->nr_instrs
;
712 shader
->variants_cached
++;
715 /* Bind this variant */
716 lp_cs_ctx_set_cs_variant(lp
->csctx
, variant
);
720 * Called during state validation when LP_CSNEW_SAMPLER_VIEW is set.
723 lp_csctx_set_sampler_views(struct lp_cs_context
*csctx
,
725 struct pipe_sampler_view
**views
)
727 unsigned i
, max_tex_num
;
729 LP_DBG(DEBUG_SETUP
, "%s\n", __FUNCTION__
);
731 assert(num
<= PIPE_MAX_SHADER_SAMPLER_VIEWS
);
733 max_tex_num
= MAX2(num
, csctx
->cs
.current_tex_num
);
735 for (i
= 0; i
< max_tex_num
; i
++) {
736 struct pipe_sampler_view
*view
= i
< num
? views
[i
] : NULL
;
739 struct pipe_resource
*res
= view
->texture
;
740 struct llvmpipe_resource
*lp_tex
= llvmpipe_resource(res
);
741 struct lp_jit_texture
*jit_tex
;
742 jit_tex
= &csctx
->cs
.current
.jit_context
.textures
[i
];
744 /* We're referencing the texture's internal data, so save a
747 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], res
);
750 /* regular texture - csctx array of mipmap level offsets */
752 unsigned first_level
= 0;
753 unsigned last_level
= 0;
755 if (llvmpipe_resource_is_texture(res
)) {
756 first_level
= view
->u
.tex
.first_level
;
757 last_level
= view
->u
.tex
.last_level
;
758 assert(first_level
<= last_level
);
759 assert(last_level
<= res
->last_level
);
760 jit_tex
->base
= lp_tex
->tex_data
;
763 jit_tex
->base
= lp_tex
->data
;
765 if (LP_PERF
& PERF_TEX_MEM
) {
766 /* use dummy tile memory */
767 jit_tex
->base
= lp_dummy_tile
;
768 jit_tex
->width
= TILE_SIZE
/8;
769 jit_tex
->height
= TILE_SIZE
/8;
771 jit_tex
->first_level
= 0;
772 jit_tex
->last_level
= 0;
773 jit_tex
->mip_offsets
[0] = 0;
774 jit_tex
->row_stride
[0] = 0;
775 jit_tex
->img_stride
[0] = 0;
778 jit_tex
->width
= res
->width0
;
779 jit_tex
->height
= res
->height0
;
780 jit_tex
->depth
= res
->depth0
;
781 jit_tex
->first_level
= first_level
;
782 jit_tex
->last_level
= last_level
;
784 if (llvmpipe_resource_is_texture(res
)) {
785 for (j
= first_level
; j
<= last_level
; j
++) {
786 jit_tex
->mip_offsets
[j
] = lp_tex
->mip_offsets
[j
];
787 jit_tex
->row_stride
[j
] = lp_tex
->row_stride
[j
];
788 jit_tex
->img_stride
[j
] = lp_tex
->img_stride
[j
];
791 if (res
->target
== PIPE_TEXTURE_1D_ARRAY
||
792 res
->target
== PIPE_TEXTURE_2D_ARRAY
||
793 res
->target
== PIPE_TEXTURE_CUBE
||
794 res
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
796 * For array textures, we don't have first_layer, instead
797 * adjust last_layer (stored as depth) plus the mip level offsets
798 * (as we have mip-first layout can't just adjust base ptr).
799 * XXX For mip levels, could do something similar.
801 jit_tex
->depth
= view
->u
.tex
.last_layer
- view
->u
.tex
.first_layer
+ 1;
802 for (j
= first_level
; j
<= last_level
; j
++) {
803 jit_tex
->mip_offsets
[j
] += view
->u
.tex
.first_layer
*
804 lp_tex
->img_stride
[j
];
806 if (view
->target
== PIPE_TEXTURE_CUBE
||
807 view
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
808 assert(jit_tex
->depth
% 6 == 0);
810 assert(view
->u
.tex
.first_layer
<= view
->u
.tex
.last_layer
);
811 assert(view
->u
.tex
.last_layer
< res
->array_size
);
816 * For buffers, we don't have "offset", instead adjust
817 * the size (stored as width) plus the base pointer.
819 unsigned view_blocksize
= util_format_get_blocksize(view
->format
);
820 /* probably don't really need to fill that out */
821 jit_tex
->mip_offsets
[0] = 0;
822 jit_tex
->row_stride
[0] = 0;
823 jit_tex
->img_stride
[0] = 0;
825 /* everything specified in number of elements here. */
826 jit_tex
->width
= view
->u
.buf
.size
/ view_blocksize
;
827 jit_tex
->base
= (uint8_t *)jit_tex
->base
+ view
->u
.buf
.offset
;
828 /* XXX Unsure if we need to sanitize parameters? */
829 assert(view
->u
.buf
.offset
+ view
->u
.buf
.size
<= res
->width0
);
834 /* display target texture/surface */
836 * XXX: Where should this be unmapped?
838 struct llvmpipe_screen
*screen
= llvmpipe_screen(res
->screen
);
839 struct sw_winsys
*winsys
= screen
->winsys
;
840 jit_tex
->base
= winsys
->displaytarget_map(winsys
, lp_tex
->dt
,
842 jit_tex
->row_stride
[0] = lp_tex
->row_stride
[0];
843 jit_tex
->img_stride
[0] = lp_tex
->img_stride
[0];
844 jit_tex
->mip_offsets
[0] = 0;
845 jit_tex
->width
= res
->width0
;
846 jit_tex
->height
= res
->height0
;
847 jit_tex
->depth
= res
->depth0
;
848 jit_tex
->first_level
= jit_tex
->last_level
= 0;
849 assert(jit_tex
->base
);
853 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], NULL
);
856 csctx
->cs
.current_tex_num
= num
;
861 * Called during state validation when LP_NEW_SAMPLER is set.
864 lp_csctx_set_sampler_state(struct lp_cs_context
*csctx
,
866 struct pipe_sampler_state
**samplers
)
870 LP_DBG(DEBUG_SETUP
, "%s\n", __FUNCTION__
);
872 assert(num
<= PIPE_MAX_SAMPLERS
);
874 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++) {
875 const struct pipe_sampler_state
*sampler
= i
< num
? samplers
[i
] : NULL
;
878 struct lp_jit_sampler
*jit_sam
;
879 jit_sam
= &csctx
->cs
.current
.jit_context
.samplers
[i
];
881 jit_sam
->min_lod
= sampler
->min_lod
;
882 jit_sam
->max_lod
= sampler
->max_lod
;
883 jit_sam
->lod_bias
= sampler
->lod_bias
;
884 COPY_4V(jit_sam
->border_color
, sampler
->border_color
.f
);
890 lp_csctx_set_cs_constants(struct lp_cs_context
*csctx
,
892 struct pipe_constant_buffer
*buffers
)
896 LP_DBG(DEBUG_SETUP
, "%s %p\n", __FUNCTION__
, (void *) buffers
);
898 assert(num
<= ARRAY_SIZE(csctx
->constants
));
900 for (i
= 0; i
< num
; ++i
) {
901 util_copy_constant_buffer(&csctx
->constants
[i
].current
, &buffers
[i
]);
903 for (; i
< ARRAY_SIZE(csctx
->constants
); i
++) {
904 util_copy_constant_buffer(&csctx
->constants
[i
].current
, NULL
);
909 lp_csctx_set_cs_ssbos(struct lp_cs_context
*csctx
,
911 struct pipe_shader_buffer
*buffers
)
914 LP_DBG(DEBUG_SETUP
, "%s %p\n", __FUNCTION__
, (void *)buffers
);
916 assert (num
<= ARRAY_SIZE(csctx
->ssbos
));
918 for (i
= 0; i
< num
; ++i
) {
919 util_copy_shader_buffer(&csctx
->ssbos
[i
].current
, &buffers
[i
]);
921 for (; i
< ARRAY_SIZE(csctx
->ssbos
); i
++) {
922 util_copy_shader_buffer(&csctx
->ssbos
[i
].current
, NULL
);
927 update_csctx_consts(struct llvmpipe_context
*llvmpipe
)
929 struct lp_cs_context
*csctx
= llvmpipe
->csctx
;
932 for (i
= 0; i
< ARRAY_SIZE(csctx
->constants
); ++i
) {
933 struct pipe_resource
*buffer
= csctx
->constants
[i
].current
.buffer
;
934 const ubyte
*current_data
= NULL
;
937 /* resource buffer */
938 current_data
= (ubyte
*) llvmpipe_resource_data(buffer
);
940 else if (csctx
->constants
[i
].current
.user_buffer
) {
941 /* user-space buffer */
942 current_data
= (ubyte
*) csctx
->constants
[i
].current
.user_buffer
;
946 current_data
+= csctx
->constants
[i
].current
.buffer_offset
;
948 csctx
->cs
.current
.jit_context
.constants
[i
] = (const float *)current_data
;
949 csctx
->cs
.current
.jit_context
.num_constants
[i
] = csctx
->constants
[i
].current
.buffer_size
;
951 csctx
->cs
.current
.jit_context
.constants
[i
] = NULL
;
952 csctx
->cs
.current
.jit_context
.num_constants
[i
] = 0;
958 update_csctx_ssbo(struct llvmpipe_context
*llvmpipe
)
960 struct lp_cs_context
*csctx
= llvmpipe
->csctx
;
962 for (i
= 0; i
< ARRAY_SIZE(csctx
->ssbos
); ++i
) {
963 struct pipe_resource
*buffer
= csctx
->ssbos
[i
].current
.buffer
;
964 const ubyte
*current_data
= NULL
;
968 /* resource buffer */
969 current_data
= (ubyte
*) llvmpipe_resource_data(buffer
);
971 current_data
+= csctx
->ssbos
[i
].current
.buffer_offset
;
973 csctx
->cs
.current
.jit_context
.ssbos
[i
] = (const uint32_t *)current_data
;
974 csctx
->cs
.current
.jit_context
.num_ssbos
[i
] = csctx
->ssbos
[i
].current
.buffer_size
;
976 csctx
->cs
.current
.jit_context
.ssbos
[i
] = NULL
;
977 csctx
->cs
.current
.jit_context
.num_ssbos
[i
] = 0;
983 llvmpipe_cs_update_derived(struct llvmpipe_context
*llvmpipe
)
985 if (llvmpipe
->cs_dirty
& (LP_CSNEW_CS
))
986 llvmpipe_update_cs(llvmpipe
);
988 if (llvmpipe
->cs_dirty
& LP_CSNEW_CONSTANTS
) {
989 lp_csctx_set_cs_constants(llvmpipe
->csctx
,
990 ARRAY_SIZE(llvmpipe
->constants
[PIPE_SHADER_COMPUTE
]),
991 llvmpipe
->constants
[PIPE_SHADER_COMPUTE
]);
992 update_csctx_consts(llvmpipe
);
995 if (llvmpipe
->cs_dirty
& LP_CSNEW_SSBOS
) {
996 lp_csctx_set_cs_ssbos(llvmpipe
->csctx
,
997 ARRAY_SIZE(llvmpipe
->ssbos
[PIPE_SHADER_COMPUTE
]),
998 llvmpipe
->ssbos
[PIPE_SHADER_COMPUTE
]);
999 update_csctx_ssbo(llvmpipe
);
1002 if (llvmpipe
->cs_dirty
& LP_CSNEW_SAMPLER_VIEW
)
1003 lp_csctx_set_sampler_views(llvmpipe
->csctx
,
1004 llvmpipe
->num_sampler_views
[PIPE_SHADER_COMPUTE
],
1005 llvmpipe
->sampler_views
[PIPE_SHADER_COMPUTE
]);
1007 if (llvmpipe
->cs_dirty
& LP_CSNEW_SAMPLER
)
1008 lp_csctx_set_sampler_state(llvmpipe
->csctx
,
1009 llvmpipe
->num_samplers
[PIPE_SHADER_COMPUTE
],
1010 llvmpipe
->samplers
[PIPE_SHADER_COMPUTE
]);
1011 llvmpipe
->cs_dirty
= 0;
1015 cs_exec_fn(void *init_data
, int iter_idx
, struct lp_cs_local_mem
*lmem
)
1017 struct lp_cs_job_info
*job_info
= init_data
;
1018 struct lp_jit_cs_thread_data thread_data
;
1020 memset(&thread_data
, 0, sizeof(thread_data
));
1022 unsigned grid_z
= iter_idx
/ (job_info
->grid_size
[0] * job_info
->grid_size
[1]);
1023 unsigned grid_y
= (iter_idx
- (grid_z
* (job_info
->grid_size
[0] * job_info
->grid_size
[1]))) / job_info
->grid_size
[0];
1024 unsigned grid_x
= (iter_idx
- (grid_z
* (job_info
->grid_size
[0] * job_info
->grid_size
[1])) - (grid_y
* job_info
->grid_size
[0]));
1025 struct lp_compute_shader_variant
*variant
= job_info
->current
->variant
;
1026 variant
->jit_function(&job_info
->current
->jit_context
,
1027 job_info
->block_size
[0], job_info
->block_size
[1], job_info
->block_size
[2],
1028 grid_x
, grid_y
, grid_z
,
1029 job_info
->grid_size
[0], job_info
->grid_size
[1], job_info
->grid_size
[2],
1034 fill_grid_size(struct pipe_context
*pipe
,
1035 const struct pipe_grid_info
*info
,
1036 uint32_t grid_size
[3])
1038 struct pipe_transfer
*transfer
;
1040 if (!info
->indirect
) {
1041 grid_size
[0] = info
->grid
[0];
1042 grid_size
[1] = info
->grid
[1];
1043 grid_size
[2] = info
->grid
[2];
1046 params
= pipe_buffer_map_range(pipe
, info
->indirect
,
1047 info
->indirect_offset
,
1048 3 * sizeof(uint32_t),
1055 grid_size
[0] = params
[0];
1056 grid_size
[1] = params
[1];
1057 grid_size
[2] = params
[2];
1058 pipe_buffer_unmap(pipe
, transfer
);
1061 static void llvmpipe_launch_grid(struct pipe_context
*pipe
,
1062 const struct pipe_grid_info
*info
)
1064 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
1065 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
1066 struct lp_cs_job_info job_info
;
1068 memset(&job_info
, 0, sizeof(job_info
));
1070 llvmpipe_cs_update_derived(llvmpipe
);
1072 fill_grid_size(pipe
, info
, job_info
.grid_size
);
1074 job_info
.block_size
[0] = info
->block
[0];
1075 job_info
.block_size
[1] = info
->block
[1];
1076 job_info
.block_size
[2] = info
->block
[2];
1077 job_info
.current
= &llvmpipe
->csctx
->cs
.current
;
1079 int num_tasks
= job_info
.grid_size
[2] * job_info
.grid_size
[1] * job_info
.grid_size
[0];
1081 struct lp_cs_tpool_task
*task
;
1082 mtx_lock(&screen
->cs_mutex
);
1083 task
= lp_cs_tpool_queue_task(screen
->cs_tpool
, cs_exec_fn
, &job_info
, num_tasks
);
1085 lp_cs_tpool_wait_for_task(screen
->cs_tpool
, &task
);
1086 mtx_unlock(&screen
->cs_mutex
);
1088 llvmpipe
->pipeline_statistics
.cs_invocations
+= num_tasks
* info
->block
[0] * info
->block
[1] * info
->block
[2];
1092 llvmpipe_init_compute_funcs(struct llvmpipe_context
*llvmpipe
)
1094 llvmpipe
->pipe
.create_compute_state
= llvmpipe_create_compute_state
;
1095 llvmpipe
->pipe
.bind_compute_state
= llvmpipe_bind_compute_state
;
1096 llvmpipe
->pipe
.delete_compute_state
= llvmpipe_delete_compute_state
;
1097 llvmpipe
->pipe
.launch_grid
= llvmpipe_launch_grid
;
1101 lp_csctx_destroy(struct lp_cs_context
*csctx
)
1104 for (i
= 0; i
< ARRAY_SIZE(csctx
->cs
.current_tex
); i
++) {
1105 pipe_resource_reference(&csctx
->cs
.current_tex
[i
], NULL
);
1107 for (i
= 0; i
< ARRAY_SIZE(csctx
->constants
); i
++) {
1108 pipe_resource_reference(&csctx
->constants
[i
].current
.buffer
, NULL
);
1110 for (i
= 0; i
< ARRAY_SIZE(csctx
->ssbos
); i
++) {
1111 pipe_resource_reference(&csctx
->ssbos
[i
].current
.buffer
, NULL
);
1116 struct lp_cs_context
*lp_csctx_create(struct pipe_context
*pipe
)
1118 struct lp_cs_context
*csctx
;
1120 csctx
= CALLOC_STRUCT(lp_cs_context
);