#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_coro.h"
+#include "gallivm/lp_bld_nir.h"
#include "lp_state_cs.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_perf.h"
#include "lp_screen.h"
#include "lp_memory.h"
+#include "lp_query.h"
#include "lp_cs_tpool.h"
-#include "state_tracker/sw_winsys.h"
+#include "frontend/sw_winsys.h"
+#include "nir/nir_to_tgsi_info.h"
+#include "util/mesa-sha1.h"
+#include "nir_serialize.h"
+
+/** Compute shader number (for debugging) */
+static unsigned cs_no = 0;
struct lp_cs_job_info {
   unsigned grid_size[3];  /* number of workgroups per dimension (from fill_grid_size) */
   unsigned block_size[3];  /* workgroup (local) size per dimension (info->block[]) */
   unsigned req_local_mem;  /* bytes of shared/local memory the CS requires */
+   unsigned work_dim;  /* grid dimensionality from pipe_grid_info, forwarded to the JIT function */
   struct lp_cs_exec *current;  /* execution state (jit context/function) for the bound variant */
};
struct gallivm_state *gallivm = variant->gallivm;
const struct lp_compute_shader_variant_key *key = &variant->key;
char func_name[64], func_name_coro[64];
- LLVMTypeRef arg_types[13];
+ LLVMTypeRef arg_types[17];
LLVMTypeRef func_type, coro_func_type;
LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
LLVMValueRef context_ptr;
LLVMValueRef x_size_arg, y_size_arg, z_size_arg;
LLVMValueRef grid_x_arg, grid_y_arg, grid_z_arg;
LLVMValueRef grid_size_x_arg, grid_size_y_arg, grid_size_z_arg;
- LLVMValueRef thread_data_ptr;
+ LLVMValueRef work_dim_arg, thread_data_ptr;
LLVMBasicBlockRef block;
LLVMBuilderRef builder;
struct lp_build_sampler_soa *sampler;
cs_type.norm = FALSE; /* values are not limited to [0,1] or [-1,1] */
cs_type.width = 32; /* 32-bit float */
cs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
- snprintf(func_name, sizeof(func_name), "cs%u_variant%u",
- shader->no, variant->no);
+ snprintf(func_name, sizeof(func_name), "cs_variant");
- snprintf(func_name_coro, sizeof(func_name), "cs_co_%u_variant%u",
- shader->no, variant->no);
+ snprintf(func_name_coro, sizeof(func_name), "cs_co_variant");
arg_types[0] = variant->jit_cs_context_ptr_type; /* context */
arg_types[1] = int32_type; /* block_x_size */
arg_types[7] = int32_type; /* grid_size_x */
arg_types[8] = int32_type; /* grid_size_y */
arg_types[9] = int32_type; /* grid_size_z */
- arg_types[10] = variant->jit_cs_thread_data_ptr_type; /* per thread data */
- arg_types[11] = int32_type;
- arg_types[12] = int32_type;
+ arg_types[10] = int32_type; /* work dim */
+ arg_types[11] = variant->jit_cs_thread_data_ptr_type; /* per thread data */
+ arg_types[12] = int32_type; /* coro only - num X loops */
+ arg_types[13] = int32_type; /* coro only - partials */
+ arg_types[14] = int32_type; /* coro block_x_size */
+ arg_types[15] = int32_type; /* coro block_y_size */
+ arg_types[16] = int32_type; /* coro block_z_size */
func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
- arg_types, ARRAY_SIZE(arg_types) - 2, 0);
+ arg_types, ARRAY_SIZE(arg_types) - 5, 0);
coro_func_type = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0),
arg_types, ARRAY_SIZE(arg_types), 0);
}
}
+ lp_build_coro_declare_malloc_hooks(gallivm);
+
+ if (variant->gallivm->cache->data_size)
+ return;
+
context_ptr = LLVMGetParam(function, 0);
x_size_arg = LLVMGetParam(function, 1);
y_size_arg = LLVMGetParam(function, 2);
grid_size_x_arg = LLVMGetParam(function, 7);
grid_size_y_arg = LLVMGetParam(function, 8);
grid_size_z_arg = LLVMGetParam(function, 9);
- thread_data_ptr = LLVMGetParam(function, 10);
+ work_dim_arg = LLVMGetParam(function, 10);
+ thread_data_ptr = LLVMGetParam(function, 11);
lp_build_name(context_ptr, "context");
lp_build_name(x_size_arg, "x_size");
lp_build_name(grid_size_x_arg, "grid_size_x");
lp_build_name(grid_size_y_arg, "grid_size_y");
lp_build_name(grid_size_z_arg, "grid_size_z");
+ lp_build_name(work_dim_arg, "work_dim");
lp_build_name(thread_data_ptr, "thread_data");
block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
builder = gallivm->builder;
assert(builder);
LLVMPositionBuilderAtEnd(builder, block);
- sampler = lp_llvm_sampler_soa_create(key->state);
- image = lp_llvm_image_soa_create(key->image_state);
+ sampler = lp_llvm_sampler_soa_create(key->samplers, key->nr_samplers);
+ image = lp_llvm_image_soa_create(lp_cs_variant_key_images(key), key->nr_images);
struct lp_build_loop_state loop_state[4];
LLVMValueRef num_x_loop;
lp_build_loop_begin(&loop_state[0], gallivm,
lp_build_const_int32(gallivm, 0)); /* x loop */
{
- LLVMValueRef args[13];
+ LLVMValueRef args[17];
args[0] = context_ptr;
args[1] = loop_state[0].counter;
args[2] = loop_state[1].counter;
args[7] = grid_size_x_arg;
args[8] = grid_size_y_arg;
args[9] = grid_size_z_arg;
- args[10] = thread_data_ptr;
- args[11] = num_x_loop;
- args[12] = partials;
+ args[10] = work_dim_arg;
+ args[11] = thread_data_ptr;
+ args[12] = num_x_loop;
+ args[13] = partials;
+ args[14] = x_size_arg;
+ args[15] = y_size_arg;
+ args[16] = z_size_arg;
/* idx = z * (size_x * size_y) + y * size_x + x */
LLVMValueRef coro_hdl_idx = LLVMBuildMul(gallivm->builder, loop_state[2].counter,
lp_build_const_int32(gallivm, 0), "");
/* first time here - call the coroutine function entry point */
lp_build_if(&ifstate, gallivm, cmp);
- LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 13, "");
+ LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 17, "");
LLVMBuildStore(gallivm->builder, coro_ret, coro_entry);
lp_build_else(&ifstate);
/* subsequent calls for this invocation - check if done. */
LLVMBuildRetVoid(builder);
/* This is stage (b) - generate the compute shader code inside the coroutine. */
+ LLVMValueRef block_x_size_arg, block_y_size_arg, block_z_size_arg;
context_ptr = LLVMGetParam(coro, 0);
x_size_arg = LLVMGetParam(coro, 1);
y_size_arg = LLVMGetParam(coro, 2);
grid_size_x_arg = LLVMGetParam(coro, 7);
grid_size_y_arg = LLVMGetParam(coro, 8);
grid_size_z_arg = LLVMGetParam(coro, 9);
- thread_data_ptr = LLVMGetParam(coro, 10);
- num_x_loop = LLVMGetParam(coro, 11);
- partials = LLVMGetParam(coro, 12);
+ work_dim_arg = LLVMGetParam(coro, 10);
+ thread_data_ptr = LLVMGetParam(coro, 11);
+ num_x_loop = LLVMGetParam(coro, 12);
+ partials = LLVMGetParam(coro, 13);
+ block_x_size_arg = LLVMGetParam(coro, 14);
+ block_y_size_arg = LLVMGetParam(coro, 15);
+ block_z_size_arg = LLVMGetParam(coro, 16);
block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "entry");
LLVMPositionBuilderAtEnd(builder, block);
{
- const struct tgsi_token *tokens = shader->base.tokens;
LLVMValueRef consts_ptr, num_consts_ptr;
LLVMValueRef ssbo_ptr, num_ssbo_ptr;
LLVMValueRef shared_ptr;
+ LLVMValueRef kernel_args_ptr;
struct lp_build_mask_context mask;
struct lp_bld_tgsi_system_values system_values;
num_consts_ptr = lp_jit_cs_context_num_constants(gallivm, context_ptr);
ssbo_ptr = lp_jit_cs_context_ssbos(gallivm, context_ptr);
num_ssbo_ptr = lp_jit_cs_context_num_ssbos(gallivm, context_ptr);
+ kernel_args_ptr = lp_jit_cs_context_kernel_args(gallivm, context_ptr);
+
shared_ptr = lp_jit_cs_thread_data_shared(gallivm, thread_data_ptr);
/* these are coroutine entrypoint necessities */
for (i = 0; i < 3; i++)
system_values.grid_size = LLVMBuildInsertElement(builder, system_values.grid_size, gstids[i], lp_build_const_int32(gallivm, i), "");
+ system_values.work_dim = work_dim_arg;
+
+ LLVMValueRef bsize[3] = { block_x_size_arg, block_y_size_arg, block_z_size_arg };
+ system_values.block_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
+ for (i = 0; i < 3; i++)
+ system_values.block_size = LLVMBuildInsertElement(builder, system_values.block_size, bsize[i], lp_build_const_int32(gallivm, i), "");
+
LLVMValueRef last_x_loop = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, x_size_arg, LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), ""), "");
LLVMValueRef use_partial_mask = LLVMBuildAnd(gallivm->builder, last_x_loop, has_partials, "");
struct lp_build_if_state if_state;
params.image = image;
params.shared_ptr = shared_ptr;
params.coro = &coro_info;
+ params.kernel_args = kernel_args_ptr;
- lp_build_tgsi_soa(gallivm, tokens, ¶ms, NULL);
+ if (shader->base.type == PIPE_SHADER_IR_TGSI)
+ lp_build_tgsi_soa(gallivm, shader->base.tokens, ¶ms, NULL);
+ else
+ lp_build_nir_soa(gallivm, shader->base.ir.nir, ¶ms,
+ NULL);
mask_val = lp_build_mask_end(&mask);
{
struct lp_compute_shader *shader;
int nr_samplers, nr_sampler_views;
+
shader = CALLOC_STRUCT(lp_compute_shader);
if (!shader)
return NULL;
- assert(templ->ir_type == PIPE_SHADER_IR_TGSI);
- shader->base.tokens = tgsi_dup_tokens(templ->prog);
+ shader->no = cs_no++;
+
+ shader->base.type = templ->ir_type;
+ if (templ->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
+ struct blob_reader reader;
+ const struct pipe_binary_program_header *hdr = templ->prog;
+
+ blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
+ shader->base.ir.nir = nir_deserialize(NULL, pipe->screen->get_compiler_options(pipe->screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE), &reader);
+ shader->base.type = PIPE_SHADER_IR_NIR;
+
+ pipe->screen->finalize_nir(pipe->screen, shader->base.ir.nir, false);
+ } else if (templ->ir_type == PIPE_SHADER_IR_NIR)
+ shader->base.ir.nir = (struct nir_shader *)templ->prog;
+
+ if (shader->base.type == PIPE_SHADER_IR_TGSI) {
+ /* get/save the summary info for this shader */
+ lp_build_tgsi_info(templ->prog, &shader->info);
+
+ /* we need to keep a local copy of the tokens */
+ shader->base.tokens = tgsi_dup_tokens(templ->prog);
+ } else {
+ nir_tgsi_scan_shader(shader->base.ir.nir, &shader->info.base, false);
+ }
shader->req_local_mem = templ->req_local_mem;
- lp_build_tgsi_info(shader->base.tokens, &shader->info);
make_empty_list(&shader->variants);
nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
- shader->variant_key_size = Offset(struct lp_compute_shader_variant_key,
- state[MAX2(nr_samplers, nr_sampler_views)]);
+ int nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
+ shader->variant_key_size = lp_cs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);
+
return shader;
}
struct lp_compute_shader *shader = cs;
struct lp_cs_variant_list_item *li;
+ if (llvmpipe->cs == cs)
+ llvmpipe->cs = NULL;
+ for (unsigned i = 0; i < shader->max_global_buffers; i++)
+ pipe_resource_reference(&shader->global_buffers[i], NULL);
+ FREE(shader->global_buffers);
+
/* Delete all the variants */
li = first_elem(&shader->variants);
while(!at_end(&shader->variants, li)) {
llvmpipe_remove_cs_shader_variant(llvmpipe, li->base);
li = next;
}
+ if (shader->base.ir.nir)
+ ralloc_free(shader->base.ir.nir);
tgsi_free_tokens(shader->base.tokens);
FREE(shader);
}
-static void
+static struct lp_compute_shader_variant_key *
make_variant_key(struct llvmpipe_context *lp,
struct lp_compute_shader *shader,
- struct lp_compute_shader_variant_key *key)
+ char *store)
{
int i;
-
- memset(key, 0, shader->variant_key_size);
+ struct lp_compute_shader_variant_key *key;
+ key = (struct lp_compute_shader_variant_key *)store;
+ memset(key, 0, offsetof(struct lp_compute_shader_variant_key, samplers[1]));
/* This value will be the same for all the variants of a given shader:
*/
key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
+ struct lp_sampler_static_state *cs_sampler;
+
+ cs_sampler = key->samplers;
for(i = 0; i < key->nr_samplers; ++i) {
if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
- lp_sampler_static_sampler_state(&key->state[i].sampler_state,
+ lp_sampler_static_sampler_state(&cs_sampler[i].sampler_state,
lp->samplers[PIPE_SHADER_COMPUTE][i]);
}
}
* used views may be included in the shader key.
*/
if(shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
- lp_sampler_static_texture_state(&key->state[i].texture_state,
+ lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
}
}
key->nr_sampler_views = key->nr_samplers;
for(i = 0; i < key->nr_sampler_views; ++i) {
if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
- lp_sampler_static_texture_state(&key->state[i].texture_state,
+ lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
}
}
}
+ struct lp_image_static_state *lp_image;
+ lp_image = lp_cs_variant_key_images(key);
key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
for (i = 0; i < key->nr_images; ++i) {
if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
- lp_sampler_static_texture_state_image(&key->image_state[i].image_state,
+ lp_sampler_static_texture_state_image(&lp_image[i].image_state,
&lp->images[PIPE_SHADER_COMPUTE][i]);
}
}
+ return key;
}
static void
debug_printf("cs variant %p:\n", (void *) key);
for (i = 0; i < key->nr_samplers; ++i) {
- const struct lp_static_sampler_state *sampler = &key->state[i].sampler_state;
+ const struct lp_static_sampler_state *sampler = &key->samplers[i].sampler_state;
debug_printf("sampler[%u] = \n", i);
debug_printf(" .wrap = %s %s %s\n",
util_str_tex_wrap(sampler->wrap_s, TRUE),
debug_printf(" .apply_max_lod = %u\n", sampler->apply_max_lod);
}
for (i = 0; i < key->nr_sampler_views; ++i) {
- const struct lp_static_texture_state *texture = &key->state[i].texture_state;
+ const struct lp_static_texture_state *texture = &key->samplers[i].texture_state;
debug_printf("texture[%u] = \n", i);
debug_printf(" .format = %s\n",
util_format_name(texture->format));
texture->pot_height,
texture->pot_depth);
}
+ struct lp_image_static_state *images = lp_cs_variant_key_images(key);
for (i = 0; i < key->nr_images; ++i) {
- const struct lp_static_texture_state *image = &key->image_state[i].image_state;
+ const struct lp_static_texture_state *image = &images[i].image_state;
debug_printf("image[%u] = \n", i);
debug_printf(" .format = %s\n",
util_format_name(image->format));
{
debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
variant->shader->no, variant->no);
- tgsi_dump(variant->shader->base.tokens, 0);
+ if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
+ tgsi_dump(variant->shader->base.tokens, 0);
+ else
+ nir_print_shader(variant->shader->base.ir.nir, stderr);
dump_cs_variant_key(&variant->key);
debug_printf("\n");
}
+/**
+ * Compute the disk-cache key for a compute shader variant.
+ *
+ * The key is a SHA1 over the serialized NIR plus the variant key, so a
+ * change in either the IR or any variant state (samplers, images, ...)
+ * produces a distinct cache entry.
+ *
+ * \param variant            variant whose shader must hold NIR in base.ir.nir
+ * \param ir_sha1_cache_key  receives the 20-byte SHA1 digest
+ */
+static void
+lp_cs_get_ir_cache_key(struct lp_compute_shader_variant *variant,
+                       unsigned char ir_sha1_cache_key[20])
+{
+   struct blob blob = { 0 };
+   unsigned ir_size;
+   void *ir_binary;
+
+   blob_init(&blob);
+   /* third arg strips debug/non-semantic info — TODO confirm against
+    * nir_serialize()'s prototype in this tree */
+   nir_serialize(&blob, variant->shader->base.ir.nir, true);
+   ir_binary = blob.data;
+   ir_size = blob.size;
+
+   struct mesa_sha1 ctx;
+   _mesa_sha1_init(&ctx);
+   /* variant_key_size covers the variable-length sampler/image tail of
+    * the key, not just sizeof(variant->key) */
+   _mesa_sha1_update(&ctx, &variant->key, variant->shader->variant_key_size);
+   _mesa_sha1_update(&ctx, ir_binary, ir_size);
+   _mesa_sha1_final(&ctx, ir_sha1_cache_key);
+
+   blob_finish(&blob);
+}
+
static struct lp_compute_shader_variant *
generate_variant(struct llvmpipe_context *lp,
struct lp_compute_shader *shader,
const struct lp_compute_shader_variant_key *key)
{
+ struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
struct lp_compute_shader_variant *variant;
char module_name[64];
-
- variant = CALLOC_STRUCT(lp_compute_shader_variant);
+ unsigned char ir_sha1_cache_key[20];
+ struct lp_cached_code cached = { 0 };
+ bool needs_caching = false;
+ variant = MALLOC(sizeof *variant + shader->variant_key_size - sizeof variant->key);
if (!variant)
return NULL;
+ memset(variant, 0, sizeof(*variant));
snprintf(module_name, sizeof(module_name), "cs%u_variant%u",
shader->no, shader->variants_created);
- variant->gallivm = gallivm_create(module_name, lp->context);
+ variant->shader = shader;
+ memcpy(&variant->key, key, shader->variant_key_size);
+
+ if (shader->base.ir.nir) {
+ lp_cs_get_ir_cache_key(variant, ir_sha1_cache_key);
+
+ lp_disk_cache_find_shader(screen, &cached, ir_sha1_cache_key);
+ if (!cached.data_size)
+ needs_caching = true;
+ }
+ variant->gallivm = gallivm_create(module_name, lp->context, &cached);
if (!variant->gallivm) {
FREE(variant);
return NULL;
}
- variant->shader = shader;
variant->list_item_global.base = variant;
variant->list_item_local.base = variant;
variant->no = shader->variants_created++;
- memcpy(&variant->key, key, shader->variant_key_size);
+
if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
lp_debug_cs_variant(variant);
gallivm_compile_module(variant->gallivm);
+ lp_build_coro_add_malloc_hooks(variant->gallivm);
variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);
variant->jit_function = (lp_jit_cs_func)gallivm_jit_function(variant->gallivm, variant->function);
+ if (needs_caching) {
+ lp_disk_cache_insert_shader(screen, &cached, ir_sha1_cache_key);
+ }
gallivm_free_ir(variant->gallivm);
return variant;
}
{
struct lp_compute_shader *shader = lp->cs;
- struct lp_compute_shader_variant_key key;
+ struct lp_compute_shader_variant_key *key;
struct lp_compute_shader_variant *variant = NULL;
struct lp_cs_variant_list_item *li;
+ char store[LP_CS_MAX_VARIANT_KEY_SIZE];
- make_variant_key(lp, shader, &key);
+ key = make_variant_key(lp, shader, store);
/* Search the variants for one which matches the key */
li = first_elem(&shader->variants);
while(!at_end(&shader->variants, li)) {
- if(memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
+ if(memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
variant = li->base;
break;
}
* Generate the new variant.
*/
t0 = os_time_get();
- variant = generate_variant(lp, shader, &key);
+ variant = generate_variant(lp, shader, key);
t1 = os_time_get();
dt = t1 - t0;
LP_COUNT_ADD(llvm_compile_time, dt);
jit_tex->mip_offsets[0] = 0;
jit_tex->row_stride[0] = 0;
jit_tex->img_stride[0] = 0;
+ jit_tex->num_samples = 0;
+ jit_tex->sample_stride = 0;
}
else {
jit_tex->width = res->width0;
jit_tex->depth = res->depth0;
jit_tex->first_level = first_level;
jit_tex->last_level = last_level;
+ jit_tex->num_samples = res->nr_samples;
+ jit_tex->sample_stride = 0;
if (llvmpipe_resource_is_texture(res)) {
for (j = first_level; j <= last_level; j++) {
jit_tex->row_stride[j] = lp_tex->row_stride[j];
jit_tex->img_stride[j] = lp_tex->img_stride[j];
}
+ jit_tex->sample_stride = lp_tex->sample_stride;
if (res->target == PIPE_TEXTURE_1D_ARRAY ||
res->target == PIPE_TEXTURE_2D_ARRAY ||
jit_tex->height = res->height0;
jit_tex->depth = res->depth0;
jit_tex->first_level = jit_tex->last_level = 0;
+ jit_tex->num_samples = res->nr_samples;
+ jit_tex->sample_stride = 0;
assert(jit_tex->base);
}
}
jit_image->width = res->width0;
jit_image->height = res->height0;
jit_image->depth = res->depth0;
+ jit_image->num_samples = res->nr_samples;
if (llvmpipe_resource_is_texture(res)) {
uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];
jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
+ jit_image->sample_stride = lp_res->sample_stride;
jit_image->base = (uint8_t *)jit_image->base + mip_offset;
} else {
unsigned view_blocksize = util_format_get_blocksize(image->format);
for (i = 0; i < ARRAY_SIZE(csctx->constants); ++i) {
struct pipe_resource *buffer = csctx->constants[i].current.buffer;
const ubyte *current_data = NULL;
-
+ unsigned current_size = csctx->constants[i].current.buffer_size;
if (buffer) {
/* resource buffer */
current_data = (ubyte *) llvmpipe_resource_data(buffer);
current_data = (ubyte *) csctx->constants[i].current.user_buffer;
}
- if (current_data) {
+ if (current_data && current_size >= sizeof(float)) {
current_data += csctx->constants[i].current.buffer_offset;
-
csctx->cs.current.jit_context.constants[i] = (const float *)current_data;
- csctx->cs.current.jit_context.num_constants[i] = csctx->constants[i].current.buffer_size;
+ csctx->cs.current.jit_context.num_constants[i] =
+ DIV_ROUND_UP(csctx->constants[i].current.buffer_size,
+ lp_get_constant_buffer_stride(llvmpipe->pipe.screen));
} else {
- csctx->cs.current.jit_context.constants[i] = NULL;
+ static const float fake_const_buf[4];
+ csctx->cs.current.jit_context.constants[i] = fake_const_buf;
csctx->cs.current.jit_context.num_constants[i] = 0;
}
}
}
static void
-llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe)
+llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe, void *input)
{
- if (llvmpipe->cs_dirty & (LP_CSNEW_CS))
- llvmpipe_update_cs(llvmpipe);
-
if (llvmpipe->cs_dirty & LP_CSNEW_CONSTANTS) {
lp_csctx_set_cs_constants(llvmpipe->csctx,
ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_COMPUTE]),
ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_COMPUTE]),
llvmpipe->images[PIPE_SHADER_COMPUTE]);
+ if (input) {
+ struct lp_cs_context *csctx = llvmpipe->csctx;
+ csctx->input = input;
+ csctx->cs.current.jit_context.kernel_args = input;
+ }
+
+ if (llvmpipe->cs_dirty & (LP_CSNEW_CS |
+ LP_CSNEW_IMAGES |
+ LP_CSNEW_SAMPLER_VIEW |
+ LP_CSNEW_SAMPLER))
+ llvmpipe_update_cs(llvmpipe);
+
+
llvmpipe->cs_dirty = 0;
}
variant->jit_function(&job_info->current->jit_context,
job_info->block_size[0], job_info->block_size[1], job_info->block_size[2],
grid_x, grid_y, grid_z,
- job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2],
+ job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2], job_info->work_dim,
&thread_data);
}
struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
struct lp_cs_job_info job_info;
+ if (!llvmpipe_check_render_cond(llvmpipe))
+ return;
+
memset(&job_info, 0, sizeof(job_info));
- llvmpipe_cs_update_derived(llvmpipe);
+ llvmpipe_cs_update_derived(llvmpipe, info->input);
fill_grid_size(pipe, info, job_info.grid_size);
job_info.block_size[0] = info->block[0];
job_info.block_size[1] = info->block[1];
job_info.block_size[2] = info->block[2];
+ job_info.work_dim = info->work_dim;
job_info.req_local_mem = llvmpipe->cs->req_local_mem;
job_info.current = &llvmpipe->csctx->cs.current;
llvmpipe->pipeline_statistics.cs_invocations += num_tasks * info->block[0] * info->block[1] * info->block[2];
}
+/* Stub: llvmpipe does not use pipe_surface-style compute resources
+ * (presumably only set_global_binding is needed for its CL-style path —
+ * NOTE(review): confirm no state tracker relies on this hook doing work).
+ * It is installed so callers of the cso hook do not crash. */
+static void
+llvmpipe_set_compute_resources(struct pipe_context *pipe,
+                               unsigned start, unsigned count,
+                               struct pipe_surface **resources)
+{
+
+
+}
+
+/**
+ * Bind (or unbind) global buffers for the current compute shader.
+ *
+ * On bind, *handles[i] holds the caller-requested byte offset on input
+ * and receives the CPU virtual address of the buffer at that offset on
+ * output.  A NULL \p resources unbinds \p count slots starting at
+ * \p first.
+ */
+static void
+llvmpipe_set_global_binding(struct pipe_context *pipe,
+                            unsigned first, unsigned count,
+                            struct pipe_resource **resources,
+                            uint32_t **handles)
+{
+   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+   struct lp_compute_shader *cs = llvmpipe->cs;
+   unsigned i;
+
+   if (first + count > cs->max_global_buffers) {
+      unsigned old_max = cs->max_global_buffers;
+      /* Use a temporary for realloc: assigning the result directly would
+       * leak the old array (and the resources it references) on failure,
+       * and updating max_global_buffers before the check would leave the
+       * shader claiming capacity it does not have. */
+      struct pipe_resource **new_buffers =
+         realloc(cs->global_buffers,
+                 (first + count) * sizeof(cs->global_buffers[0]));
+      if (!new_buffers)
+         return; /* old array and old max_global_buffers stay valid */
+
+      cs->global_buffers = new_buffers;
+      cs->max_global_buffers = first + count;
+      memset(&cs->global_buffers[old_max], 0,
+             (cs->max_global_buffers - old_max) * sizeof(cs->global_buffers[0]));
+   }
+
+   if (!resources) {
+      /* unbind the whole range */
+      for (i = 0; i < count; i++)
+         pipe_resource_reference(&cs->global_buffers[first + i], NULL);
+      return;
+   }
+
+   for (i = 0; i < count; i++) {
+      uintptr_t va;
+      uint32_t offset;
+      pipe_resource_reference(&cs->global_buffers[first + i], resources[i]);
+      struct llvmpipe_resource *lp_res = llvmpipe_resource(resources[i]);
+      offset = *handles[i];
+      va = (uintptr_t)((char *)lp_res->data + offset);
+      /* NOTE(review): this writes sizeof(uintptr_t) bytes through a
+       * uint32_t *, so callers must provide 64-bit storage on 64-bit
+       * hosts — confirm against the state-tracker contract. */
+      memcpy(handles[i], &va, sizeof(va));
+   }
+}
+
/* Install the compute-shader entry points on the llvmpipe pipe_context. */
void
llvmpipe_init_compute_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_compute_state = llvmpipe_create_compute_state;
   llvmpipe->pipe.bind_compute_state = llvmpipe_bind_compute_state;
   llvmpipe->pipe.delete_compute_state = llvmpipe_delete_compute_state;
+   llvmpipe->pipe.set_compute_resources = llvmpipe_set_compute_resources;
+   llvmpipe->pipe.set_global_binding = llvmpipe_set_global_binding;
   llvmpipe->pipe.launch_grid = llvmpipe_launch_grid;
}