struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
struct pipe_shader_buffer ssbos[PIPE_SHADER_TYPES][LP_MAX_TGSI_SHADER_BUFFERS];
+ struct pipe_image_view images[PIPE_SHADER_TYPES][LP_MAX_TGSI_SHADER_IMAGES];
unsigned num_samplers[PIPE_SHADER_TYPES];
unsigned num_sampler_views[PIPE_SHADER_TYPES];
+ unsigned num_images[PIPE_SHADER_TYPES];
unsigned num_vertex_buffers;
setup->dirty |= LP_SETUP_NEW_SSBOS;
}
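+/**
+ * Bind the fragment shader image views on the setup/rasterizer side and
+ * fill in the per-image lp_jit_image data (base pointer, sizes, strides)
+ * consumed by the generated fragment code.
+ */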
+void
+lp_setup_set_fs_images(struct lp_setup_context *setup,
+ unsigned num,
+ struct pipe_image_view *images)
+{
+ unsigned i;
+
+ LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);
+
+ assert(num <= ARRAY_SIZE(setup->images));
+
+ for (i = 0; i < num; ++i) {
+ struct pipe_image_view *image = &images[i];
+ util_copy_image_view(&setup->images[i].current, image);
+
+ struct pipe_resource *res = image->resource;
+ struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
+ struct lp_jit_image *jit_image;
+
+ jit_image = &setup->fs.current.jit_context.images[i];
+ if (!lp_res)
+ continue;
+ if (!lp_res->dt) {
+ /* regular texture (not a display target) - use the backing storage directly */
+ if (llvmpipe_resource_is_texture(res)) {
+ jit_image->base = lp_res->tex_data;
+ } else
+ jit_image->base = lp_res->data;
+
+ jit_image->width = res->width0;
+ jit_image->height = res->height0;
+ jit_image->depth = res->depth0;
+
+ if (llvmpipe_resource_is_texture(res)) {
+ uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];
+
+ jit_image->width = u_minify(jit_image->width, image->u.tex.level);
+ jit_image->height = u_minify(jit_image->height, image->u.tex.level);
+
+ if (res->target == PIPE_TEXTURE_1D_ARRAY ||
+ res->target == PIPE_TEXTURE_2D_ARRAY ||
+ res->target == PIPE_TEXTURE_3D ||
+ res->target == PIPE_TEXTURE_CUBE ||
+ res->target == PIPE_TEXTURE_CUBE_ARRAY) {
+ /*
+ * For layered images (arrays, 3D, cubes) the generated code only sees
+ * a base pointer and a depth, so expose the bound layer range as the
+ * depth (last_layer - first_layer + 1) and fold first_layer into the
+ * mip offset via the image stride.
+ */
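+ /*
+ * Illustrative example (made-up numbers): binding level 2 of a 2D
+ * array image with first_layer 3 and last_layer 6 gives depth = 4
+ * and mip_offset = mip_offsets[2] + 3 * img_stride[2].
+ */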
+ jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
+ mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
+ } else
+ jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);
+
+ jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
+ jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
+ jit_image->base = (uint8_t *)jit_image->base + mip_offset;
+ }
+ else {
+ unsigned view_blocksize = util_format_get_blocksize(image->format);
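+ /* Buffer images expose the element count as the width, e.g. a
+ * (hypothetical) PIPE_FORMAT_R32_FLOAT view of 4096 bytes has a
+ * 4-byte blocksize and thus a width of 1024 elements. */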
+ jit_image->width = image->u.buf.size / view_blocksize;
+ jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
+ }
+ }
+ }
+ for (; i < ARRAY_SIZE(setup->images); i++) {
+ util_copy_image_view(&setup->images[i].current, NULL);
+ }
+ setup->dirty |= LP_SETUP_NEW_IMAGES;
+}
void
lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
}
+ for (i = 0; i < ARRAY_SIZE(setup->images); i++) {
+ if (setup->images[i].current.resource == texture)
+ return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
+ }
+
return LP_UNREFERENCED;
}
unsigned num,
struct pipe_shader_buffer *buffers);
+void
+lp_setup_set_fs_images(struct lp_setup_context *setup,
+ unsigned num,
+ struct pipe_image_view *images);
+
void
lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
float alpha_ref_value );
#define LP_SETUP_NEW_SCISSOR 0x08
#define LP_SETUP_NEW_VIEWPORTS 0x10
#define LP_SETUP_NEW_SSBOS 0x20
+#define LP_SETUP_NEW_IMAGES 0x40
struct lp_setup_variant;
struct pipe_shader_buffer current;
} ssbos[LP_MAX_TGSI_SHADER_BUFFERS];
+ struct {
+ struct pipe_image_view current;
+ } images[LP_MAX_TGSI_SHADER_IMAGES];
+
struct {
struct pipe_blend_color current;
uint8_t *stored;
#define LP_NEW_SO 0x20000
#define LP_NEW_SO_BUFFERS 0x40000
#define LP_NEW_FS_SSBOS 0x80000
-
+#define LP_NEW_FS_IMAGES 0x100000
struct vertex_info;
ARRAY_SIZE(llvmpipe->ssbos[PIPE_SHADER_FRAGMENT]),
llvmpipe->ssbos[PIPE_SHADER_FRAGMENT]);
+ if (llvmpipe->dirty & LP_NEW_FS_IMAGES)
+ lp_setup_set_fs_images(llvmpipe->setup,
+ ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_FRAGMENT]),
+ llvmpipe->images[PIPE_SHADER_FRAGMENT]);
+
if (llvmpipe->dirty & (LP_NEW_SAMPLER_VIEW))
lp_setup_set_fragment_sampler_views(llvmpipe->setup,
llvmpipe->num_sampler_views[PIPE_SHADER_FRAGMENT],
LLVMValueRef num_loop,
struct lp_build_interp_soa_context *interp,
const struct lp_build_sampler_soa *sampler,
+ const struct lp_build_image_soa *image,
LLVMValueRef mask_store,
LLVMValueRef (*out_color)[4],
LLVMValueRef depth_ptr,
params.info = &shader->info.base;
params.ssbo_ptr = ssbo_ptr;
params.ssbo_sizes_ptr = num_ssbo_ptr;
+ params.image = image;
/* Build the actual shader */
lp_build_tgsi_soa(gallivm, tokens, &params,
LLVMBasicBlockRef block;
LLVMBuilderRef builder;
struct lp_build_sampler_soa *sampler;
+ struct lp_build_image_soa *image;
struct lp_build_interp_soa_context interp;
LLVMValueRef fs_mask[16 / 4];
LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][16 / 4];
/* code generated texture sampling */
sampler = lp_llvm_sampler_soa_create(key->samplers);
+ image = lp_llvm_image_soa_create(lp_fs_variant_key_images(key));
num_fs = 16 / fs_type.length; /* number of loops per 4x4 stamp */
/* for 1d resources only run "upper half" of stamp */
num_loop,
&interp,
sampler,
+ image,
mask_store, /* output */
color_store,
depth_ptr,
}
sampler->destroy(sampler);
-
+ image->destroy(image);
/* Loop over color outputs / color buffers to do blending.
*/
for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
texture->pot_height,
texture->pot_depth);
}
+ struct lp_image_static_state *images = lp_fs_variant_key_images(key);
+ for (i = 0; i < key->nr_images; ++i) {
+ const struct lp_static_texture_state *image = &images[i].image_state;
+ debug_printf("image[%u] = \n", i);
+ debug_printf(" .format = %s\n",
+ util_format_name(image->format));
+ debug_printf(" .target = %s\n",
+ util_str_tex_target(image->target, TRUE));
+ debug_printf(" .level_zero_only = %u\n",
+ image->level_zero_only);
+ debug_printf(" .pot = %u %u %u\n",
+ image->pot_width,
+ image->pot_height,
+ image->pot_depth);
+ }
}
struct lp_fragment_shader *shader;
int nr_samplers;
int nr_sampler_views;
+ int nr_images;
int i;
shader = CALLOC_STRUCT(lp_fragment_shader);
nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
-
- shader->variant_key_size = lp_fs_variant_key_size(MAX2(nr_samplers, nr_sampler_views));
+ nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
+ shader->variant_key_size = lp_fs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);
for (i = 0; i < shader->info.base.num_inputs; i++) {
shader->inputs[i].usage_mask = shader->info.base.input_usage_mask[i];
}
}
+static void
+llvmpipe_set_shader_images(struct pipe_context *pipe,
+ enum pipe_shader_type shader, unsigned start_slot,
+ unsigned count, const struct pipe_image_view *images)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ unsigned i, idx;
+
+ draw_flush(llvmpipe->draw);
+ for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
+ const struct pipe_image_view *image = images ? &images[idx] : NULL;
+
+ util_copy_image_view(&llvmpipe->images[shader][i], image);
+ }
+
+ llvmpipe->num_images[shader] = start_slot + count;
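+ /* Vertex/geometry images are consumed by the draw module; fragment
+ * images are flushed to the setup context via LP_NEW_FS_IMAGES. */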
+ if (shader == PIPE_SHADER_VERTEX ||
+ shader == PIPE_SHADER_GEOMETRY) {
+ draw_set_images(llvmpipe->draw,
+ shader,
+ llvmpipe->images[shader],
+ start_slot + count);
+ } else
+ llvmpipe->dirty |= LP_NEW_FS_IMAGES;
+}
+
/**
* Return the blend factor equivalent to a destination alpha of one.
*/
}
}
}
+
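+ /* Capture the static state (format, target, ...) of every bound image
+ * so the generated fragment shader variant can specialize on it. */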
+ struct lp_image_static_state *lp_image = lp_fs_variant_key_images(key);
+ key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
+ for (i = 0; i < key->nr_images; ++i) {
+ if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
+ lp_sampler_static_texture_state_image(&lp_image[i].image_state,
+ &lp->images[PIPE_SHADER_FRAGMENT][i]);
+ }
+ }
return key;
}
llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
llvmpipe->pipe.set_shader_buffers = llvmpipe_set_shader_buffers;
+ llvmpipe->pipe.set_shader_images = llvmpipe_set_shader_images;
}
};
+struct lp_image_static_state
+{
+ struct lp_static_texture_state image_state;
+};
+
struct lp_fragment_shader_variant_key
{
struct pipe_depth_state depth;
unsigned nr_cbufs:8;
unsigned nr_samplers:8; /* actually derivable from just the shader */
unsigned nr_sampler_views:8; /* actually derivable from just the shader */
+ unsigned nr_images:8; /* actually derivable from just the shader */
unsigned flatshade:1;
unsigned occlusion_count:1;
unsigned resource_1d:1;
enum pipe_format cbuf_format[PIPE_MAX_COLOR_BUFS];
struct lp_sampler_static_state samplers[1];
+ /* followed by variable number of images */
};
#define LP_FS_MAX_VARIANT_KEY_SIZE \
(sizeof(struct lp_fragment_shader_variant_key) + \
- PIPE_MAX_SHADER_SAMPLER_VIEWS * sizeof(struct lp_sampler_static_state))
+ PIPE_MAX_SHADER_SAMPLER_VIEWS * sizeof(struct lp_sampler_static_state) + \
+ PIPE_MAX_SHADER_IMAGES * sizeof(struct lp_image_static_state))
static inline size_t
-lp_fs_variant_key_size(unsigned nr_samplers)
+lp_fs_variant_key_size(unsigned nr_samplers, unsigned nr_images)
{
unsigned samplers = nr_samplers > 1 ? (nr_samplers - 1) : 0;
return (sizeof(struct lp_fragment_shader_variant_key) +
- samplers * sizeof(struct lp_sampler_static_state));
+ samplers * sizeof(struct lp_sampler_static_state) +
+ nr_images * sizeof(struct lp_image_static_state));
+}
+
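+/*
+ * The variant key is variable-sized: key->samplers[] holds nr_samplers
+ * entries (the struct declares one, the rest are allocated after it),
+ * followed by nr_images lp_image_static_state entries, so the image
+ * array starts just past the last sampler.
+ */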
+static inline struct lp_image_static_state *
+lp_fs_variant_key_images(struct lp_fragment_shader_variant_key *key)
+{
+ return (struct lp_image_static_state *)
+ &key->samplers[key->nr_samplers];
}
/** doubly-linked list item */
struct llvmpipe_sampler_dynamic_state dynamic_state;
};
+struct llvmpipe_image_dynamic_state
+{
+ struct lp_sampler_dynamic_state base;
+
+ const struct lp_image_static_state *static_state;
+};
+
+/**
+ * This is the bridge between our images and the TGSI translator.
+ */
+struct lp_llvm_image_soa
+{
+ struct lp_build_image_soa base;
+
+ struct llvmpipe_image_dynamic_state dynamic_state;
+};
+
/**
* Fetch the specified member of the lp_jit_texture structure.
LP_LLVM_SAMPLER_MEMBER(border_color, LP_JIT_SAMPLER_BORDER_COLOR, FALSE)
+/**
+ * Fetch the specified member of the lp_jit_image structure.
+ * \param emit_load if TRUE, emit the LLVM load instruction to actually
+ * fetch the field's value. Otherwise, just emit the
+ * GEP code to address the field.
+ *
+ * @sa http://llvm.org/docs/GetElementPtr.html
+ */
+static LLVMValueRef
+lp_llvm_image_member(const struct lp_sampler_dynamic_state *base,
+ struct gallivm_state *gallivm,
+ LLVMValueRef context_ptr,
+ unsigned image_unit,
+ unsigned member_index,
+ const char *member_name,
+ boolean emit_load)
+{
+ LLVMBuilderRef builder = gallivm->builder;
+ LLVMValueRef indices[4];
+ LLVMValueRef ptr;
+ LLVMValueRef res;
+
+ assert(image_unit < PIPE_MAX_SHADER_IMAGES);
+
+ /* context[0] */
+ indices[0] = lp_build_const_int32(gallivm, 0);
+ /* context[0].images */
+ indices[1] = lp_build_const_int32(gallivm, LP_JIT_CTX_IMAGES);
+ /* context[0].images[unit] */
+ indices[2] = lp_build_const_int32(gallivm, image_unit);
+ /* context[0].images[unit].member */
+ indices[3] = lp_build_const_int32(gallivm, member_index);
+
+ ptr = LLVMBuildGEP(builder, context_ptr, indices, ARRAY_SIZE(indices), "");
+
+ if (emit_load)
+ res = LLVMBuildLoad(builder, ptr, "");
+ else
+ res = ptr;
+
+ lp_build_name(res, "context.image%u.%s", image_unit, member_name);
+
+ return res;
+}
+
+
+/**
+ * Helper macro to instantiate the functions that generate the code to
+ * fetch the members of lp_jit_image to fulfill the image code
+ * generator's requests.
+ *
+ * This complexity is the price we have to pay to keep the image code
+ * generator a reusable module without dependencies on llvmpipe
+ * internals.
+ */
+#define LP_LLVM_IMAGE_MEMBER(_name, _index, _emit_load) \
+ static LLVMValueRef \
+ lp_llvm_image_##_name( const struct lp_sampler_dynamic_state *base, \
+ struct gallivm_state *gallivm, \
+ LLVMValueRef context_ptr, \
+ unsigned image_unit) \
+ { \
+ return lp_llvm_image_member(base, gallivm, context_ptr, \
+ image_unit, _index, #_name, _emit_load ); \
+ }
+
+
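+/*
+ * For example, LP_LLVM_IMAGE_MEMBER(width, LP_JIT_IMAGE_WIDTH, TRUE) below
+ * defines lp_llvm_image_width(), which loads context.images[unit].width.
+ */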
+LP_LLVM_IMAGE_MEMBER(width, LP_JIT_IMAGE_WIDTH, TRUE)
+LP_LLVM_IMAGE_MEMBER(height, LP_JIT_IMAGE_HEIGHT, TRUE)
+LP_LLVM_IMAGE_MEMBER(depth, LP_JIT_IMAGE_DEPTH, TRUE)
+LP_LLVM_IMAGE_MEMBER(base_ptr, LP_JIT_IMAGE_BASE, TRUE)
+LP_LLVM_IMAGE_MEMBER(row_stride, LP_JIT_IMAGE_ROW_STRIDE, TRUE)
+LP_LLVM_IMAGE_MEMBER(img_stride, LP_JIT_IMAGE_IMG_STRIDE, TRUE)
+
#if LP_USE_TEXTURE_CACHE
static LLVMValueRef
lp_llvm_texture_cache_ptr(const struct lp_sampler_dynamic_state *base,
return &sampler->base;
}
+static void
+lp_llvm_image_soa_destroy(struct lp_build_image_soa *image)
+{
+ FREE(image);
+}
+
+static void
+lp_llvm_image_soa_emit_op(const struct lp_build_image_soa *base,
+ struct gallivm_state *gallivm,
+ const struct lp_img_params *params)
+{
+ struct lp_llvm_image_soa *image = (struct lp_llvm_image_soa *)base;
+ unsigned image_index = params->image_index;
+ assert(image_index < PIPE_MAX_SHADER_IMAGES);
+
+ lp_build_img_op_soa(&image->dynamic_state.static_state[image_index].image_state,
+ &image->dynamic_state.base,
+ gallivm, params);
+}
+
+/**
+ * Fetch the image size (reuses the texture size query code).
+ */
+static void
+lp_llvm_image_soa_emit_size_query(const struct lp_build_image_soa *base,
+ struct gallivm_state *gallivm,
+ const struct lp_sampler_size_query_params *params)
+{
+ struct lp_llvm_image_soa *image = (struct lp_llvm_image_soa *)base;
+
+ assert(params->texture_unit < PIPE_MAX_SHADER_IMAGES);
+
+ lp_build_size_query_soa(gallivm,
+ &image->dynamic_state.static_state[params->texture_unit].image_state,
+ &image->dynamic_state.base,
+ params);
+}
+
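+/**
+ * Create the per-variant image code generation bridge; the caller is
+ * expected to destroy it via image->destroy() once the fragment shader
+ * variant has been compiled (see the call sites above).
+ */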
+struct lp_build_image_soa *
+lp_llvm_image_soa_create(const struct lp_image_static_state *static_state)
+{
+ struct lp_llvm_image_soa *image;
+
+ image = CALLOC_STRUCT(lp_llvm_image_soa);
+ if (!image)
+ return NULL;
+
+ image->base.destroy = lp_llvm_image_soa_destroy;
+ image->base.emit_op = lp_llvm_image_soa_emit_op;
+ image->base.emit_size_query = lp_llvm_image_soa_emit_size_query;
+
+ image->dynamic_state.base.width = lp_llvm_image_width;
+ image->dynamic_state.base.height = lp_llvm_image_height;
+
+ image->dynamic_state.base.depth = lp_llvm_image_depth;
+ image->dynamic_state.base.base_ptr = lp_llvm_image_base_ptr;
+ image->dynamic_state.base.row_stride = lp_llvm_image_row_stride;
+ image->dynamic_state.base.img_stride = lp_llvm_image_img_stride;
+
+ image->dynamic_state.static_state = static_state;
+
+ return &image->base;
+}
struct lp_sampler_static_state;
+struct lp_image_static_state;
/**
* Whether texture cache is used for s3tc textures.
struct lp_build_sampler_soa *
lp_llvm_sampler_soa_create(const struct lp_sampler_static_state *key);
+struct lp_build_image_soa *
+lp_llvm_image_soa_create(const struct lp_image_static_state *key);
+
#endif /* LP_TEX_SAMPLE_H */
if (!(presource->bind & (PIPE_BIND_DEPTH_STENCIL |
PIPE_BIND_RENDER_TARGET |
PIPE_BIND_SAMPLER_VIEW |
- PIPE_BIND_SHADER_BUFFER)))
+ PIPE_BIND_SHADER_BUFFER |
+ PIPE_BIND_SHADER_IMAGE)))
return LP_UNREFERENCED;
return lp_setup_is_resource_referenced(llvmpipe->setup, presource);