*/
#include "util/u_pack_color.h"
+#include "util/u_upload_mgr.h"
#include "util/format_srgb.h"
#include "v3d_context.h"
}
}
-static struct v3d_bo *
-v3d_upload_ubo(struct v3d_context *v3d,
- struct v3d_compiled_shader *shader,
- const uint32_t *gallium_uniforms)
+/* Returns the value for one of the QUNIFORM_IMAGE_{WIDTH,HEIGHT,DEPTH,
+ * ARRAY_SIZE} uniforms, read from the image view bound at image unit
+ * `data` in `shaderimg`.  Width/height/depth are minified to the
+ * view's selected mip level; array size is not mip-dependent.
+ */
+static uint32_t
+get_image_size(struct v3d_shaderimg_stateobj *shaderimg,
+ enum quniform_contents contents,
+ uint32_t data)
{
- if (!shader->prog_data.base->ubo_size)
- return NULL;
-
- struct v3d_bo *ubo = v3d_bo_alloc(v3d->screen,
- shader->prog_data.base->ubo_size,
- "ubo");
- void *data = v3d_bo_map(ubo);
- for (uint32_t i = 0; i < shader->prog_data.base->num_ubo_ranges; i++) {
- memcpy(data + shader->prog_data.base->ubo_ranges[i].dst_offset,
- ((const void *)gallium_uniforms +
- shader->prog_data.base->ubo_ranges[i].src_offset),
- shader->prog_data.base->ubo_ranges[i].size);
- }
+ struct v3d_image_view *image = &shaderimg->si[data];
- return ubo;
+ switch (contents) {
+ case QUNIFORM_IMAGE_WIDTH:
+ return u_minify(image->base.resource->width0,
+ image->base.u.tex.level);
+ case QUNIFORM_IMAGE_HEIGHT:
+ return u_minify(image->base.resource->height0,
+ image->base.u.tex.level);
+ case QUNIFORM_IMAGE_DEPTH:
+ return u_minify(image->base.resource->depth0,
+ image->base.u.tex.level);
+ case QUNIFORM_IMAGE_ARRAY_SIZE:
+ return image->base.resource->array_size;
+ default:
+ unreachable("Bad texture size field");
+ }
}
/**
struct v3d_texture_stateobj *texstate,
uint32_t data)
{
- int unit = v3d_tmu_config_data_get_unit(data);
+ int unit = v3d_unit_data_get_unit(data);
struct pipe_sampler_view *psview = texstate->textures[unit];
struct v3d_sampler_view *sview = v3d_sampler_view(psview);
struct v3d_resource *rsc = v3d_resource(sview->texture);
cl_aligned_reloc(&job->indirect, uniforms, sview->bo,
- v3d_tmu_config_data_get_value(data));
+ v3d_unit_data_get_offset(data));
+ v3d_job_add_bo(job, rsc->bo);
+}
+
+/* Emits the TMU P0 uniform for a shader image access: a reloc to the
+ * image's texture shader state record, OR'd with the compiler's packed
+ * p0 bits, and registers the image's backing BO with the job.
+ */
+static void
+write_image_tmu_p0(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_shaderimg_stateobj *img,
+ uint32_t data)
+{
+ /* Extract the image unit from the top bits, and the compiler's
+ * packed p0 from the bottom.
+ */
+ uint32_t unit = data >> 24;
+ uint32_t p0 = data & 0x00ffffff;
+
+ struct v3d_image_view *iview = &img->si[unit];
+ struct v3d_resource *rsc = v3d_resource(iview->base.resource);
+
+ cl_aligned_reloc(&job->indirect, uniforms,
+ v3d_resource(iview->tex_state)->bo,
+ /* NOTE(review): assumes tex_state_offset's low bits are
+ * clear so OR-ing in p0 is equivalent to addition —
+ * confirm against the state upload alignment.
+ */
+ iview->tex_state_offset | p0);
 v3d_job_add_bo(job, rsc->bo);
}
struct v3d_texture_stateobj *texstate,
uint32_t data)
{
- uint32_t unit = v3d_tmu_config_data_get_unit(data);
+ uint32_t unit = v3d_unit_data_get_unit(data);
struct pipe_sampler_state *psampler = texstate->samplers[unit];
struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
+ struct pipe_sampler_view *psview = texstate->textures[unit];
+ struct v3d_sampler_view *sview = v3d_sampler_view(psview);
+ int variant = 0;
+
+ if (sampler->border_color_variants)
+ variant = sview->sampler_variant;
- cl_aligned_reloc(&job->indirect, uniforms, sampler->bo,
- v3d_tmu_config_data_get_value(data));
+ cl_aligned_reloc(&job->indirect, uniforms,
+ v3d_resource(sampler->sampler_state)->bo,
+ sampler->sampler_state_offset[variant] |
+ v3d_unit_data_get_offset(data));
}
struct v3d_cl_reloc
-v3d_write_uniforms(struct v3d_context *v3d, struct v3d_compiled_shader *shader,
+v3d_write_uniforms(struct v3d_context *v3d, struct v3d_job *job,
+ struct v3d_compiled_shader *shader,
enum pipe_shader_type stage)
{
struct v3d_constbuf_stateobj *cb = &v3d->constbuf[stage];
struct v3d_texture_stateobj *texstate = &v3d->tex[stage];
struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
- struct v3d_job *job = v3d->job;
const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
- struct v3d_bo *ubo = v3d_upload_ubo(v3d, shader, gallium_uniforms);
- /* We always need to return some space for uniforms, because the HW
- * will be prefetching, even if we don't read any in the program.
+ /* The hardware always pre-fetches the next uniform (also when there
+ * aren't any), so we always allocate space for an extra slot. This
+ * fixes MMU exceptions reported since Linux kernel 5.4 when the
+ * uniforms fill up the tail bytes of a page in the indirect
+ * BO. In that scenario, when the hardware pre-fetches after reading
+ * the last uniform it will read beyond the end of the page and trigger
+ * the MMU exception.
*/
- v3d_cl_ensure_space(&job->indirect, MAX2(uinfo->count, 1) * 4, 4);
+ v3d_cl_ensure_space(&job->indirect, (uinfo->count + 1) * 4, 4);
struct v3d_cl_reloc uniform_stream = cl_get_address(&job->indirect);
v3d_bo_reference(uniform_stream.bo);
write_tmu_p1(job, &uniforms, texstate, data);
break;
+ case QUNIFORM_IMAGE_TMU_CONFIG_P0:
+ write_image_tmu_p0(job, &uniforms,
+ &v3d->shaderimg[stage], data);
+ break;
+
case QUNIFORM_TEXTURE_CONFIG_P1:
write_texture_p1(job, &uniforms, texstate,
data);
data));
break;
+ case QUNIFORM_IMAGE_WIDTH:
+ case QUNIFORM_IMAGE_HEIGHT:
+ case QUNIFORM_IMAGE_DEPTH:
+ case QUNIFORM_IMAGE_ARRAY_SIZE:
+ cl_aligned_u32(&uniforms,
+ get_image_size(&v3d->shaderimg[stage],
+ uinfo->contents[i],
+ data));
+ break;
+
case QUNIFORM_ALPHA_REF:
cl_aligned_f(&uniforms,
v3d->zsa->base.alpha.ref_value);
break;
- case QUNIFORM_UBO_ADDR:
- if (data == 0) {
- cl_aligned_reloc(&job->indirect, &uniforms,
- ubo, 0);
- } else {
- int ubo_index = data;
- struct v3d_resource *rsc =
- v3d_resource(cb->cb[ubo_index].buffer);
-
- cl_aligned_reloc(&job->indirect, &uniforms,
- rsc->bo,
- cb->cb[ubo_index].buffer_offset);
+ case QUNIFORM_UBO_ADDR: {
+ uint32_t unit = v3d_unit_data_get_unit(data);
+ /* Constant buffer 0 may be a system memory pointer,
+ * in which case we want to upload a shadow copy to
+ * the GPU.
+ */
+ if (!cb->cb[unit].buffer) {
+ u_upload_data(v3d->uploader, 0,
+ cb->cb[unit].buffer_size, 16,
+ cb->cb[unit].user_buffer,
+ &cb->cb[unit].buffer_offset,
+ &cb->cb[unit].buffer);
}
+
+ cl_aligned_reloc(&job->indirect, &uniforms,
+ v3d_resource(cb->cb[unit].buffer)->bo,
+ cb->cb[unit].buffer_offset +
+ v3d_unit_data_get_offset(data));
+ break;
+ }
+
+ case QUNIFORM_SSBO_OFFSET: {
+ struct pipe_shader_buffer *sb =
+ &v3d->ssbo[stage].sb[data];
+
+ cl_aligned_reloc(&job->indirect, &uniforms,
+ v3d_resource(sb->buffer)->bo,
+ sb->buffer_offset);
+ break;
+ }
+
+ case QUNIFORM_GET_BUFFER_SIZE:
+ cl_aligned_u32(&uniforms,
+ v3d->ssbo[stage].sb[data].buffer_size);
break;
case QUNIFORM_TEXTURE_FIRST_LEVEL:
v3d->prog.spill_size_per_thread);
break;
+ case QUNIFORM_NUM_WORK_GROUPS:
+ cl_aligned_u32(&uniforms,
+ v3d->compute_num_workgroups[data]);
+ break;
+
+ case QUNIFORM_SHARED_OFFSET:
+ cl_aligned_reloc(&job->indirect, &uniforms,
+ v3d->compute_shared_memory, 0);
+ break;
+
default:
assert(quniform_contents_is_texture_p0(uinfo->contents[i]));
cl_end(&job->indirect, uniforms);
- v3d_bo_unreference(&ubo);
-
return uniform_stream;
}
/* We could flag this on just the stage we're
* compiling for, but it's not passed in.
*/
- dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX;
+ dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX |
+ VC5_DIRTY_GEOMTEX | VC5_DIRTY_COMPTEX;
+ break;
+
+ case QUNIFORM_SSBO_OFFSET:
+ case QUNIFORM_GET_BUFFER_SIZE:
+ dirty |= VC5_DIRTY_SSBO;
+ break;
+
+ case QUNIFORM_IMAGE_TMU_CONFIG_P0:
+ case QUNIFORM_IMAGE_WIDTH:
+ case QUNIFORM_IMAGE_HEIGHT:
+ case QUNIFORM_IMAGE_DEPTH:
+ case QUNIFORM_IMAGE_ARRAY_SIZE:
+ dirty |= VC5_DIRTY_SHADER_IMAGE;
break;
case QUNIFORM_ALPHA_REF:
dirty |= VC5_DIRTY_ZSA;
break;
+ case QUNIFORM_NUM_WORK_GROUPS:
+ case QUNIFORM_SHARED_OFFSET:
+ /* Compute always recalculates uniforms. */
+ break;
+
default:
assert(quniform_contents_is_texture_p0(shader->prog_data.base->uniforms.contents[i]));
- dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX;
+ dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX |
+ VC5_DIRTY_GEOMTEX | VC5_DIRTY_COMPTEX;
break;
}
}