   return entry->offset + target_offset;
}
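+/**
+ * Add a BO to the batch's validation list without emitting a relocation.
+ *
+ * The BO must already be pinned at a fixed GTT address; we only need to
+ * reference it from the batch so the kernel keeps it resident there.
+ */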
+void
+iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo)
+{
+   assert(bo->kflags & EXEC_OBJECT_PINNED);
+   add_exec_bo(batch, bo);
+}
+
uint64_t
iris_batch_reloc(struct iris_batch *batch, uint32_t batch_offset,
                 struct iris_bo *target, uint32_t target_offset,
#define RELOC_WRITE EXEC_OBJECT_WRITE
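+/* Add a BO pinned at a fixed GTT address to the batch's validation list. */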
+void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo);
+
uint64_t iris_batch_reloc(struct iris_batch *batch,
                          uint32_t batch_offset,
                          struct iris_bo *target,
      goto fail;
   if (templ->flags & IRIS_RESOURCE_FLAG_INSTRUCTION_CACHE) {
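+      /* Shader assembly lives in resources flagged as "instruction cache".
+       * Pin their BOs at fixed addresses handed out from the screen-wide
+       * next_instruction_address cursor, so instruction pointers baked into
+       * batch commands never need relocation.
+       */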
+      res->bo->kflags |= EXEC_OBJECT_PINNED;
+      res->bo->name = "instruction cache";
      // XXX: p_atomic_add is backwards :(
      res->bo->gtt_offset =
         __atomic_fetch_add(&screen->next_instruction_address,
                            res->bo->size, __ATOMIC_ACQ_REL);
   }
      if (!(dirty & (IRIS_DIRTY_VS << stage)))
         continue;
-      if (ice->shaders.prog[stage]) {
-         iris_batch_emit(batch, ice->shaders.prog[stage]->derived_data,
+      struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+      if (shader) {
+         struct iris_resource *cache = (void *) shader->buffer;
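+         /* The derived program state we emit below contains pointers into
+          * this shader's instruction cache BO, so add that BO to the
+          * batch's validation list to keep it at its pinned address.
+          */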
+         iris_use_pinned_bo(batch, cache->bo);
+         iris_batch_emit(batch, shader->derived_data,
                         iris_derived_program_state_size(stage));
      } else {
         if (stage == MESA_SHADER_TESS_EVAL) {