* Chia-I Wu <olv@lunarg.com>
*/
-#include "intel_winsys.h"
+#include "core/ilo_builder_mi.h"
+#include "core/intel_winsys.h"
#include "ilo_shader.h"
#include "ilo_cp.h"
+static const struct ilo_cp_owner ilo_cp_default_owner;
+
+/**
+ * Release the current owner and restore the default owner.  No-op when the
+ * parser is already owned by the default owner.
+ */
+static void
+ilo_cp_release_owner(struct ilo_cp *cp)
+{
+ if (cp->owner != &ilo_cp_default_owner) {
+ const struct ilo_cp_owner *owner = cp->owner;
+
+ /* swap in the default owner before calling back, so that release()
+  * observes the parser as already released
+  */
+ cp->owner = &ilo_cp_default_owner;
+
+ /* the owner's reserved space must still be available at release time */
+ assert(ilo_cp_space(cp) >= owner->reserve);
+ owner->release(cp, owner->data);
+ }
+}
+
+/**
+ * Set the parser owner. If this is a new owner or a new ring, the old owner
+ * is released and the new owner's own() is called. The parser may implicitly
+ * submit if there is a ring change.
+ *
+ * own() is called before \p owner owns the parser. It must make sure there
+ * is more space than \p owner->reserve when it returns. Calling
+ * ilo_cp_submit() is allowed.
+ *
+ * release() will be called after \p owner loses the parser. That may happen
+ * just before the parser submits and ilo_cp_submit() is not allowed.
+ */
+void
+ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
+ const struct ilo_cp_owner *owner)
+{
+ /* NULL selects the default (no-op) owner */
+ if (!owner)
+ owner = &ilo_cp_default_owner;
+
+ /* a batch targets a single ring; submit the current batch on a switch */
+ if (cp->ring != ring) {
+ ilo_cp_submit(cp, "ring change");
+ cp->ring = ring;
+ }
+
+ if (cp->owner != owner) {
+ ilo_cp_release_owner(cp);
+
+ /* own() may itself submit; it must leave owner->reserve space free */
+ owner->own(cp, owner->data);
+
+ assert(ilo_cp_space(cp) >= owner->reserve);
+ cp->owner = owner;
+ }
+}
+
static struct intel_bo *
ilo_cp_end_batch(struct ilo_cp *cp, unsigned *used)
{
struct intel_bo *bo;
- ilo_cp_set_owner(cp, NULL, 0);
+ ilo_cp_release_owner(cp);
if (!ilo_builder_batch_used(&cp->builder)) {
ilo_builder_batch_discard(&cp->builder);
/* see ilo_cp_space() */
assert(ilo_builder_batch_space(&cp->builder) >= 2);
- ilo_builder_batch_mi_batch_buffer_end(&cp->builder);
+ gen6_mi_batch_buffer_end(&cp->builder);
bo = ilo_builder_end(&cp->builder, used);
return bo;
}
+/**
+ * Wait for the last submitted bo and query the context reset stats to
+ * detect a GPU hang.  Only active when ILO_DEBUG_HANG is set; otherwise
+ * (and on any winsys error) it returns false.
+ *
+ * Return true when the hang was attributed to our own batch (the context
+ * was active when the reset occurred).
+ */
+static bool
+ilo_cp_detect_hang(struct ilo_cp *cp)
+{
+ uint32_t active_lost, pending_lost;
+ bool guilty = false;
+
+ if (likely(!(ilo_debug & ILO_DEBUG_HANG)))
+ return false;
+
+ /* wait and get reset stats */
+ if (intel_bo_wait(cp->last_submitted_bo, -1) ||
+ intel_winsys_get_reset_stats(cp->winsys, cp->render_ctx,
+ &active_lost, &pending_lost))
+ return false;
+
+ /* active_lost bumped: our batch was executing when the GPU reset */
+ if (cp->active_lost != active_lost) {
+ ilo_err("GPU hang caused by bo %p\n", cp->last_submitted_bo);
+ cp->active_lost = active_lost;
+ guilty = true;
+ }
+
+ /* pending_lost bumped: a hang occurred, but not necessarily ours */
+ if (cp->pending_lost != pending_lost) {
+ ilo_err("GPU hang detected\n");
+ cp->pending_lost = pending_lost;
+ }
+
+ return guilty;
+}
+
/**
* Flush the command parser and execute the commands. When the parser buffer
* is empty, the callback is not invoked.
*/
void
-ilo_cp_flush_internal(struct ilo_cp *cp)
+ilo_cp_submit_internal(struct ilo_cp *cp)
{
const bool do_exec = !(ilo_debug & ILO_DEBUG_NOHW);
struct intel_bo *bo;
cp->one_off_flags = 0;
if (!err) {
- if (cp->last_submitted_bo)
- intel_bo_unreference(cp->last_submitted_bo);
- cp->last_submitted_bo = bo;
- intel_bo_reference(cp->last_submitted_bo);
+ bool guilty;
+
+ intel_bo_unref(cp->last_submitted_bo);
+ cp->last_submitted_bo = intel_bo_ref(bo);
+
+ guilty = ilo_cp_detect_hang(cp);
+
+ if (unlikely((ilo_debug & ILO_DEBUG_BATCH) || guilty)) {
+ ilo_builder_decode(&cp->builder);
+ if (guilty)
+ abort();
+ }
- if (cp->flush_callback)
- cp->flush_callback(cp, cp->flush_callback_data);
+ if (cp->submit_callback)
+ cp->submit_callback(cp, cp->submit_callback_data);
}
ilo_builder_begin(&cp->builder);
* Create a command parser.
*/
struct ilo_cp *
-ilo_cp_create(const struct ilo_dev_info *dev,
+ilo_cp_create(const struct ilo_dev *dev,
struct intel_winsys *winsys,
struct ilo_shader_cache *shc)
{
}
cp->ring = INTEL_RING_RENDER;
- cp->no_implicit_flush = false;
+ cp->owner = &ilo_cp_default_owner;
ilo_builder_init(&cp->builder, dev, winsys);