}
}
-static void
-ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
+/**
+ * Switch the parser to the render ring and make hw3d its owner.
+ */
+void
+ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
- struct ilo_3d *hw3d = data;
-
- ilo_3d_pause_queries(hw3d);
+ ilo_cp_set_owner(hw3d->cp, INTEL_RING_RENDER, &hw3d->owner);
}
-void
-ilo_3d_own_render_ring(struct ilo_3d *hw3d)
+/**
+ * Estimate the command size needed by query \p q for action \p act, and
+ * make sure the parser has room for it: the caller emits the query-start
+ * commands right after this, and the space for pausing the query later is
+ * accounted for in owner.reserve.
+ */
+static void
+ilo_3d_reserve_for_query(struct ilo_3d *hw3d, struct ilo_query *q,
+ enum ilo_3d_pipeline_action act)
{
- ilo_cp_set_ring(hw3d->cp, INTEL_RING_RENDER);
+ q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline, act, NULL);
- if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
- ilo_3d_resume_queries(hw3d);
+ /* XXX we should check the aperture size */
+ /* times 2: one reg_cmd_size for the start commands emitted by the caller,
+ * one for the pause commands reserved below
+ */
+ if (ilo_cp_space(hw3d->cp) < q->reg_cmd_size * 2) {
+ ilo_cp_flush(hw3d->cp, "out of space");
+ assert(ilo_cp_space(hw3d->cp) >= q->reg_cmd_size * 2);
+ }
+
+ /* reserve space for pausing the query */
+ hw3d->owner.reserve += q->reg_cmd_size;
}
/**
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
- /* reserve some space for pausing the query */
- q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
- ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
- hw3d->owner_reserve += q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
-
+ ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_DEPTH_COUNT);
q->data.u64 = 0;
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
- /* XXX we should check the aperture size */
- if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
- ilo_cp_flush(hw3d->cp, "out of space");
- assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
- }
-
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
/* nop */
break;
case PIPE_QUERY_TIME_ELAPSED:
- /* reserve some space for pausing the query */
- q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
- ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
- hw3d->owner_reserve += q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
-
+ ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_TIMESTAMP);
q->data.u64 = 0;
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
- /* XXX we should check the aperture size */
- if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
- ilo_cp_flush(hw3d->cp, "out of space");
- assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
- }
-
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
list_add(&q->list, &hw3d->prim_emitted_queries);
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
- /* reserve some space for pausing the query */
- q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
- ILO_3D_PIPELINE_WRITE_STATISTICS, NULL);
- hw3d->owner_reserve += q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
-
+ ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_STATISTICS);
memset(&q->data.pipeline_statistics, 0,
sizeof(q->data.pipeline_statistics));
if (ilo_query_alloc_bo(q, 11 * 2, -1, hw3d->cp->winsys)) {
- /* XXX we should check the aperture size */
- if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
- ilo_cp_flush(hw3d->cp, "out of space");
- assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
- }
-
ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
q->bo, q->reg_read);
q->reg_read += 11;
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- hw3d->owner_reserve -= q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+ assert(hw3d->owner.reserve >= q->reg_cmd_size);
+ hw3d->owner.reserve -= q->reg_cmd_size;
+
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
break;
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- hw3d->owner_reserve -= q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+ assert(hw3d->owner.reserve >= q->reg_cmd_size);
+ hw3d->owner.reserve -= q->reg_cmd_size;
+
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
break;
list_del(&q->list);
assert(q->reg_read + 11 <= q->reg_total);
- hw3d->owner_reserve -= q->reg_cmd_size;
- ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+ assert(hw3d->owner.reserve >= q->reg_cmd_size);
+ hw3d->owner.reserve -= q->reg_cmd_size;
+
ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
q->bo, q->reg_read);
q->reg_read += 11;
hw3d->new_batch = true;
}
+/* ilo_cp_owner::own() callback: invoked when hw3d gains the parser */
+static void
+ilo_3d_own_cp(struct ilo_cp *cp, void *data)
+{
+ struct ilo_3d *hw3d = data;
+
+ ilo_3d_resume_queries(hw3d);
+}
+
+/* ilo_cp_owner::release() callback: invoked when hw3d loses the parser */
+static void
+ilo_3d_release_cp(struct ilo_cp *cp, void *data)
+{
+ struct ilo_3d *hw3d = data;
+
+ ilo_3d_pause_queries(hw3d);
+}
+
/**
* Create a 3D context.
*/
return NULL;
hw3d->cp = cp;
- hw3d->owner.release_callback = ilo_3d_release_render_ring;
- hw3d->owner.release_data = hw3d;
+ hw3d->owner.own = ilo_3d_own_cp;
+ hw3d->owner.release = ilo_3d_release_cp;
+ hw3d->owner.data = hw3d;
+ hw3d->owner.reserve = 0;
hw3d->new_batch = true;
#include "ilo_shader.h"
#include "ilo_cp.h"
+/* the all-zero default owner; cp->owner points here whenever no real owner
+ * holds the parser.  NOTE(review): its callbacks are NULL and its reserve
+ * is 0 -- callers must not invoke them unconditionally.
+ */
+static const struct ilo_cp_owner ilo_cp_default_owner;
+
+/* release the current owner, if any, invoking its release() callback so it
+ * can emit its epilog commands
+ */
+static void
+ilo_cp_release_owner(struct ilo_cp *cp)
+{
+ if (cp->owner != &ilo_cp_default_owner) {
+ const struct ilo_cp_owner *owner = cp->owner;
+
+ cp->owner = &ilo_cp_default_owner;
+
+ /* the space the owner reserved is guaranteed for release() */
+ assert(ilo_cp_space(cp) >= owner->reserve);
+ owner->release(cp, owner->data);
+ }
+}
+
+/**
+ * Set the parser owner.  If this is a new owner or a new ring, the old owner
+ * is released and the new owner's own() is called.
+ *
+ * The parser may be implicitly flushed if there is a ring change or there is
+ * not enough space for the new owner.
+ *
+ * \param owner the new owner, or NULL for the default (no-op) owner
+ */
+void
+ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
+                 const struct ilo_cp_owner *owner)
+{
+   if (!owner)
+      owner = &ilo_cp_default_owner;
+
+   if (cp->ring != ring) {
+      ilo_cp_flush(cp, "ring change");
+      cp->ring = ring;
+   }
+
+   if (cp->owner != owner) {
+      ilo_cp_release_owner(cp);
+
+      /* multiply by 2 because there are own() and release() */
+      if (ilo_cp_space(cp) < owner->reserve * 2) {
+         ilo_cp_flush(cp, "new owner");
+         assert(ilo_cp_space(cp) >= owner->reserve * 2);
+      }
+
+      cp->owner = owner;
+
+      assert(ilo_cp_space(cp) >= owner->reserve);
+
+      /* ilo_cp_default_owner is zero-initialized, so its own() is a NULL
+       * function pointer; guard the call so that passing owner == NULL (the
+       * documented way to drop ownership) does not call through NULL
+       */
+      if (cp->owner->own)
+         cp->owner->own(cp, cp->owner->data);
+   }
+}
+
static struct intel_bo *
ilo_cp_end_batch(struct ilo_cp *cp, unsigned *used)
{
struct intel_bo *bo;
- ilo_cp_set_owner(cp, NULL, 0);
+ ilo_cp_release_owner(cp);
if (!ilo_builder_batch_used(&cp->builder)) {
ilo_builder_batch_discard(&cp->builder);
}
cp->ring = INTEL_RING_RENDER;
+ cp->owner = &ilo_cp_default_owner;
ilo_builder_init(&cp->builder, dev, winsys);
typedef void (*ilo_cp_callback)(struct ilo_cp *cp, void *data);
+/**
+ * Parser owners are notified when they gain or lose the ownership of the
+ * parser. This gives owners a chance to emit prolog or epilog.
+ */
struct ilo_cp_owner {
- ilo_cp_callback release_callback;
- void *release_data;
+ /* called when the owner gains the parser */
+ ilo_cp_callback own;
+ /* called when the owner loses the parser */
+ ilo_cp_callback release;
+ /* opaque pointer passed back to own() and release() */
+ void *data;
+
+ /*
+ * Space reserved for own() and release(). This can be modified at any
+ * time, as long as it is never increased by more than ilo_cp_space().
+ */
+ int reserve;
};
/**
ilo_cp_callback flush_callback;
void *flush_callback_data;
+ enum intel_ring_type ring;
const struct ilo_cp_owner *owner;
- int owner_reserve;
- enum intel_ring_type ring;
unsigned one_off_flags;
struct ilo_builder builder;
ilo_cp_flush_internal(cp);
}
+void
+ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
+ const struct ilo_cp_owner *owner);
+
/**
* Return true if the parser buffer is empty.
*/
const int space = ilo_builder_batch_space(&cp->builder);
const int mi_batch_buffer_end_space = 2;
- assert(space >= cp->owner_reserve + mi_batch_buffer_end_space);
-
- return space - cp->owner_reserve - mi_batch_buffer_end_space;
-}
-
-/**
- * Internal function called by functions that flush implicitly.
- */
-static inline void
-ilo_cp_implicit_flush(struct ilo_cp *cp)
-{
- ilo_cp_flush(cp, "out of space (implicit)");
-}
+ assert(space >= cp->owner->reserve + mi_batch_buffer_end_space);
-/**
- * Set the ring buffer.
- */
-static inline void
-ilo_cp_set_ring(struct ilo_cp *cp, enum intel_ring_type ring)
-{
- if (cp->ring != ring) {
- ilo_cp_implicit_flush(cp);
- cp->ring = ring;
- }
+ return space - cp->owner->reserve - mi_batch_buffer_end_space;
}
/**
cp->flush_callback_data = data;
}
-/**
- * Set the parser owner. If this is a new owner, the previous owner is
- * notified and the space it reserved is reclaimed.
- *
- * \return true if this is a new owner
- */
-static inline bool
-ilo_cp_set_owner(struct ilo_cp *cp, const struct ilo_cp_owner *owner,
- int reserve)
-{
- const bool new_owner = (cp->owner != owner);
-
- /* release current owner */
- if (new_owner && cp->owner) {
- /* reclaim the reserved space */
- cp->owner_reserve = 0;
-
- /* invoke the release callback */
- cp->owner->release_callback(cp, cp->owner->release_data);
-
- cp->owner = NULL;
- }
-
- if (cp->owner_reserve != reserve) {
- const int extra = reserve - cp->owner_reserve;
-
- if (ilo_cp_space(cp) < extra) {
- ilo_cp_implicit_flush(cp);
-
- assert(ilo_cp_space(cp) >= reserve);
- cp->owner_reserve = reserve;
- }
- else {
- cp->owner_reserve += extra;
- }
- }
-
- /* set owner last because of the possible flush above */
- cp->owner = owner;
-
- return new_owner;
-}
-
#endif /* ILO_CP_H */