struct pipe_fence_handle **fence),
void *flush_ctx);
+ /**
+ * Add a constant engine IB to a graphics CS. This makes the graphics CS
+ * from "cs_create" a group of two IBs that share a buffer list and are
+ * flushed together.
+ *
+ * The returned constant CS is only a stream for writing packets to the new
+ * IB. Calling other winsys functions with it is not allowed, not even
+ * "cs_destroy".
+ *
+ * Use the graphics CS to add buffers and to check memory usage. Flushing
+ * the graphics CS flushes both IBs, and destroying it destroys both.
+ *
+ * \param cs The graphics CS from "cs_create" that will hold the buffer
+ * list and will be used for flushing.
+ */
+ struct radeon_winsys_cs *(*cs_add_const_ib)(struct radeon_winsys_cs *cs);
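A minimal driver-side sketch, assuming a winsys "ws" and a graphics CS "gfx_cs" obtained from "cs_create" (both names hypothetical):

    struct radeon_winsys_cs *ce_ib = ws->cs_add_const_ib(gfx_cs);
    if (ce_ib) {
       /* Write CE packets into ce_ib only. Buffers are still added
        * through gfx_cs, and flushing gfx_cs submits both IBs. */
    }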
+
+ /**
+ * Add a constant engine preamble IB to a graphics CS. This adds an extra
+ * IB in the same manner as cs_add_const_ib and must always be called
+ * after cs_add_const_ib.
+ *
+ * The returned IB is a constant engine IB that the CP processes only
+ * when the context changes.
+ *
+ * \param cs The graphics CS from "cs_create" that will hold the buffer
+ * list and will be used for flushing.
+ */
+ struct radeon_winsys_cs *(*cs_add_const_preamble_ib)(struct radeon_winsys_cs *cs);
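The required call order, with the same hypothetical names as above; the preamble hook returns NULL if the const IB has not been added yet:

    struct radeon_winsys_cs *ce_ib = ws->cs_add_const_ib(gfx_cs);
    struct radeon_winsys_cs *ce_preamble_ib =
       ce_ib ? ws->cs_add_const_preamble_ib(gfx_cs) : NULL;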
/**
* Destroy a command stream.
*
return NULL;
}
- if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib)) {
+ if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib[IB_MAIN])) {
amdgpu_destroy_cs_context(cs);
FREE(cs);
return NULL;
}
cs->request.number_of_ibs = 1;
- cs->request.ibs = &cs->ib;
+ cs->request.ibs = &cs->ib[IB_MAIN];
p_atomic_inc(&ctx->ws->num_cs);
return &cs->main.base;
}
+static struct radeon_winsys_cs *
+amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
+{
+ struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
+ struct amdgpu_winsys *ws = cs->ctx->ws;
+
+ /* only one const IB can be added, and only to a GFX CS */
+ if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
+ return NULL;
+
+ if (!amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST]))
+ return NULL;
+
+ cs->request.number_of_ibs = 2;
+ cs->request.ibs = &cs->ib[IB_CONST];
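+ /* Mark this IB for execution on the constant engine (CE). */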
+ cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
+
+ return &cs->const_ib.base;
+}
+
+static struct radeon_winsys_cs *
+amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
+{
+ struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
+ struct amdgpu_winsys *ws = cs->ctx->ws;
+
+ /* only one const preamble IB can be added and only when the const IB has
+ * also been mapped */
+ if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
+ cs->const_preamble_ib.ib_mapped)
+ return NULL;
+
+ if (!amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
+ &cs->ib[IB_CONST_PREAMBLE]))
+ return NULL;
+
+ cs->request.number_of_ibs = 3;
+ cs->request.ibs = &cs->ib[IB_CONST_PREAMBLE];
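+ /* The PREAMBLE flag lets the CP skip this IB when the context
+ * has not changed. */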
+ cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;
+
+ return &cs->const_preamble_ib.base;
+}
+
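Both hooks grow the submission by pointing request.ibs at an earlier slot of cs->ib[] and bumping number_of_ibs, which silently assumes the array is laid out contiguously in submission order. A sketch of an ib_type enum that satisfies this assumption (the values are illustrative, not taken from this patch):

    enum ib_type {
       IB_CONST_PREAMBLE = 0,
       IB_CONST = 1, /* the const IB must immediately precede the main IB */
       IB_MAIN = 2,
       IB_NUM
    };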
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
while (rcs->cdw & 7)
OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
+
+ /* Also pad the const IB, and make sure it is never empty. */
+ if (cs->const_ib.ib_mapped)
+ while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
+ OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
+
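+ /* Also pad the const preamble IB. */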
+ if (cs->const_preamble_ib.ib_mapped)
+ while (!cs->const_preamble_ib.base.cdw || (cs->const_preamble_ib.base.cdw & 7))
+ OUT_CS(&cs->const_preamble_ib.base, 0xffff1000); /* type3 nop packet */
break;
case RING_UVD:
while (rcs->cdw & 15)
amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+ if (cs->const_ib.ib_mapped)
+ amdgpu_cs_add_buffer(rcs, cs->const_ib.big_ib_buffer,
+ RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+
+ if (cs->const_preamble_ib.ib_mapped)
+ amdgpu_cs_add_buffer(rcs, cs->const_preamble_ib.big_ib_buffer,
+ RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+
/* If the CS is not empty or overflowed.... */
if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
int r;
goto cleanup;
}
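+ /* ib.size is counted in dwords; used_ib_space is in bytes, hence the * 4. */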
- cs->ib.size = cs->main.base.cdw;
+ cs->ib[IB_MAIN].size = cs->main.base.cdw;
cs->main.used_ib_space += cs->main.base.cdw * 4;
+ if (cs->const_ib.ib_mapped) {
+ cs->ib[IB_CONST].size = cs->const_ib.base.cdw;
+ cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
+ }
+
+ if (cs->const_preamble_ib.ib_mapped) {
+ cs->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
+ cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.cdw * 4;
+ }
+
amdgpu_cs_do_submission(cs, fence);
/* Cleanup. */
cleanup:
amdgpu_cs_context_cleanup(cs);
- amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib);
+
+ amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib[IB_MAIN]);
+ if (cs->const_ib.ib_mapped)
+ amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST]);
+ if (cs->const_preamble_ib.ib_mapped)
+ amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
+ &cs->ib[IB_CONST_PREAMBLE]);
ws->num_cs_flushes++;
}
amdgpu_destroy_cs_context(cs);
p_atomic_dec(&cs->ctx->ws->num_cs);
pb_reference(&cs->main.big_ib_buffer, NULL);
+ pb_reference(&cs->const_ib.big_ib_buffer, NULL);
+ pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
FREE(cs);
}
ws->base.ctx_destroy = amdgpu_ctx_destroy;
ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
ws->base.cs_create = amdgpu_cs_create;
+ ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
+ ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
ws->base.cs_destroy = amdgpu_cs_destroy;
ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
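Since cs_add_const_ib returns NULL on non-GFX rings (and when the const IB was already added), a driver can probe for constant engine support once at context creation and fall back otherwise; a hypothetical sketch (the ctx fields are illustrative):

    ctx->ce_ib = ws->cs_add_const_ib(ctx->gfx_cs);
    if (!ctx->ce_ib) {
       /* no CE available: emit constant state on the DE instead */
    }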