     */
    uint64_t offset64;
+   /**
+    * The validation list index for this buffer, or -1 when not in a batch.
+    * Note that a single buffer may be in multiple batches (contexts); since
+    * this is a global field, it refers only to the last batch using the BO.
+    * It should not be considered authoritative, but it can be used to avoid
+    * a linear walk of the validation list in the common case by guessing
+    * that exec_bos[bo->index] == bo and confirming whether that's the case.
+    */
+   unsigned index;
+
    /**
     * Boolean of whether the GPU is definitely not accessing the buffer.
     *
    }
 }
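
The new bo->index field above implements a guess-and-verify cache: the
possibly stale index is never trusted on its own, it merely nominates a slot
that is then checked against the buffer pointer itself. A minimal sketch of
the pattern outside the driver (hypothetical struct obj / cache_lookup names,
not part of this patch):

    struct obj {
       unsigned index; /* last known slot; may be stale or out of range */
    };

    /* Guess-and-verify lookup: try the cached slot first, fall back to a
     * linear scan.  Returns the slot holding o in table[], or -1 if absent. */
    static int
    cache_lookup(struct obj **table, unsigned count, struct obj *o)
    {
       unsigned guess = o->index;

       /* Never trust the guess: bounds-check it, then confirm the slot
        * really holds this object. */
       if (guess < count && table[guess] == o)
          return (int) guess;

       for (unsigned i = 0; i < count; i++) {
          if (table[i] == o)
             return (int) i;
       }
       return -1;
    }
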
+/* Force the compiler to emit exactly one load of x by reading it through
+ * a volatile lvalue (the same idiom as the Linux kernel's READ_ONCE()). */
+#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+
 static void
 add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
 {
    if (bo != batch->bo) {
-      for (int i = 0; i < batch->exec_count; i++) {
-         if (batch->exec_bos[i] == bo)
+      unsigned index = READ_ONCE(bo->index);
+
+      if (index < batch->exec_count && batch->exec_bos[index] == bo)
+         return;
+
+      /* May have been shared between multiple active batches */
+      for (index = 0; index < batch->exec_count; index++) {
+         if (batch->exec_bos[index] == bo)
             return;
       }
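
READ_ONCE here mirrors the Linux kernel macro of the same name: reading
bo->index through a volatile lvalue forces the compiler to load the field
exactly once, so the bounds check and the exec_bos[] comparison both see the
same snapshot even while another context rewrites it. A self-contained sketch
of the idiom (hypothetical names; like the patch itself, it assumes a GNU C
compiler for __typeof__):

    #include <stdbool.h>

    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    struct obj { unsigned index; };

    /* With a plain read, the compiler may reload o->index between the
     * bounds check and the table access; a concurrent writer in that
     * window could turn table[index] into an out-of-bounds read.
     * Snapshotting the field once closes the window. */
    static bool
    table_contains(struct obj *const *table, unsigned count, struct obj *o)
    {
       unsigned index = READ_ONCE(o->index);

       return index < count && table[index] == o;
    }
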
    validation_entry->rsvd1 = 0;
    validation_entry->rsvd2 = 0;
+   bo->index = batch->exec_count;
    batch->exec_bos[batch->exec_count] = bo;
    batch->exec_count++;
    batch->aperture_space += bo->size;
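
After this hunk, the append path establishes the invariant both fast paths
rely on: for the batch that most recently added a buffer,
exec_bos[bo->index] == bo. A hypothetical debug helper (not in the patch)
that states the invariant explicitly:

    #include <stdbool.h>

    /* Hypothetical sanity check: does the cached index round-trip back to
     * the buffer in this batch?  Holds right after add_exec_bo() returns. */
    static inline bool
    bo_index_is_coherent(const struct intel_batchbuffer *batch,
                         const struct brw_bo *bo)
    {
       unsigned index = bo->index;

       return index < (unsigned) batch->exec_count &&
              batch->exec_bos[index] == bo;
    }
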
       struct brw_bo *bo = batch->exec_bos[i];
       bo->idle = false;
+      bo->index = -1;
       /* Update brw_bo::offset64 */
       if (batch->validation_list[i].offset != bo->offset64) {
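
Storing -1 into the unsigned index field is deliberate: the value converts to
UINT_MAX, so the index < batch->exec_count guard in the fast paths can never
pass for a buffer whose batch has completed, and no separate validity flag is
needed. A tiny runnable check of the conversion rule:

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
       unsigned index = -1;           /* converts to UINT_MAX (C11 6.3.1.3) */
       unsigned exec_count = 128;     /* stand-in for batch->exec_count */

       assert(index == UINT_MAX);
       assert(!(index < exec_count)); /* the fast-path guard always fails */
       return 0;
    }
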
 bool
 brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
 {
+   unsigned index = READ_ONCE(bo->index);
+   if (index < batch->exec_count && batch->exec_bos[index] == bo)
+      return true;
+
    for (int i = 0; i < batch->exec_count; i++) {
       if (batch->exec_bos[i] == bo)
          return true;
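
The linear walk in brw_batch_references cannot be dropped: bo->index is a
single global field, so when a buffer sits in several batches at once, only
the batch that touched it last can get the fast hit, and every other batch
must still find the buffer by scanning. A usage sketch of that scenario
(batch_a and batch_b are hypothetical handles, not code from this patch):

    /* A buffer shared by two active batches: the cached index follows the
     * most recent add_exec_bo() call, so at most one batch gets the hit. */
    add_exec_bo(batch_a, bo);           /* bo->index points into batch_a */
    add_exec_bo(batch_b, bo);           /* index overwritten for batch_b */

    brw_batch_references(batch_b, bo);  /* fast path: exec_bos[index] == bo */
    brw_batch_references(batch_a, bo);  /* guess may miss; the walk hits */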