#include "brw_defines.h"
#include "intel_batchbuffer.h"
-/**
- * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
- *
- * Some combinations of hardware and kernel versions allow this feature,
- * while others don't. Instead of trying to enumerate every case, just
- * try and write a register and see if works.
- */
-static bool
-can_do_pipelined_register_writes(struct brw_context *brw)
-{
- /**
- * gen >= 8 specifically allows these writes. gen <= 6 also
- * doesn't block them.
- */
- if (brw->gen != 7)
- return true;
-
- static int result = -1;
- if (result != -1)
- return result;
-
- /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
- * statistics registers), and we already reset it to zero before using it.
- */
- const int reg = GEN7_SO_WRITE_OFFSET(0);
- const int expected_value = 0x1337d0d0;
- const int offset = 100;
-
- /* The register we picked only exists on Gen7+. */
- assert(brw->gen == 7);
-
- uint32_t *data;
- /* Set a value in a BO to a known quantity. The workaround BO already
- * exists and doesn't contain anything important, so we may as well use it.
- */
- drm_intel_bo_map(brw->workaround_bo, true);
- data = brw->workaround_bo->virtual;
- data[offset] = 0xffffffff;
- drm_intel_bo_unmap(brw->workaround_bo);
-
- /* Write the register. */
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(reg);
- OUT_BATCH(expected_value);
- ADVANCE_BATCH();
-
- brw_emit_mi_flush(brw);
-
- /* Save the register's value back to the buffer. */
- BEGIN_BATCH(3);
- OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
- OUT_BATCH(reg);
- OUT_RELOC(brw->workaround_bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- offset * sizeof(uint32_t));
- ADVANCE_BATCH();
-
- intel_batchbuffer_flush(brw);
-
- /* Check whether the value got written. */
- drm_intel_bo_map(brw->workaround_bo, false);
- data = brw->workaround_bo->virtual;
- bool success = data[offset] == expected_value;
- drm_intel_bo_unmap(brw->workaround_bo);
-
- result = success;
-
- return success;
-}
-
/**
* Initializes potential list of extensions if ctx == NULL, or actually enables
* extensions for a context.
}
brw->predicate.supported = false;
- brw->can_do_pipelined_register_writes =
- can_do_pipelined_register_writes(brw);
if (brw->gen >= 7) {
ctx->Extensions.ARB_conservative_depth = true;
return 0;
}
+/**
+ * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
+ *
+ * Some combinations of hardware and kernel versions allow this feature,
+ * while others don't. Instead of trying to enumerate every case, just
+ * try to write a register and see if it works.
+ */
+static bool
+intel_detect_pipelined_register(struct intel_screen *screen,
+ int reg, uint32_t expected_value, bool reset)
+{
+ drm_intel_bo *results, *bo;
+ uint32_t *batch;
+ uint32_t offset = 0;
+ bool success = false;
+
+ /* Create a zeroed temporary buffer for reading our results */
+ results = drm_intel_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+ if (results == NULL)
+ goto err;
+
+ bo = drm_intel_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+ if (bo == NULL)
+ goto err_results;
+
+ if (drm_intel_bo_map(bo, true))
+ goto err_batch;
+
+ batch = bo->virtual;
+
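+ /* Hand-assemble a small batch: load an immediate into the register,
+ * store the register out to the results buffer, optionally clear it,
+ * and end the batch.
+ */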
+ /* Write the register. */
+ *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
+ *batch++ = reg;
+ *batch++ = expected_value;
+
+ /* Save the register's value back to the buffer. */
+ *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
+ *batch++ = reg;
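+ /* Record a relocation so the kernel can patch the destination address,
+ * and write the presumed offset of the results buffer into the batch.
+ */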
+ drm_intel_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
+ results, offset * sizeof(uint32_t),
+ I915_GEM_DOMAIN_INSTRUCTION,
+ I915_GEM_DOMAIN_INSTRUCTION);
+ *batch++ = results->offset + offset * sizeof(uint32_t);
+
+ /* And afterwards clear the register */
+ if (reset) {
+ *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
+ *batch++ = reg;
+ *batch++ = 0;
+ }
+
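+ /* Terminate the batch. */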
+ *batch++ = MI_BATCH_BUFFER_END;
+
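+ /* Submit the batch on the render ring; the length handed to execbuf
+ * is padded to an 8-byte boundary.
+ */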
+ drm_intel_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),
+ NULL, 0, 0,
+ I915_EXEC_RENDER);
+
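+ /* Mapping the results buffer waits for the batch to complete before we
+ * read the value back.
+ */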
+ /* Check whether the value got written. */
+ if (drm_intel_bo_map(results, false) == 0) {
+ success = *((uint32_t *)results->virtual + offset) == expected_value;
+ drm_intel_bo_unmap(results);
+ }
+
+err_batch:
+ drm_intel_bo_unreference(bo);
+err_results:
+ drm_intel_bo_unreference(results);
+err:
+ return success;
+}
+
+static bool
+intel_detect_pipelined_so(struct intel_screen *screen)
+{
+ /* Supposedly, Broadwell just works. */
+ if (screen->devinfo.gen >= 8)
+ return true;
+
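+ /* The SO write offset registers only exist on Gen7+. */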
+ if (screen->devinfo.gen <= 6)
+ return false;
+
+ /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
+ * statistics registers), and we already reset it to zero before using it.
+ */
+ return intel_detect_pipelined_register(screen,
+ GEN7_SO_WRITE_OFFSET(0),
+ 0x1337d0d0,
+ false);
+}
+
/**
* Return array of MSAA modes supported by the hardware. The array is
* zero-terminated and sorted in decreasing order.
screen->subslice_total = 1 << (screen->devinfo.gt - 1);
}
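+ /* Detect once at screen creation whether pipelined writes to the SOL
+ * offset registers work on this hardware/kernel combination.
+ */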
+ if (intel_detect_pipelined_so(screen))
+ screen->hw_has_pipelined_register |= HW_HAS_PIPELINED_SOL_OFFSET;
+
const char *force_msaa = getenv("INTEL_FORCE_MSAA");
if (force_msaa) {
screen->winsys_msaa_samples_override =