#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
+#include "intel_buffer_objects.h"
#include "brw_context.h"
#include "brw_state.h"
case GL_TEXTURE_2D:
case GL_TEXTURE_2D_ARRAY_EXT:
+ case GL_TEXTURE_EXTERNAL_OES:
return BRW_SURFACE_2D;
case GL_TEXTURE_3D:
brw->format_supported_as_render_target[MESA_FORMAT_X8_Z24] = true;
brw->format_supported_as_render_target[MESA_FORMAT_S8] = true;
brw->format_supported_as_render_target[MESA_FORMAT_Z16] = true;
+ brw->format_supported_as_render_target[MESA_FORMAT_Z32_FLOAT] = true;
+ brw->format_supported_as_render_target[MESA_FORMAT_Z32_FLOAT_X24S8] = true;
/* We remap depth formats to a supported texturing format in
* translate_tex_format().
*/
ctx->TextureFormatSupported[MESA_FORMAT_S8_Z24] = true;
ctx->TextureFormatSupported[MESA_FORMAT_X8_Z24] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_Z32_FLOAT] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_Z32_FLOAT_X24S8] = true;
}
bool
case MESA_FORMAT_X8_Z24:
return BRW_SURFACEFORMAT_I24X8_UNORM;
+ case MESA_FORMAT_Z32_FLOAT:
+ return BRW_SURFACEFORMAT_I32_FLOAT;
+
+ case MESA_FORMAT_Z32_FLOAT_X24S8:
+ return BRW_SURFACEFORMAT_R32G32_FLOAT;
+
case MESA_FORMAT_SARGB8:
case MESA_FORMAT_SLA8:
case MESA_FORMAT_SL8:
surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
BRW_SURFACE_CUBEFACE_ENABLES |
- (translate_tex_format(firstImage->TexFormat,
+ (translate_tex_format(mt->format,
firstImage->InternalFormat,
sampler->DepthMode,
sampler->sRGBDecode) <<
I915_GEM_DOMAIN_SAMPLER, 0);
}
+/**
+ * Set up a binding table entry for use by stream output logic (transform
+ * feedback).
+ *
+ * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
+ */
+void
+brw_update_sol_surface(struct brw_context *brw,
+ struct gl_buffer_object *buffer_obj,
+ uint32_t *out_offset, unsigned num_vector_components,
+ unsigned stride_dwords, unsigned offset_dwords)
+{
+ drm_intel_bo *bo = intel_buffer_object(buffer_obj)->buffer;
+ uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
+ out_offset);
+ /* The SURFACE_STATE PITCH field holds the pitch in bytes, minus one. */
+ uint32_t pitch_minus_1 = 4*stride_dwords - 1;
+ uint32_t offset_bytes = 4 * offset_dwords;
+ size_t size_dwords = buffer_obj->Size / 4;
+ uint32_t buffer_size_minus_1, width, height, depth, surface_format;
+
+ /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
+ * too big to map using a single binding table entry?
+ */
+ assert((size_dwords - offset_dwords) / stride_dwords
+ <= BRW_MAX_NUM_BUFFER_ENTRIES);
+
+ if (size_dwords > offset_dwords + num_vector_components) {
+ /* There is room for at least 1 transform feedback output in the buffer.
+ * Compute the number of additional transform feedback outputs the
+ * buffer has room for.
+ */
+ buffer_size_minus_1 =
+ (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
+ } else {
+ /* There isn't even room for a single transform feedback output in the
+ * buffer. We can't configure the binding table entry to prevent output
+ * entirely; we'll have to rely on the geometry shader to detect
+ * overflow. But to minimize the damage in case of a bug, set up the
+ * binding table entry to just allow a single output.
+ */
+ buffer_size_minus_1 = 0;
+ }
+ /* The entry count (minus one) is encoded across three SURFACE_STATE
+ * fields: bits 6:0 in WIDTH, bits 19:7 in HEIGHT, and bits 26:20 in
+ * DEPTH.
+ */
+ width = buffer_size_minus_1 & 0x7f;
+ height = (buffer_size_minus_1 & 0xfff80) >> 7;
+ depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
+
+ /* Each output component is a 32-bit float, so choose the R32 float
+ * surface format matching the component count.
+ */
+ switch (num_vector_components) {
+ case 1:
+ surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
+ break;
+ case 2:
+ surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
+ break;
+ case 3:
+ surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
+ break;
+ case 4:
+ surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
+ break;
+ default:
+ assert(!"Invalid vector size for transform feedback output");
+ surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
+ break;
+ }
+
+ /* Emit the six dwords of SURFACE_STATE. Dword 1 is the buffer's GPU
+ * address (plus the dword offset); the real address is patched in by
+ * the relocation emitted below.
+ */
+ surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
+ BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
+ surface_format << BRW_SURFACE_FORMAT_SHIFT |
+ BRW_SURFACE_RC_READ_WRITE;
+ surf[1] = bo->offset + offset_bytes; /* reloc */
+ surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
+ height << BRW_SURFACE_HEIGHT_SHIFT);
+ surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
+ pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
+ surf[4] = 0;
+ surf[5] = 0;
+
+ /* Emit relocation to surface contents. Dword 1 lives at byte offset 4
+ * within the surface state, hence *out_offset + 4.
+ */
+ drm_intel_bo_emit_reloc(brw->intel.batch.bo,
+ *out_offset + 4,
+ bo, offset_bytes,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+}
+
/* Creates a new WM constant buffer reflecting the current fragment program's
* constants, if needed by the fragment program.
*
drm_intel_bo_unreference(brw->wm.const_bo);
brw->wm.const_bo = NULL;
brw->bind.surf_offset[surf_index] = 0;
- brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
return;
}
params->NumParameters,
&brw->bind.surf_offset[surf_index]);
- brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state brw_wm_pull_constants = {
} else {
intel->vtbl.update_null_renderbuffer_surface(brw, 0);
}
- brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state brw_renderbuffer_surfaces = {
}
}
- brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state brw_texture_surfaces = {
sizeof(uint32_t) * BRW_MAX_SURFACES,
32, &brw->bind.bo_offset);
- /* BRW_NEW_WM_SURFACES and BRW_NEW_VS_CONSTBUF */
+ /* BRW_NEW_SURFACES and BRW_NEW_VS_CONSTBUF */
for (i = 0; i < BRW_MAX_SURFACES; i++) {
bind[i] = brw->bind.surf_offset[i];
}
.mesa = 0,
.brw = (BRW_NEW_BATCH |
BRW_NEW_VS_CONSTBUF |
- BRW_NEW_WM_SURFACES),
+ BRW_NEW_SURFACES),
.cache = 0
},
.emit = brw_upload_binding_table,