extern "C" {
void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
unsigned int level, unsigned int layer, gen6_hiz_op op)
{
const char *opname = NULL;
DBG("%s %s to mt %p level %d layer %d\n",
__FUNCTION__, opname, mt, level, layer);
brw_hiz_op_params params(mt, level, layer, op);
- brw_blorp_exec(intel, &params);
+ brw_blorp_exec(brw, &params);
}
} /* extern "C" */
void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
switch (intel->gen) {
case 6:
- gen6_blorp_exec(intel, params);
+ gen6_blorp_exec(brw, params);
break;
case 7:
- gen7_blorp_exec(intel, params);
+ gen7_blorp_exec(brw, params);
break;
default:
/* BLORP is not supported before Gen6. */
}
if (unlikely(intel->always_flush_batch))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
/* We've smashed all state compared to what the normal 3D pipeline
* rendering tracks for GL.
/* Flush the sampler cache so any texturing from the destination is
* coherent.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
brw_hiz_op_params::brw_hiz_op_params(struct intel_mipmap_tree *mt,
#endif
void
-brw_blorp_blit_miptrees(struct intel_context *intel,
+brw_blorp_blit_miptrees(struct brw_context *brw,
struct intel_mipmap_tree *src_mt,
unsigned src_level, unsigned src_layer,
struct intel_mipmap_tree *dst_mt,
bool mirror_x, bool mirror_y);
bool
-brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
bool partial_clear);
void
-brw_blorp_resolve_color(struct intel_context *intel,
+brw_blorp_resolve_color(struct brw_context *brw,
struct intel_mipmap_tree *mt);
#ifdef __cplusplus
void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params);
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params);
/**
}
void
-brw_blorp_blit_miptrees(struct intel_context *intel,
+brw_blorp_blit_miptrees(struct brw_context *brw,
struct intel_mipmap_tree *src_mt,
unsigned src_level, unsigned src_layer,
struct intel_mipmap_tree *dst_mt,
* to destination color buffers, and the standard render path is
* fast-color-aware.
*/
- intel_miptree_resolve_color(intel, src_mt);
- intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_layer);
- intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_layer);
+ intel_miptree_resolve_color(brw, src_mt);
+ intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_layer);
+ intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_layer);
DBG("%s from %s mt %p %d %d (%f,%f) (%f,%f)"
"to %s mt %p %d %d (%f,%f) (%f,%f) (flip %d,%d)\n",
dst_level, dst_layer, dst_x0, dst_y0, dst_x1, dst_y1,
mirror_x, mirror_y);
- brw_blorp_blit_params params(brw_context(&intel->ctx),
+ brw_blorp_blit_params params(brw,
src_mt, src_level, src_layer,
dst_mt, dst_level, dst_layer,
src_x0, src_y0,
dst_x0, dst_y0,
dst_x1, dst_y1,
mirror_x, mirror_y);
- brw_blorp_exec(intel, &params);
+ brw_blorp_exec(brw, &params);
intel_miptree_slice_set_needs_hiz_resolve(dst_mt, dst_level, dst_layer);
}
static void
-do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
+do_blorp_blit(struct brw_context *brw, GLbitfield buffer_bit,
struct intel_renderbuffer *src_irb,
struct intel_renderbuffer *dst_irb,
GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
struct intel_mipmap_tree *dst_mt = find_miptree(buffer_bit, dst_irb);
/* Do the blit */
- brw_blorp_blit_miptrees(intel,
+ brw_blorp_blit_miptrees(brw,
src_mt, src_irb->mt_level, src_irb->mt_layer,
dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
srcX0, srcY0, srcX1, srcY1,
}
static bool
-try_blorp_blit(struct intel_context *intel,
+try_blorp_blit(struct brw_context *brw,
GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
GLenum filter, GLbitfield buffer_bit)
{
- struct gl_context *ctx = &intel->ctx;
+ struct gl_context *ctx = &brw->intel.ctx;
/* Sync up the state of window system buffers. We need to do this before
* we go looking for the buffers.
*/
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
const struct gl_framebuffer *read_fb = ctx->ReadBuffer;
const struct gl_framebuffer *draw_fb = ctx->DrawBuffer;
for (unsigned i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; ++i) {
dst_irb = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
if (dst_irb)
- do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+ do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
mirror_x, mirror_y);
}
intel_renderbuffer(draw_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
if (!formats_match(buffer_bit, src_irb, dst_irb))
return false;
- do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+ do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
mirror_x, mirror_y);
break;
intel_renderbuffer(draw_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
if (!formats_match(buffer_bit, src_irb, dst_irb))
return false;
- do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+ do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
mirror_x, mirror_y);
break;
}
bool
-brw_blorp_copytexsubimage(struct intel_context *intel,
+brw_blorp_copytexsubimage(struct brw_context *brw,
struct gl_renderbuffer *src_rb,
struct gl_texture_image *dst_image,
int slice,
int dstX0, int dstY0,
int width, int height)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
struct intel_texture_image *intel_image = intel_texture_image(dst_image);
/* Sync up the state of window system buffers. We need to do this before
* we go looking at the src renderbuffer's miptree.
*/
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
struct intel_mipmap_tree *src_mt = src_irb->mt;
struct intel_mipmap_tree *dst_mt = intel_image->mt;
mirror_y = true;
}
- brw_blorp_blit_miptrees(intel,
+ brw_blorp_blit_miptrees(brw,
src_mt, src_irb->mt_level, src_irb->mt_layer,
dst_mt, dst_image->Level, dst_image->Face + slice,
srcX0, srcY0, srcX1, srcY1,
dst_mt = dst_mt->stencil_mt;
if (src_mt != dst_mt) {
- brw_blorp_blit_miptrees(intel,
+ brw_blorp_blit_miptrees(brw,
src_mt, src_irb->mt_level, src_irb->mt_layer,
dst_mt, dst_image->Level,
dst_image->Face + slice,
GLbitfield
-brw_blorp_framebuffer(struct intel_context *intel,
+brw_blorp_framebuffer(struct brw_context *brw,
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
+ struct intel_context *intel = &brw->intel;
+
/* BLORP is not supported before Gen6. */
if (intel->gen < 6)
return mask;
for (unsigned int i = 0; i < ARRAY_SIZE(buffer_bits); ++i) {
if ((mask & buffer_bits[i]) &&
- try_blorp_blit(intel,
+ try_blorp_blit(brw,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
filter, buffer_bits[i])) {
* moment we only support floating point, unorm, and snorm buffers.
*/
static bool
-is_color_fast_clear_compatible(struct intel_context *intel,
+is_color_fast_clear_compatible(struct brw_context *brw,
gl_format format,
const union gl_color_union *color)
{
+ struct intel_context *intel = &brw->intel;
if (_mesa_is_format_integer_color(format))
return false;
/* If we can do this as a fast color clear, do so. */
if (irb->mt->mcs_state != INTEL_MCS_STATE_NONE && !partial_clear &&
wm_prog_key.use_simd16_replicated_data &&
- is_color_fast_clear_compatible(intel, format, &ctx->Color.ClearColor)) {
+ is_color_fast_clear_compatible(brw, format, &ctx->Color.ClearColor)) {
memset(push_consts, 0xff, 4*sizeof(float));
fast_clear_op = GEN7_FAST_CLEAR_OP_FAST_CLEAR;
* with X alignment multiplied by 16 and Y alignment multiplied by 32.
*/
unsigned x_align, y_align;
- intel_get_non_msrt_mcs_alignment(intel, irb->mt, &x_align, &y_align);
+ intel_get_non_msrt_mcs_alignment(brw, irb->mt, &x_align, &y_align);
x_align *= 16;
y_align *= 32;
x0 = ROUND_DOWN_TO(x0, x_align);
* X and Y alignment each divided by 2.
*/
unsigned x_align, y_align;
- intel_get_non_msrt_mcs_alignment(&brw->intel, mt, &x_align, &y_align);
+ intel_get_non_msrt_mcs_alignment(brw, mt, &x_align, &y_align);
unsigned x_scaledown = x_align / 2;
unsigned y_scaledown = y_align / 2;
x0 = y0 = 0;
extern "C" {
bool
-brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
bool partial_clear)
{
- struct gl_context *ctx = &intel->ctx;
- struct brw_context *brw = brw_context(ctx);
+ struct gl_context *ctx = &brw->intel.ctx;
/* The constant color clear code doesn't work for multisampled surfaces, so
* we need to support falling back to other clear mechanisms.
* it now.
*/
if (!irb->mt->mcs_mt) {
- if (!intel_miptree_alloc_non_msrt_mcs(intel, irb->mt)) {
+ if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
/* MCS allocation failed--probably this will only happen in
* out-of-memory conditions. But in any case, try to recover
* by falling back to a non-blorp clear technique.
DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
irb->mt, irb->mt_level, irb->mt_layer);
- brw_blorp_exec(intel, &params);
+ brw_blorp_exec(brw, &params);
if (is_fast_clear) {
/* Now that the fast clear has occurred, put the buffer in
}
void
-brw_blorp_resolve_color(struct intel_context *intel, struct intel_mipmap_tree *mt)
+brw_blorp_resolve_color(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
- struct brw_context *brw = brw_context(&intel->ctx);
-
DBG("%s to mt %p\n", __FUNCTION__, mt);
brw_blorp_rt_resolve_params params(brw, mt);
- brw_blorp_exec(intel, &params);
+ brw_blorp_exec(brw, &params);
mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
}
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_renderbuffer *depth_irb =
* flags out of the HiZ buffer into the real depth buffer.
*/
if (mt->depth_clear_value != depth_clear_value) {
- intel_miptree_all_slices_resolve_depth(intel, mt);
+ intel_miptree_all_slices_resolve_depth(brw, mt);
mt->depth_clear_value = depth_clear_value;
}
* must be issued before the rectangle primitive used for the depth
* buffer clear operation.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
- intel_hiz_exec(intel, mt, depth_irb->mt_level, depth_irb->mt_layer,
+ intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
GEN6_HIZ_OP_DEPTH_CLEAR);
if (intel->gen == 6) {
* by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
* followed by Depth FLUSH'
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
/* Now, the HiZ buffer contains data that needs to be resolved to the depth
intel->front_buffer_dirty = true;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);
if (mask & BUFFER_BIT_DEPTH) {
/* BLORP is currently only supported on Gen6+. */
if (intel->gen >= 6) {
if (mask & BUFFER_BITS_COLOR) {
- if (brw_blorp_clear_color(intel, fb, partial_clear)) {
+ if (brw_blorp_clear_color(brw, fb, partial_clear)) {
debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
mask &= ~BUFFER_BITS_COLOR;
}
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
- if (!intelInitContext( intel, api, major_version, minor_version,
+ if (!intelInitContext( brw, api, major_version, minor_version,
mesaVis, driContextPriv,
sharedContextPrivate, &functions,
error)) {
/*======================================================================
* brw_state_dump.c
*/
-void brw_debug_batch(struct intel_context *intel);
-void brw_annotate_aub(struct intel_context *intel);
+void brw_debug_batch(struct brw_context *brw);
+void brw_annotate_aub(struct brw_context *brw);
/*======================================================================
* brw_tex.c
void brwInitFragProgFuncs( struct dd_function_table *functions );
int brw_get_scratch_size(int size);
-void brw_get_scratch_bo(struct intel_context *intel,
+void brw_get_scratch_bo(struct brw_context *brw,
drm_intel_bo **scratch_bo, int size);
void brw_init_shader_time(struct brw_context *brw);
int brw_get_shader_time_index(struct brw_context *brw,
uint32_t *surf_offsets);
/* brw_surface_formats.c */
-bool brw_is_hiz_depth_format(struct intel_context *ctx, gl_format format);
-bool brw_render_target_supported(struct intel_context *intel,
+bool brw_is_hiz_depth_format(struct brw_context *ctx, gl_format format);
+bool brw_render_target_supported(struct brw_context *brw,
struct gl_renderbuffer *rb);
/* gen6_sol.c */
/* brw_blorp_blit.cpp */
GLbitfield
-brw_blorp_framebuffer(struct intel_context *intel,
+brw_blorp_framebuffer(struct brw_context *brw,
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter);
bool
-brw_blorp_copytexsubimage(struct intel_context *intel,
+brw_blorp_copytexsubimage(struct brw_context *brw,
struct gl_renderbuffer *src_rb,
struct gl_texture_image *dst_image,
int slice,
}
bool brw_do_cubemap_normalize(struct exec_list *instructions);
-bool brw_lower_texture_gradients(struct intel_context *intel,
+bool brw_lower_texture_gradients(struct brw_context *brw,
struct exec_list *instructions);
struct opcode_desc {
*/
void brw_upload_cs_urb_state(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
/* It appears that this is the state packet for the CS unit, ie. the
* urb entries detailed here are housed in the CS range from the
* the besides the draw code.
*/
if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
BEGIN_BATCH(6);
intel->batch.need_workaround_flush = true;
if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
}
* the besides the draw code.
*/
if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
BEGIN_BATCH(7);
ADVANCE_BATCH();
if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
}
brw_predraw_resolve_buffers(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
- struct intel_context *intel = &brw->intel;
struct intel_renderbuffer *depth_irb;
struct intel_texture_object *tex_obj;
/* Resolve the depth buffer's HiZ buffer. */
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
if (depth_irb)
- intel_renderbuffer_resolve_hiz(intel, depth_irb);
+ intel_renderbuffer_resolve_hiz(brw, depth_irb);
/* Resolve depth buffer of each enabled depth texture, and color buffer of
* each fast-clear-enabled color texture.
tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
if (!tex_obj || !tex_obj->mt)
continue;
- intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
- intel_miptree_resolve_color(intel, tex_obj->mt);
+ intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
+ intel_miptree_resolve_color(brw, tex_obj->mt);
}
}
*/
brw_validate_textures( brw );
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
/* This workaround has to happen outside of brw_upload_state() because it
* may flush the batchbuffer for a blit, affecting the state flags.
* we've got validated state that needs to be in the same batch as the
* primitives.
*/
- intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
- intel_batchbuffer_save_state(intel);
+ intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
+ intel_batchbuffer_save_state(brw);
if (brw->num_instances != prim->num_instances) {
brw->num_instances = prim->num_instances;
if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
if (!fail_next) {
- intel_batchbuffer_reset_to_saved(intel);
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_flush(brw);
fail_next = true;
goto retry;
} else {
- if (intel_batchbuffer_flush(intel) == -ENOSPC) {
+ if (intel_batchbuffer_flush(brw) == -ENOSPC) {
static bool warned = false;
if (!warned) {
}
if (intel->always_flush_batch)
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
brw_state_cache_check_size(brw);
brw_postdraw_set_buffers_need_resolve(brw);
* Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
*/
static unsigned
-get_surface_type(struct intel_context *intel,
+get_surface_type(struct brw_context *brw,
const struct gl_client_array *glarray)
{
+ struct intel_context *intel = &brw->intel;
int size = glarray->Size;
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
* to replicate it out.
*/
if (src_stride == 0) {
- intel_upload_data(&brw->intel, element->glarray->Ptr,
+ intel_upload_data(brw, element->glarray->Ptr,
element->glarray->_ElementSize,
element->glarray->_ElementSize,
&buffer->bo, &buffer->offset);
GLuint size = count * dst_stride;
if (dst_stride == src_stride) {
- intel_upload_data(&brw->intel, src, size, dst_stride,
+ intel_upload_data(brw, src, size, dst_stride,
&buffer->bo, &buffer->offset);
} else {
- char * const map = intel_upload_map(&brw->intel, size, dst_stride);
+ char * const map = intel_upload_map(brw, size, dst_stride);
char *dst = map;
while (count--) {
src += src_stride;
dst += dst_stride;
}
- intel_upload_unmap(&brw->intel, map, size, dst_stride,
+ intel_upload_unmap(brw, map, size, dst_stride,
&buffer->bo, &buffer->offset);
}
buffer->stride = dst_stride;
struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
/* Named buffer object: Just reference its contents directly. */
- buffer->bo = intel_bufferobj_source(intel,
+ buffer->bo = intel_bufferobj_source(brw,
intel_buffer, 1,
&buffer->offset);
drm_intel_bo_reference(buffer->bo);
OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
for (i = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
- uint32_t format = get_surface_type(intel, input->glarray);
+ uint32_t format = get_surface_type(brw, input->glarray);
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
}
if (intel->gen >= 6 && gen6_edgeflag_input) {
- uint32_t format = get_surface_type(intel, gen6_edgeflag_input->glarray);
+ uint32_t format = get_surface_type(brw, gen6_edgeflag_input->glarray);
OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
/* Get new bufferobj, offset:
*/
- intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
+ intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
&bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
} else {
GL_MAP_READ_BIT,
bufferobj);
- intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
- &bo, &offset);
+ intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
ctx->Driver.UnmapBuffer(ctx, bufferobj);
*/
brw->ib.start_vertex_offset = offset / ib_type_size;
- bo = intel_bufferobj_source(intel,
+ bo = intel_bufferobj_source(brw,
intel_buffer_object(bufferobj),
ib_type_size,
&offset);
p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
- brw_init_compaction_tables(&brw->intel);
+ brw_init_compaction_tables(brw);
}
brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
void *store = p->store;
bool dump_hex = false;
((uint32_t *)insn)[0]);
}
- brw_uncompact_instruction(intel, &uncompacted, compacted);
+ brw_uncompact_instruction(brw, &uncompacted, compacted);
insn = &uncompacted;
offset += 8;
} else {
uint32_t brw_swap_cmod(uint32_t cmod);
/* brw_eu_compact.c */
-void brw_init_compaction_tables(struct intel_context *intel);
+void brw_init_compaction_tables(struct brw_context *brw);
void brw_compact_instructions(struct brw_compile *p);
-void brw_uncompact_instruction(struct intel_context *intel,
+void brw_uncompact_instruction(struct brw_context *brw,
struct brw_instruction *dst,
struct brw_compact_instruction *src);
bool brw_try_compact_instruction(struct brw_compile *p,
struct brw_compact_instruction *dst,
struct brw_instruction *src);
-void brw_debug_compact_uncompact(struct intel_context *intel,
+void brw_debug_compact_uncompact(struct brw_context *brw,
struct brw_instruction *orig,
struct brw_instruction *uncompacted);
static const uint32_t *src_index_table;
static bool
-set_control_index(struct intel_context *intel,
+set_control_index(struct brw_context *brw,
struct brw_compact_instruction *dst,
struct brw_instruction *src)
{
+ struct intel_context *intel = &brw->intel;
uint32_t *src_u32 = (uint32_t *)src;
uint32_t uncompacted = 0;
temp.dw0.opcode = src->header.opcode;
temp.dw0.debug_control = src->header.debug_control;
- if (!set_control_index(intel, &temp, src))
+ if (!set_control_index(brw, &temp, src))
return false;
if (!set_datatype_index(&temp, src))
return false;
}
static void
-set_uncompacted_control(struct intel_context *intel,
+set_uncompacted_control(struct brw_context *brw,
struct brw_instruction *dst,
struct brw_compact_instruction *src)
{
+ struct intel_context *intel = &brw->intel;
uint32_t *dst_u32 = (uint32_t *)dst;
uint32_t uncompacted = control_index_table[src->dw0.control_index];
}
void
-brw_uncompact_instruction(struct intel_context *intel,
+brw_uncompact_instruction(struct brw_context *brw,
struct brw_instruction *dst,
struct brw_compact_instruction *src)
{
+ struct intel_context *intel = &brw->intel;
memset(dst, 0, sizeof(*dst));
dst->header.opcode = src->dw0.opcode;
dst->header.debug_control = src->dw0.debug_control;
- set_uncompacted_control(intel, dst, src);
+ set_uncompacted_control(brw, dst, src);
set_uncompacted_datatype(dst, src);
set_uncompacted_subreg(dst, src);
dst->header.acc_wr_control = src->dw0.acc_wr_control;
dst->bits3.da1.src1_reg_nr = src->dw1.src1_reg_nr;
}
-void brw_debug_compact_uncompact(struct intel_context *intel,
+void brw_debug_compact_uncompact(struct brw_context *brw,
struct brw_instruction *orig,
struct brw_instruction *uncompacted)
{
+ struct intel_context *intel = &brw->intel;
fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n",
intel->gen);
}
void
-brw_init_compaction_tables(struct intel_context *intel)
+brw_init_compaction_tables(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0);
assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0);
assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
if (INTEL_DEBUG) {
struct brw_instruction uncompacted;
- brw_uncompact_instruction(intel, &uncompacted, dst);
+ brw_uncompact_instruction(brw, &uncompacted, dst);
if (memcmp(&saved, &uncompacted, sizeof(uncompacted))) {
- brw_debug_compact_uncompact(intel, &saved, &uncompacted);
+ brw_debug_compact_uncompact(brw, &saved, &uncompacted);
}
}
extern "C" {
bool
-brw_lower_texture_gradients(struct intel_context *intel,
+brw_lower_texture_gradients(struct brw_context *brw,
struct exec_list *instructions)
{
+ struct intel_context *intel = &brw->intel;
bool has_sample_d_c = intel->gen >= 8 || intel->is_haswell;
lower_texture_grad_visitor v(has_sample_d_c);
*/
static void upload_binding_table_pointers(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(6);
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
OUT_BATCH(brw->vs.bind_bo_offset);
*/
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
GEN6_BINDING_TABLE_MODIFY_VS |
perf_debug("HW workaround: blitting depth level %d to a temporary "
"to fix alignment (depth tile offset %d,%d)\n",
depth_irb->mt_level, tile_x, tile_y);
- intel_renderbuffer_move_to_temp(intel, depth_irb, invalidate_depth);
+ intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
/* In the case of stencil_irb being the same packed depth/stencil
* texture but not the same rb, make it point at our rebased mt, too.
*/
"to fix alignment (stencil tile offset %d,%d)\n",
stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);
- intel_renderbuffer_move_to_temp(intel, stencil_irb, invalidate_stencil);
+ intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
stencil_mt = get_stencil_miptree(stencil_irb);
intel_miptree_get_image_offset(stencil_mt,
tile_x, tile_y,
stencil_tile_x, stencil_tile_y);
- intel_renderbuffer_move_to_temp(intel, depth_irb,
- invalidate_depth);
+ intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
tile_x = depth_irb->draw_x & tile_mask_x;
tile_y = depth_irb->draw_y & tile_mask_y;
* non-pipelined state that will need the PIPE_CONTROL workaround.
*/
if (intel->gen == 6) {
- intel_emit_post_sync_nonzero_flush(intel);
- intel_emit_depth_stall_flushes(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
+ intel_emit_depth_stall_flushes(brw);
}
unsigned int len;
*/
if (intel->gen >= 6 || hiz) {
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
return;
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(33);
OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));
return;
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));
return;
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
/* use legacy aa line coverage computation */
return;
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
/* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
/* Select the 3D pipeline (as opposed to media) */
BEGIN_BATCH(1);
if (intel->gen >= 6) {
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(10);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
}
void
-brw_get_scratch_bo(struct intel_context *intel,
+brw_get_scratch_bo(struct brw_context *brw,
drm_intel_bo **scratch_bo, int size)
{
+ struct intel_context *intel = &brw->intel;
drm_intel_bo *old_bo = *scratch_bo;
if (old_bo && old_bo->size < size) {
void brw_populate_sampler_prog_key_data(struct gl_context *ctx,
const struct gl_program *prog,
struct brw_sampler_prog_key_data *key);
-bool brw_debug_recompile_sampler_key(struct intel_context *intel,
+bool brw_debug_recompile_sampler_key(struct brw_context *brw,
const struct brw_sampler_prog_key_data *old_key,
const struct brw_sampler_prog_key_data *key);
void brw_add_texrect_params(struct gl_program *prog);
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
*/
static void
-write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
if (intel->gen >= 6) {
/* Emit workaround flushes: */
if (intel->gen == 6) {
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
*/
static void
-write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen < 6);
BEGIN_BATCH(4);
brw_queryobj_get_results(struct gl_context *ctx,
struct brw_query_object *query)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
int i;
* when mapped.
*/
if (drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (unlikely(intel->perf_debug)) {
if (drm_intel_bo_busy(query->bo)) {
*/
drm_intel_bo_unreference(query->bo);
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
- write_timestamp(intel, query->bo, 0);
+ write_timestamp(brw, query->bo, 0);
break;
case GL_ANY_SAMPLES_PASSED:
switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT:
/* Write the final timestamp. */
- write_timestamp(intel, query->bo, 1);
+ write_timestamp(brw, query->bo, 1);
break;
case GL_ANY_SAMPLES_PASSED:
*/
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
* the async query will return true in finite time.
*/
if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
brw_queryobj_get_results(ctx, query);
ensure_bo_has_space(ctx, query);
- write_depth_count(intel, query->bo, query->last_index * 2);
+ write_depth_count(brw, query->bo, query->last_index * 2);
brw->query.begin_emitted = true;
}
if (!brw->query.begin_emitted)
return;
- write_depth_count(intel, query->bo, query->last_index * 2 + 1);
+ write_depth_count(brw, query->bo, query->last_index * 2 + 1);
brw->query.begin_emitted = false;
query->last_index++;
static void
brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *) q;
drm_intel_bo_unreference(query->bo);
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
- write_timestamp(intel, query->bo, 0);
+ write_timestamp(brw, query->bo, 0);
}
/**
class schedule_node : public exec_node
{
public:
- schedule_node(backend_instruction *inst, const struct intel_context *intel)
+ schedule_node(backend_instruction *inst, const struct brw_context *brw)
{
+ const struct intel_context *intel = &brw->intel;
+
this->inst = inst;
this->child_array_size = 0;
this->children = NULL;
void
instruction_scheduler::add_inst(backend_instruction *inst)
{
- schedule_node *n = new(mem_ctx) schedule_node(inst, bv->intel);
+ schedule_node *n = new(mem_ctx) schedule_node(inst, bv->brw);
assert(!inst->is_head_sentinel());
assert(!inst->is_tail_sentinel());
lower_if_to_cond_assign(shader->ir, 16);
do_lower_texture_projection(shader->ir);
- brw_lower_texture_gradients(intel, shader->ir);
+ brw_lower_texture_gradients(brw, shader->ir);
do_vec_index_to_cond_assign(shader->ir);
lower_vector_insert(shader->ir, true);
brw_do_cubemap_normalize(shader->ir);
/***********************************************************************
* brw_state_batch.c
*/
-#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(&brw->intel, (s), \
+#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(brw, (s), \
sizeof(*(s)), false)
void *brw_state_batch(struct brw_context *brw,
GLuint translate_tex_target(GLenum target);
-GLuint translate_tex_format(struct intel_context *intel,
+GLuint translate_tex_format(struct brw_context *brw,
gl_format mesa_format,
GLenum depth_mode,
GLenum srgb_decode);
* is annotated according to the type of each data structure.
*/
void
-brw_annotate_aub(struct intel_context *intel)
+brw_annotate_aub(struct brw_context *brw)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
unsigned annotation_count = 2 * brw->state_batch_count + 1;
drm_intel_aub_annotation annotations[annotation_count];
*/
if (batch->state_batch_offset < size ||
offset < 4*batch->used + batch->reserved_space) {
- intel_batchbuffer_flush(&brw->intel);
+ intel_batchbuffer_flush(brw);
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
}
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
- struct intel_context *intel = &brw->intel;
struct brw_cache_item *c, *next;
GLuint i;
brw->state.dirty.mesa |= ~0;
brw->state.dirty.brw |= ~0;
brw->state.dirty.cache |= ~0;
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
void
* The buffer offsets printed rely on the buffer containing the last offset
* it was validated at.
*/
-void brw_debug_batch(struct intel_context *intel)
+void brw_debug_batch(struct brw_context *brw)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
drm_intel_bo_map(intel->batch.bo, false);
dump_state_batch(brw);
if ((state->mesa | state->cache | state->brw) == 0)
return;
- intel_check_front_buffer_rendering(intel);
+ intel_check_front_buffer_rendering(brw);
if (unlikely(INTEL_DEBUG)) {
/* Debug version which enforces various sanity checks on the
}
bool
-brw_render_target_supported(struct intel_context *intel,
+brw_render_target_supported(struct brw_context *brw,
struct gl_renderbuffer *rb)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
gl_format format = rb->Format;
/* Many integer formats are promoted to RGBA (like XRGB8888 is), which means
}
GLuint
-translate_tex_format(struct intel_context *intel,
+translate_tex_format(struct brw_context *brw,
gl_format mesa_format,
GLenum depth_mode,
GLenum srgb_decode)
{
- struct gl_context *ctx = &intel->ctx;
+ struct intel_context *intel = &brw->intel;
+ struct gl_context *ctx = &brw->intel.ctx;
if (srgb_decode == GL_SKIP_DECODE_EXT)
mesa_format = _mesa_get_srgb_format_linear(mesa_format);
/** Can HiZ be enabled on a depthbuffer of the given format? */
bool
-brw_is_hiz_depth_format(struct intel_context *intel, gl_format format)
+brw_is_hiz_depth_format(struct brw_context *brw, gl_format format)
{
+ struct intel_context *intel = &brw->intel;
if (!intel->has_hiz)
return false;
void brw_validate_textures( struct brw_context *brw )
{
struct gl_context *ctx = &brw->intel.ctx;
- struct intel_context *intel = &brw->intel;
int i;
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
if (texUnit->_ReallyEnabled) {
- intel_finalize_mipmap_tree(intel, i);
+ intel_finalize_mipmap_tree(brw, i);
}
}
}
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static unsigned int
-intel_horizontal_texture_alignment_unit(struct intel_context *intel,
+intel_horizontal_texture_alignment_unit(struct brw_context *brw,
gl_format format)
{
+ struct intel_context *intel = &brw->intel;
/**
* From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
}
static unsigned int
-intel_vertical_texture_alignment_unit(struct intel_context *intel,
+intel_vertical_texture_alignment_unit(struct brw_context *brw,
gl_format format)
{
+ struct intel_context *intel = &brw->intel;
/**
* From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
}
static void
-brw_miptree_layout_texture_array(struct intel_context *intel,
+brw_miptree_layout_texture_array(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
+ struct intel_context *intel = &brw->intel;
unsigned qpitch = 0;
int h0, h1;
}
static void
-brw_miptree_layout_texture_3d(struct intel_context *intel,
+brw_miptree_layout_texture_3d(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
unsigned width = mt->physical_width0;
}
void
-brw_miptree_layout(struct intel_context *intel, struct intel_mipmap_tree *mt)
+brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
- mt->align_w = intel_horizontal_texture_alignment_unit(intel, mt->format);
- mt->align_h = intel_vertical_texture_alignment_unit(intel, mt->format);
+ struct intel_context *intel = &brw->intel;
+ mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format);
+ mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format);
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:
if (intel->gen == 4) {
/* Gen4 stores cube maps as 3D textures. */
assert(mt->physical_depth0 == 6);
- brw_miptree_layout_texture_3d(intel, mt);
+ brw_miptree_layout_texture_3d(brw, mt);
} else {
/* All other hardware stores cube maps as 2D arrays. */
- brw_miptree_layout_texture_array(intel, mt);
+ brw_miptree_layout_texture_array(brw, mt);
}
break;
case GL_TEXTURE_3D:
- brw_miptree_layout_texture_3d(intel, mt);
+ brw_miptree_layout_texture_3d(brw, mt);
break;
case GL_TEXTURE_1D_ARRAY:
case GL_TEXTURE_2D_ARRAY:
case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
case GL_TEXTURE_CUBE_MAP_ARRAY:
- brw_miptree_layout_texture_array(intel, mt);
+ brw_miptree_layout_texture_array(brw, mt);
break;
default:
switch (mt->msaa_layout) {
case INTEL_MSAA_LAYOUT_UMS:
case INTEL_MSAA_LAYOUT_CMS:
- brw_miptree_layout_texture_array(intel, mt);
+ brw_miptree_layout_texture_array(brw, mt);
break;
case INTEL_MSAA_LAYOUT_NONE:
case INTEL_MSAA_LAYOUT_IMS:
prog_data.base.total_scratch
= brw_get_scratch_size(c.base.last_scratch*REG_SIZE);
- brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
+ brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
prog_data.base.total_scratch * brw->max_vs_threads);
}
}
static bool
-key_debug(struct intel_context *intel, const char *name, int a, int b)
+key_debug(struct brw_context *brw, const char *name, int a, int b)
{
+ struct intel_context *intel = &brw->intel;
if (a != b) {
perf_debug(" %s %d->%d\n", name, a, b);
return true;
}
for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
- found |= key_debug(intel, "Vertex attrib w/a flags",
+ found |= key_debug(brw, "Vertex attrib w/a flags",
old_key->gl_attrib_wa_flags[i],
key->gl_attrib_wa_flags[i]);
}
- found |= key_debug(intel, "user clip flags",
+ found |= key_debug(brw, "user clip flags",
old_key->base.userclip_active, key->base.userclip_active);
- found |= key_debug(intel, "user clipping planes as push constants",
+ found |= key_debug(brw, "user clipping planes as push constants",
old_key->base.nr_userclip_plane_consts,
key->base.nr_userclip_plane_consts);
- found |= key_debug(intel, "clip distance enable",
+ found |= key_debug(brw, "clip distance enable",
old_key->base.uses_clip_distance, key->base.uses_clip_distance);
- found |= key_debug(intel, "clip plane enable bitfield",
+ found |= key_debug(brw, "clip plane enable bitfield",
old_key->base.userclip_planes_enabled_gen_4_5,
key->base.userclip_planes_enabled_gen_4_5);
- found |= key_debug(intel, "copy edgeflag",
+ found |= key_debug(brw, "copy edgeflag",
old_key->copy_edgeflag, key->copy_edgeflag);
- found |= key_debug(intel, "PointCoord replace",
+ found |= key_debug(brw, "PointCoord replace",
old_key->point_coord_replace, key->point_coord_replace);
- found |= key_debug(intel, "vertex color clamping",
+ found |= key_debug(brw, "vertex color clamping",
old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);
- found |= brw_debug_recompile_sampler_key(intel, &old_key->base.tex,
+ found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
&key->base.tex);
if (!found) {
/**
* called from intelDestroyContext()
*/
-static void brw_destroy_context( struct intel_context *intel )
+static void
+brw_destroy_context(struct brw_context *brw)
{
- struct brw_context *brw = brw_context(&intel->ctx);
-
+ struct intel_context *intel = &brw->intel;
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
/* Force a report. */
brw->shader_time.report_time = 0;
* at the end of a batchbuffer. If you add more GPU state, increase
* the BATCH_RESERVED macro.
*/
-static void brw_finish_batch(struct intel_context *intel)
+static void
+brw_finish_batch(struct brw_context *brw)
{
- struct brw_context *brw = brw_context(&intel->ctx);
brw_emit_query_end(brw);
if (brw->curbe.curbe_bo) {
/**
* called from intelFlushBatchLocked
*/
-static void brw_new_batch( struct intel_context *intel )
+static void
+brw_new_batch(struct brw_context *brw)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
/* If the kernel supports hardware contexts, then most hardware state is
* preserved between batches; we only need to re-emit state that is required
c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);
- brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
+ brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
c->prog_data.total_scratch * brw->max_wm_threads);
}
}
static bool
-key_debug(struct intel_context *intel, const char *name, int a, int b)
+key_debug(struct brw_context *brw, const char *name, int a, int b)
{
+ struct intel_context *intel = &brw->intel;
if (a != b) {
perf_debug(" %s %d->%d\n", name, a, b);
return true;
}
bool
-brw_debug_recompile_sampler_key(struct intel_context *intel,
+brw_debug_recompile_sampler_key(struct brw_context *brw,
const struct brw_sampler_prog_key_data *old_key,
const struct brw_sampler_prog_key_data *key)
{
bool found = false;
for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
- found |= key_debug(intel, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
+ found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
old_key->swizzles[i], key->swizzles[i]);
}
- found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 1st coordinate",
+ found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
- found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
+ found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
- found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
+ found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
- found |= key_debug(intel, "GL_MESA_ycbcr texturing\n",
+ found |= key_debug(brw, "GL_MESA_ycbcr texturing\n",
old_key->yuvtex_mask, key->yuvtex_mask);
- found |= key_debug(intel, "GL_MESA_ycbcr UV swapping\n",
+ found |= key_debug(brw, "GL_MESA_ycbcr UV swapping\n",
old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);
return found;
return;
}
- found |= key_debug(intel, "alphatest, computed depth, depth test, or "
+ found |= key_debug(brw, "alphatest, computed depth, depth test, or "
"depth write",
old_key->iz_lookup, key->iz_lookup);
- found |= key_debug(intel, "depth statistics",
+ found |= key_debug(brw, "depth statistics",
old_key->stats_wm, key->stats_wm);
- found |= key_debug(intel, "flat shading",
+ found |= key_debug(brw, "flat shading",
old_key->flat_shade, key->flat_shade);
- found |= key_debug(intel, "number of color buffers",
+ found |= key_debug(brw, "number of color buffers",
old_key->nr_color_regions, key->nr_color_regions);
- found |= key_debug(intel, "MRT alpha test or alpha-to-coverage",
+ found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
old_key->replicate_alpha, key->replicate_alpha);
- found |= key_debug(intel, "rendering to FBO",
+ found |= key_debug(brw, "rendering to FBO",
old_key->render_to_fbo, key->render_to_fbo);
- found |= key_debug(intel, "fragment color clamping",
+ found |= key_debug(brw, "fragment color clamping",
old_key->clamp_fragment_color, key->clamp_fragment_color);
- found |= key_debug(intel, "line smoothing",
+ found |= key_debug(brw, "line smoothing",
old_key->line_aa, key->line_aa);
- found |= key_debug(intel, "renderbuffer height",
+ found |= key_debug(brw, "renderbuffer height",
old_key->drawable_height, key->drawable_height);
- found |= key_debug(intel, "input slots valid",
+ found |= key_debug(brw, "input slots valid",
old_key->input_slots_valid, key->input_slots_valid);
- found |= brw_debug_recompile_sampler_key(intel, &old_key->tex, &key->tex);
+ found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);
if (!found) {
perf_debug(" Something else\n");
uint32_t *binding_table,
unsigned surf_index)
{
- struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
BRW_SURFACE_CUBEFACE_ENABLES |
- (translate_tex_format(intel,
+ (translate_tex_format(brw,
mt->format,
tObj->DepthMode,
sampler->sRGBDecode) <<
{
struct intel_context *intel = &brw->intel;
struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
- drm_intel_bo *bo =
- intel_bufferobj_buffer(intel, intel_bo, INTEL_WRITE_PART);
+ drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART);
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
out_offset);
uint32_t pitch_minus_1 = 4*stride_dwords - 1;
unsigned width_in_tiles = ALIGN(fb->Width, 16) / 16;
unsigned height_in_tiles = ALIGN(fb->Height, 16) / 16;
unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
- brw_get_scratch_bo(intel, &brw->wm.multisampled_null_render_target_bo,
+ brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
size_needed);
bo = brw->wm.multisampled_null_render_target_bo;
surface_type = BRW_SURFACE_2D;
* select the image. So, instead, we just make a new single-level
* miptree and render into that.
*/
- intel_renderbuffer_move_to_temp(intel, irb, false);
+ intel_renderbuffer_move_to_temp(brw, irb, false);
mt = irb->mt;
}
}
binding = &ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
intel_bo = intel_buffer_object(binding->BufferObject);
- drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_bo, INTEL_READ);
+ drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_READ);
/* Because behavior for referencing outside of the binding's size in the
* glBindBufferRange case is undefined, we can just bind the whole buffer
gen6_blorp_emit_urb_config(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_URB << 16 | (3 - 2));
OUT_BATCH(brw->urb.max_vs_entries << GEN6_URB_VS_ENTRIES_SHIFT);
uint32_t depthstencil_offset,
uint32_t cc_state_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
OUT_BATCH(cc_blend_state_offset | 1); /* BLEND_STATE offset */
const brw_blorp_params *params,
uint32_t sampler_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
VS_SAMPLER_STATE_CHANGE |
* toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
* command with CS stall bit set and a post sync operation.
*/
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
}
/* Disable the push constant buffers. */
gen6_blorp_emit_gs_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
/* Disable all the constant buffers. */
BEGIN_BATCH(5);
OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));
gen6_blorp_emit_clip_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_CLIP << 16 | (4 - 2));
OUT_BATCH(0);
gen6_blorp_emit_sf_config(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(20);
OUT_BATCH(_3DSTATE_SF << 16 | (20 - 2));
OUT_BATCH((1 - 1) << GEN6_SF_NUM_OUTPUTS_SHIFT | /* only position */
uint32_t prog_offset,
brw_blorp_prog_data *prog_data)
{
- struct intel_context *intel = &brw->intel;
uint32_t dw2, dw4, dw5, dw6;
/* Even when thread dispatch is disabled, max threads (dw5.25:31) must be
const brw_blorp_params *params,
uint32_t wm_push_const_offset)
{
- struct intel_context *intel = &brw->intel;
-
/* Make sure the push constants fill an exact integer number of
* registers.
*/
gen6_blorp_emit_constant_ps_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
/* Disable the push constant buffers. */
BEGIN_BATCH(5);
OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (5 - 2));
const brw_blorp_params *params,
uint32_t wm_bind_bo_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
GEN6_BINDING_TABLE_MODIFY_PS |
tile_x &= ~7;
tile_y &= ~7;
- intel_emit_post_sync_nonzero_flush(intel);
- intel_emit_depth_stall_flushes(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
+ intel_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
gen6_blorp_emit_depth_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
gen6_blorp_emit_clear_params(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
GEN5_DEPTH_CLEAR_VALID |
gen6_blorp_emit_drawing_rectangle(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
OUT_BATCH(0);
gen6_blorp_emit_viewport_state(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
struct brw_cc_viewport *ccv;
uint32_t cc_vp_offset;
gen6_blorp_emit_primitive(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(6);
OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
_3DPRIM_RECTLIST << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
* This function alters no GL state.
*/
void
-gen6_blorp_exec(struct intel_context *intel,
+gen6_blorp_exec(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct gl_context *ctx = &intel->ctx;
- struct brw_context *brw = brw_context(ctx);
brw_blorp_prog_data *prog_data = NULL;
uint32_t cc_blend_state_offset = 0;
uint32_t cc_state_offset = 0;
}
void
-gen6_blorp_exec(struct intel_context *intel,
+gen6_blorp_exec(struct brw_context *brw,
const brw_blorp_params *params);
#endif
static void
upload_gs_state(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
/* Disable all the constant buffers. */
BEGIN_BATCH(5);
OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));
unsigned num_samples, float coverage,
bool coverage_invert, unsigned sample_mask)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
if (num_samples > 1) {
}
/* 3DSTATE_MULTISAMPLE is nonpipelined. */
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
gen6_emit_3dstate_multisample(brw, num_samples);
gen6_emit_3dstate_sample_mask(brw, num_samples, coverage,
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
*/
static void
-write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
/* Emit workaround flushes: */
if (intel->gen == 6) {
/* The timestamp write below is a non-zero post-sync op, which on
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
*/
static void
-write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
/* Emit Sandybridge workaround flush: */
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(5);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
* function also performs a pipeline flush for proper synchronization.
*/
static void
-write_reg(struct intel_context *intel,
+write_reg(struct brw_context *brw,
drm_intel_bo *query_bo, uint32_t reg, int idx)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen >= 6);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
/* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
* read a full 64-bit register, we need to do two of them.
}
static void
-write_primitives_generated(struct intel_context *intel,
+write_primitives_generated(struct brw_context *brw,
drm_intel_bo *query_bo, int idx)
{
- write_reg(intel, query_bo, CL_INVOCATION_COUNT, idx);
+ write_reg(brw, query_bo, CL_INVOCATION_COUNT, idx);
}
static void
-write_xfb_primitives_written(struct intel_context *intel,
+write_xfb_primitives_written(struct brw_context *brw,
drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
if (intel->gen >= 7) {
- write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
+ write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
} else {
- write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
+ write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
}
}
gen6_queryobj_get_results(struct gl_context *ctx,
struct brw_query_object *query)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
if (query->bo == NULL)
* when mapped.
*/
if (drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (unlikely(intel->perf_debug)) {
if (drm_intel_bo_busy(query->bo)) {
static void
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
* obtain the time elapsed. Notably, this includes time elapsed while
* the system was doing other work, such as running other applications.
*/
- write_timestamp(intel, query->bo, 0);
+ write_timestamp(brw, query->bo, 0);
break;
case GL_ANY_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
case GL_SAMPLES_PASSED_ARB:
- write_depth_count(intel, query->bo, 0);
+ write_depth_count(brw, query->bo, 0);
break;
case GL_PRIMITIVES_GENERATED:
- write_primitives_generated(intel, query->bo, 0);
+ write_primitives_generated(brw, query->bo, 0);
break;
case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
- write_xfb_primitives_written(intel, query->bo, 0);
+ write_xfb_primitives_written(brw, query->bo, 0);
break;
default:
static void
gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
- write_timestamp(intel, query->bo, 1);
+ write_timestamp(brw, query->bo, 1);
break;
case GL_ANY_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
case GL_SAMPLES_PASSED_ARB:
- write_depth_count(intel, query->bo, 1);
+ write_depth_count(brw, query->bo, 1);
break;
case GL_PRIMITIVES_GENERATED:
- write_primitives_generated(intel, query->bo, 1);
+ write_primitives_generated(brw, query->bo, 1);
break;
case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
- write_xfb_primitives_written(intel, query->bo, 1);
+ write_xfb_primitives_written(brw, query->bo, 1);
break;
default:
*/
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
* the async query will return true in finite time.
*/
if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
gen6_queryobj_get_results(ctx, query);
static void
upload_sampler_state_pointers(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
VS_SAMPLER_STATE_CHANGE |
* simplicity, just do a full flush.
*/
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
static void
gen6_upload_urb( struct brw_context *brw )
{
- struct intel_context *intel = &brw->intel;
int nr_vs_entries, nr_gs_entries;
int total_urb_size = brw->urb.size * 1024; /* in bytes */
* a workaround.
*/
if (brw->urb.gen6_gs_previously_active && !brw->gs.prog_active)
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
brw->urb.gen6_gs_previously_active = brw->gs.prog_active;
}
static void upload_viewport_state_pointers(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS << 16 | (4 - 2) |
GEN6_CC_VIEWPORT_MODIFY |
* flush can be executed by sending a PIPE_CONTROL command with CS
* stall bit set and a post sync operation.
*/
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
if (brw->vs.push_const_size == 0) {
/* Disable the push constant buffers. */
* bug reports that led to this workaround, and may be more than
* what is strictly required to avoid the issue.
*/
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
const brw_blorp_params *params,
uint32_t cc_blend_state_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_BLEND_STATE_POINTERS << 16 | (2 - 2));
OUT_BATCH(cc_blend_state_offset | 1);
const brw_blorp_params *params,
uint32_t cc_state_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
OUT_BATCH(cc_state_offset | 1);
gen7_blorp_emit_cc_viewport(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
struct brw_cc_viewport *ccv;
uint32_t cc_vp_offset;
const brw_blorp_params *params,
uint32_t depthstencil_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_DEPTH_STENCIL_STATE_POINTERS << 16 | (2 - 2));
OUT_BATCH(depthstencil_offset | 1);
gen7_blorp_emit_vs_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 | (7 - 2));
OUT_BATCH(0);
gen7_blorp_emit_hs_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_CONSTANT_HS << 16 | (7 - 2));
OUT_BATCH(0);
gen7_blorp_emit_te_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_TE << 16 | (4 - 2));
OUT_BATCH(0);
gen7_blorp_emit_ds_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_CONSTANT_DS << 16 | (7 - 2));
OUT_BATCH(0);
gen7_blorp_emit_gs_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (7 - 2));
OUT_BATCH(0);
gen7_blorp_emit_streamout_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (3 - 2));
OUT_BATCH(0);
gen7_blorp_emit_sf_config(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
/* 3DSTATE_SF
*
* Disable ViewportTransformEnable (dw1.1)
const brw_blorp_params *params,
brw_blorp_prog_data *prog_data)
{
- struct intel_context *intel = &brw->intel;
-
uint32_t dw1 = 0, dw2 = 0;
switch (params->hiz_op) {
const brw_blorp_params *params,
uint32_t wm_bind_bo_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_PS << 16 | (2 - 2));
OUT_BATCH(wm_bind_bo_offset);
const brw_blorp_params *params,
uint32_t sampler_offset)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
OUT_BATCH(sampler_offset);
const brw_blorp_params *params,
uint32_t wm_push_const_offset)
{
- struct intel_context *intel = &brw->intel;
-
/* Make sure the push constants fill an exact integer number of
* registers.
*/
gen7_blorp_emit_constant_ps_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (7 - 2));
OUT_BATCH(0);
gen7_blorp_emit_depth_stencil_config(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
- struct gl_context *ctx = &intel->ctx;
+ struct gl_context *ctx = &brw->intel.ctx;
uint32_t draw_x = params->depth.x_offset;
uint32_t draw_y = params->depth.y_offset;
uint32_t tile_mask_x, tile_mask_y;
tile_x &= ~7;
tile_y &= ~7;
- intel_emit_depth_stall_flushes(intel);
+ intel_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
gen7_blorp_emit_depth_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
OUT_BATCH(BRW_DEPTHFORMAT_D32_FLOAT << 18 | (BRW_SURFACE_NULL << 29));
gen7_blorp_emit_clear_params(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(3);
OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
OUT_BATCH(params->depth.mt ? params->depth.mt->depth_clear_value : 0);
gen7_blorp_emit_primitive(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(7);
OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
OUT_BATCH(GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL |
* \copydoc gen6_blorp_exec()
*/
void
-gen7_blorp_exec(struct intel_context *intel,
+gen7_blorp_exec(struct brw_context *brw,
const brw_blorp_params *params)
{
- struct gl_context *ctx = &intel->ctx;
- struct brw_context *brw = brw_context(ctx);
brw_blorp_prog_data *prog_data = NULL;
uint32_t cc_blend_state_offset = 0;
uint32_t cc_state_offset = 0;
}
void
-gen7_blorp_exec(struct intel_context *intel,
+gen7_blorp_exec(struct brw_context *brw,
const brw_blorp_params *params);
#endif
static void
disable_stages(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
assert(!brw->gs.prog_active);
/* Disable the Geometry Shader (GS) Unit */
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
- intel_emit_depth_stall_flushes(intel);
+ intel_emit_depth_stall_flushes(brw);
/* _NEW_DEPTH, _NEW_STENCIL, _NEW_BUFFERS */
BEGIN_BATCH(7);
continue;
}
- bo = intel_bufferobj_buffer(intel, bufferobj, INTEL_WRITE_PART);
+ bo = intel_bufferobj_buffer(brw, bufferobj, INTEL_WRITE_PART);
stride = linked_xfb_info->BufferStride[i] * 4;
start = xfb_obj->Offset[i];
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = &brw->intel;
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
intel->batch.needs_sol_reset = true;
}
* This also covers any cache flushing required.
*/
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
/* GS requirement */
assert(!brw->gs.prog_active);
- gen7_emit_vs_workaround_flush(intel);
+ gen7_emit_vs_workaround_flush(brw);
gen7_emit_urb_state(brw, brw->urb.nr_vs_entries, vs_size, brw->urb.vs_start);
}
gen7_emit_urb_state(struct brw_context *brw, GLuint nr_vs_entries,
GLuint vs_size, GLuint vs_start)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
OUT_BATCH(nr_vs_entries |
static void upload_cc_viewport_state_pointer(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
-
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS_CC << 16 | (2 - 2));
OUT_BATCH(brw->cc.vp_offset);
const int max_threads_shift = brw->intel.is_haswell ?
HSW_VS_MAX_THREADS_SHIFT : GEN6_VS_MAX_THREADS_SHIFT;
- gen7_emit_vs_workaround_flush(intel);
+ gen7_emit_vs_workaround_flush(brw);
/* BRW_NEW_VS_BINDING_TABLE */
BEGIN_BATCH(2);
8 * 4, 32, &binding_table[surf_index]);
memset(surf, 0, 8 * 4);
- uint32_t tex_format = translate_tex_format(intel,
+ uint32_t tex_format = translate_tex_format(brw,
mt->format,
tObj->DepthMode,
sampler->sRGBDecode);
/* Render targets can't use IMS layout */
assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
- assert(brw_render_target_supported(intel, rb));
+ assert(brw_render_target_supported(brw, rb));
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
_mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
#include "brw_context.h"
static void
-intel_batchbuffer_reset(struct intel_context *intel);
+intel_batchbuffer_reset(struct brw_context *brw);
struct cached_batch_item {
struct cached_batch_item *next;
uint16_t size;
};
-static void clear_cache( struct intel_context *intel )
+static void
+clear_cache(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
struct cached_batch_item *item = intel->batch.cached_items;
while (item) {
}
void
-intel_batchbuffer_init(struct intel_context *intel)
+intel_batchbuffer_init(struct brw_context *brw)
{
- intel_batchbuffer_reset(intel);
+ struct intel_context *intel = &brw->intel;
+ intel_batchbuffer_reset(brw);
if (intel->gen >= 6) {
/* We can't just use brw_state_batch to get a chunk of space for
}
static void
-intel_batchbuffer_reset(struct intel_context *intel)
+intel_batchbuffer_reset(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
if (intel->batch.last_bo != NULL) {
drm_intel_bo_unreference(intel->batch.last_bo);
intel->batch.last_bo = NULL;
}
intel->batch.last_bo = intel->batch.bo;
- clear_cache(intel);
+ clear_cache(brw);
intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
BATCH_SZ, 4096);
}
void
-intel_batchbuffer_save_state(struct intel_context *intel)
+intel_batchbuffer_save_state(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
intel->batch.saved.used = intel->batch.used;
intel->batch.saved.reloc_count =
drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
}
void
-intel_batchbuffer_reset_to_saved(struct intel_context *intel)
+intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
intel->batch.used = intel->batch.saved.used;
/* Cached batch state is dead, since we just cleared some unknown part of the
* batchbuffer. Assume that the caller resets any other state necessary.
*/
- clear_cache(intel);
+ clear_cache(brw);
}
void
-intel_batchbuffer_free(struct intel_context *intel)
+intel_batchbuffer_free(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
free(intel->batch.cpu_map);
drm_intel_bo_unreference(intel->batch.last_bo);
drm_intel_bo_unreference(intel->batch.bo);
drm_intel_bo_unreference(intel->batch.workaround_bo);
- clear_cache(intel);
+ clear_cache(brw);
}
static void
-do_batch_dump(struct intel_context *intel)
+do_batch_dump(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
struct drm_intel_decode *decode;
struct intel_batchbuffer *batch = &intel->batch;
int ret;
if (ret == 0) {
drm_intel_bo_unmap(batch->bo);
- brw_debug_batch(intel);
+ brw_debug_batch(brw);
}
}
/* TODO: Push this whole function into bufmgr.
*/
static int
-do_flush_locked(struct intel_context *intel)
+do_flush_locked(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
struct intel_batchbuffer *batch = &intel->batch;
int ret = 0;
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB))
- brw_annotate_aub(intel);
+ brw_annotate_aub(brw);
if (intel->hw_ctx == NULL || batch->is_blit) {
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
flags);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
- do_batch_dump(intel);
+ do_batch_dump(brw);
if (ret != 0) {
fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
exit(1);
}
- intel->vtbl.new_batch(intel);
+ intel->vtbl.new_batch(brw);
return ret;
}
int
-_intel_batchbuffer_flush(struct intel_context *intel,
+_intel_batchbuffer_flush(struct brw_context *brw,
const char *file, int line)
{
+ struct intel_context *intel = &brw->intel;
int ret;
if (intel->batch.used == 0)
intel->batch.reserved_space = 0;
if (intel->vtbl.finish_batch)
- intel->vtbl.finish_batch(intel);
+ intel->vtbl.finish_batch(brw);
/* Mark the end of the buffer. */
- intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+ intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
if (intel->batch.used & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
- intel_batchbuffer_emit_dword(intel, MI_NOOP);
+ intel_batchbuffer_emit_dword(brw, MI_NOOP);
}
- intel_upload_finish(intel);
+ intel_upload_finish(brw);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
- ret = do_flush_locked(intel);
+ ret = do_flush_locked(brw);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
/* Reset the buffer:
*/
- intel_batchbuffer_reset(intel);
+ intel_batchbuffer_reset(brw);
return ret;
}
/* This is the only way buffers get added to the validate list.
*/
bool
-intel_batchbuffer_emit_reloc(struct intel_context *intel,
+intel_batchbuffer_emit_reloc(struct brw_context *brw,
drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
+ struct intel_context *intel = &brw->intel;
int ret;
ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
* the buffer doesn't move and we can short-circuit the relocation processing
* in the kernel
*/
- intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+ intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
return true;
}
bool
-intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t delta)
{
+ struct intel_context *intel = &brw->intel;
int ret;
ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
* be, in case the buffer doesn't move and we can short-circuit the
* relocation processing in the kernel
*/
- intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+ intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
return true;
}
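
The comment above describes the optimistic write that follows each relocation. A minimal stand-alone model of that ordering, with invented types in place of libdrm (the real path goes through drm_intel_bo_emit_reloc()), looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model only: record the relocation, then optimistically write the
     * presumed address (bo->offset + delta) into the batch so the kernel can
     * skip patching the dword if the BO did not move. */
    struct toy_bo    { uint64_t offset; };
    struct toy_reloc { unsigned dword; struct toy_bo *bo; uint32_t delta; };

    static uint32_t batch[64];
    static unsigned used;
    static struct toy_reloc relocs[16];
    static unsigned nr_relocs;

    static void
    emit_reloc(struct toy_bo *bo, uint32_t delta)
    {
       relocs[nr_relocs++] = (struct toy_reloc){ used, bo, delta };
       batch[used++] = (uint32_t)(bo->offset + delta);   /* presumed address */
    }

    int main(void)
    {
       struct toy_bo vbo = { .offset = 0x10000 };
       emit_reloc(&vbo, 256);
       printf("dword %u = 0x%08x\n", relocs[0].dword,
              (unsigned) batch[relocs[0].dword]);
       return 0;
    }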
void
-intel_batchbuffer_data(struct intel_context *intel,
+intel_batchbuffer_data(struct brw_context *brw,
const void *data, GLuint bytes, bool is_blit)
{
+ struct intel_context *intel = &brw->intel;
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(intel, bytes, is_blit);
+ intel_batchbuffer_require_space(brw, bytes, is_blit);
__memcpy(intel->batch.map + intel->batch.used, data, bytes);
intel->batch.used += bytes >> 2;
}
void
-intel_batchbuffer_cached_advance(struct intel_context *intel)
+intel_batchbuffer_cached_advance(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
struct cached_batch_item **prev = &intel->batch.cached_items, *item;
uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
uint32_t *start = intel->batch.map + intel->batch.emit;
* already flushed (e.g., via a preceding MI_FLUSH).
*/
void
-intel_emit_depth_stall_flushes(struct intel_context *intel)
+intel_emit_depth_stall_flushes(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen >= 6 && intel->gen <= 7);
BEGIN_BATCH(4);
* to be sent before any combination of VS associated 3DSTATE."
*/
void
-gen7_emit_vs_workaround_flush(struct intel_context *intel)
+gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen == 7);
BEGIN_BATCH(4);
* really our business. That leaves only stall at scoreboard.
*/
void
-intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
+intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
if (!intel->batch.need_workaround_flush)
return;
* This is also used for the always_flush_cache driconf debug option.
*/
void
-intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
+intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
if (intel->gen >= 6) {
if (intel->batch.is_blit) {
BEGIN_BATCH_BLT(4);
* Flush Enable =1, a PIPE_CONTROL with any non-zero
* post-sync-op is required.
*/
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
}
BEGIN_BATCH(4);
struct intel_batchbuffer;
-void intel_batchbuffer_init(struct intel_context *intel);
-void intel_batchbuffer_free(struct intel_context *intel);
-void intel_batchbuffer_save_state(struct intel_context *intel);
-void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
+void intel_batchbuffer_init(struct brw_context *brw);
+void intel_batchbuffer_free(struct brw_context *brw);
+void intel_batchbuffer_save_state(struct brw_context *brw);
+void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
-int _intel_batchbuffer_flush(struct intel_context *intel,
+int _intel_batchbuffer_flush(struct brw_context *brw,
const char *file, int line);
#define intel_batchbuffer_flush(intel) \
* Consider it a convenience function wrapping multiple
* intel_buffer_dword() calls.
*/
-void intel_batchbuffer_data(struct intel_context *intel,
+void intel_batchbuffer_data(struct brw_context *brw,
const void *data, GLuint bytes, bool is_blit);
-bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
-bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
-void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
-void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
-void intel_emit_depth_stall_flushes(struct intel_context *intel);
-void gen7_emit_vs_workaround_flush(struct intel_context *intel);
+void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
+void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
+void intel_emit_depth_stall_flushes(struct brw_context *brw);
+void gen7_emit_vs_workaround_flush(struct brw_context *brw);
static INLINE uint32_t float_as_int(float f)
{
* work...
*/
static INLINE unsigned
-intel_batchbuffer_space(struct intel_context *intel)
+intel_batchbuffer_space(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
return (intel->batch.state_batch_offset - intel->batch.reserved_space)
- intel->batch.used*4;
}
static INLINE void
-intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
+intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
+ struct intel_context *intel = &brw->intel;
#ifdef DEBUG
- assert(intel_batchbuffer_space(intel) >= 4);
+ assert(intel_batchbuffer_space(brw) >= 4);
#endif
intel->batch.map[intel->batch.used++] = dword;
}
static INLINE void
-intel_batchbuffer_emit_float(struct intel_context *intel, float f)
+intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
- intel_batchbuffer_emit_dword(intel, float_as_int(f));
+ intel_batchbuffer_emit_dword(brw, float_as_int(f));
}
static INLINE void
-intel_batchbuffer_require_space(struct intel_context *intel,
- GLuint sz, int is_blit)
+intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
{
-
+ struct intel_context *intel = &brw->intel;
if (intel->gen >= 6 &&
intel->batch.is_blit != is_blit && intel->batch.used) {
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
intel->batch.is_blit = is_blit;
#ifdef DEBUG
assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
- if (intel_batchbuffer_space(intel) < sz)
- intel_batchbuffer_flush(intel);
+ if (intel_batchbuffer_space(brw) < sz)
+ intel_batchbuffer_flush(brw);
}
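
Since intel_batchbuffer_require_space() is the gatekeeper for every emit, a toy version of its decision logic may make the two flush triggers easier to see: a render/blit ring switch on Gen6+, or too little room left for the requested dwords. Everything below is simplified stand-in code with made-up sizes, not the driver's structures.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_batch {
       unsigned used;            /* dwords emitted so far */
       unsigned state_offset;    /* start of state space, in bytes */
       unsigned reserved;        /* bytes kept back for MI_BATCH_BUFFER_END etc. */
       bool is_blit;
    };

    static unsigned space(const struct toy_batch *b)
    {
       return (b->state_offset - b->reserved) - b->used * 4;
    }

    static void require_space(struct toy_batch *b, unsigned sz, bool is_blit,
                              int gen)
    {
       if (gen >= 6 && b->is_blit != is_blit && b->used) {
          printf("flush: ring switch\n");
          b->used = 0;                   /* stands in for a real flush */
       }
       b->is_blit = is_blit;
       if (space(b) < sz) {
          printf("flush: only %u bytes left, need %u\n", space(b), sz);
          b->used = 0;
       }
    }

    int main(void)
    {
       struct toy_batch b = { .used = 2040, .state_offset = 8192,
                              .reserved = 16, .is_blit = false };
       require_space(&b, 64, true, 7);   /* forces a ring-switch flush */
       return 0;
    }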
static INLINE void
-intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
+intel_batchbuffer_begin(struct brw_context *brw, int n, bool is_blit)
{
- intel_batchbuffer_require_space(intel, n * 4, is_blit);
+ struct intel_context *intel = &brw->intel;
+ intel_batchbuffer_require_space(brw, n * 4, is_blit);
intel->batch.emit = intel->batch.used;
#ifdef DEBUG
}
static INLINE void
-intel_batchbuffer_advance(struct intel_context *intel)
+intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
+ struct intel_context *intel = &brw->intel;
struct intel_batchbuffer *batch = &intel->batch;
unsigned int _n = batch->used - batch->emit;
assert(batch->total != 0);
#endif
}
-void intel_batchbuffer_cached_advance(struct intel_context *intel);
+void intel_batchbuffer_cached_advance(struct brw_context *brw);
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
-#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
-#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
-#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
-#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, false)
+#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, true)
+#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
+#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
- intel_batchbuffer_emit_reloc(intel, buf, \
+ intel_batchbuffer_emit_reloc(brw, buf, \
read_domains, write_domain, delta); \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
- intel_batchbuffer_emit_reloc_fenced(intel, buf, \
+ intel_batchbuffer_emit_reloc_fenced(brw, buf, \
read_domains, write_domain, delta); \
} while (0)
-#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
-#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
+#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
+#define CACHED_BATCH() intel_batchbuffer_cached_advance(brw);
#ifdef __cplusplus
}
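
Because BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH now expand against a local named brw, an emitter only needs struct brw_context *brw in scope, which is why so many "struct intel_context *intel = &brw->intel;" locals disappear in the hunks above. A self-contained toy of the macro-capture pattern follows; the macros, types, and opcode dword here are invented for illustration, not the driver API.

    #include <stdio.h>

    struct intel_context { unsigned map[16]; unsigned used; };
    struct brw_context   { struct intel_context intel; };

    /* The macros reference `brw`, so it must be a visible local/parameter. */
    #define BEGIN_BATCH(n)  (void)(n)
    #define OUT_BATCH(d)    (brw->intel.map[brw->intel.used++] = (d))
    #define ADVANCE_BATCH() ((void)0)

    static void
    emit_example_state(struct brw_context *brw)
    {
       BEGIN_BATCH(2);
       OUT_BATCH(0x780b0000 | (2 - 2));   /* made-up packet header */
       OUT_BATCH(0);
       ADVANCE_BATCH();
    }

    int main(void)
    {
       struct brw_context brw = { .intel = { .used = 0 } };
       emit_example_state(&brw);
       printf("emitted %u dwords\n", brw.intel.used);
       return 0;
    }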
#define FILE_DEBUG_FLAG DEBUG_BLIT
static void
-intel_miptree_set_alpha_to_one(struct intel_context *intel,
+intel_miptree_set_alpha_to_one(struct brw_context *brw,
struct intel_mipmap_tree *mt,
int x, int y, int width, int height);
* server).
*/
static void
-set_blitter_tiling(struct intel_context *intel,
+set_blitter_tiling(struct brw_context *brw,
bool dst_y_tiled, bool src_y_tiled)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen >= 6);
/* Idle the blitter before we update how tiling is interpreted. */
#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) do { \
BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0)); \
if (dst_y_tiled || src_y_tiled) \
- set_blitter_tiling(intel, dst_y_tiled, src_y_tiled); \
+ set_blitter_tiling(brw, dst_y_tiled, src_y_tiled); \
} while (0)
#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) do { \
if (dst_y_tiled || src_y_tiled) \
- set_blitter_tiling(intel, false, false); \
+ set_blitter_tiling(brw, false, false); \
ADVANCE_BATCH(); \
} while (0)
* renderbuffers/textures.
*/
bool
-intel_miptree_blit(struct intel_context *intel,
+intel_miptree_blit(struct brw_context *brw,
struct intel_mipmap_tree *src_mt,
int src_level, int src_slice,
uint32_t src_x, uint32_t src_y, bool src_flip,
uint32_t width, uint32_t height,
GLenum logicop)
{
+ struct intel_context *intel = &brw->intel;
/* No sRGB decode or encode is done by the hardware blitter, which is
* consistent with what we want in the callers (glCopyTexSubImage(),
* glBlitFramebuffer(), texture validation, etc.).
/* The blitter has no idea about HiZ or fast color clears, so we need to
* resolve the miptrees before we do anything.
*/
- intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_slice);
- intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_slice);
- intel_miptree_resolve_color(intel, src_mt);
- intel_miptree_resolve_color(intel, dst_mt);
+ intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_slice);
+ intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_slice);
+ intel_miptree_resolve_color(brw, src_mt);
+ intel_miptree_resolve_color(brw, dst_mt);
if (src_flip)
src_y = src_mt->level[src_level].height - src_y - height;
dst_x += dst_image_x;
dst_y += dst_image_y;
- if (!intelEmitCopyBlit(intel,
+ if (!intelEmitCopyBlit(brw,
src_mt->cpp,
src_pitch,
src_mt->region->bo, src_mt->offset,
if (src_mt->format == MESA_FORMAT_XRGB8888 &&
dst_mt->format == MESA_FORMAT_ARGB8888) {
- intel_miptree_set_alpha_to_one(intel, dst_mt,
+ intel_miptree_set_alpha_to_one(brw, dst_mt,
dst_x, dst_y,
width, height);
}
/* Copy BitBlt
*/
bool
-intelEmitCopyBlit(struct intel_context *intel,
+intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
GLshort src_pitch,
drm_intel_bo *src_buffer,
GLshort w, GLshort h,
GLenum logic_op)
{
+ struct intel_context *intel = &brw->intel;
GLuint CMD, BR13, pass = 0;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;
aper_array[2] = src_buffer;
if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
pass++;
} else
break;
if (pass >= 2)
return false;
- intel_batchbuffer_require_space(intel, 8 * 4, true);
+ intel_batchbuffer_require_space(brw, 8 * 4, true);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__,
src_buffer, src_pitch, src_offset, src_x, src_y,
ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
return true;
}
bool
-intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
__FUNCTION__,
dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
- intel_batchbuffer_require_space(intel,
- (8 * 4) +
- (3 * 4) +
- dwords * 4, true);
+ intel_batchbuffer_require_space(brw, (8 * 4) + (3 * 4) + dwords * 4, true);
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
OUT_BATCH(((y + h) << 16) | (x + w));
ADVANCE_BATCH();
- intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+ intel_batchbuffer_data(brw, src_bits, dwords * 4, true);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
return true;
}
* end to cover the last if we need.
*/
void
-intel_emit_linear_blit(struct intel_context *intel,
+intel_emit_linear_blit(struct brw_context *brw,
drm_intel_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
unsigned int src_offset,
unsigned int size)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
GLuint pitch, height;
bool ok;
*/
pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
height = (pitch == 0) ? 1 : size / pitch;
- ok = intelEmitCopyBlit(intel, 1,
+ ok = intelEmitCopyBlit(brw, 1,
pitch, src_bo, src_offset, I915_TILING_NONE,
pitch, dst_bo, dst_offset, I915_TILING_NONE,
0, 0, /* src x/y */
assert (size < (1 << 15));
pitch = ALIGN(size, 4);
if (size != 0) {
- ok = intelEmitCopyBlit(intel, 1,
+ ok = intelEmitCopyBlit(brw, 1,
pitch, src_bo, src_offset, I915_TILING_NONE,
pitch, dst_bo, dst_offset, I915_TILING_NONE,
0, 0, /* src x/y */
* miptree.
*/
static void
-intel_miptree_set_alpha_to_one(struct intel_context *intel,
+intel_miptree_set_alpha_to_one(struct brw_context *brw,
struct intel_mipmap_tree *mt,
int x, int y, int width, int height)
{
+ struct intel_context *intel = &brw->intel;
struct intel_region *region = mt->region;
uint32_t BR13, CMD;
int pitch, cpp;
if (drm_intel_bufmgr_check_aperture_space(aper_array,
ARRAY_SIZE(aper_array)) != 0) {
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
bool dst_y_tiled = region->tiling == I915_TILING_Y;
OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
ADVANCE_BATCH_TILED(dst_y_tiled, false);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
#include "brw_context.h"
bool
-intelEmitCopyBlit(struct intel_context *intel,
+intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
GLshort src_pitch,
drm_intel_bo *src_buffer,
GLshort w, GLshort h,
GLenum logicop );
-bool intel_miptree_blit(struct intel_context *intel,
+bool intel_miptree_blit(struct brw_context *brw,
struct intel_mipmap_tree *src_mt,
int src_level, int src_slice,
uint32_t src_x, uint32_t src_y, bool src_flip,
GLenum logicop);
bool
-intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLenum logic_op);
-void intel_emit_linear_blit(struct intel_context *intel,
+void intel_emit_linear_blit(struct brw_context *brw,
drm_intel_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
-intel_bufferobj_alloc_buffer(struct intel_context *intel,
+intel_bufferobj_alloc_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj)
{
- struct brw_context *brw = brw_context(&intel->ctx);
-
+ struct intel_context *intel = &brw->intel;
intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
intel_obj->Base.Size, 64);
const GLvoid * data,
GLenum usage, struct gl_buffer_object *obj)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* Part of the ABI, but this function doesn't use it.
release_buffer(intel_obj);
if (size != 0) {
- intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(brw, intel_obj);
if (!intel_obj->buffer)
return false;
GLsizeiptrARB size,
const GLvoid * data, struct gl_buffer_object *obj)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
bool busy;
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo with fresh data. */
drm_intel_bo_unreference(intel_obj->buffer);
- intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(brw, intel_obj);
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
} else {
perf_debug("Using a blit copy to avoid stalling on %ldb "
drm_intel_bo_subdata(temp_bo, 0, size, data);
- intel_emit_linear_blit(intel,
+ intel_emit_linear_blit(brw,
intel_obj->buffer, offset,
temp_bo, 0,
size);
GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
assert(intel_obj);
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
GLintptr offset, GLsizeiptr length,
GLbitfield access, struct gl_buffer_object *obj)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_intel_bo_unreference(intel_obj->buffer);
- intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(brw, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
"object\n");
} else if (drm_intel_bo_busy(intel_obj->buffer) &&
(access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
drm_intel_bo_unreference(intel_obj->buffer);
- intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(brw, intel_obj);
}
}
GLintptr offset, GLsizeiptr length,
struct gl_buffer_object *obj)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
drm_intel_bo *temp_bo;
drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
- intel_emit_linear_blit(intel,
+ intel_emit_linear_blit(brw,
intel_obj->buffer, obj->Offset + offset,
temp_bo, 0,
length);
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
free(intel_obj->range_map_buffer);
intel_obj->range_map_buffer = NULL;
} else if (intel_obj->range_map_bo != NULL) {
drm_intel_bo_unmap(intel_obj->range_map_bo);
- intel_emit_linear_blit(intel,
+ intel_emit_linear_blit(brw,
intel_obj->buffer, obj->Offset,
intel_obj->range_map_bo, 0,
obj->Length);
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
drm_intel_bo_unreference(intel_obj->range_map_bo);
intel_obj->range_map_bo = NULL;
}
drm_intel_bo *
-intel_bufferobj_buffer(struct intel_context *intel,
+intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
GLuint flag)
{
if (intel_obj->buffer == NULL)
- intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(brw, intel_obj);
return intel_obj->buffer;
}
#define INTEL_UPLOAD_SIZE (64*1024)
void
-intel_upload_finish(struct intel_context *intel)
+intel_upload_finish(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
if (!intel->upload.bo)
return;
intel->upload.bo = NULL;
}
-static void wrap_buffers(struct intel_context *intel, GLuint size)
+static void wrap_buffers(struct brw_context *brw, GLuint size)
{
- intel_upload_finish(intel);
+ struct intel_context *intel = &brw->intel;
+ intel_upload_finish(brw);
if (size < INTEL_UPLOAD_SIZE)
size = INTEL_UPLOAD_SIZE;
intel->upload.offset = 0;
}
-void intel_upload_data(struct intel_context *intel,
+void intel_upload_data(struct brw_context *brw,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset)
{
+ struct intel_context *intel = &brw->intel;
GLuint base, delta;
base = (intel->upload.offset + align - 1) / align * align;
if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
- wrap_buffers(intel, size);
+ wrap_buffers(brw, size);
base = 0;
}
intel->upload.offset = base + size;
}
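
The upload-buffer arithmetic above rounds the write offset up to the requested alignment and wraps to a fresh buffer when the aligned range would not fit. A small stand-alone example of that calculation (sizes are illustrative; INTEL_UPLOAD_SIZE matches the define above):

    #include <stdio.h>

    /* Round `offset` up to the next multiple of `align` (align > 0), using
     * the same integer-division form as intel_upload_data() above. */
    static unsigned align_up(unsigned offset, unsigned align)
    {
       return (offset + align - 1) / align * align;
    }

    int main(void)
    {
       unsigned upload_size = 64 * 1024;        /* INTEL_UPLOAD_SIZE */
       unsigned offset = 100;
       unsigned base = align_up(offset, 64);    /* -> 128 */
       unsigned size = 4096;

       if (base + size > upload_size)           /* would not fit: wrap */
          base = 0;

       printf("base = %u, next offset = %u\n", base, base + size);
       return 0;
    }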
-void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
+void *intel_upload_map(struct brw_context *brw, GLuint size, GLuint align)
{
+ struct intel_context *intel = &brw->intel;
GLuint base, delta;
char *ptr;
base = (intel->upload.offset + align - 1) / align * align;
if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
- wrap_buffers(intel, size);
+ wrap_buffers(brw, size);
base = 0;
}
return ptr;
}
-void intel_upload_unmap(struct intel_context *intel,
+void intel_upload_unmap(struct brw_context *brw,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset)
{
+ struct intel_context *intel = &brw->intel;
GLuint base;
base = (intel->upload.offset + align - 1) / align * align;
}
drm_intel_bo *
-intel_bufferobj_source(struct intel_context *intel,
+intel_bufferobj_source(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
GLuint align, GLuint *offset)
{
GLintptr read_offset, GLintptr write_offset,
GLsizeiptr size)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
drm_intel_bo *src_bo, *dst_bo;
if (size == 0)
return;
- dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
- src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
+ dst_bo = intel_bufferobj_buffer(brw, intel_dst, INTEL_WRITE_PART);
+ src_bo = intel_bufferobj_source(brw, intel_src, 64, &src_offset);
- intel_emit_linear_blit(intel,
+ intel_emit_linear_blit(brw,
dst_bo, write_offset,
src_bo, read_offset + src_offset, size);
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
static GLenum
return GL_RELEASED_APPLE;
} else {
/* XXX Create the buffer and madvise(MADV_DONTNEED)? */
- struct intel_context *intel = intel_context(ctx);
- drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);
+ struct brw_context *brw = brw_context(ctx);
+ drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_obj, INTEL_READ);
return intel_buffer_purgeable(bo);
}
/* Get the bm buffer associated with a GL bufferobject:
*/
-drm_intel_bo *intel_bufferobj_buffer(struct intel_context *intel,
+drm_intel_bo *intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *obj,
GLuint flag);
-drm_intel_bo *intel_bufferobj_source(struct intel_context *intel,
+drm_intel_bo *intel_bufferobj_source(struct brw_context *brw,
struct intel_buffer_object *obj,
GLuint align,
GLuint *offset);
-void intel_upload_data(struct intel_context *intel,
+void intel_upload_data(struct brw_context *brw,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset);
-void *intel_upload_map(struct intel_context *intel,
+void *intel_upload_map(struct brw_context *brw,
GLuint size, GLuint align);
-void intel_upload_unmap(struct intel_context *intel,
+void intel_upload_unmap(struct brw_context *brw,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset);
-void intel_upload_finish(struct intel_context *intel);
+void intel_upload_finish(struct brw_context *brw);
/* Hook the bufferobject implementation into mesa:
*/
* If so, set the intel->front_buffer_dirty field to true.
*/
void
-intel_check_front_buffer_rendering(struct intel_context *intel)
+intel_check_front_buffer_rendering(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
if (_mesa_is_winsys_fbo(fb)) {
/* drawing to window system buffer */
struct intel_context;
struct intel_framebuffer;
-extern void intel_check_front_buffer_rendering(struct intel_context *intel);
+extern void intel_check_front_buffer_rendering(struct brw_context *brw);
extern void intelInitBufferFuncs(struct dd_function_table *functions);
}
void
-intel_resolve_for_dri2_flush(struct intel_context *intel,
+intel_resolve_for_dri2_flush(struct brw_context *brw,
__DRIdrawable *drawable)
{
+ struct intel_context *intel = &brw->intel;
if (intel->gen < 6) {
/* MSAA and fast color clear are not supported, so don't waste time
* checking whether a resolve is needed.
if (rb == NULL || rb->mt == NULL)
continue;
if (rb->mt->num_samples <= 1)
- intel_miptree_resolve_color(intel, rb->mt);
+ intel_miptree_resolve_color(brw, rb->mt);
else
- intel_miptree_downsample(intel, rb->mt);
+ intel_miptree_downsample(brw, rb->mt);
}
}
static void
intel_flush_front(struct gl_context *ctx)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *driDrawable = driContext->driDrawablePriv;
* performance. And no one cares about front-buffer render
* performance.
*/
- intel_resolve_for_dri2_flush(intel, driDrawable);
+ intel_resolve_for_dri2_flush(brw, driDrawable);
screen->dri2.loader->flushFrontBuffer(driDrawable,
driDrawable->loaderPrivate);
}
static void
-intel_query_dri2_buffers(struct intel_context *intel,
+intel_query_dri2_buffers(struct brw_context *brw,
__DRIdrawable *drawable,
__DRIbuffer **buffers,
int *count);
static void
-intel_process_dri2_buffer(struct intel_context *intel,
+intel_process_dri2_buffer(struct brw_context *brw,
__DRIdrawable *drawable,
__DRIbuffer *buffer,
struct intel_renderbuffer *rb,
{
struct gl_framebuffer *fb = drawable->driverPrivate;
struct intel_renderbuffer *rb;
- struct intel_context *intel = context->driverPrivate;
+ struct brw_context *brw = context->driverPrivate;
+ struct intel_context *intel = &brw->intel;
__DRIbuffer *buffers = NULL;
int i, count;
const char *region_name;
if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
- intel_query_dri2_buffers(intel, drawable, &buffers, &count);
+ intel_query_dri2_buffers(brw, drawable, &buffers, &count);
if (buffers == NULL)
return;
return;
}
- intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
+ intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
}
driUpdateFramebufferSize(&intel->ctx, drawable);
* state is required.
*/
void
-intel_prepare_render(struct intel_context *intel)
+intel_prepare_render(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *drawable;
void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
if (intel->batch.used)
- _intel_batchbuffer_flush(intel, file, line);
+ _intel_batchbuffer_flush(brw, file, line);
}
static void
}
bool
-intelInitContext(struct intel_context *intel,
+intelInitContext(struct brw_context *brw,
int api,
unsigned major_version,
unsigned minor_version,
struct dd_function_table *functions,
unsigned *dri_ctx_error)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
__DRIscreen *sPriv = driContextPriv->driScreenPriv;
if (INTEL_DEBUG & DEBUG_AUB)
drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
- intel_batchbuffer_init(intel);
+ intel_batchbuffer_init(brw);
- intel_fbo_init(intel);
+ intel_fbo_init(brw);
if (!driQueryOptionb(&intel->optionCache, "hiz")) {
intel->has_hiz = false;
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
- struct intel_context *intel =
- (struct intel_context *) driContextPriv->driverPrivate;
+ struct brw_context *brw =
+ (struct brw_context *) driContextPriv->driverPrivate;
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
assert(intel); /* should never be null */
if (intel) {
/* Dump a final BMP in case the application doesn't call SwapBuffers */
if (INTEL_DEBUG & DEBUG_AUB) {
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
aub_dump_bmp(&intel->ctx);
}
_mesa_meta_free(&intel->ctx);
- intel->vtbl.destroy(intel);
+ intel->vtbl.destroy(brw);
if (ctx->swrast_context) {
_swsetup_DestroyContext(&intel->ctx);
if (ctx->swrast_context)
_swrast_DestroyContext(&intel->ctx);
- intel_batchbuffer_free(intel);
+ intel_batchbuffer_free(brw);
drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;
* yet), we go turn that back off before anyone finds out.
*/
static void
-intel_gles3_srgb_workaround(struct intel_context *intel,
+intel_gles3_srgb_workaround(struct brw_context *brw,
struct gl_framebuffer *fb)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
__DRIdrawable * driDrawPriv,
__DRIdrawable * driReadPriv)
{
- struct intel_context *intel;
+ struct brw_context *brw;
GET_CURRENT_CONTEXT(curCtx);
if (driContextPriv)
- intel = (struct intel_context *) driContextPriv->driverPrivate;
+ brw = (struct brw_context *) driContextPriv->driverPrivate;
else
- intel = NULL;
+ brw = NULL;
/* According to the glXMakeCurrent() man page: "Pending commands to
* the previous context, if any, are flushed before it is released."
* But only flush if we're actually changing contexts.
*/
- if (intel_context(curCtx) && intel_context(curCtx) != intel) {
+ if (brw_context(curCtx) && brw_context(curCtx) != brw) {
_mesa_flush(curCtx);
}
if (driContextPriv) {
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct gl_framebuffer *fb, *readFb;
driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
_mesa_make_current(ctx, fb, readFb);
- intel_gles3_srgb_workaround(intel, ctx->WinSysDrawBuffer);
- intel_gles3_srgb_workaround(intel, ctx->WinSysReadBuffer);
+ intel_gles3_srgb_workaround(brw, ctx->WinSysDrawBuffer);
+ intel_gles3_srgb_workaround(brw, ctx->WinSysReadBuffer);
}
else {
_mesa_make_current(NULL, NULL, NULL);
* \see DRI2GetBuffersWithFormat()
*/
static void
-intel_query_dri2_buffers(struct intel_context *intel,
+intel_query_dri2_buffers(struct brw_context *brw,
__DRIdrawable *drawable,
__DRIbuffer **buffers,
int *buffer_count)
{
+ struct intel_context *intel = &brw->intel;
__DRIscreen *screen = intel->intelScreen->driScrnPriv;
struct gl_framebuffer *fb = drawable->driverPrivate;
int i = 0;
* \see intel_region_alloc_for_handle()
*/
static void
-intel_process_dri2_buffer(struct intel_context *intel,
+intel_process_dri2_buffer(struct brw_context *brw,
__DRIdrawable *drawable,
__DRIbuffer *buffer,
struct intel_renderbuffer *rb,
const char *buffer_name)
{
+ struct intel_context *intel = &brw->intel;
struct intel_region *region = NULL;
if (!rb)
if (!region)
return;
- rb->mt = intel_miptree_create_for_dri2_buffer(intel,
+ rb->mt = intel_miptree_create_for_dri2_buffer(brw,
buffer->attachment,
intel_rb_format(rb),
num_samples,
struct
{
- void (*destroy) (struct intel_context * intel);
- void (*finish_batch) (struct intel_context * intel);
- void (*new_batch) (struct intel_context * intel);
+ void (*destroy) (struct brw_context * brw);
+ void (*finish_batch) (struct brw_context * brw);
+ void (*new_batch) (struct brw_context * brw);
void (*update_texture_surface)(struct gl_context *ctx,
unsigned unit,
* intel_context.c:
*/
-extern bool intelInitContext(struct intel_context *intel,
+extern bool intelInitContext(struct brw_context *brw,
int api,
unsigned major_version,
unsigned minor_version,
void intel_update_renderbuffers(__DRIcontext *context,
__DRIdrawable *drawable);
-void intel_prepare_render(struct intel_context *intel);
+void intel_prepare_render(struct brw_context *brw);
void
-intel_resolve_for_dri2_flush(struct intel_context *intel,
+intel_resolve_for_dri2_flush(struct brw_context *brw,
__DRIdrawable *drawable);
extern void
GLubyte **out_map,
GLint *out_stride)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
void *map;
return;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
/* For a window-system renderbuffer, we need to flip the mapping we receive
* upside-down. So we need to ask for a rectangle on flipped vertically, and
y = rb->Height - y - h;
}
- intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+ intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
x, y, w, h, mode, &map, &stride);
if (rb->Name == 0) {
intel_unmap_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
return;
}
- intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
+ intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
}
GLenum internalFormat,
GLuint width, GLuint height)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_screen *screen = intel->intelScreen;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
if (width == 0 || height == 0)
return true;
- irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+ irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
width, height,
rb->NumSamples);
if (!irb->mt)
struct gl_renderbuffer *rb,
void *image_handle)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb;
__DRIscreen *screen;
/* __DRIimage is opaque to the core so it has to be checked here */
switch (image->format) {
case MESA_FORMAT_RGBA8888_REV:
- _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
+ _mesa_error(ctx, GL_INVALID_OPERATION,
"glEGLImageTargetRenderbufferStorage(unsupported image format");
return;
break;
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- irb->mt = intel_miptree_create_for_bo(intel,
+ irb->mt = intel_miptree_create_for_bo(brw,
image->region->bo,
image->format,
image->offset,
rb->Width = image->region->width;
rb->Height = image->region->height;
rb->Format = image->format;
- rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
- image->internal_format);
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
rb->NeedsFinishRenderTexture = true;
}
}
static bool
-intel_renderbuffer_update_wrapper(struct intel_context *intel,
+intel_renderbuffer_update_wrapper(struct brw_context *brw,
struct intel_renderbuffer *irb,
struct gl_texture_image *image,
uint32_t layer)
intel_renderbuffer_set_draw_offset(irb);
- if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(intel, rb->Format)) {
- intel_miptree_alloc_hiz(intel, mt);
+ if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
+ intel_miptree_alloc_hiz(brw, mt);
if (!mt->hiz_mt)
return false;
}
struct gl_framebuffer *fb,
struct gl_renderbuffer_attachment *att)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct gl_renderbuffer *rb = att->Renderbuffer;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_texture_image *image = rb->TexImage;
intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
- if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
+ if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
_swrast_render_texture(ctx, fb, att);
return;
}
static void
intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
* batch. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer like GEM does in the kernel.
*/
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
}
#define fbo_incomplete(fb, ...) do { \
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH);
continue;
}
- if (!brw_render_target_supported(intel, rb)) {
+ if (!brw_render_target_supported(brw, rb)) {
fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
"texture/renderbuffer format attached: %s\n",
_mesa_get_format_name(intel_rb_format(irb)));
GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
if (mask & GL_COLOR_BUFFER_BIT) {
return mask;
}
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
src_irb->mt,
src_irb->mt_level, src_irb->mt_layer,
srcX0, srcY0, src_rb->Name == 0,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- mask = brw_blorp_framebuffer(intel_context(ctx),
+ mask = brw_blorp_framebuffer(brw_context(ctx),
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
}
bool
-intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+intel_renderbuffer_resolve_hiz(struct brw_context *brw,
struct intel_renderbuffer *irb)
{
if (irb->mt)
- return intel_miptree_slice_resolve_hiz(intel,
+ return intel_miptree_slice_resolve_hiz(brw,
irb->mt,
irb->mt_level,
irb->mt_layer);
}
bool
-intel_renderbuffer_resolve_depth(struct intel_context *intel,
+intel_renderbuffer_resolve_depth(struct brw_context *brw,
struct intel_renderbuffer *irb)
{
if (irb->mt)
- return intel_miptree_slice_resolve_depth(intel,
+ return intel_miptree_slice_resolve_depth(brw,
irb->mt,
irb->mt_level,
irb->mt_layer);
}
void
-intel_renderbuffer_move_to_temp(struct intel_context *intel,
+intel_renderbuffer_move_to_temp(struct brw_context *brw,
struct intel_renderbuffer *irb,
bool invalidate)
{
intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
- new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
+ new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
intel_image->base.Base.TexFormat,
intel_image->base.Base.Level,
intel_image->base.Base.Level,
irb->mt->num_samples,
INTEL_MIPTREE_TILING_ANY);
- if (brw_is_hiz_depth_format(intel, new_mt->format)) {
- intel_miptree_alloc_hiz(intel, new_mt);
+ if (brw_is_hiz_depth_format(brw, new_mt->format)) {
+ intel_miptree_alloc_hiz(brw, new_mt);
}
- intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
+ intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);
intel_miptree_reference(&irb->mt, intel_image->mt);
intel_renderbuffer_set_draw_offset(irb);
* Hook in device driver functions.
*/
void
-intel_fbo_init(struct intel_context *intel)
+intel_fbo_init(struct brw_context *brw)
{
+ struct intel_context *intel = &brw->intel;
intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
gl_format format);
extern void
-intel_fbo_init(struct intel_context *intel);
+intel_fbo_init(struct brw_context *brw);
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb);
* \return false if no resolve was needed
*/
bool
-intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+intel_renderbuffer_resolve_hiz(struct brw_context *brw,
struct intel_renderbuffer *irb);
/**
* \return false if no resolve was needed
*/
bool
-intel_renderbuffer_resolve_depth(struct intel_context *intel,
+intel_renderbuffer_resolve_depth(struct brw_context *brw,
struct intel_renderbuffer *irb);
-void intel_renderbuffer_move_to_temp(struct intel_context *intel,
+void intel_renderbuffer_move_to_temp(struct brw_context *brw,
struct intel_renderbuffer *irb,
bool invalidate);
* created, based on the chip generation and the surface type.
*/
static enum intel_msaa_layout
-compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
+compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target)
{
+ struct intel_context *intel = &brw->intel;
/* Prior to Gen7, all MSAA surfaces used IMS layout. */
if (intel->gen < 7)
return INTEL_MSAA_LAYOUT_IMS;
* by half the block width, and Y coordinates by half the block height.
*/
void
-intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
+intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned *width_px, unsigned *height)
{
* 64bpp, and 128bpp.
*/
bool
-intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
+intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
- struct brw_context *brw = brw_context(&intel->ctx);
+ struct intel_context *intel = &brw->intel;
/* MCS support does not exist prior to Gen7 */
if (intel->gen < 7)
* \c stencil_mt.
*/
struct intel_mipmap_tree *
-intel_miptree_create_layout(struct intel_context *intel,
+intel_miptree_create_layout(struct brw_context *brw,
GLenum target,
gl_format format,
GLuint first_level,
bool for_bo,
GLuint num_samples)
{
+ struct intel_context *intel = &brw->intel;
struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
if (!mt)
return NULL;
if (num_samples > 1) {
/* Adjust width/height/depth for MSAA */
- mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
+ mt->msaa_layout = compute_msaa_layout(brw, format, mt->target);
if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
/* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
*
_mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
(intel->must_use_separate_stencil ||
(intel->has_separate_stencil &&
- brw_is_hiz_depth_format(intel, format)))) {
- mt->stencil_mt = intel_miptree_create(intel,
+ brw_is_hiz_depth_format(brw, format)))) {
+ mt->stencil_mt = intel_miptree_create(brw,
mt->target,
MESA_FORMAT_S8,
mt->first_level,
}
}
- brw_miptree_layout(intel, mt);
+ brw_miptree_layout(brw, mt);
return mt;
}
* \brief Helper function for intel_miptree_create().
*/
static uint32_t
-intel_miptree_choose_tiling(struct intel_context *intel,
+intel_miptree_choose_tiling(struct brw_context *brw,
gl_format format,
uint32_t width0,
uint32_t num_samples,
enum intel_miptree_tiling_mode requested,
struct intel_mipmap_tree *mt)
{
-
+ struct intel_context *intel = &brw->intel;
if (format == MESA_FORMAT_S8) {
/* The stencil buffer is W tiled. However, we request from the kernel a
* non-tiled buffer because the GTT is incapable of W fencing.
}
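The comment above is the whole story for stencil: the hardware walks the stencil buffer W-tiled, but the kernel's fence registers cannot describe W tiling, so the BO is requested untiled and the driver does the W-tile address swizzling itself. As a sketch (hedged; the rest of the tiling heuristics are elided from this hunk), the branch effectively reduces to:

if (format == MESA_FORMAT_S8) {
   /* W-tiled as far as the hardware is concerned, but allocated as an
    * untiled GEM buffer because the GTT cannot W-fence it; detiling is
    * done in software when the buffer is mapped. */
   return I915_TILING_NONE;
}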
struct intel_mipmap_tree *
-intel_miptree_create(struct intel_context *intel,
+intel_miptree_create(struct brw_context *brw,
GLenum target,
gl_format format,
GLuint first_level,
GLuint num_samples,
enum intel_miptree_tiling_mode requested_tiling)
{
+ struct intel_context *intel = &brw->intel;
struct intel_mipmap_tree *mt;
gl_format tex_format = format;
gl_format etc_format = MESA_FORMAT_NONE;
etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
- mt = intel_miptree_create_layout(intel, target, format,
+ mt = intel_miptree_create_layout(brw, target, format,
first_level, last_level, width0,
height0, depth0,
false, num_samples);
total_height = ALIGN(total_height, 64);
}
- uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
+ uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0,
num_samples, requested_tiling,
mt);
bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
* Allocation of the MCS miptree will be deferred until the first fast
* clear actually occurs.
*/
- if (intel_is_non_msrt_mcs_buffer_supported(intel, mt))
+ if (intel_is_non_msrt_mcs_buffer_supported(brw, mt))
mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
return mt;
}
struct intel_mipmap_tree *
-intel_miptree_create_for_bo(struct intel_context *intel,
+intel_miptree_create_for_bo(struct brw_context *brw,
drm_intel_bo *bo,
gl_format format,
uint32_t offset,
*/
assert(pitch >= 0);
- mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
+ mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format,
0, 0,
width, height, 1,
true, 0 /* num_samples */);
* singlesample miptree is embedded as a child.
*/
struct intel_mipmap_tree*
-intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
unsigned dri_attachment,
gl_format format,
uint32_t num_samples,
struct intel_region *region)
{
+ struct intel_context *intel = &brw->intel;
struct intel_mipmap_tree *singlesample_mt = NULL;
struct intel_mipmap_tree *multisample_mt = NULL;
assert(_mesa_get_format_base_format(format) == GL_RGB ||
_mesa_get_format_base_format(format) == GL_RGBA);
- singlesample_mt = intel_miptree_create_for_bo(intel,
+ singlesample_mt = intel_miptree_create_for_bo(brw,
region->bo,
format,
0,
* Allocation of the MCS miptree will be deferred until the first fast
* clear actually occurs.
*/
- if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt))
+ if (intel_is_non_msrt_mcs_buffer_supported(brw, singlesample_mt))
singlesample_mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
if (num_samples == 0)
return singlesample_mt;
- multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+ multisample_mt = intel_miptree_create_for_renderbuffer(brw,
format,
region->width,
region->height,
if (intel->is_front_buffer_rendering &&
(dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
- intel_miptree_upsample(intel, multisample_mt);
+ intel_miptree_upsample(brw, multisample_mt);
}
return multisample_mt;
}
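Taken together, the hunks above wrap a window-system BO in one or two miptrees: a singlesample miptree aliasing the shared region and, for multisampled visuals, a private multisample miptree that is resolved to and from it. A rough caller's-eye sketch of that flow, using the helper names from this patch; "front_buffer" stands in for the is_front_buffer_rendering/attachment test shown above:

/* Hedged sketch of intel_miptree_create_for_dri2_buffer()'s flow. */
struct intel_mipmap_tree *ss =
   intel_miptree_create_for_bo(brw, region->bo, format, 0,
                               region->width, region->height,
                               region->pitch, region->tiling);
if (num_samples == 0)
   return ss;                        /* non-MSAA: render straight to the BO */

struct intel_mipmap_tree *ms =
   intel_miptree_create_for_renderbuffer(brw, format, region->width,
                                         region->height, num_samples);
ms->singlesample_mt = ss;            /* keep the shared surface as a child */

/* Front-buffer rendering must see current window contents, so pull the
 * singlesample data up into the multisample surface before drawing. */
if (front_buffer)
   intel_miptree_upsample(brw, ms);
return ms;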
struct intel_mipmap_tree*
-intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+intel_miptree_create_for_renderbuffer(struct brw_context *brw,
gl_format format,
uint32_t width,
uint32_t height,
uint32_t depth = 1;
bool ok;
- mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
+ mt = intel_miptree_create(brw, GL_TEXTURE_2D, format, 0, 0,
width, height, depth, true, num_samples,
INTEL_MIPTREE_TILING_ANY);
if (!mt)
goto fail;
- if (brw_is_hiz_depth_format(intel, format)) {
- ok = intel_miptree_alloc_hiz(intel, mt);
+ if (brw_is_hiz_depth_format(brw, format)) {
+ ok = intel_miptree_alloc_hiz(brw, mt);
if (!ok)
goto fail;
}
if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
- ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+ ok = intel_miptree_alloc_mcs(brw, mt, num_samples);
if (!ok)
goto fail;
}
}
static void
-intel_miptree_copy_slice_sw(struct intel_context *intel,
+intel_miptree_copy_slice_sw(struct brw_context *brw,
struct intel_mipmap_tree *dst_mt,
struct intel_mipmap_tree *src_mt,
int level,
int src_stride, dst_stride;
int cpp = dst_mt->cpp;
- intel_miptree_map(intel, src_mt,
+ intel_miptree_map(brw, src_mt,
level, slice,
0, 0,
width, height,
GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
&src, &src_stride);
- intel_miptree_map(intel, dst_mt,
+ intel_miptree_map(brw, dst_mt,
level, slice,
0, 0,
width, height,
}
}
- intel_miptree_unmap(intel, dst_mt, level, slice);
- intel_miptree_unmap(intel, src_mt, level, slice);
+ intel_miptree_unmap(brw, dst_mt, level, slice);
+ intel_miptree_unmap(brw, src_mt, level, slice);
/* Don't forget to copy the stencil data over, too. We could have skipped
* passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
*/
if (dst_mt->stencil_mt) {
assert(src_mt->stencil_mt);
- intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
+ intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
level, slice, width, height);
}
}
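Between the map and unmap calls above, the hunk elides the actual pixel copy. Since both miptrees share a format and the maps hand back CPU pointers plus strides, the body is a plain row copy; a minimal sketch of the elided middle, reusing the names from the hunk (the memcpy loop itself is an assumption):

/* Hedged sketch of the elided copy loop in intel_miptree_copy_slice_sw(). */
for (int row = 0; row < height; row++) {
   memcpy(dst + row * dst_stride,
          src + row * src_stride,
          width * cpp);
}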
static void
-intel_miptree_copy_slice(struct intel_context *intel,
+intel_miptree_copy_slice(struct brw_context *brw,
struct intel_mipmap_tree *dst_mt,
struct intel_mipmap_tree *src_mt,
int level,
int depth)
{
+ struct intel_context *intel = &brw->intel;
gl_format format = src_mt->format;
uint32_t width = src_mt->level[level].width;
uint32_t height = src_mt->level[level].height;
* stencil's W tiling in the blitter.
*/
if (src_mt->stencil_mt) {
- intel_miptree_copy_slice_sw(intel,
+ intel_miptree_copy_slice_sw(brw,
dst_mt, src_mt,
level, slice,
width, height);
dst_mt, dst_x, dst_y, dst_mt->region->pitch,
width, height);
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
src_mt, level, slice, 0, 0, false,
dst_mt, level, slice, 0, 0, false,
width, height, GL_COPY)) {
perf_debug("miptree validate blit for %s failed\n",
_mesa_get_format_name(format));
- intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
+ intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
width, height);
}
}
* is set to true if we're about to clear the image).
*/
void
-intel_miptree_copy_teximage(struct intel_context *intel,
+intel_miptree_copy_teximage(struct brw_context *brw,
struct intel_texture_image *intelImage,
struct intel_mipmap_tree *dst_mt,
bool invalidate)
if (!invalidate) {
for (int slice = 0; slice < depth; slice++) {
- intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
}
}
}
bool
-intel_miptree_alloc_mcs(struct intel_context *intel,
+intel_miptree_alloc_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt,
GLuint num_samples)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen >= 7); /* MCS only used on Gen7+ */
assert(mt->mcs_mt == NULL);
* "The MCS surface must be stored as Tile Y."
*/
mt->mcs_state = INTEL_MCS_STATE_MSAA;
- mt->mcs_mt = intel_miptree_create(intel,
+ mt->mcs_mt = intel_miptree_create(brw,
mt->target,
format,
mt->first_level,
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
+ void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
memset(data, 0xff, mt->mcs_mt->region->bo->size);
- intel_miptree_unmap_raw(intel, mt->mcs_mt);
+ intel_miptree_unmap_raw(brw, mt->mcs_mt);
return mt->mcs_mt;
}
bool
-intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
+intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
assert(mt->mcs_mt == NULL);
const gl_format format = MESA_FORMAT_R_UINT32;
unsigned block_width_px;
unsigned block_height;
- intel_get_non_msrt_mcs_alignment(intel, mt, &block_width_px, &block_height);
+ intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
unsigned width_divisor = block_width_px * 4;
unsigned height_divisor = block_height * 8;
unsigned mcs_width =
unsigned mcs_height =
ALIGN(mt->logical_height0, height_divisor) / height_divisor;
assert(mt->logical_depth0 == 1);
- mt->mcs_mt = intel_miptree_create(intel,
+ mt->mcs_mt = intel_miptree_create(brw,
mt->target,
format,
mt->first_level,
* \c has_hiz was set.
*/
static bool
-intel_miptree_slice_enable_hiz(struct intel_context *intel,
+intel_miptree_slice_enable_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer)
{
+ struct intel_context *intel = &brw->intel;
assert(mt->hiz_mt);
if (intel->is_haswell) {
bool
-intel_miptree_alloc_hiz(struct intel_context *intel,
+intel_miptree_alloc_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
assert(mt->hiz_mt == NULL);
- mt->hiz_mt = intel_miptree_create(intel,
+ mt->hiz_mt = intel_miptree_create(brw,
mt->target,
mt->format,
mt->first_level,
struct intel_resolve_map *head = &mt->hiz_map;
for (int level = mt->first_level; level <= mt->last_level; ++level) {
for (int layer = 0; layer < mt->level[level].depth; ++layer) {
- if (!intel_miptree_slice_enable_hiz(intel, mt, level, layer))
+ if (!intel_miptree_slice_enable_hiz(brw, mt, level, layer))
continue;
head->next = malloc(sizeof(*head->next));
}
static bool
-intel_miptree_slice_resolve(struct intel_context *intel,
+intel_miptree_slice_resolve(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer,
if (!item || item->need != need)
return false;
- intel_hiz_exec(intel, mt, level, layer, need);
+ intel_hiz_exec(brw, mt, level, layer, need);
intel_resolve_map_remove(item);
return true;
}
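The resolve functions above consume a small per-miptree bookkeeping list: when rendering leaves a slice's depth or HiZ data out of sync, an entry recording (level, layer, needed op) is pushed onto mt->hiz_map, and the slice/all-slices resolvers look entries up, run the HiZ blit, and drop them. A hedged sketch of one entry, with field names inferred from the lookups above (the real definition lives elsewhere in the tree):

/* Sketch of a resolve-map entry; mt->hiz_map is the list head. */
struct intel_resolve_map {
   uint32_t level;
   uint32_t layer;
   enum gen6_hiz_op need;            /* GEN6_HIZ_OP_HIZ_RESOLVE or _DEPTH_RESOLVE */
   struct intel_resolve_map *next;
};

/* Marking dirty: find-or-append the (level, layer) entry and set need.
 * Resolving, as in intel_miptree_slice_resolve() above: look the entry up,
 * call intel_hiz_exec(brw, mt, level, layer, need), then remove it. */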
bool
-intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+intel_miptree_slice_resolve_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer)
{
- return intel_miptree_slice_resolve(intel, mt, level, layer,
+ return intel_miptree_slice_resolve(brw, mt, level, layer,
GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
-intel_miptree_slice_resolve_depth(struct intel_context *intel,
+intel_miptree_slice_resolve_depth(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer)
{
- return intel_miptree_slice_resolve(intel, mt, level, layer,
+ return intel_miptree_slice_resolve(brw, mt, level, layer,
GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
-intel_miptree_all_slices_resolve(struct intel_context *intel,
+intel_miptree_all_slices_resolve(struct brw_context *brw,
struct intel_mipmap_tree *mt,
enum gen6_hiz_op need)
{
if (i->need != need)
continue;
- intel_hiz_exec(intel, mt, i->level, i->layer, need);
+ intel_hiz_exec(brw, mt, i->level, i->layer, need);
intel_resolve_map_remove(i);
did_resolve = true;
}
}
bool
-intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
- return intel_miptree_all_slices_resolve(intel, mt,
+ return intel_miptree_all_slices_resolve(brw, mt,
GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
-intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
- return intel_miptree_all_slices_resolve(intel, mt,
+ return intel_miptree_all_slices_resolve(brw, mt,
GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
-intel_miptree_resolve_color(struct intel_context *intel,
+intel_miptree_resolve_color(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
switch (mt->mcs_state) {
break;
case INTEL_MCS_STATE_UNRESOLVED:
case INTEL_MCS_STATE_CLEAR:
- brw_blorp_resolve_color(intel, mt);
+ brw_blorp_resolve_color(brw, mt);
break;
}
}
* future.
*/
void
-intel_miptree_make_shareable(struct intel_context *intel,
+intel_miptree_make_shareable(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
/* MCS buffers are also used for multisample buffers, but we can't resolve
assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
if (mt->mcs_mt) {
- intel_miptree_resolve_color(intel, mt);
+ intel_miptree_resolve_color(brw, mt);
intel_miptree_release(&mt->mcs_mt);
mt->mcs_state = INTEL_MCS_STATE_NONE;
}
}
static void
-intel_miptree_updownsample(struct intel_context *intel,
+intel_miptree_updownsample(struct brw_context *brw,
struct intel_mipmap_tree *src,
struct intel_mipmap_tree *dst,
unsigned width,
int dst_x0 = 0;
int dst_y0 = 0;
- brw_blorp_blit_miptrees(intel,
+ brw_blorp_blit_miptrees(brw,
src, 0 /* level */, 0 /* layer */,
dst, 0 /* level */, 0 /* layer */,
src_x0, src_y0,
false, false /*mirror x, y*/);
if (src->stencil_mt) {
- brw_blorp_blit_miptrees(intel,
+ brw_blorp_blit_miptrees(brw,
src->stencil_mt, 0 /* level */, 0 /* layer */,
dst->stencil_mt, 0 /* level */, 0 /* layer */,
src_x0, src_y0,
* If the miptree needs no downsample, then skip.
*/
void
-intel_miptree_downsample(struct intel_context *intel,
+intel_miptree_downsample(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
/* Only flat, renderbuffer-like miptrees are supported. */
if (!mt->need_downsample)
return;
- intel_miptree_updownsample(intel,
+ intel_miptree_updownsample(brw,
mt, mt->singlesample_mt,
mt->logical_width0,
mt->logical_height0);
* The upsample is done unconditionally.
*/
void
-intel_miptree_upsample(struct intel_context *intel,
+intel_miptree_upsample(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
/* Only flat, renderbuffer-like miptrees are supported. */
assert_is_flat(mt);
assert(!mt->need_downsample);
- intel_miptree_updownsample(intel,
+ intel_miptree_updownsample(brw,
mt->singlesample_mt, mt,
mt->logical_width0,
mt->logical_height0);
}
void *
-intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
+intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
+ struct intel_context *intel = &brw->intel;
+ struct gl_context *ctx = &intel->ctx;
/* CPU accesses to color buffers don't understand fast color clears, so
* resolve any pending fast color clears before we map.
*/
- intel_miptree_resolve_color(intel, mt);
+ intel_miptree_resolve_color(brw, mt);
drm_intel_bo *bo = mt->region->bo;
}
}
- intel_flush(&intel->ctx);
+ intel_flush(ctx);
if (mt->region->tiling != I915_TILING_NONE)
drm_intel_gem_bo_map_gtt(bo);
}
void
-intel_miptree_unmap_raw(struct intel_context *intel,
+intel_miptree_unmap_raw(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
drm_intel_bo_unmap(mt->region->bo);
}
static void
-intel_miptree_map_gtt(struct intel_context *intel,
+intel_miptree_map_gtt(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
assert(y % bh == 0);
y /= bh;
- base = intel_miptree_map_raw(intel, mt) + mt->offset;
+ base = intel_miptree_map_raw(brw, mt) + mt->offset;
if (base == NULL)
map->ptr = NULL;
}
static void
-intel_miptree_unmap_gtt(struct intel_context *intel,
+intel_miptree_unmap_gtt(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
unsigned int slice)
{
- intel_miptree_unmap_raw(intel, mt);
+ intel_miptree_unmap_raw(brw, mt);
}
static void
-intel_miptree_map_blit(struct intel_context *intel,
+intel_miptree_map_blit(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
{
- map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
+ map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
0, 0,
map->w, map->h, 1,
false, 0,
}
map->stride = map->mt->region->pitch;
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
mt, level, slice,
map->x, map->y, false,
map->mt, 0, 0,
goto fail;
}
- intel_batchbuffer_flush(intel);
- map->ptr = intel_miptree_map_raw(intel, map->mt);
+ intel_batchbuffer_flush(brw);
+ map->ptr = intel_miptree_map_raw(brw, map->mt);
DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
}
static void
-intel_miptree_unmap_blit(struct intel_context *intel,
+intel_miptree_unmap_blit(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
unsigned int slice)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
- intel_miptree_unmap_raw(intel, map->mt);
+ intel_miptree_unmap_raw(brw, map->mt);
if (map->mode & GL_MAP_WRITE_BIT) {
- bool ok = intel_miptree_blit(intel,
+ bool ok = intel_miptree_blit(brw,
map->mt, 0, 0,
0, 0, false,
mt, level, slice,
}
static void
-intel_miptree_map_s8(struct intel_context *intel,
+intel_miptree_map_s8(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
{
+ struct intel_context *intel = &brw->intel;
map->stride = map->w;
map->buffer = map->ptr = malloc(map->stride * map->h);
if (!map->buffer)
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
unsigned int image_x, image_y;
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_miptree_unmap_raw(intel, mt);
+ intel_miptree_unmap_raw(brw, mt);
DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
}
static void
-intel_miptree_unmap_s8(struct intel_context *intel,
+intel_miptree_unmap_s8(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
unsigned int slice)
{
+ struct intel_context *intel = &brw->intel;
if (map->mode & GL_MAP_WRITE_BIT) {
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_miptree_unmap_raw(intel, mt);
+ intel_miptree_unmap_raw(brw, mt);
}
free(map->buffer);
}
static void
-intel_miptree_map_etc(struct intel_context *intel,
+intel_miptree_map_etc(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
}
static void
-intel_miptree_unmap_etc(struct intel_context *intel,
+intel_miptree_unmap_etc(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
image_x += map->x;
image_y += map->y;
- uint8_t *dst = intel_miptree_map_raw(intel, mt)
+ uint8_t *dst = intel_miptree_map_raw(brw, mt)
+ image_y * mt->region->pitch
+ image_x * mt->region->cpp;
map->ptr, map->stride,
map->w, map->h, mt->etc_format);
- intel_miptree_unmap_raw(intel, mt);
+ intel_miptree_unmap_raw(brw, mt);
free(map->buffer);
}
* copying the data between the actual backing store and the temporary.
*/
static void
-intel_miptree_map_depthstencil(struct intel_context *intel,
+intel_miptree_map_depthstencil(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
{
+ struct intel_context *intel = &brw->intel;
struct intel_mipmap_tree *z_mt = mt;
struct intel_mipmap_tree *s_mt = mt->stencil_mt;
bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
- uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
+ uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_miptree_unmap_raw(intel, s_mt);
- intel_miptree_unmap_raw(intel, z_mt);
+ intel_miptree_unmap_raw(brw, s_mt);
+ intel_miptree_unmap_raw(brw, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
}
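The packed_map loops elided above are where the temporary buffer gets its contents: depth and stencil live in two separate miptrees (Z in z_mt, W-tiled S8 in s_mt), and the map interleaves them into the packed Z24_S8 or Z32F_X24S8 layout the caller expects. A heavily simplified sketch of the read-back direction for the Z24_S8 case, ignoring the W-tiling address swizzle the real code applies when indexing s_map; s_pitch and z_pitch are assumed per-row element counts:

/* Hedged sketch of the elided interleave loop (no W-tile swizzling shown). */
for (uint32_t y = 0; y < map->h; y++) {
   for (uint32_t x = 0; x < map->w; x++) {
      uint8_t  s = s_map[(s_image_y + map->y + y) * s_pitch +
                         (s_image_x + map->x + x)];
      uint32_t z = z_map[(z_image_y + map->y + y) * z_pitch +
                         (z_image_x + map->x + x)];
      packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
   }
}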
static void
-intel_miptree_unmap_depthstencil(struct intel_context *intel,
+intel_miptree_unmap_depthstencil(struct brw_context *brw,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
unsigned int slice)
{
+ struct intel_context *intel = &brw->intel;
struct intel_mipmap_tree *z_mt = mt;
struct intel_mipmap_tree *s_mt = mt->stencil_mt;
bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
if (map->mode & GL_MAP_WRITE_BIT) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
- uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
+ uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_miptree_unmap_raw(intel, s_mt);
- intel_miptree_unmap_raw(intel, z_mt);
+ intel_miptree_unmap_raw(brw, s_mt);
+ intel_miptree_unmap_raw(brw, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
}
static void
-intel_miptree_map_singlesample(struct intel_context *intel,
+intel_miptree_map_singlesample(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
void **out_ptr,
int *out_stride)
{
+ struct intel_context *intel = &brw->intel;
struct intel_miptree_map *map;
assert(mt->num_samples <= 1);
return;
}
- intel_miptree_slice_resolve_depth(intel, mt, level, slice);
+ intel_miptree_slice_resolve_depth(brw, mt, level, slice);
if (map->mode & GL_MAP_WRITE_BIT) {
intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
}
if (mt->format == MESA_FORMAT_S8) {
- intel_miptree_map_s8(intel, mt, map, level, slice);
+ intel_miptree_map_s8(brw, mt, map, level, slice);
} else if (mt->etc_format != MESA_FORMAT_NONE &&
!(mode & BRW_MAP_DIRECT_BIT)) {
- intel_miptree_map_etc(intel, mt, map, level, slice);
+ intel_miptree_map_etc(brw, mt, map, level, slice);
} else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
- intel_miptree_map_depthstencil(intel, mt, map, level, slice);
+ intel_miptree_map_depthstencil(brw, mt, map, level, slice);
}
/* See intel_miptree_blit() for details on the 32k pitch limit. */
else if (intel->has_llc &&
(mt->region->tiling == I915_TILING_X ||
(intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
mt->region->pitch < 32768) {
- intel_miptree_map_blit(intel, mt, map, level, slice);
+ intel_miptree_map_blit(brw, mt, map, level, slice);
} else if (mt->region->tiling != I915_TILING_NONE &&
mt->region->bo->size >= intel->max_gtt_map_object_size) {
assert(mt->region->pitch < 32768);
- intel_miptree_map_blit(intel, mt, map, level, slice);
+ intel_miptree_map_blit(brw, mt, map, level, slice);
} else {
- intel_miptree_map_gtt(intel, mt, map, level, slice);
+ intel_miptree_map_gtt(brw, mt, map, level, slice);
}
*out_ptr = map->ptr;
}
static void
-intel_miptree_unmap_singlesample(struct intel_context *intel,
+intel_miptree_unmap_singlesample(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice)
mt, _mesa_get_format_name(mt->format), level, slice);
if (mt->format == MESA_FORMAT_S8) {
- intel_miptree_unmap_s8(intel, mt, map, level, slice);
+ intel_miptree_unmap_s8(brw, mt, map, level, slice);
} else if (mt->etc_format != MESA_FORMAT_NONE &&
!(map->mode & BRW_MAP_DIRECT_BIT)) {
- intel_miptree_unmap_etc(intel, mt, map, level, slice);
+ intel_miptree_unmap_etc(brw, mt, map, level, slice);
} else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
- intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
+ intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
} else if (map->mt) {
- intel_miptree_unmap_blit(intel, mt, map, level, slice);
+ intel_miptree_unmap_blit(brw, mt, map, level, slice);
} else {
- intel_miptree_unmap_gtt(intel, mt, map, level, slice);
+ intel_miptree_unmap_gtt(brw, mt, map, level, slice);
}
intel_miptree_release_map(mt, level, slice);
}
static void
-intel_miptree_map_multisample(struct intel_context *intel,
+intel_miptree_map_multisample(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
void **out_ptr,
int *out_stride)
{
+ struct gl_context *ctx = &brw->intel.ctx;
struct intel_miptree_map *map;
assert(mt->num_samples > 1);
if (mt->target != GL_TEXTURE_2D ||
mt->first_level != 0 ||
mt->last_level != 0) {
- _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+ _mesa_problem(ctx, "attempt to map a multisample miptree for "
"which (target, first_level, last_level != "
"(GL_TEXTURE_2D, 0, 0)");
goto fail;
if (!mt->singlesample_mt) {
mt->singlesample_mt =
- intel_miptree_create_for_renderbuffer(intel,
+ intel_miptree_create_for_renderbuffer(brw,
mt->format,
mt->logical_width0,
mt->logical_height0,
mt->need_downsample = true;
}
- intel_miptree_downsample(intel, mt);
- intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+ intel_miptree_downsample(brw, mt);
+ intel_miptree_map_singlesample(brw, mt->singlesample_mt,
level, slice,
x, y, w, h,
mode,
}
static void
-intel_miptree_unmap_multisample(struct intel_context *intel,
+intel_miptree_unmap_multisample(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice)
if (!map)
return;
- intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+ intel_miptree_unmap_singlesample(brw, mt->singlesample_mt, level, slice);
mt->need_downsample = false;
if (map->mode & GL_MAP_WRITE_BIT)
- intel_miptree_upsample(intel, mt);
+ intel_miptree_upsample(brw, mt);
if (map->singlesample_mt_is_tmp)
intel_miptree_release(&mt->singlesample_mt);
}
void
-intel_miptree_map(struct intel_context *intel,
+intel_miptree_map(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
int *out_stride)
{
if (mt->num_samples <= 1)
- intel_miptree_map_singlesample(intel, mt,
+ intel_miptree_map_singlesample(brw, mt,
level, slice,
x, y, w, h,
mode,
out_ptr, out_stride);
else
- intel_miptree_map_multisample(intel, mt,
+ intel_miptree_map_multisample(brw, mt,
level, slice,
x, y, w, h,
mode,
}
void
-intel_miptree_unmap(struct intel_context *intel,
+intel_miptree_unmap(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice)
{
if (mt->num_samples <= 1)
- intel_miptree_unmap_singlesample(intel, mt, level, slice);
+ intel_miptree_unmap_singlesample(brw, mt, level, slice);
else
- intel_miptree_unmap_multisample(intel, mt, level, slice);
+ intel_miptree_unmap_multisample(brw, mt, level, slice);
}
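With the single- and multisample paths dispatched above, intel_miptree_map()/intel_miptree_unmap() are the one CPU window onto any miptree; callers such as intel_map_texture_image() further down simply bracket their pixel work with the pair. A minimal usage sketch, assuming the full parameter list visible in those callers (x, y, w, h and a GL map mode between slice and the out pointers):

void *ptr;
int stride;

/* Map one 2D slice for writing; the helper picks GTT, blit, or a malloc'd
 * temporary behind the scenes and performs any needed resolves first. */
intel_miptree_map(brw, mt, level, slice,
                  x, y, w, h,
                  GL_MAP_WRITE_BIT,
                  &ptr, &stride);

for (int row = 0; row < h; row++)
   memcpy((char *) ptr + row * stride, src_rows + row * src_stride, w * cpp);

/* Unmapping copies or blits any temporary buffer back into the miptree. */
intel_miptree_unmap(brw, mt, level, slice);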
};
bool
-intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
+intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
struct intel_mipmap_tree *mt);
void
-intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
+intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned *width_px, unsigned *height);
bool
-intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
+intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt);
-struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
+struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
GLenum target,
gl_format format,
GLuint first_level,
enum intel_miptree_tiling_mode);
struct intel_mipmap_tree *
-intel_miptree_create_layout(struct intel_context *intel,
+intel_miptree_create_layout(struct brw_context *brw,
GLenum target,
gl_format format,
GLuint first_level,
GLuint num_samples);
struct intel_mipmap_tree *
-intel_miptree_create_for_bo(struct intel_context *intel,
+intel_miptree_create_for_bo(struct brw_context *brw,
drm_intel_bo *bo,
gl_format format,
uint32_t offset,
uint32_t tiling);
struct intel_mipmap_tree*
-intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
unsigned dri_attachment,
gl_format format,
uint32_t num_samples,
* - Depth is 1.
*/
struct intel_mipmap_tree*
-intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+intel_miptree_create_for_renderbuffer(struct brw_context *brw,
gl_format format,
uint32_t width,
uint32_t height,
GLuint img, GLuint x, GLuint y);
void
-intel_miptree_copy_teximage(struct intel_context *intel,
+intel_miptree_copy_teximage(struct brw_context *brw,
struct intel_texture_image *intelImage,
struct intel_mipmap_tree *dst_mt, bool invalidate);
bool
-intel_miptree_alloc_mcs(struct intel_context *intel,
+intel_miptree_alloc_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt,
GLuint num_samples);
*/
bool
-intel_miptree_alloc_hiz(struct intel_context *intel,
+intel_miptree_alloc_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt);
bool
* \return false if no resolve was needed
*/
bool
-intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+intel_miptree_slice_resolve_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int depth);
* \return false if no resolve was needed
*/
bool
-intel_miptree_slice_resolve_depth(struct intel_context *intel,
+intel_miptree_slice_resolve_depth(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int depth);
* \return false if no resolve was needed
*/
bool
-intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt);
/**
* \return false if no resolve was needed
*/
bool
-intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
struct intel_mipmap_tree *mt);
/**\}*/
}
void
-intel_miptree_resolve_color(struct intel_context *intel,
+intel_miptree_resolve_color(struct brw_context *brw,
struct intel_mipmap_tree *mt);
void
-intel_miptree_make_shareable(struct intel_context *intel,
+intel_miptree_make_shareable(struct brw_context *brw,
struct intel_mipmap_tree *mt);
void
-intel_miptree_downsample(struct intel_context *intel,
+intel_miptree_downsample(struct brw_context *brw,
struct intel_mipmap_tree *mt);
void
-intel_miptree_upsample(struct intel_context *intel,
+intel_miptree_upsample(struct brw_context *brw,
struct intel_mipmap_tree *mt);
-/* i915_mipmap_tree.c:
- */
-void i915_miptree_layout(struct intel_mipmap_tree *mt);
-void i945_miptree_layout(struct intel_mipmap_tree *mt);
-void brw_miptree_layout(struct intel_context *intel,
- struct intel_mipmap_tree *mt);
+void brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt);
-void *intel_miptree_map_raw(struct intel_context *intel,
+void *intel_miptree_map_raw(struct brw_context *brw,
struct intel_mipmap_tree *mt);
-void intel_miptree_unmap_raw(struct intel_context *intel,
+void intel_miptree_unmap_raw(struct brw_context *brw,
struct intel_mipmap_tree *mt);
void
-intel_miptree_map(struct intel_context *intel,
+intel_miptree_map(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
int *out_stride);
void
-intel_miptree_unmap(struct intel_context *intel,
+intel_miptree_unmap(struct brw_context *brw,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice);
void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
unsigned int level, unsigned int layer, enum gen6_hiz_op op);
#ifdef __cplusplus
const struct gl_pixelstore_attrib *unpack,
const GLubyte *bitmap )
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_renderbuffer *irb;
return false;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
if (fb->_NumColorDrawBuffers != 1) {
perf_debug("accelerated glBitmap() only supports rendering to a "
/* The blitter has no idea about fast color clears, so we need to resolve
* the miptree before we do anything.
*/
- intel_miptree_resolve_color(intel, irb->mt);
+ intel_miptree_resolve_color(brw, irb->mt);
/* Chop it all into chunks that can be digested by hardware: */
for (py = 0; py < height; py += DY) {
if (count == 0)
continue;
- if (!intelEmitImmediateColorExpandBlit(intel,
+ if (!intelEmitImmediateColorExpandBlit(brw,
irb->mt->cpp,
(GLubyte *)stipple,
sz,
out:
if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
}
- intel_check_front_buffer_rendering(intel);
+ intel_check_front_buffer_rendering(brw);
return true;
}
GLsizei width, GLsizei height,
GLint dstx, GLint dsty, GLenum type)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct gl_framebuffer *read_fb = ctx->ReadBuffer;
return false;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
intel_flush(&intel->ctx);
dstx += srcx - orig_srcx;
dsty += srcy - orig_srcy;
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
srcx, srcy, _mesa_is_winsys_fbo(read_fb),
draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
ctx->Query.CurrentOcclusionObject->Result += width * height;
out:
- intel_check_front_buffer_rendering(intel);
+ intel_check_front_buffer_rendering(brw);
DBG("%s: success\n", __FUNCTION__);
return true;
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
GLuint dst_offset;
}
dirty = intel->front_buffer_dirty;
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
intel->front_buffer_dirty = dirty;
all = (width * height * irb->mt->cpp == dst->Base.Size &&
x == 0 && dst_offset == 0);
- dst_buffer = intel_bufferobj_buffer(intel, dst,
+ dst_buffer = intel_bufferobj_buffer(brw, dst,
all ? INTEL_WRITE_FULL :
INTEL_WRITE_PART);
struct intel_mipmap_tree *pbo_mt =
- intel_miptree_create_for_bo(intel,
+ intel_miptree_create_for_bo(brw,
dst_buffer,
irb->mt->format,
dst_offset,
width, height,
dst_stride, I915_TILING_NONE);
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
irb->mt, irb->mt_level, irb->mt_layer,
x, y, _mesa_is_winsys_fbo(ctx->ReadBuffer),
pbo_mt, 0, 0,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
bool dirty;
/* glReadPixels() won't dirty the front buffer, so reset the dirty
* flag after calling intel_prepare_render(). */
dirty = intel->front_buffer_dirty;
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
intel->front_buffer_dirty = dirty;
/* Update Mesa state before calling _mesa_readpixels().
{
GET_CURRENT_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
if (intel == NULL)
return;
- intel_resolve_for_dri2_flush(intel, drawable);
+ intel_resolve_for_dri2_flush(brw, drawable);
intel->need_throttle = true;
if (intel->batch.used)
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (INTEL_DEBUG & DEBUG_AUB) {
aub_dump_bmp(ctx);
* Sets up a DRIImage structure to point to our shared image in a region
*/
static void
-intel_setup_image_from_mipmap_tree(struct intel_context *intel, __DRIimage *image,
+intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
struct intel_mipmap_tree *mt, GLuint level,
GLuint zoffset)
{
unsigned int draw_x, draw_y;
uint32_t mask_x, mask_y;
- intel_miptree_make_shareable(intel, mt);
+ intel_miptree_make_shareable(brw, mt);
intel_miptree_check_level_layer(mt, level, zoffset);
int renderbuffer, void *loaderPrivate)
{
__DRIimage *image;
- struct intel_context *intel = context->driverPrivate;
+ struct brw_context *brw = context->driverPrivate;
+ struct gl_context *ctx = &brw->intel.ctx;
struct gl_renderbuffer *rb;
struct intel_renderbuffer *irb;
- rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+ rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
if (!rb) {
- _mesa_error(&intel->ctx,
- GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
return NULL;
}
irb = intel_renderbuffer(rb);
- intel_miptree_make_shareable(intel, irb->mt);
+ intel_miptree_make_shareable(brw, irb->mt);
image = calloc(1, sizeof *image);
if (image == NULL)
return NULL;
void *loaderPrivate)
{
__DRIimage *image;
- struct intel_context *intel = context->driverPrivate;
+ struct brw_context *brw = context->driverPrivate;
+ struct intel_context *intel = &brw->intel;
struct gl_texture_object *obj;
struct intel_texture_object *iobj;
GLuint face = 0;
image->internal_format = obj->Image[face][level]->InternalFormat;
image->format = obj->Image[face][level]->TexFormat;
image->data = loaderPrivate;
- intel_setup_image_from_mipmap_tree(intel, image, iobj->mt, level, zoffset);
+ intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
image->dri_format = intel_dri_format(image->format);
image->has_depthstencil = iobj->mt->stencil_mt? true : false;
if (image->dri_format == MESA_FORMAT_NONE) {
intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLenum condition, GLbitfield flags)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_sync_object *sync = (struct intel_sync_object *)s;
assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
sync->bo = intel->batch.bo;
drm_intel_bo_reference(sync->bo);
intel_alloc_texture_image_buffer(struct gl_context *ctx,
struct gl_texture_image *image)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(image);
struct gl_texture_object *texobj = image->TexObject;
__FUNCTION__, texobj, image->Level,
image->Width, image->Height, image->Depth, intel_texobj->mt);
} else {
- intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
+ intel_image->mt = intel_miptree_create_for_teximage(brw, intel_texobj,
intel_image,
false);
GLubyte **map,
GLint *stride)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(tex_image);
struct intel_mipmap_tree *mt = intel_image->mt;
if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
slice = tex_image->Face;
- intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
+ intel_miptree_map(brw, mt, tex_image->Level, slice, x, y, w, h, mode,
(void **)map, stride);
}
intel_unmap_texture_image(struct gl_context *ctx,
struct gl_texture_image *tex_image, GLuint slice)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(tex_image);
struct intel_mipmap_tree *mt = intel_image->mt;
if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
slice = tex_image->Face;
- intel_miptree_unmap(intel, mt, tex_image->Level, slice);
+ intel_miptree_unmap(brw, mt, tex_image->Level, slice);
}
void
GLint target, GLint format, __DRIdrawable *pDraw);
struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
bool expect_accelerated_upload);
-GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
+GLuint intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit);
bool
intel_texsubimage_tiled_memcpy(struct gl_context *ctx,
static bool
-intel_copy_texsubimage(struct intel_context *intel,
+intel_copy_texsubimage(struct brw_context *brw,
struct intel_texture_image *intelImage,
GLint dstx, GLint dsty, GLint slice,
struct intel_renderbuffer *irb,
{
const GLenum internalFormat = intelImage->base.Base.InternalFormat;
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
/* glCopyTexSubImage() can be called on a multisampled renderbuffer (if
* that renderbuffer is associated with the window system framebuffer),
}
/* blit from src buffer to texture */
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
irb->mt, irb->mt_level, irb->mt_layer,
x, y, irb->Base.Base.Name == 0,
intelImage->mt, intelImage->base.Base.Level,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
/* Try BLORP first. It can handle almost everything. */
- if (brw_blorp_copytexsubimage(intel, rb, texImage, slice, x, y,
+ if (brw_blorp_copytexsubimage(brw, rb, texImage, slice, x, y,
xoffset, yoffset, width, height))
return;
/* Next, try the BLT engine. */
- if (intel_copy_texsubimage(intel,
+ if (intel_copy_texsubimage(brw,
intel_texture_image(texImage),
xoffset, yoffset, slice,
intel_renderbuffer(rb), x, y, width, height)) {
* miptree of that size.
*/
struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
bool expect_accelerated_upload)
}
}
- return intel_miptree_create(intel,
+ return intel_miptree_create(brw,
intelObj->base.Target,
intelImage->base.Base.TexFormat,
firstLevel,
{
struct intel_texture_image *intelImage = intel_texture_image(image);
struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
GLuint src_offset;
drm_intel_bo *src_buffer;
return false;
}
- src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
+ src_buffer = intel_bufferobj_source(brw, pbo, 64, &src_offset);
/* note: potential 64-bit ptr to 32-bit int cast */
src_offset += (GLuint) (unsigned long) pixels;
_mesa_image_row_stride(unpack, image->Width, format, type);
struct intel_mipmap_tree *pbo_mt =
- intel_miptree_create_for_bo(intel,
+ intel_miptree_create_for_bo(brw,
src_buffer,
intelImage->mt->format,
src_offset,
if (!pbo_mt)
return false;
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
pbo_mt, 0, 0,
0, 0, false,
intelImage->mt, image->Level, image->Face,
ctx->Driver.FreeTextureImageBuffer(ctx, image);
- intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
+ intel_image->mt = intel_miptree_create_layout(brw, target, image->TexFormat,
0, 0,
width, height, 1,
true, 0 /* num_samples */);
__DRIdrawable *dPriv)
{
struct gl_framebuffer *fb = dPriv->driverPrivate;
- struct intel_context *intel = pDRICtx->driverPrivate;
+ struct brw_context *brw = pDRICtx->driverPrivate;
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct intel_texture_object *intelObj;
struct intel_renderbuffer *rb;
_mesa_lock_texture(&intel->ctx, texObj);
texImage = _mesa_get_tex_image(ctx, texObj, target, level);
- intel_miptree_make_shareable(intel, rb->mt);
+ intel_miptree_make_shareable(brw, rb->mt);
intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
internalFormat, texFormat, 0,
rb->mt->region->width,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intelImage = intel_texture_image(texImage);
return false;
struct intel_mipmap_tree *temp_mt =
- intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat,
+ intel_miptree_create(brw, GL_TEXTURE_2D, texImage->TexFormat,
0, 0,
width, height, 1,
false, 0, INTEL_MIPTREE_TILING_NONE);
if (!temp_mt)
goto err;
- GLubyte *dst = intel_miptree_map_raw(intel, temp_mt);
+ GLubyte *dst = intel_miptree_map_raw(brw, temp_mt);
if (!dst)
goto err;
_mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
}
- intel_miptree_unmap_raw(intel, temp_mt);
+ intel_miptree_unmap_raw(brw, temp_mt);
bool ret;
- ret = intel_miptree_blit(intel,
+ ret = intel_miptree_blit(brw,
temp_mt, 0, 0,
0, 0, false,
intelImage->mt, texImage->Level, texImage->Face,
const struct gl_pixelstore_attrib *packing,
bool for_glTexImage)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *image = intel_texture_image(texImage);
/* Since we are going to write raw data to the miptree, we need to resolve
* any pending fast color clears before we start.
*/
- intel_miptree_resolve_color(intel, image->mt);
+ intel_miptree_resolve_color(brw, image->mt);
bo = image->mt->region->bo;
if (drm_intel_bo_references(intel->batch.bo, bo)) {
perf_debug("Flushing before mapping a referenced bo.\n");
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
}
if (unlikely(intel->perf_debug)) {
/*
*/
GLuint
-intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit)
{
+ struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
_mesa_get_format_name(firstImage->base.Base.TexFormat),
width, height, depth, tObj->BaseLevel, intelObj->_MaxLevel);
- intelObj->mt = intel_miptree_create(intel,
+ intelObj->mt = intel_miptree_create(brw,
intelObj->base.Target,
firstImage->base.Base.TexFormat,
tObj->BaseLevel,
break;
if (intelObj->mt != intelImage->mt) {
- intel_miptree_copy_teximage(intel, intelImage, intelObj->mt,
+ intel_miptree_copy_teximage(brw, intelImage, intelObj->mt,
false /* invalidate */);
}
if (brw_try_compact_instruction(p, &dst, &src)) {
struct brw_instruction uncompacted;
- brw_uncompact_instruction(intel, &uncompacted, &dst);
+ brw_uncompact_instruction(brw, &uncompacted, &dst);
if (memcmp(&uncompacted, &src, sizeof(src))) {
- brw_debug_compact_uncompact(intel, &src, &uncompacted);
+ brw_debug_compact_uncompact(brw, &src, &uncompacted);
return false;
}
} else {