i965: Pass brw_context to functions rather than intel_context.
author    Kenneth Graunke <kenneth@whitecape.org>
          Wed, 3 Jul 2013 06:17:14 +0000 (23:17 -0700)
committer Kenneth Graunke <kenneth@whitecape.org>
          Tue, 9 Jul 2013 21:08:53 +0000 (14:08 -0700)
This makes brw_context available in every function that previously used
intel_context, which in turn makes it possible to start migrating fields
from intel_context to brw_context.

Surprisingly, this actually removes some code: functions that use
OUT_BATCH no longer need to declare "intel"; they just use "brw".
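
A rough before/after sketch of that second point, using a hypothetical
state-emit function and packet name (the real hunks below, e.g.
brw_upload_cs_urb_state in brw_curbe.c, show the same pattern):

   /* Before: the function already received brw_context, but declared a
    * local intel_context only because the batch macros referenced "intel". */
   static void upload_example_state(struct brw_context *brw)
   {
      struct intel_context *intel = &brw->intel;   /* only here for OUT_BATCH */

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_EXAMPLE << 16 | (2 - 2));  /* hypothetical packet */
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* After: the batch macros key off "brw", so the local goes away.
    * Functions that still read intel_context fields derive it locally:
    *    struct intel_context *intel = &brw->intel;
    */
   static void upload_example_state(struct brw_context *brw)
   {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_EXAMPLE << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }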

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
77 files changed:
src/mesa/drivers/dri/i965/brw_blorp.cpp
src/mesa/drivers/dri/i965/brw_blorp.h
src/mesa/drivers/dri/i965/brw_blorp_blit.cpp
src/mesa/drivers/dri/i965/brw_blorp_clear.cpp
src/mesa/drivers/dri/i965/brw_clear.c
src/mesa/drivers/dri/i965/brw_context.c
src/mesa/drivers/dri/i965/brw_context.h
src/mesa/drivers/dri/i965/brw_curbe.c
src/mesa/drivers/dri/i965/brw_draw.c
src/mesa/drivers/dri/i965/brw_draw_upload.c
src/mesa/drivers/dri/i965/brw_eu.c
src/mesa/drivers/dri/i965/brw_eu.h
src/mesa/drivers/dri/i965/brw_eu_compact.c
src/mesa/drivers/dri/i965/brw_lower_texture_gradients.cpp
src/mesa/drivers/dri/i965/brw_misc_state.c
src/mesa/drivers/dri/i965/brw_program.c
src/mesa/drivers/dri/i965/brw_program.h
src/mesa/drivers/dri/i965/brw_queryobj.c
src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
src/mesa/drivers/dri/i965/brw_shader.cpp
src/mesa/drivers/dri/i965/brw_state.h
src/mesa/drivers/dri/i965/brw_state_batch.c
src/mesa/drivers/dri/i965/brw_state_cache.c
src/mesa/drivers/dri/i965/brw_state_dump.c
src/mesa/drivers/dri/i965/brw_state_upload.c
src/mesa/drivers/dri/i965/brw_surface_formats.c
src/mesa/drivers/dri/i965/brw_tex.c
src/mesa/drivers/dri/i965/brw_tex_layout.c
src/mesa/drivers/dri/i965/brw_vs.c
src/mesa/drivers/dri/i965/brw_vtbl.c
src/mesa/drivers/dri/i965/brw_wm.c
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
src/mesa/drivers/dri/i965/gen6_blorp.cpp
src/mesa/drivers/dri/i965/gen6_blorp.h
src/mesa/drivers/dri/i965/gen6_gs_state.c
src/mesa/drivers/dri/i965/gen6_multisample_state.c
src/mesa/drivers/dri/i965/gen6_queryobj.c
src/mesa/drivers/dri/i965/gen6_sampler_state.c
src/mesa/drivers/dri/i965/gen6_sol.c
src/mesa/drivers/dri/i965/gen6_urb.c
src/mesa/drivers/dri/i965/gen6_viewport_state.c
src/mesa/drivers/dri/i965/gen6_vs_state.c
src/mesa/drivers/dri/i965/gen7_blorp.cpp
src/mesa/drivers/dri/i965/gen7_blorp.h
src/mesa/drivers/dri/i965/gen7_disable.c
src/mesa/drivers/dri/i965/gen7_misc_state.c
src/mesa/drivers/dri/i965/gen7_sol_state.c
src/mesa/drivers/dri/i965/gen7_urb.c
src/mesa/drivers/dri/i965/gen7_viewport_state.c
src/mesa/drivers/dri/i965/gen7_vs_state.c
src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
src/mesa/drivers/dri/i965/intel_batchbuffer.c
src/mesa/drivers/dri/i965/intel_batchbuffer.h
src/mesa/drivers/dri/i965/intel_blit.c
src/mesa/drivers/dri/i965/intel_blit.h
src/mesa/drivers/dri/i965/intel_buffer_objects.c
src/mesa/drivers/dri/i965/intel_buffer_objects.h
src/mesa/drivers/dri/i965/intel_buffers.c
src/mesa/drivers/dri/i965/intel_buffers.h
src/mesa/drivers/dri/i965/intel_context.c
src/mesa/drivers/dri/i965/intel_context.h
src/mesa/drivers/dri/i965/intel_fbo.c
src/mesa/drivers/dri/i965/intel_fbo.h
src/mesa/drivers/dri/i965/intel_mipmap_tree.c
src/mesa/drivers/dri/i965/intel_mipmap_tree.h
src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
src/mesa/drivers/dri/i965/intel_pixel_copy.c
src/mesa/drivers/dri/i965/intel_pixel_read.c
src/mesa/drivers/dri/i965/intel_screen.c
src/mesa/drivers/dri/i965/intel_syncobj.c
src/mesa/drivers/dri/i965/intel_tex.c
src/mesa/drivers/dri/i965/intel_tex.h
src/mesa/drivers/dri/i965/intel_tex_copy.c
src/mesa/drivers/dri/i965/intel_tex_image.c
src/mesa/drivers/dri/i965/intel_tex_subimage.c
src/mesa/drivers/dri/i965/intel_tex_validate.c
src/mesa/drivers/dri/i965/test_eu_compact.c

index c7e7cd23126c3decc6b72ca1e7ec0142bf15251b..65c99523700d60001ad2daa8678d3965d36efe1f 100644 (file)
@@ -159,7 +159,7 @@ brw_blorp_params::brw_blorp_params()
 
 extern "C" {
 void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, gen6_hiz_op op)
 {
    const char *opname = NULL;
@@ -183,22 +183,22 @@ intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
        __FUNCTION__, opname, mt, level, layer);
 
    brw_hiz_op_params params(mt, level, layer, op);
-   brw_blorp_exec(intel, &params);
+   brw_blorp_exec(brw, &params);
 }
 
 } /* extern "C" */
 
 void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
 
    switch (intel->gen) {
    case 6:
-      gen6_blorp_exec(intel, params);
+      gen6_blorp_exec(brw, params);
       break;
    case 7:
-      gen7_blorp_exec(intel, params);
+      gen7_blorp_exec(brw, params);
       break;
    default:
       /* BLORP is not supported before Gen6. */
@@ -207,7 +207,7 @@ brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
    }
 
    if (unlikely(intel->always_flush_batch))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    /* We've smashed all state compared to what the normal 3D pipeline
     * rendering tracks for GL.
@@ -220,7 +220,7 @@ brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
    /* Flush the sampler cache so any texturing from the destination is
     * coherent.
     */
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }
 
 brw_hiz_op_params::brw_hiz_op_params(struct intel_mipmap_tree *mt,
index 9277d09733a0833f78aabeebf5bf9516f5f46a5a..a53ba2fcec57ba2cc97189782f651ad7bd22b7e5 100644 (file)
@@ -35,7 +35,7 @@ extern "C" {
 #endif
 
 void
-brw_blorp_blit_miptrees(struct intel_context *intel,
+brw_blorp_blit_miptrees(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         unsigned src_level, unsigned src_layer,
                         struct intel_mipmap_tree *dst_mt,
@@ -47,11 +47,11 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
                         bool mirror_x, bool mirror_y);
 
 bool
-brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
                       bool partial_clear);
 
 void
-brw_blorp_resolve_color(struct intel_context *intel,
+brw_blorp_resolve_color(struct brw_context *brw,
                         struct intel_mipmap_tree *mt);
 
 #ifdef __cplusplus
@@ -238,7 +238,7 @@ public:
 
 
 void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params);
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params);
 
 
 /**
index b08d38d58acdd9de53291cd3c43e2c1601701902..2840592797642df7a55882e27497d2b1dada886f 100644 (file)
@@ -124,7 +124,7 @@ find_miptree(GLbitfield buffer_bit, struct intel_renderbuffer *irb)
 }
 
 void
-brw_blorp_blit_miptrees(struct intel_context *intel,
+brw_blorp_blit_miptrees(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         unsigned src_level, unsigned src_layer,
                         struct intel_mipmap_tree *dst_mt,
@@ -141,9 +141,9 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
     * to destination color buffers, and the standard render path is
     * fast-color-aware.
     */
-   intel_miptree_resolve_color(intel, src_mt);
-   intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_layer);
-   intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_layer);
+   intel_miptree_resolve_color(brw, src_mt);
+   intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_layer);
+   intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_layer);
 
    DBG("%s from %s mt %p %d %d (%f,%f) (%f,%f)"
        "to %s mt %p %d %d (%f,%f) (%f,%f) (flip %d,%d)\n",
@@ -154,7 +154,7 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
        dst_level, dst_layer, dst_x0, dst_y0, dst_x1, dst_y1,
        mirror_x, mirror_y);
 
-   brw_blorp_blit_params params(brw_context(&intel->ctx),
+   brw_blorp_blit_params params(brw,
                                 src_mt, src_level, src_layer,
                                 dst_mt, dst_level, dst_layer,
                                 src_x0, src_y0,
@@ -162,13 +162,13 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
                                 dst_x0, dst_y0,
                                 dst_x1, dst_y1,
                                 mirror_x, mirror_y);
-   brw_blorp_exec(intel, &params);
+   brw_blorp_exec(brw, &params);
 
    intel_miptree_slice_set_needs_hiz_resolve(dst_mt, dst_level, dst_layer);
 }
 
 static void
-do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
+do_blorp_blit(struct brw_context *brw, GLbitfield buffer_bit,
               struct intel_renderbuffer *src_irb,
               struct intel_renderbuffer *dst_irb,
               GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
@@ -180,7 +180,7 @@ do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
    struct intel_mipmap_tree *dst_mt = find_miptree(buffer_bit, dst_irb);
 
    /* Do the blit */
-   brw_blorp_blit_miptrees(intel,
+   brw_blorp_blit_miptrees(brw,
                            src_mt, src_irb->mt_level, src_irb->mt_layer,
                            dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
                            srcX0, srcY0, srcX1, srcY1,
@@ -223,17 +223,17 @@ formats_match(GLbitfield buffer_bit, struct intel_renderbuffer *src_irb,
 }
 
 static bool
-try_blorp_blit(struct intel_context *intel,
+try_blorp_blit(struct brw_context *brw,
                GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
                GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
                GLenum filter, GLbitfield buffer_bit)
 {
-   struct gl_context *ctx = &intel->ctx;
+   struct gl_context *ctx = &brw->intel.ctx;
 
    /* Sync up the state of window system buffers.  We need to do this before
     * we go looking for the buffers.
     */
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    const struct gl_framebuffer *read_fb = ctx->ReadBuffer;
    const struct gl_framebuffer *draw_fb = ctx->DrawBuffer;
@@ -302,7 +302,7 @@ try_blorp_blit(struct intel_context *intel,
       for (unsigned i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; ++i) {
          dst_irb = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
         if (dst_irb)
-            do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+            do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
                           srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
                           mirror_x, mirror_y);
       }
@@ -314,7 +314,7 @@ try_blorp_blit(struct intel_context *intel,
          intel_renderbuffer(draw_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
       if (!formats_match(buffer_bit, src_irb, dst_irb))
          return false;
-      do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+      do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
                     srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
                     mirror_x, mirror_y);
       break;
@@ -325,7 +325,7 @@ try_blorp_blit(struct intel_context *intel,
          intel_renderbuffer(draw_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
       if (!formats_match(buffer_bit, src_irb, dst_irb))
          return false;
-      do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
+      do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
                     srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
                     mirror_x, mirror_y);
       break;
@@ -337,7 +337,7 @@ try_blorp_blit(struct intel_context *intel,
 }
 
 bool
-brw_blorp_copytexsubimage(struct intel_context *intel,
+brw_blorp_copytexsubimage(struct brw_context *brw,
                           struct gl_renderbuffer *src_rb,
                           struct gl_texture_image *dst_image,
                           int slice,
@@ -345,6 +345,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
                           int dstX0, int dstY0,
                           int width, int height)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
    struct intel_texture_image *intel_image = intel_texture_image(dst_image);
@@ -352,7 +353,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
    /* Sync up the state of window system buffers.  We need to do this before
     * we go looking at the src renderbuffer's miptree.
     */
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    struct intel_mipmap_tree *src_mt = src_irb->mt;
    struct intel_mipmap_tree *dst_mt = intel_image->mt;
@@ -391,7 +392,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
       mirror_y = true;
    }
 
-   brw_blorp_blit_miptrees(intel,
+   brw_blorp_blit_miptrees(brw,
                            src_mt, src_irb->mt_level, src_irb->mt_layer,
                            dst_mt, dst_image->Level, dst_image->Face + slice,
                            srcX0, srcY0, srcX1, srcY1,
@@ -414,7 +415,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
          dst_mt = dst_mt->stencil_mt;
 
       if (src_mt != dst_mt) {
-         brw_blorp_blit_miptrees(intel,
+         brw_blorp_blit_miptrees(brw,
                                  src_mt, src_irb->mt_level, src_irb->mt_layer,
                                  dst_mt, dst_image->Level,
                                  dst_image->Face + slice,
@@ -429,11 +430,13 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
 
 
 GLbitfield
-brw_blorp_framebuffer(struct intel_context *intel,
+brw_blorp_framebuffer(struct brw_context *brw,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
 {
+   struct intel_context *intel = &brw->intel;
+
    /* BLORP is not supported before Gen6. */
    if (intel->gen < 6)
       return mask;
@@ -446,7 +449,7 @@ brw_blorp_framebuffer(struct intel_context *intel,
 
    for (unsigned int i = 0; i < ARRAY_SIZE(buffer_bits); ++i) {
       if ((mask & buffer_bits[i]) &&
-       try_blorp_blit(intel,
+       try_blorp_blit(brw,
                       srcX0, srcY0, srcX1, srcY1,
                       dstX0, dstY0, dstX1, dstY1,
                       filter, buffer_bits[i])) {
index 5745f7f201683c2218dd627579f68e7420e01641..365c49c1eb1cfc704c306df1eb4fcd1d22b2548c 100644 (file)
@@ -143,10 +143,11 @@ brw_blorp_const_color_program::~brw_blorp_const_color_program()
  * moment we only support floating point, unorm, and snorm buffers.
  */
 static bool
-is_color_fast_clear_compatible(struct intel_context *intel,
+is_color_fast_clear_compatible(struct brw_context *brw,
                                gl_format format,
                                const union gl_color_union *color)
 {
+   struct intel_context *intel = &brw->intel;
    if (_mesa_is_format_integer_color(format))
       return false;
 
@@ -238,7 +239,7 @@ brw_blorp_clear_params::brw_blorp_clear_params(struct brw_context *brw,
    /* If we can do this as a fast color clear, do so. */
    if (irb->mt->mcs_state != INTEL_MCS_STATE_NONE && !partial_clear &&
        wm_prog_key.use_simd16_replicated_data &&
-       is_color_fast_clear_compatible(intel, format, &ctx->Color.ClearColor)) {
+       is_color_fast_clear_compatible(brw, format, &ctx->Color.ClearColor)) {
       memset(push_consts, 0xff, 4*sizeof(float));
       fast_clear_op = GEN7_FAST_CLEAR_OP_FAST_CLEAR;
 
@@ -258,7 +259,7 @@ brw_blorp_clear_params::brw_blorp_clear_params(struct brw_context *brw,
        * with X alignment multiplied by 16 and Y alignment multiplied by 32.
        */
       unsigned x_align, y_align;
-      intel_get_non_msrt_mcs_alignment(intel, irb->mt, &x_align, &y_align);
+      intel_get_non_msrt_mcs_alignment(brw, irb->mt, &x_align, &y_align);
       x_align *= 16;
       y_align *= 32;
       x0 = ROUND_DOWN_TO(x0, x_align);
@@ -303,7 +304,7 @@ brw_blorp_rt_resolve_params::brw_blorp_rt_resolve_params(
     * X and Y alignment each divided by 2.
     */
    unsigned x_align, y_align;
-   intel_get_non_msrt_mcs_alignment(&brw->intel, mt, &x_align, &y_align);
+   intel_get_non_msrt_mcs_alignment(brw, mt, &x_align, &y_align);
    unsigned x_scaledown = x_align / 2;
    unsigned y_scaledown = y_align / 2;
    x0 = y0 = 0;
@@ -425,11 +426,10 @@ brw_blorp_const_color_program::compile(struct brw_context *brw,
 
 extern "C" {
 bool
-brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
                       bool partial_clear)
 {
-   struct gl_context *ctx = &intel->ctx;
-   struct brw_context *brw = brw_context(ctx);
+   struct gl_context *ctx = &brw->intel.ctx;
 
    /* The constant color clear code doesn't work for multisampled surfaces, so
     * we need to support falling back to other clear mechanisms.
@@ -484,7 +484,7 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
           * it now.
           */
          if (!irb->mt->mcs_mt) {
-            if (!intel_miptree_alloc_non_msrt_mcs(intel, irb->mt)) {
+            if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
                /* MCS allocation failed--probably this will only happen in
                 * out-of-memory conditions.  But in any case, try to recover
                 * by falling back to a non-blorp clear technique.
@@ -498,7 +498,7 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
       DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
           irb->mt, irb->mt_level, irb->mt_layer);
 
-      brw_blorp_exec(intel, &params);
+      brw_blorp_exec(brw, &params);
 
       if (is_fast_clear) {
          /* Now that the fast clear has occurred, put the buffer in
@@ -513,14 +513,12 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
 }
 
 void
-brw_blorp_resolve_color(struct intel_context *intel, struct intel_mipmap_tree *mt)
+brw_blorp_resolve_color(struct brw_context *brw, struct intel_mipmap_tree *mt)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
-
    DBG("%s to mt %p\n", __FUNCTION__, mt);
 
    brw_blorp_rt_resolve_params params(brw, mt);
-   brw_blorp_exec(intel, &params);
+   brw_blorp_exec(brw, &params);
    mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
 }
 
index 0f3f7d94191126a541436fe4224babf8840698c8..f68969157e1f25edc64df3e1b282d889c1fe8ed6 100644 (file)
@@ -104,6 +104,7 @@ noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
 static bool
 brw_fast_clear_depth(struct gl_context *ctx)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *depth_irb =
@@ -167,7 +168,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
     * flags out of the HiZ buffer into the real depth buffer.
     */
    if (mt->depth_clear_value != depth_clear_value) {
-      intel_miptree_all_slices_resolve_depth(intel, mt);
+      intel_miptree_all_slices_resolve_depth(brw, mt);
       mt->depth_clear_value = depth_clear_value;
    }
 
@@ -178,9 +179,9 @@ brw_fast_clear_depth(struct gl_context *ctx)
     *      must be issued before the rectangle primitive used for the depth
     *      buffer clear operation.
     */
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 
-   intel_hiz_exec(intel, mt, depth_irb->mt_level, depth_irb->mt_layer,
+   intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                  GEN6_HIZ_OP_DEPTH_CLEAR);
 
    if (intel->gen == 6) {
@@ -190,7 +191,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
        *      by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
        *      followed by Depth FLUSH'
       */
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    }
 
    /* Now, the HiZ buffer contains data that needs to be resolved to the depth
@@ -219,7 +220,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask)
       intel->front_buffer_dirty = true;
    }
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
    brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);
 
    if (mask & BUFFER_BIT_DEPTH) {
@@ -232,7 +233,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask)
    /* BLORP is currently only supported on Gen6+. */
    if (intel->gen >= 6) {
       if (mask & BUFFER_BITS_COLOR) {
-         if (brw_blorp_clear_color(intel, fb, partial_clear)) {
+         if (brw_blorp_clear_color(brw, fb, partial_clear)) {
             debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
             mask &= ~BUFFER_BITS_COLOR;
          }
index 4507c54a7e5dc65d4556957fe49a57260dc8774e..f0ee07587fd60b49b3b3fe2d55fcecb039fc4fa8 100644 (file)
@@ -300,7 +300,7 @@ brwCreateContext(int api,
    struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
 
-   if (!intelInitContext( intel, api, major_version, minor_version,
+   if (!intelInitContext( brw, api, major_version, minor_version,
                           mesaVis, driContextPriv,
                          sharedContextPrivate, &functions,
                          error)) {
index e250f5137ade66d9ea81a7208f581ee69b6b621d..f40dcfe421abb007ad2988d8957c03e687b219f4 100644 (file)
@@ -1157,8 +1157,8 @@ void gen6_init_queryobj_functions(struct dd_function_table *functions);
 /*======================================================================
  * brw_state_dump.c
  */
-void brw_debug_batch(struct intel_context *intel);
-void brw_annotate_aub(struct intel_context *intel);
+void brw_debug_batch(struct brw_context *brw);
+void brw_annotate_aub(struct brw_context *brw);
 
 /*======================================================================
  * brw_tex.c
@@ -1172,7 +1172,7 @@ void brw_validate_textures( struct brw_context *brw );
 void brwInitFragProgFuncs( struct dd_function_table *functions );
 
 int brw_get_scratch_size(int size);
-void brw_get_scratch_bo(struct intel_context *intel,
+void brw_get_scratch_bo(struct brw_context *brw,
                        drm_intel_bo **scratch_bo, int size);
 void brw_init_shader_time(struct brw_context *brw);
 int brw_get_shader_time_index(struct brw_context *brw,
@@ -1212,8 +1212,8 @@ void brw_upload_ubo_surfaces(struct brw_context *brw,
                             uint32_t *surf_offsets);
 
 /* brw_surface_formats.c */
-bool brw_is_hiz_depth_format(struct intel_context *ctx, gl_format format);
-bool brw_render_target_supported(struct intel_context *intel,
+bool brw_is_hiz_depth_format(struct brw_context *ctx, gl_format format);
+bool brw_render_target_supported(struct brw_context *brw,
                                  struct gl_renderbuffer *rb);
 
 /* gen6_sol.c */
@@ -1234,13 +1234,13 @@ gen7_end_transform_feedback(struct gl_context *ctx,
 
 /* brw_blorp_blit.cpp */
 GLbitfield
-brw_blorp_framebuffer(struct intel_context *intel,
+brw_blorp_framebuffer(struct brw_context *brw,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter);
 
 bool
-brw_blorp_copytexsubimage(struct intel_context *intel,
+brw_blorp_copytexsubimage(struct brw_context *brw,
                           struct gl_renderbuffer *src_rb,
                           struct gl_texture_image *dst_image,
                           int slice,
@@ -1338,7 +1338,7 @@ brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
 }
 
 bool brw_do_cubemap_normalize(struct exec_list *instructions);
-bool brw_lower_texture_gradients(struct intel_context *intel,
+bool brw_lower_texture_gradients(struct brw_context *brw,
                                  struct exec_list *instructions);
 
 struct opcode_desc {
index 3abd22b562938adc69a50ce3db900cbf18fce791..07f0642aa3bd22514104f6463a30303f9dc166e7 100644 (file)
@@ -146,8 +146,6 @@ const struct brw_tracked_state brw_curbe_offsets = {
  */
 void brw_upload_cs_urb_state(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    /* It appears that this is the state packet for the CS unit, ie. the
     * urb entries detailed here are housed in the CS range from the
index 5730eeddc01084ed2e29eda450a8a40207e7a26e..e5fdef31cf2acd28da7676e0c488b1c47496906b 100644 (file)
@@ -196,7 +196,7 @@ static void brw_emit_prim(struct brw_context *brw,
     * the besides the draw code.
     */
    if (intel->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    }
 
    BEGIN_BATCH(6);
@@ -213,7 +213,7 @@ static void brw_emit_prim(struct brw_context *brw,
    intel->batch.need_workaround_flush = true;
 
    if (intel->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    }
 }
 
@@ -253,7 +253,7 @@ static void gen7_emit_prim(struct brw_context *brw,
     * the besides the draw code.
     */
    if (intel->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    }
 
    BEGIN_BATCH(7);
@@ -267,7 +267,7 @@ static void gen7_emit_prim(struct brw_context *brw,
    ADVANCE_BATCH();
 
    if (intel->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    }
 }
 
@@ -302,14 +302,13 @@ static void
 brw_predraw_resolve_buffers(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->intel.ctx;
-   struct intel_context *intel = &brw->intel;
    struct intel_renderbuffer *depth_irb;
    struct intel_texture_object *tex_obj;
 
    /* Resolve the depth buffer's HiZ buffer. */
    depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
    if (depth_irb)
-      intel_renderbuffer_resolve_hiz(intel, depth_irb);
+      intel_renderbuffer_resolve_hiz(brw, depth_irb);
 
    /* Resolve depth buffer of each enabled depth texture, and color buffer of
     * each fast-clear-enabled color texture.
@@ -320,8 +319,8 @@ brw_predraw_resolve_buffers(struct brw_context *brw)
       tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
       if (!tex_obj || !tex_obj->mt)
         continue;
-      intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
-      intel_miptree_resolve_color(intel, tex_obj->mt);
+      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
+      intel_miptree_resolve_color(brw, tex_obj->mt);
    }
 }
 
@@ -384,7 +383,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
     */
    brw_validate_textures( brw );
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    /* This workaround has to happen outside of brw_upload_state() because it
     * may flush the batchbuffer for a blit, affecting the state flags.
@@ -423,8 +422,8 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
        * we've got validated state that needs to be in the same batch as the
        * primitives.
        */
-      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
-      intel_batchbuffer_save_state(intel);
+      intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
+      intel_batchbuffer_save_state(brw);
 
       if (brw->num_instances != prim->num_instances) {
          brw->num_instances = prim->num_instances;
@@ -459,12 +458,12 @@ retry:
 
       if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
-           intel_batchbuffer_reset_to_saved(intel);
-           intel_batchbuffer_flush(intel);
+           intel_batchbuffer_reset_to_saved(brw);
+           intel_batchbuffer_flush(brw);
            fail_next = true;
            goto retry;
         } else {
-           if (intel_batchbuffer_flush(intel) == -ENOSPC) {
+           if (intel_batchbuffer_flush(brw) == -ENOSPC) {
               static bool warned = false;
 
               if (!warned) {
@@ -480,7 +479,7 @@ retry:
    }
 
    if (intel->always_flush_batch)
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    brw_state_cache_check_size(brw);
    brw_postdraw_set_buffers_need_resolve(brw);
index 74c9e2f40d447c5a7f17f633c8845f28ec2bae83..37cd9b524b54bca406c7f8630b0c4a0d46a3dd1f 100644 (file)
@@ -223,9 +223,10 @@ static GLuint byte_types_scale[5] = {
  * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
  */
 static unsigned
-get_surface_type(struct intel_context *intel,
+get_surface_type(struct brw_context *brw,
                  const struct gl_client_array *glarray)
 {
+   struct intel_context *intel = &brw->intel;
    int size = glarray->Size;
 
    if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
@@ -366,7 +367,7 @@ copy_array_to_vbo_array(struct brw_context *brw,
     * to replicate it out.
     */
    if (src_stride == 0) {
-      intel_upload_data(&brw->intel, element->glarray->Ptr,
+      intel_upload_data(brw, element->glarray->Ptr,
                         element->glarray->_ElementSize,
                         element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);
@@ -380,10 +381,10 @@ copy_array_to_vbo_array(struct brw_context *brw,
    GLuint size = count * dst_stride;
 
    if (dst_stride == src_stride) {
-      intel_upload_data(&brw->intel, src, size, dst_stride,
+      intel_upload_data(brw, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
    } else {
-      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
+      char * const map = intel_upload_map(brw, size, dst_stride);
       char *dst = map;
 
       while (count--) {
@@ -391,7 +392,7 @@ copy_array_to_vbo_array(struct brw_context *brw,
         src += src_stride;
         dst += dst_stride;
       }
-      intel_upload_unmap(&brw->intel, map, size, dst_stride,
+      intel_upload_unmap(brw, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
    }
    buffer->stride = dst_stride;
@@ -472,7 +473,7 @@ static void brw_prepare_vertices(struct brw_context *brw)
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
 
            /* Named buffer object: Just reference its contents directly. */
-            buffer->bo = intel_bufferobj_source(intel,
+            buffer->bo = intel_bufferobj_source(brw,
                                                 intel_buffer, 1,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
@@ -687,7 +688,7 @@ static void brw_emit_vertices(struct brw_context *brw)
    OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
    for (i = 0; i < brw->vb.nr_enabled; i++) {
       struct brw_vertex_element *input = brw->vb.enabled[i];
-      uint32_t format = get_surface_type(intel, input->glarray);
+      uint32_t format = get_surface_type(brw, input->glarray);
       uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
       uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
       uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
@@ -748,7 +749,7 @@ static void brw_emit_vertices(struct brw_context *brw)
    }
 
    if (intel->gen >= 6 && gen6_edgeflag_input) {
-      uint32_t format = get_surface_type(intel, gen6_edgeflag_input->glarray);
+      uint32_t format = get_surface_type(brw, gen6_edgeflag_input->glarray);
 
       OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                 GEN6_VE0_VALID |
@@ -820,7 +821,7 @@ static void brw_upload_indices(struct brw_context *brw)
 
       /* Get new bufferobj, offset:
        */
-      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
+      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
       brw->ib.start_vertex_offset = offset / ib_type_size;
    } else {
@@ -839,8 +840,7 @@ static void brw_upload_indices(struct brw_context *brw)
                                                     GL_MAP_READ_BIT,
                                                     bufferobj);
 
-          intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
-                            &bo, &offset);
+          intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
           brw->ib.start_vertex_offset = offset / ib_type_size;
 
           ctx->Driver.UnmapBuffer(ctx, bufferobj);
@@ -851,7 +851,7 @@ static void brw_upload_indices(struct brw_context *brw)
           */
          brw->ib.start_vertex_offset = offset / ib_type_size;
 
-         bo = intel_bufferobj_source(intel,
+         bo = intel_bufferobj_source(brw,
                                      intel_buffer_object(bufferobj),
                                      ib_type_size,
                                      &offset);
index 360089cf13170179ea23ce8e6d6aa6f7f859a1f7..bab56575db2a98d30256df7fd29f967380977782 100644 (file)
@@ -213,7 +213,7 @@ brw_init_compile(struct brw_context *brw, struct brw_compile *p, void *mem_ctx)
    p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
    p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
 
-   brw_init_compaction_tables(&brw->intel);
+   brw_init_compaction_tables(brw);
 }
 
 
@@ -230,7 +230,6 @@ void
 brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
 {
    struct brw_context *brw = p->brw;
-   struct intel_context *intel = &brw->intel;
    void *store = p->store;
    bool dump_hex = false;
 
@@ -247,7 +246,7 @@ brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
                   ((uint32_t *)insn)[0]);
         }
 
-        brw_uncompact_instruction(intel, &uncompacted, compacted);
+        brw_uncompact_instruction(brw, &uncompacted, compacted);
         insn = &uncompacted;
         offset += 8;
       } else {
index ce8843d8a6e9490f5181cdad6e92e33118d6bbdb..0e08e89eecc6a41930ccbd47e6487a8e3829583f 100644 (file)
@@ -393,16 +393,16 @@ void brw_set_uip_jip(struct brw_compile *p);
 uint32_t brw_swap_cmod(uint32_t cmod);
 
 /* brw_eu_compact.c */
-void brw_init_compaction_tables(struct intel_context *intel);
+void brw_init_compaction_tables(struct brw_context *brw);
 void brw_compact_instructions(struct brw_compile *p);
-void brw_uncompact_instruction(struct intel_context *intel,
+void brw_uncompact_instruction(struct brw_context *brw,
                               struct brw_instruction *dst,
                               struct brw_compact_instruction *src);
 bool brw_try_compact_instruction(struct brw_compile *p,
                                  struct brw_compact_instruction *dst,
                                  struct brw_instruction *src);
 
-void brw_debug_compact_uncompact(struct intel_context *intel,
+void brw_debug_compact_uncompact(struct brw_context *brw,
                                 struct brw_instruction *orig,
                                 struct brw_instruction *uncompacted);
 
index 5d77f1835e23920a24ed0178c33ef2c1f55ef5dd..c7ebf535df041f7958ff5c90e1e9b52f9fe7bc96 100644 (file)
@@ -326,10 +326,11 @@ static const uint32_t *subreg_table;
 static const uint32_t *src_index_table;
 
 static bool
-set_control_index(struct intel_context *intel,
+set_control_index(struct brw_context *brw,
                   struct brw_compact_instruction *dst,
                   struct brw_instruction *src)
 {
+   struct intel_context *intel = &brw->intel;
    uint32_t *src_u32 = (uint32_t *)src;
    uint32_t uncompacted = 0;
 
@@ -473,7 +474,7 @@ brw_try_compact_instruction(struct brw_compile *p,
 
    temp.dw0.opcode = src->header.opcode;
    temp.dw0.debug_control = src->header.debug_control;
-   if (!set_control_index(intel, &temp, src))
+   if (!set_control_index(brw, &temp, src))
       return false;
    if (!set_datatype_index(&temp, src))
       return false;
@@ -498,10 +499,11 @@ brw_try_compact_instruction(struct brw_compile *p,
 }
 
 static void
-set_uncompacted_control(struct intel_context *intel,
+set_uncompacted_control(struct brw_context *brw,
                         struct brw_instruction *dst,
                         struct brw_compact_instruction *src)
 {
+   struct intel_context *intel = &brw->intel;
    uint32_t *dst_u32 = (uint32_t *)dst;
    uint32_t uncompacted = control_index_table[src->dw0.control_index];
 
@@ -555,16 +557,17 @@ set_uncompacted_src1(struct brw_instruction *dst,
 }
 
 void
-brw_uncompact_instruction(struct intel_context *intel,
+brw_uncompact_instruction(struct brw_context *brw,
                           struct brw_instruction *dst,
                           struct brw_compact_instruction *src)
 {
+   struct intel_context *intel = &brw->intel;
    memset(dst, 0, sizeof(*dst));
 
    dst->header.opcode = src->dw0.opcode;
    dst->header.debug_control = src->dw0.debug_control;
 
-   set_uncompacted_control(intel, dst, src);
+   set_uncompacted_control(brw, dst, src);
    set_uncompacted_datatype(dst, src);
    set_uncompacted_subreg(dst, src);
    dst->header.acc_wr_control = src->dw0.acc_wr_control;
@@ -578,10 +581,11 @@ brw_uncompact_instruction(struct intel_context *intel,
    dst->bits3.da1.src1_reg_nr = src->dw1.src1_reg_nr;
 }
 
-void brw_debug_compact_uncompact(struct intel_context *intel,
+void brw_debug_compact_uncompact(struct brw_context *brw,
                                  struct brw_instruction *orig,
                                  struct brw_instruction *uncompacted)
 {
+   struct intel_context *intel = &brw->intel;
    fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n",
            intel->gen);
 
@@ -632,8 +636,9 @@ update_uip_jip(struct brw_instruction *insn, int this_old_ip,
 }
 
 void
-brw_init_compaction_tables(struct intel_context *intel)
+brw_init_compaction_tables(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0);
    assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0);
    assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
@@ -697,9 +702,9 @@ brw_compact_instructions(struct brw_compile *p)
 
          if (INTEL_DEBUG) {
             struct brw_instruction uncompacted;
-            brw_uncompact_instruction(intel, &uncompacted, dst);
+            brw_uncompact_instruction(brw, &uncompacted, dst);
             if (memcmp(&saved, &uncompacted, sizeof(uncompacted))) {
-               brw_debug_compact_uncompact(intel, &saved, &uncompacted);
+               brw_debug_compact_uncompact(brw, &saved, &uncompacted);
             }
          }
 
index c5294bcf0cbc26fda6ecfd4792da522946070005..543c720aae533b1101cee12050e4af03e8b98b33 100644 (file)
@@ -165,9 +165,10 @@ lower_texture_grad_visitor::visit_leave(ir_texture *ir)
 extern "C" {
 
 bool
-brw_lower_texture_gradients(struct intel_context *intel,
+brw_lower_texture_gradients(struct brw_context *brw,
                             struct exec_list *instructions)
 {
+   struct intel_context *intel = &brw->intel;
    bool has_sample_d_c = intel->gen >= 8 || intel->is_haswell;
    lower_texture_grad_visitor v(has_sample_d_c);
 
index 23faee6150c033510a039124cfe08c6a186536b2..8d143be3c6e5ccf58f00aa4ff7677e48416b2d85 100644 (file)
@@ -76,8 +76,6 @@ const struct brw_tracked_state brw_drawing_rect = {
  */
 static void upload_binding_table_pointers(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(6);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
    OUT_BATCH(brw->vs.bind_bo_offset);
@@ -110,8 +108,6 @@ const struct brw_tracked_state brw_binding_table_pointers = {
  */
 static void upload_gen6_binding_table_pointers(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
@@ -398,7 +394,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
          perf_debug("HW workaround: blitting depth level %d to a temporary "
                     "to fix alignment (depth tile offset %d,%d)\n",
                     depth_irb->mt_level, tile_x, tile_y);
-         intel_renderbuffer_move_to_temp(intel, depth_irb, invalidate_depth);
+         intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
          /* In the case of stencil_irb being the same packed depth/stencil
           * texture but not the same rb, make it point at our rebased mt, too.
           */
@@ -459,7 +455,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
                  "to fix alignment (stencil tile offset %d,%d)\n",
                  stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);
 
-      intel_renderbuffer_move_to_temp(intel, stencil_irb, invalidate_stencil);
+      intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
       stencil_mt = get_stencil_miptree(stencil_irb);
 
       intel_miptree_get_image_offset(stencil_mt,
@@ -483,8 +479,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
                        tile_x, tile_y,
                        stencil_tile_x, stencil_tile_y);
 
-            intel_renderbuffer_move_to_temp(intel, depth_irb,
-                                            invalidate_depth);
+            intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
 
             tile_x = depth_irb->draw_x & tile_mask_x;
             tile_y = depth_irb->draw_y & tile_mask_y;
@@ -675,8 +670,8 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
     * non-pipelined state that will need the PIPE_CONTROL workaround.
     */
    if (intel->gen == 6) {
-      intel_emit_post_sync_nonzero_flush(intel);
-      intel_emit_depth_stall_flushes(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
+      intel_emit_depth_stall_flushes(brw);
    }
 
    unsigned int len;
@@ -782,7 +777,7 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
     */
    if (intel->gen >= 6 || hiz) {
       if (intel->gen == 6)
-        intel_emit_post_sync_nonzero_flush(intel);
+        intel_emit_post_sync_nonzero_flush(brw);
 
       BEGIN_BATCH(2);
       OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
@@ -819,7 +814,7 @@ static void upload_polygon_stipple(struct brw_context *brw)
       return;
 
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    BEGIN_BATCH(33);
    OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));
@@ -867,7 +862,7 @@ static void upload_polygon_stipple_offset(struct brw_context *brw)
       return;
 
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));
@@ -909,7 +904,7 @@ static void upload_aa_line_parameters(struct brw_context *brw)
       return;
 
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
    /* use legacy aa line coverage computation */
@@ -942,7 +937,7 @@ static void upload_line_stipple(struct brw_context *brw)
       return;
 
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    BEGIN_BATCH(3);
    OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
@@ -985,7 +980,7 @@ brw_upload_invariant_state(struct brw_context *brw)
 
    /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    /* Select the 3D pipeline (as opposed to media) */
    BEGIN_BATCH(1);
@@ -1045,7 +1040,7 @@ static void upload_state_base_address( struct brw_context *brw )
 
    if (intel->gen >= 6) {
       if (intel->gen == 6)
-        intel_emit_post_sync_nonzero_flush(intel);
+        intel_emit_post_sync_nonzero_flush(brw);
 
        BEGIN_BATCH(10);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
index cd37e708ed80c2b41ae105a83199e6bde1e269b6..7b004568e1f5874e59cc657ab8527b679fa675f0 100644 (file)
@@ -205,9 +205,10 @@ brw_get_scratch_size(int size)
 }
 
 void
-brw_get_scratch_bo(struct intel_context *intel,
+brw_get_scratch_bo(struct brw_context *brw,
                   drm_intel_bo **scratch_bo, int size)
 {
+   struct intel_context *intel = &brw->intel;
    drm_intel_bo *old_bo = *scratch_bo;
 
    if (old_bo && old_bo->size < size) {
index 010a9b87490e959f251aa3247c128415daa2d979..2a944a0858a5ed86edcb209bb3954eebeadc75e6 100644 (file)
@@ -45,7 +45,7 @@ struct brw_sampler_prog_key_data {
 void brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                        const struct gl_program *prog,
                                        struct brw_sampler_prog_key_data *key);
-bool brw_debug_recompile_sampler_key(struct intel_context *intel,
+bool brw_debug_recompile_sampler_key(struct brw_context *brw,
                                      const struct brw_sampler_prog_key_data *old_key,
                                      const struct brw_sampler_prog_key_data *key);
 void brw_add_texrect_params(struct gl_program *prog);
index 985dbedddfca54a7f21d4121242cac1d7662c234..160df707488eedb383b1e99ae23c8c6657e5745a 100644 (file)
@@ -47,8 +47,9 @@
  * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
  */
 static void
-write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    if (intel->gen >= 6) {
       /* Emit workaround flushes: */
       if (intel->gen == 6) {
@@ -92,8 +93,9 @@ write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
  * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
  */
 static void
-write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen < 6);
 
    BEGIN_BATCH(4);
@@ -120,6 +122,7 @@ static void
 brw_queryobj_get_results(struct gl_context *ctx,
                         struct brw_query_object *query)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    int i;
@@ -135,7 +138,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
     * when mapped.
     */
    if (drm_intel_bo_references(intel->batch.bo, query->bo))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (unlikely(intel->perf_debug)) {
       if (drm_intel_bo_busy(query->bo)) {
@@ -270,7 +273,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        */
       drm_intel_bo_unreference(query->bo);
       query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
-      write_timestamp(intel, query->bo, 0);
+      write_timestamp(brw, query->bo, 0);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
@@ -323,7 +326,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED_EXT:
       /* Write the final timestamp. */
-      write_timestamp(intel, query->bo, 1);
+      write_timestamp(brw, query->bo, 1);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
@@ -386,6 +389,7 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
  */
 static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
@@ -399,7 +403,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
     *      the async query will return true in finite time.
     */
    if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
       brw_queryobj_get_results(ctx, query);
@@ -473,7 +477,7 @@ brw_emit_query_begin(struct brw_context *brw)
 
    ensure_bo_has_space(ctx, query);
 
-   write_depth_count(intel, query->bo, query->last_index * 2);
+   write_depth_count(brw, query->bo, query->last_index * 2);
 
    brw->query.begin_emitted = true;
 }
@@ -496,7 +500,7 @@ brw_emit_query_end(struct brw_context *brw)
    if (!brw->query.begin_emitted)
       return;
 
-   write_depth_count(intel, query->bo, query->last_index * 2 + 1);
+   write_depth_count(brw, query->bo, query->last_index * 2 + 1);
 
    brw->query.begin_emitted = false;
    query->last_index++;
@@ -512,6 +516,7 @@ brw_emit_query_end(struct brw_context *brw)
 static void
 brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *) q;
 
@@ -519,7 +524,7 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
 
    drm_intel_bo_unreference(query->bo);
    query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
-   write_timestamp(intel, query->bo, 0);
+   write_timestamp(brw, query->bo, 0);
 }
 
 /**
index 2b2706ff0b6652a1f38439f557496accff67d560..f95305acb879a36e9b9576e3aab9f65b45975268 100644 (file)
@@ -59,8 +59,10 @@ static bool debug = false;
 class schedule_node : public exec_node
 {
 public:
-   schedule_node(backend_instruction *inst, const struct intel_context *intel)
+   schedule_node(backend_instruction *inst, const struct brw_context *brw)
    {
+      const struct intel_context *intel = &brw->intel;
+
       this->inst = inst;
       this->child_array_size = 0;
       this->children = NULL;
@@ -428,7 +430,7 @@ vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
 void
 instruction_scheduler::add_inst(backend_instruction *inst)
 {
-   schedule_node *n = new(mem_ctx) schedule_node(inst, bv->intel);
+   schedule_node *n = new(mem_ctx) schedule_node(inst, bv->brw);
 
    assert(!inst->is_head_sentinel());
    assert(!inst->is_tail_sentinel());
index 584b4c23f7d6abfd249ee0c9f474c28a97b8549c..0c5a6cc872b19755ba0d3f3d60d5bac52800d013 100644 (file)
@@ -166,7 +166,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
         lower_if_to_cond_assign(shader->ir, 16);
 
       do_lower_texture_projection(shader->ir);
-      brw_lower_texture_gradients(intel, shader->ir);
+      brw_lower_texture_gradients(brw, shader->ir);
       do_vec_index_to_cond_assign(shader->ir);
       lower_vector_insert(shader->ir, true);
       brw_do_cubemap_normalize(shader->ir);
index 3ac65cf9871126d9f9c89d93bdb4875d7374dd98..ed1df87accdc44a411bfaacd941d20414ef7ea74 100644 (file)
@@ -160,7 +160,7 @@ void brw_destroy_caches( struct brw_context *brw );
 /***********************************************************************
  * brw_state_batch.c
  */
-#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(&brw->intel, (s), \
+#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(brw, (s), \
                                                        sizeof(*(s)), false)
 
 void *brw_state_batch(struct brw_context *brw,
@@ -178,7 +178,7 @@ uint32_t brw_format_for_mesa_format(gl_format mesa_format);
 
 GLuint translate_tex_target(GLenum target);
 
-GLuint translate_tex_format(struct intel_context *intel,
+GLuint translate_tex_format(struct brw_context *brw,
                             gl_format mesa_format,
                            GLenum depth_mode,
                            GLenum srgb_decode);
index 84683511066e309ac5705d873158531495408be3..ea1fe8148e51dfe1adda835f1a7a19a783e6f5d3 100644 (file)
@@ -79,9 +79,9 @@ make_annotation(drm_intel_aub_annotation *annotation, uint32_t type,
  * is annotated according to the type of each data structure.
  */
 void
-brw_annotate_aub(struct intel_context *intel)
+brw_annotate_aub(struct brw_context *brw)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
 
    unsigned annotation_count = 2 * brw->state_batch_count + 1;
    drm_intel_aub_annotation annotations[annotation_count];
@@ -135,7 +135,7 @@ brw_state_batch(struct brw_context *brw,
     */
    if (batch->state_batch_offset < size ||
        offset < 4*batch->used + batch->reserved_space) {
-      intel_batchbuffer_flush(&brw->intel);
+      intel_batchbuffer_flush(brw);
       offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
    }
 
index 505c7e8ad92036192b52c24298442932899f5d8c..e26f33ad0820e6920b3ccf9b78f3c0177e58fda5 100644 (file)
@@ -351,7 +351,6 @@ brw_init_caches(struct brw_context *brw)
 static void
 brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_cache_item *c, *next;
    GLuint i;
 
@@ -383,7 +382,7 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
    brw->state.dirty.mesa |= ~0;
    brw->state.dirty.brw |= ~0;
    brw->state.dirty.cache |= ~0;
-   intel_batchbuffer_flush(intel);
+   intel_batchbuffer_flush(brw);
 }
 
 void
index 6fc883753cc069d27e38cd7799f4895ada2cde49..1b5a93355db6016456cdbbc7e00b38bfb8e7647e 100644 (file)
@@ -641,9 +641,9 @@ dump_state_batch(struct brw_context *brw)
  * The buffer offsets printed rely on the buffer containing the last offset
  * it was validated at.
  */
-void brw_debug_batch(struct intel_context *intel)
+void brw_debug_batch(struct brw_context *brw)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
 
    drm_intel_bo_map(intel->batch.bo, false);
    dump_state_batch(brw);
index b04f9af75c03cb626bb369cde1185d299ec7b5f0..2adf20396c5937d771e0af35fc88433248f8f6d6 100644 (file)
@@ -478,7 +478,7 @@ void brw_upload_state(struct brw_context *brw)
    if ((state->mesa | state->cache | state->brw) == 0)
       return;
 
-   intel_check_front_buffer_rendering(intel);
+   intel_check_front_buffer_rendering(brw);
 
    if (unlikely(INTEL_DEBUG)) {
       /* Debug version which enforces various sanity checks on the
index 28a4b3abc128e994b0b6e23db9b9ffff1093e604..a1215eedf9252382dd48c37d6f569c70ab2801c8 100644 (file)
@@ -649,10 +649,10 @@ brw_init_surface_formats(struct brw_context *brw)
 }
 
 bool
-brw_render_target_supported(struct intel_context *intel,
+brw_render_target_supported(struct brw_context *brw,
                            struct gl_renderbuffer *rb)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
    gl_format format = rb->Format;
 
    /* Many integer formats are promoted to RGBA (like XRGB8888 is), which means
@@ -683,12 +683,13 @@ brw_render_target_supported(struct intel_context *intel,
 }
 
 GLuint
-translate_tex_format(struct intel_context *intel,
+translate_tex_format(struct brw_context *brw,
                      gl_format mesa_format,
                     GLenum depth_mode,
                     GLenum srgb_decode)
 {
-   struct gl_context *ctx = &intel->ctx;
+   struct intel_context *intel = &brw->intel;
+   struct gl_context *ctx = &brw->intel.ctx;
    if (srgb_decode == GL_SKIP_DECODE_EXT)
       mesa_format = _mesa_get_srgb_format_linear(mesa_format);
 
@@ -732,8 +733,9 @@ translate_tex_format(struct intel_context *intel,
 
 /** Can HiZ be enabled on a depthbuffer of the given format? */
 bool
-brw_is_hiz_depth_format(struct intel_context *intel, gl_format format)
+brw_is_hiz_depth_format(struct brw_context *brw, gl_format format)
 {
+   struct intel_context *intel = &brw->intel;
    if (!intel->has_hiz)
       return false;
 
index 53c41e235c51603b611eadc7e33b1a082877fdee..2393709a0f2fde97c871803fb620939117a97d6f 100644 (file)
 void brw_validate_textures( struct brw_context *brw )
 {
    struct gl_context *ctx = &brw->intel.ctx;
-   struct intel_context *intel = &brw->intel;
    int i;
 
    for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
       struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
 
       if (texUnit->_ReallyEnabled) {
-        intel_finalize_mipmap_tree(intel, i);
+        intel_finalize_mipmap_tree(brw, i);
       }
    }
 }
index ac02b5c94d136730139be762f7a7519a7c614592..a2870a25536f06bd76af287af1a658b423fcec2b 100644 (file)
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
 
 static unsigned int
-intel_horizontal_texture_alignment_unit(struct intel_context *intel,
+intel_horizontal_texture_alignment_unit(struct brw_context *brw,
                                        gl_format format)
 {
+   struct intel_context *intel = &brw->intel;
    /**
     * From the "Alignment Unit Size" section of various specs, namely:
     * - Gen3 Spec: "Memory Data Formats" Volume,         Section 1.20.1.4
@@ -93,9 +94,10 @@ intel_horizontal_texture_alignment_unit(struct intel_context *intel,
 }
 
 static unsigned int
-intel_vertical_texture_alignment_unit(struct intel_context *intel,
+intel_vertical_texture_alignment_unit(struct brw_context *brw,
                                      gl_format format)
 {
+   struct intel_context *intel = &brw->intel;
    /**
     * From the "Alignment Unit Size" section of various specs, namely:
     * - Gen3 Spec: "Memory Data Formats" Volume,         Section 1.20.1.4
@@ -205,9 +207,10 @@ brw_miptree_layout_2d(struct intel_mipmap_tree *mt)
 }
 
 static void
-brw_miptree_layout_texture_array(struct intel_context *intel,
+brw_miptree_layout_texture_array(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
 {
+   struct intel_context *intel = &brw->intel;
    unsigned qpitch = 0;
    int h0, h1;
 
@@ -231,7 +234,7 @@ brw_miptree_layout_texture_array(struct intel_context *intel,
 }
 
 static void
-brw_miptree_layout_texture_3d(struct intel_context *intel,
+brw_miptree_layout_texture_3d(struct brw_context *brw,
                               struct intel_mipmap_tree *mt)
 {
    unsigned width  = mt->physical_width0;
@@ -309,39 +312,40 @@ brw_miptree_layout_texture_3d(struct intel_context *intel,
 }
 
 void
-brw_miptree_layout(struct intel_context *intel, struct intel_mipmap_tree *mt)
+brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt)
 {
-   mt->align_w = intel_horizontal_texture_alignment_unit(intel, mt->format);
-   mt->align_h = intel_vertical_texture_alignment_unit(intel, mt->format);
+   struct intel_context *intel = &brw->intel;
+   mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format);
+   mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format);
 
    switch (mt->target) {
    case GL_TEXTURE_CUBE_MAP:
       if (intel->gen == 4) {
          /* Gen4 stores cube maps as 3D textures. */
          assert(mt->physical_depth0 == 6);
-         brw_miptree_layout_texture_3d(intel, mt);
+         brw_miptree_layout_texture_3d(brw, mt);
       } else {
          /* All other hardware stores cube maps as 2D arrays. */
-        brw_miptree_layout_texture_array(intel, mt);
+        brw_miptree_layout_texture_array(brw, mt);
       }
       break;
 
    case GL_TEXTURE_3D:
-      brw_miptree_layout_texture_3d(intel, mt);
+      brw_miptree_layout_texture_3d(brw, mt);
       break;
 
    case GL_TEXTURE_1D_ARRAY:
    case GL_TEXTURE_2D_ARRAY:
    case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
    case GL_TEXTURE_CUBE_MAP_ARRAY:
-      brw_miptree_layout_texture_array(intel, mt);
+      brw_miptree_layout_texture_array(brw, mt);
       break;
 
    default:
       switch (mt->msaa_layout) {
       case INTEL_MSAA_LAYOUT_UMS:
       case INTEL_MSAA_LAYOUT_CMS:
-         brw_miptree_layout_texture_array(intel, mt);
+         brw_miptree_layout_texture_array(brw, mt);
          break;
       case INTEL_MSAA_LAYOUT_NONE:
       case INTEL_MSAA_LAYOUT_IMS:
index d173d2e31b8e3ef589cfd702b4bfbc3a4d39a486..15bd1872f016963752287de7e1ddf0df7d36c264 100644 (file)
@@ -317,7 +317,7 @@ do_vs_prog(struct brw_context *brw,
       prog_data.base.total_scratch
          = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);
 
-      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
+      brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
    }
 
@@ -332,8 +332,9 @@ do_vs_prog(struct brw_context *brw,
 }
 
 static bool
-key_debug(struct intel_context *intel, const char *name, int a, int b)
+key_debug(struct brw_context *brw, const char *name, int a, int b)
 {
+   struct intel_context *intel = &brw->intel;
    if (a != b) {
       perf_debug("  %s %d->%d\n", name, a, b);
       return true;
@@ -373,31 +374,31 @@ brw_vs_debug_recompile(struct brw_context *brw,
    }
 
    for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
-      found |= key_debug(intel, "Vertex attrib w/a flags",
+      found |= key_debug(brw, "Vertex attrib w/a flags",
                          old_key->gl_attrib_wa_flags[i],
                          key->gl_attrib_wa_flags[i]);
    }
 
-   found |= key_debug(intel, "user clip flags",
+   found |= key_debug(brw, "user clip flags",
                       old_key->base.userclip_active, key->base.userclip_active);
 
-   found |= key_debug(intel, "user clipping planes as push constants",
+   found |= key_debug(brw, "user clipping planes as push constants",
                       old_key->base.nr_userclip_plane_consts,
                       key->base.nr_userclip_plane_consts);
 
-   found |= key_debug(intel, "clip distance enable",
+   found |= key_debug(brw, "clip distance enable",
                       old_key->base.uses_clip_distance, key->base.uses_clip_distance);
-   found |= key_debug(intel, "clip plane enable bitfield",
+   found |= key_debug(brw, "clip plane enable bitfield",
                       old_key->base.userclip_planes_enabled_gen_4_5,
                       key->base.userclip_planes_enabled_gen_4_5);
-   found |= key_debug(intel, "copy edgeflag",
+   found |= key_debug(brw, "copy edgeflag",
                       old_key->copy_edgeflag, key->copy_edgeflag);
-   found |= key_debug(intel, "PointCoord replace",
+   found |= key_debug(brw, "PointCoord replace",
                       old_key->point_coord_replace, key->point_coord_replace);
-   found |= key_debug(intel, "vertex color clamping",
+   found |= key_debug(brw, "vertex color clamping",
                       old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);
 
-   found |= brw_debug_recompile_sampler_key(intel, &old_key->base.tex,
+   found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
                                             &key->base.tex);
 
    if (!found) {
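
key_debug() above gains an `intel` local with no direct use in the visible body; given the intel->perf_debug checks elsewhere in this series, the perf_debug() macro it calls presumably expands to code that references a variable named `intel` in scope, which is why the local is kept. A self-contained sketch of that macro-capture idiom, with a stand-in macro and stand-in types rather than the real perf_debug():

   /* Illustrative sketch: a debug macro that relies on a local named
    * "intel" being in scope.  Not the real perf_debug(). */
   #include <stdbool.h>
   #include <stdio.h>

   struct intel_context { bool perf_debug; };
   struct brw_context   { struct intel_context intel; };

   #define sketch_perf_debug(...)            \
      do {                                   \
         if (intel->perf_debug)              \
            fprintf(stderr, __VA_ARGS__);    \
      } while (0)

   static bool
   key_debug_sketch(struct brw_context *brw, const char *name, int a, int b)
   {
      struct intel_context *intel = &brw->intel;  /* consumed by the macro */
      if (a != b) {
         sketch_perf_debug("  %s %d->%d\n", name, a, b);
         return true;
      }
      return false;
   }

   int main(void)
   {
      struct brw_context brw = { .intel = { .perf_debug = true } };
      return key_debug_sketch(&brw, "clip distance enable", 0, 1) ? 0 : 1;
   }
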
index f9a541fa636f84948daf286dc9cb148b8bfcad51..dadee9d365188e688e1c27d3bf4dface6afb3f1f 100644 (file)
@@ -66,10 +66,10 @@ dri_bo_release(drm_intel_bo **bo)
 /**
  * called from intelDestroyContext()
  */
-static void brw_destroy_context( struct intel_context *intel )
+static void
+brw_destroy_context(struct brw_context *brw)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
-
+   struct intel_context *intel = &brw->intel;
    if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
       /* Force a report. */
       brw->shader_time.report_time = 0;
@@ -99,9 +99,9 @@ static void brw_destroy_context( struct intel_context *intel )
  * at the end of a batchbuffer.  If you add more GPU state, increase
  * the BATCH_RESERVED macro.
  */
-static void brw_finish_batch(struct intel_context *intel)
+static void
+brw_finish_batch(struct brw_context *brw)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
    brw_emit_query_end(brw);
 
    if (brw->curbe.curbe_bo) {
@@ -115,9 +115,10 @@ static void brw_finish_batch(struct intel_context *intel)
 /**
  * called from intelFlushBatchLocked
  */
-static void brw_new_batch( struct intel_context *intel )
+static void
+brw_new_batch(struct brw_context *brw)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
 
    /* If the kernel supports hardware contexts, then most hardware state is
     * preserved between batches; we only need to re-emit state that is required
index 5f681f6caf3578647663104b92457f0eb7d3f79c..c560af725031bcdc96475ca8e0606a6c476bb1af 100644 (file)
@@ -184,7 +184,7 @@ bool do_wm_prog(struct brw_context *brw,
 
       c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);
 
-      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
+      brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
    }
 
@@ -203,8 +203,9 @@ bool do_wm_prog(struct brw_context *brw,
 }
 
 static bool
-key_debug(struct intel_context *intel, const char *name, int a, int b)
+key_debug(struct brw_context *brw, const char *name, int a, int b)
 {
+   struct intel_context *intel = &brw->intel;
    if (a != b) {
       perf_debug("  %s %d->%d\n", name, a, b);
       return true;
@@ -214,25 +215,25 @@ key_debug(struct intel_context *intel, const char *name, int a, int b)
 }
 
 bool
-brw_debug_recompile_sampler_key(struct intel_context *intel,
+brw_debug_recompile_sampler_key(struct brw_context *brw,
                                 const struct brw_sampler_prog_key_data *old_key,
                                 const struct brw_sampler_prog_key_data *key)
 {
    bool found = false;
 
    for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
-      found |= key_debug(intel, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
+      found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                          old_key->swizzles[i], key->swizzles[i]);
    }
-   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 1st coordinate",
+   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
                       old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
-   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
+   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
                       old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
-   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
+   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
                       old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
-   found |= key_debug(intel, "GL_MESA_ycbcr texturing\n",
+   found |= key_debug(brw, "GL_MESA_ycbcr texturing\n",
                       old_key->yuvtex_mask, key->yuvtex_mask);
-   found |= key_debug(intel, "GL_MESA_ycbcr UV swapping\n",
+   found |= key_debug(brw, "GL_MESA_ycbcr UV swapping\n",
                       old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);
 
    return found;
@@ -268,29 +269,29 @@ brw_wm_debug_recompile(struct brw_context *brw,
       return;
    }
 
-   found |= key_debug(intel, "alphatest, computed depth, depth test, or "
+   found |= key_debug(brw, "alphatest, computed depth, depth test, or "
                       "depth write",
                       old_key->iz_lookup, key->iz_lookup);
-   found |= key_debug(intel, "depth statistics",
+   found |= key_debug(brw, "depth statistics",
                       old_key->stats_wm, key->stats_wm);
-   found |= key_debug(intel, "flat shading",
+   found |= key_debug(brw, "flat shading",
                       old_key->flat_shade, key->flat_shade);
-   found |= key_debug(intel, "number of color buffers",
+   found |= key_debug(brw, "number of color buffers",
                       old_key->nr_color_regions, key->nr_color_regions);
-   found |= key_debug(intel, "MRT alpha test or alpha-to-coverage",
+   found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
                       old_key->replicate_alpha, key->replicate_alpha);
-   found |= key_debug(intel, "rendering to FBO",
+   found |= key_debug(brw, "rendering to FBO",
                       old_key->render_to_fbo, key->render_to_fbo);
-   found |= key_debug(intel, "fragment color clamping",
+   found |= key_debug(brw, "fragment color clamping",
                       old_key->clamp_fragment_color, key->clamp_fragment_color);
-   found |= key_debug(intel, "line smoothing",
+   found |= key_debug(brw, "line smoothing",
                       old_key->line_aa, key->line_aa);
-   found |= key_debug(intel, "renderbuffer height",
+   found |= key_debug(brw, "renderbuffer height",
                       old_key->drawable_height, key->drawable_height);
-   found |= key_debug(intel, "input slots valid",
+   found |= key_debug(brw, "input slots valid",
                       old_key->input_slots_valid, key->input_slots_valid);
 
-   found |= brw_debug_recompile_sampler_key(intel, &old_key->tex, &key->tex);
+   found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);
 
    if (!found) {
       perf_debug("  Something else\n");
index 27a265102398ddfccd201a6424d7ff05130c905b..16e3cc087d1ecd9756e1659221f344f258e1319d 100644 (file)
@@ -251,7 +251,6 @@ brw_update_texture_surface(struct gl_context *ctx,
                            uint32_t *binding_table,
                            unsigned surf_index)
 {
-   struct intel_context *intel = intel_context(ctx);
    struct brw_context *brw = brw_context(ctx);
    struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
    struct intel_texture_object *intelObj = intel_texture_object(tObj);
@@ -272,7 +271,7 @@ brw_update_texture_surface(struct gl_context *ctx,
    surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
-             (translate_tex_format(intel,
+             (translate_tex_format(brw,
                                     mt->format,
                                    tObj->DepthMode,
                                    sampler->sRGBDecode) <<
@@ -374,8 +373,7 @@ brw_update_sol_surface(struct brw_context *brw,
 {
    struct intel_context *intel = &brw->intel;
    struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
-   drm_intel_bo *bo =
-      intel_bufferobj_buffer(intel, intel_bo, INTEL_WRITE_PART);
+   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART);
    uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                     out_offset);
    uint32_t pitch_minus_1 = 4*stride_dwords - 1;
@@ -561,7 +559,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
       unsigned width_in_tiles = ALIGN(fb->Width, 16) / 16;
       unsigned height_in_tiles = ALIGN(fb->Height, 16) / 16;
       unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
-      brw_get_scratch_bo(intel, &brw->wm.multisampled_null_render_target_bo,
+      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                          size_needed);
       bo = brw->wm.multisampled_null_render_target_bo;
       surface_type = BRW_SURFACE_2D;
@@ -634,7 +632,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
-        intel_renderbuffer_move_to_temp(intel, irb, false);
+        intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
       }
    }
@@ -831,7 +829,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw,
 
       binding = &ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
       intel_bo = intel_buffer_object(binding->BufferObject);
-      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_bo, INTEL_READ);
+      drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_READ);
 
       /* Because behavior for referencing outside of the binding's size in the
        * glBindBufferRange case is undefined, we can just bind the whole buffer
index d900734d7404d552cfbd066e6bf6a780b94f03e8..6e7440ae89375233f43969ed781f3d667752990b 100644 (file)
@@ -231,8 +231,6 @@ static void
 gen6_blorp_emit_urb_config(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(3);
    OUT_BATCH(_3DSTATE_URB << 16 | (3 - 2));
    OUT_BATCH(brw->urb.max_vs_entries << GEN6_URB_VS_ENTRIES_SHIFT);
@@ -351,8 +349,6 @@ gen6_blorp_emit_cc_state_pointers(struct brw_context *brw,
                                   uint32_t depthstencil_offset,
                                   uint32_t cc_state_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
    OUT_BATCH(cc_blend_state_offset | 1); /* BLEND_STATE offset */
@@ -539,8 +535,6 @@ gen6_blorp_emit_sampler_state_pointers(struct brw_context *brw,
                                        const brw_blorp_params *params,
                                        uint32_t sampler_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
              VS_SAMPLER_STATE_CHANGE |
@@ -573,7 +567,7 @@ gen6_blorp_emit_vs_disable(struct brw_context *brw,
        *   toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
        *   command with CS stall bit set and a post sync operation.
        */
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
    }
 
    /* Disable the push constant buffers. */
@@ -604,8 +598,6 @@ void
 gen6_blorp_emit_gs_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* Disable all the constant buffers. */
    BEGIN_BATCH(5);
    OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));
@@ -645,8 +637,6 @@ void
 gen6_blorp_emit_clip_disable(struct brw_context *brw,
                              const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_CLIP << 16 | (4 - 2));
    OUT_BATCH(0);
@@ -678,8 +668,6 @@ static void
 gen6_blorp_emit_sf_config(struct brw_context *brw,
                           const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(20);
    OUT_BATCH(_3DSTATE_SF << 16 | (20 - 2));
    OUT_BATCH((1 - 1) << GEN6_SF_NUM_OUTPUTS_SHIFT | /* only position */
@@ -702,7 +690,6 @@ gen6_blorp_emit_wm_config(struct brw_context *brw,
                           uint32_t prog_offset,
                           brw_blorp_prog_data *prog_data)
 {
-   struct intel_context *intel = &brw->intel;
    uint32_t dw2, dw4, dw5, dw6;
 
    /* Even when thread dispatch is disabled, max threads (dw5.25:31) must be
@@ -774,8 +761,6 @@ gen6_blorp_emit_constant_ps(struct brw_context *brw,
                             const brw_blorp_params *params,
                             uint32_t wm_push_const_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* Make sure the push constants fill an exact integer number of
     * registers.
     */
@@ -800,8 +785,6 @@ static void
 gen6_blorp_emit_constant_ps_disable(struct brw_context *brw,
                                     const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* Disable the push constant buffers. */
    BEGIN_BATCH(5);
    OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (5 - 2));
@@ -820,8 +803,6 @@ gen6_blorp_emit_binding_table_pointers(struct brw_context *brw,
                                        const brw_blorp_params *params,
                                        uint32_t wm_bind_bo_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
              GEN6_BINDING_TABLE_MODIFY_PS |
@@ -879,8 +860,8 @@ gen6_blorp_emit_depth_stencil_config(struct brw_context *brw,
       tile_x &= ~7;
       tile_y &= ~7;
 
-      intel_emit_post_sync_nonzero_flush(intel);
-      intel_emit_depth_stall_flushes(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
+      intel_emit_depth_stall_flushes(brw);
 
       BEGIN_BATCH(7);
       OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
@@ -936,8 +917,6 @@ static void
 gen6_blorp_emit_depth_disable(struct brw_context *brw,
                               const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
    OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
@@ -961,8 +940,6 @@ static void
 gen6_blorp_emit_clear_params(struct brw_context *brw,
                              const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
             GEN5_DEPTH_CLEAR_VALID |
@@ -977,8 +954,6 @@ void
 gen6_blorp_emit_drawing_rectangle(struct brw_context *brw,
                                   const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
    OUT_BATCH(0);
@@ -993,7 +968,6 @@ void
 gen6_blorp_emit_viewport_state(struct brw_context *brw,
                               const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_cc_viewport *ccv;
    uint32_t cc_vp_offset;
 
@@ -1019,8 +993,6 @@ static void
 gen6_blorp_emit_primitive(struct brw_context *brw,
                           const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(6);
    OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
              _3DPRIM_RECTLIST << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
@@ -1044,11 +1016,9 @@ gen6_blorp_emit_primitive(struct brw_context *brw,
  * This function alters no GL state.
  */
 void
-gen6_blorp_exec(struct intel_context *intel,
+gen6_blorp_exec(struct brw_context *brw,
                 const brw_blorp_params *params)
 {
-   struct gl_context *ctx = &intel->ctx;
-   struct brw_context *brw = brw_context(ctx);
    brw_blorp_prog_data *prog_data = NULL;
    uint32_t cc_blend_state_offset = 0;
    uint32_t cc_state_offset = 0;
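
In the gen6_blorp.cpp emitters above, the only edit is removing a `struct intel_context *intel` local that nothing in the remaining body references; the BEGIN_BATCH/OUT_BATCH sequences are untouched, which suggests those macros now find the batch through the `brw` parameter that is still in scope. A hedged sketch of that macro idiom, with hypothetical macro names and a stand-in context type (not the real i965 macros):

   /* Hypothetical emission macros that pick up a local named "brw"; the
    * real BEGIN_BATCH/OUT_BATCH are only assumed to work in this spirit. */
   #include <stdint.h>
   #include <stdio.h>

   struct brw_context { uint32_t batch[64]; unsigned used; };

   #define SKETCH_BEGIN_BATCH(n)  ((void)(n))   /* no-op stand-in for space reservation */
   #define SKETCH_OUT_BATCH(dw)   (brw->batch[brw->used++] = (dw))
   #define SKETCH_ADVANCE_BATCH() ((void)0)

   static void
   emit_clear_params_sketch(struct brw_context *brw)   /* hypothetical */
   {
      /* No "intel" local needed: the macros only reference "brw". */
      SKETCH_BEGIN_BATCH(2);
      SKETCH_OUT_BATCH(0x1234u);   /* stand-in for a _3DSTATE_* command dword */
      SKETCH_OUT_BATCH(0);
      SKETCH_ADVANCE_BATCH();
   }

   int main(void)
   {
      struct brw_context brw = { .used = 0 };
      emit_clear_params_sketch(&brw);
      printf("emitted %u dwords\n", brw.used);
      return 0;
   }
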
index 969ead1a2e7c41b9eb6cd644a591d960802db1ae..429cd4d1a34fffcbb46ab1f636821bfd0ccbefca 100644 (file)
@@ -36,7 +36,7 @@ struct intel_mipmap_tree;
 }
 
 void
-gen6_blorp_exec(struct intel_context *intel,
+gen6_blorp_exec(struct brw_context *brw,
                 const brw_blorp_params *params);
 
 #endif
index e5dbd902390e232ece70120fc3fe4362d8a566e0..5f32bf9bb6c1b054f4c81c044580cd65f6a44ab5 100644 (file)
@@ -33,8 +33,6 @@
 static void
 upload_gs_state(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* Disable all the constant buffers. */
    BEGIN_BATCH(5);
    OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));
index 534abc212b684e8acbd018c48d2caca9de2ddd9b..3247bb90c08f9b852ea32e63c52db5f60d8b1a39 100644 (file)
@@ -149,8 +149,6 @@ gen6_emit_3dstate_sample_mask(struct brw_context *brw,
                               unsigned num_samples, float coverage,
                               bool coverage_invert, unsigned sample_mask)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
    if (num_samples > 1) {
@@ -189,7 +187,7 @@ static void upload_multisample_state(struct brw_context *brw)
    }
 
    /* 3DSTATE_MULTISAMPLE is nonpipelined. */
-   intel_emit_post_sync_nonzero_flush(intel);
+   intel_emit_post_sync_nonzero_flush(brw);
 
    gen6_emit_3dstate_multisample(brw, num_samples);
    gen6_emit_3dstate_sample_mask(brw, num_samples, coverage,
index 8c38bd5e2c77bce1bbebd9f32c0453698a7a4705..918e50c5a6bacb0d112e14550333fc5c35ea2eae 100644 (file)
@@ -43,8 +43,9 @@
  * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
  */
 static void
-write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    /* Emit workaround flushes: */
    if (intel->gen == 6) {
       /* The timestamp write below is a non-zero post-sync op, which on
@@ -75,11 +76,12 @@ write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
  * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
  */
 static void
-write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    /* Emit Sandybridge workaround flush: */
    if (intel->gen == 6)
-      intel_emit_post_sync_nonzero_flush(intel);
+      intel_emit_post_sync_nonzero_flush(brw);
 
    BEGIN_BATCH(5);
    OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
@@ -102,12 +104,13 @@ write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
  * function also performs a pipeline flush for proper synchronization.
  */
 static void
-write_reg(struct intel_context *intel,
+write_reg(struct brw_context *brw,
           drm_intel_bo *query_bo, uint32_t reg, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen >= 6);
 
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 
    /* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
     * read a full 64-bit register, we need to do two of them.
@@ -128,20 +131,21 @@ write_reg(struct intel_context *intel,
 }
 
 static void
-write_primitives_generated(struct intel_context *intel,
+write_primitives_generated(struct brw_context *brw,
                            drm_intel_bo *query_bo, int idx)
 {
-   write_reg(intel, query_bo, CL_INVOCATION_COUNT, idx);
+   write_reg(brw, query_bo, CL_INVOCATION_COUNT, idx);
 }
 
 static void
-write_xfb_primitives_written(struct intel_context *intel,
+write_xfb_primitives_written(struct brw_context *brw,
                              drm_intel_bo *query_bo, int idx)
 {
+   struct intel_context *intel = &brw->intel;
    if (intel->gen >= 7) {
-      write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
+      write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
    } else {
-      write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
+      write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
    }
 }
 
@@ -152,6 +156,7 @@ static void
 gen6_queryobj_get_results(struct gl_context *ctx,
                           struct brw_query_object *query)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    if (query->bo == NULL)
@@ -162,7 +167,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
     * when mapped.
     */
    if (drm_intel_bo_references(intel->batch.bo, query->bo))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (unlikely(intel->perf_debug)) {
       if (drm_intel_bo_busy(query->bo)) {
@@ -243,6 +248,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
 static void
 gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
@@ -271,21 +277,21 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * obtain the time elapsed.  Notably, this includes time elapsed while
        * the system was doing other work, such as running other applications.
        */
-      write_timestamp(intel, query->bo, 0);
+      write_timestamp(brw, query->bo, 0);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
    case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
    case GL_SAMPLES_PASSED_ARB:
-      write_depth_count(intel, query->bo, 0);
+      write_depth_count(brw, query->bo, 0);
       break;
 
    case GL_PRIMITIVES_GENERATED:
-      write_primitives_generated(intel, query->bo, 0);
+      write_primitives_generated(brw, query->bo, 0);
       break;
 
    case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
-      write_xfb_primitives_written(intel, query->bo, 0);
+      write_xfb_primitives_written(brw, query->bo, 0);
       break;
 
    default:
@@ -305,26 +311,26 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
 static void
 gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED:
-      write_timestamp(intel, query->bo, 1);
+      write_timestamp(brw, query->bo, 1);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
    case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
    case GL_SAMPLES_PASSED_ARB:
-      write_depth_count(intel, query->bo, 1);
+      write_depth_count(brw, query->bo, 1);
       break;
 
    case GL_PRIMITIVES_GENERATED:
-      write_primitives_generated(intel, query->bo, 1);
+      write_primitives_generated(brw, query->bo, 1);
       break;
 
    case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
-      write_xfb_primitives_written(intel, query->bo, 1);
+      write_xfb_primitives_written(brw, query->bo, 1);
       break;
 
    default:
@@ -355,6 +361,7 @@ static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
  */
 static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
@@ -366,7 +373,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
     *      the async query will return true in finite time.
     */
    if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
       gen6_queryobj_get_results(ctx, query);
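
The query-object callbacks above take a bare gl_context and recover both pointers via brw_context(ctx) and intel_context(ctx). Those helpers are presumably thin casts that rely on gl_context being embedded at the start of intel_context, which in turn is the first member of brw_context; a self-contained sketch of that layout assumption (stand-in types, hypothetical helper name):

   /* Illustrative sketch of the layout assumption behind brw_context(ctx). */
   #include <assert.h>

   struct gl_context    { int dummy; };
   struct intel_context { struct gl_context ctx;      /* must be first */ };
   struct brw_context   { struct intel_context intel; /* must be first */ };

   static inline struct brw_context *
   brw_context_sketch(struct gl_context *ctx)
   {
      return (struct brw_context *) ctx;   /* valid only with the layout above */
   }

   int main(void)
   {
      struct brw_context brw;
      struct gl_context *ctx = &brw.intel.ctx;
      assert(brw_context_sketch(ctx) == &brw);
      return 0;
   }
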
index a9a9df5f8a33c9f5a39394590054ac729370c9ef..0cc2a4566e12068bf507c10bfec022c62a43ded6 100644 (file)
@@ -33,8 +33,6 @@
 static void
 upload_sampler_state_pointers(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
             VS_SAMPLER_STATE_CHANGE |
index 0215a9b2c7f0be76dde3bd39adb54c7f0b41b691..35944ef35e0d2892bfd4be7c3794cde4da36304c 100644 (file)
@@ -187,6 +187,5 @@ brw_end_transform_feedback(struct gl_context *ctx,
     * simplicity, just do a full flush.
     */
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = &brw->intel;
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }
index d47bf9ea46681a383a03dbb515ede0abf6c69f40..8bb0d1b0a3bcf9f312914a783d80caf7585ef7f3 100644 (file)
@@ -49,7 +49,6 @@
 static void
 gen6_upload_urb( struct brw_context *brw )
 {
-   struct intel_context *intel = &brw->intel;
    int nr_vs_entries, nr_gs_entries;
    int total_urb_size = brw->urb.size * 1024; /* in bytes */
 
@@ -111,7 +110,7 @@ gen6_upload_urb( struct brw_context *brw )
     * a workaround.
     */
    if (brw->urb.gen6_gs_previously_active && !brw->gs.prog_active)
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
    brw->urb.gen6_gs_previously_active = brw->gs.prog_active;
 }
 
index cffb16c60581a6daf3975bf8a4838bf200a81e0e..b6dbd7788c8a770786b7e05b308fbad4dc64e679 100644 (file)
@@ -120,8 +120,6 @@ const struct brw_tracked_state gen6_sf_vp = {
 
 static void upload_viewport_state_pointers(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS << 16 | (4 - 2) |
             GEN6_CC_VIEWPORT_MODIFY |
index ae1a8412a659bb9eb94d84b4e434dce93caa64c1..31d87a477a48248a5e2a13e244562a8b0d45dd69 100644 (file)
@@ -109,7 +109,7 @@ upload_vs_state(struct brw_context *brw)
     *   flush can be executed by sending a PIPE_CONTROL command with CS
     *   stall bit set and a post sync operation.
     */
-   intel_emit_post_sync_nonzero_flush(intel);
+   intel_emit_post_sync_nonzero_flush(brw);
 
    if (brw->vs.push_const_size == 0) {
       /* Disable the push constant buffers. */
@@ -182,7 +182,7 @@ upload_vs_state(struct brw_context *brw)
     * bug reports that led to this workaround, and may be more than
     * what is strictly required to avoid the issue.
     */
-   intel_emit_post_sync_nonzero_flush(intel);
+   intel_emit_post_sync_nonzero_flush(brw);
 
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
index 822f954804eb5ad76726763be4be22483bc60e43..c8135632dd73104c97f095e962c322c367195c18 100644 (file)
@@ -67,8 +67,6 @@ gen7_blorp_emit_blend_state_pointer(struct brw_context *brw,
                                     const brw_blorp_params *params,
                                     uint32_t cc_blend_state_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_BLEND_STATE_POINTERS << 16 | (2 - 2));
    OUT_BATCH(cc_blend_state_offset | 1);
@@ -82,8 +80,6 @@ gen7_blorp_emit_cc_state_pointer(struct brw_context *brw,
                                  const brw_blorp_params *params,
                                  uint32_t cc_state_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
    OUT_BATCH(cc_state_offset | 1);
@@ -94,7 +90,6 @@ static void
 gen7_blorp_emit_cc_viewport(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_cc_viewport *ccv;
    uint32_t cc_vp_offset;
 
@@ -120,8 +115,6 @@ gen7_blorp_emit_depth_stencil_state_pointers(struct brw_context *brw,
                                              const brw_blorp_params *params,
                                              uint32_t depthstencil_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_DEPTH_STENCIL_STATE_POINTERS << 16 | (2 - 2));
    OUT_BATCH(depthstencil_offset | 1);
@@ -286,8 +279,6 @@ static void
 gen7_blorp_emit_vs_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 | (7 - 2));
    OUT_BATCH(0);
@@ -317,8 +308,6 @@ static void
 gen7_blorp_emit_hs_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_CONSTANT_HS << 16 | (7 - 2));
    OUT_BATCH(0);
@@ -349,8 +338,6 @@ static void
 gen7_blorp_emit_te_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(4);
    OUT_BATCH(_3DSTATE_TE << 16 | (4 - 2));
    OUT_BATCH(0);
@@ -368,8 +355,6 @@ static void
 gen7_blorp_emit_ds_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_CONSTANT_DS << 16 | (7 - 2));
    OUT_BATCH(0);
@@ -398,8 +383,6 @@ static void
 gen7_blorp_emit_gs_disable(struct brw_context *brw,
                            const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (7 - 2));
    OUT_BATCH(0);
@@ -429,8 +412,6 @@ static void
 gen7_blorp_emit_streamout_disable(struct brw_context *brw,
                                   const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(3);
    OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (3 - 2));
    OUT_BATCH(0);
@@ -443,8 +424,6 @@ static void
 gen7_blorp_emit_sf_config(struct brw_context *brw,
                           const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* 3DSTATE_SF
     *
     * Disable ViewportTransformEnable (dw1.1)
@@ -498,8 +477,6 @@ gen7_blorp_emit_wm_config(struct brw_context *brw,
                           const brw_blorp_params *params,
                           brw_blorp_prog_data *prog_data)
 {
-   struct intel_context *intel = &brw->intel;
-
    uint32_t dw1 = 0, dw2 = 0;
 
    switch (params->hiz_op) {
@@ -615,8 +592,6 @@ gen7_blorp_emit_binding_table_pointers_ps(struct brw_context *brw,
                                           const brw_blorp_params *params,
                                           uint32_t wm_bind_bo_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_PS << 16 | (2 - 2));
    OUT_BATCH(wm_bind_bo_offset);
@@ -629,8 +604,6 @@ gen7_blorp_emit_sampler_state_pointers_ps(struct brw_context *brw,
                                           const brw_blorp_params *params,
                                           uint32_t sampler_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
    OUT_BATCH(sampler_offset);
@@ -643,8 +616,6 @@ gen7_blorp_emit_constant_ps(struct brw_context *brw,
                             const brw_blorp_params *params,
                             uint32_t wm_push_const_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* Make sure the push constants fill an exact integer number of
     * registers.
     */
@@ -670,8 +641,6 @@ static void
 gen7_blorp_emit_constant_ps_disable(struct brw_context *brw,
                                     const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (7 - 2));
    OUT_BATCH(0);
@@ -687,8 +656,7 @@ static void
 gen7_blorp_emit_depth_stencil_config(struct brw_context *brw,
                                      const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-   struct gl_context *ctx = &intel->ctx;
+   struct gl_context *ctx = &brw->intel.ctx;
    uint32_t draw_x = params->depth.x_offset;
    uint32_t draw_y = params->depth.y_offset;
    uint32_t tile_mask_x, tile_mask_y;
@@ -729,7 +697,7 @@ gen7_blorp_emit_depth_stencil_config(struct brw_context *brw,
       tile_x &= ~7;
       tile_y &= ~7;
 
-      intel_emit_depth_stall_flushes(intel);
+      intel_emit_depth_stall_flushes(brw);
 
       BEGIN_BATCH(7);
       OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
@@ -782,8 +750,6 @@ static void
 gen7_blorp_emit_depth_disable(struct brw_context *brw,
                               const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
    OUT_BATCH(BRW_DEPTHFORMAT_D32_FLOAT << 18 | (BRW_SURFACE_NULL << 29));
@@ -808,8 +774,6 @@ static void
 gen7_blorp_emit_clear_params(struct brw_context *brw,
                              const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(3);
    OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
    OUT_BATCH(params->depth.mt ? params->depth.mt->depth_clear_value : 0);
@@ -823,8 +787,6 @@ static void
 gen7_blorp_emit_primitive(struct brw_context *brw,
                           const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(7);
    OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
    OUT_BATCH(GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL |
@@ -842,11 +804,9 @@ gen7_blorp_emit_primitive(struct brw_context *brw,
  * \copydoc gen6_blorp_exec()
  */
 void
-gen7_blorp_exec(struct intel_context *intel,
+gen7_blorp_exec(struct brw_context *brw,
                 const brw_blorp_params *params)
 {
-   struct gl_context *ctx = &intel->ctx;
-   struct brw_context *brw = brw_context(ctx);
    brw_blorp_prog_data *prog_data = NULL;
    uint32_t cc_blend_state_offset = 0;
    uint32_t cc_state_offset = 0;
index caf5640dcd146411a5d36febfa3bd297a1f9bab8..7517b6cce0a52145e81407df08fd2e66532147a5 100644 (file)
@@ -36,7 +36,7 @@ struct intel_mipmap_tree;
 }
 
 void
-gen7_blorp_exec(struct intel_context *intel,
+gen7_blorp_exec(struct brw_context *brw,
                 const brw_blorp_params *params);
 
 #endif
index aaf88a8bba88eed4b47c803cc8f148036827fd03..c9564c696145759a37454b59fb227eeb9dcaf6f3 100644 (file)
@@ -29,8 +29,6 @@
 static void
 disable_stages(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    assert(!brw->gs.prog_active);
 
    /* Disable the Geometry Shader (GS) Unit */
index 539fc321fbb7e35fcde41cf29e733ee06d29a494..9f40690e51ab40d33a1d469ef09045f7d38e12b9 100644 (file)
@@ -42,7 +42,7 @@ gen7_emit_depth_stencil_hiz(struct brw_context *brw,
    struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
 
-   intel_emit_depth_stall_flushes(intel);
+   intel_emit_depth_stall_flushes(brw);
 
    /* _NEW_DEPTH, _NEW_STENCIL, _NEW_BUFFERS */
    BEGIN_BATCH(7);
index 9e5f5f72e837bb5437dc9fd50dcd32ff75a755d4..2a9d8f1985cf516a4a54881b1d95ac85d722a3b2 100644 (file)
@@ -74,7 +74,7 @@ upload_3dstate_so_buffers(struct brw_context *brw)
         continue;
       }
 
-      bo = intel_bufferobj_buffer(intel, bufferobj, INTEL_WRITE_PART);
+      bo = intel_bufferobj_buffer(brw, bufferobj, INTEL_WRITE_PART);
       stride = linked_xfb_info->BufferStride[i] * 4;
 
       start = xfb_obj->Offset[i];
@@ -260,7 +260,7 @@ gen7_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
    struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = &brw->intel;
 
-   intel_batchbuffer_flush(intel);
+   intel_batchbuffer_flush(brw);
    intel->batch.needs_sol_reset = true;
 }
 
@@ -275,7 +275,6 @@ gen7_end_transform_feedback(struct gl_context *ctx,
     * This also covers any cache flushing required.
     */
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = &brw->intel;
 
-   intel_batchbuffer_flush(intel);
+   intel_batchbuffer_flush(brw);
 }
index 919918639c89f3647c8ff20ee9308321440236f1..82eac5add900deffc155976fe47d3910420048ba 100644 (file)
@@ -100,7 +100,7 @@ gen7_upload_urb(struct brw_context *brw)
    /* GS requirement */
    assert(!brw->gs.prog_active);
 
-   gen7_emit_vs_workaround_flush(intel);
+   gen7_emit_vs_workaround_flush(brw);
    gen7_emit_urb_state(brw, brw->urb.nr_vs_entries, vs_size, brw->urb.vs_start);
 }
 
@@ -108,8 +108,6 @@ void
 gen7_emit_urb_state(struct brw_context *brw, GLuint nr_vs_entries,
                     GLuint vs_size, GLuint vs_start)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
    OUT_BATCH(nr_vs_entries |
index 150e89044ecc38a6f7224b94eabd139601077819..ecd7922e75792c52377fe45f2b1af7c7f89abc28 100644 (file)
@@ -99,8 +99,6 @@ const struct brw_tracked_state gen7_sf_clip_viewport = {
 
 static void upload_cc_viewport_state_pointer(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS_CC << 16 | (2 - 2));
    OUT_BATCH(brw->cc.vp_offset);
index 1b97e8c078352e1ed20254b2b7d0ee76057ea1d1..1fad26020050e7c4ffdcf45867aa060c6cef4e85 100644 (file)
@@ -37,7 +37,7 @@ upload_vs_state(struct brw_context *brw)
    const int max_threads_shift = brw->intel.is_haswell ?
       HSW_VS_MAX_THREADS_SHIFT : GEN6_VS_MAX_THREADS_SHIFT;
 
-   gen7_emit_vs_workaround_flush(intel);
+   gen7_emit_vs_workaround_flush(brw);
 
    /* BRW_NEW_VS_BINDING_TABLE */
    BEGIN_BATCH(2);
index 99b00e36819029bc26ba9b11182d199c291554e4..1241656fe74926e3ff09c74e6b4870d3ddc82b96 100644 (file)
@@ -306,7 +306,7 @@ gen7_update_texture_surface(struct gl_context *ctx,
                                     8 * 4, 32, &binding_table[surf_index]);
    memset(surf, 0, 8 * 4);
 
-   uint32_t tex_format = translate_tex_format(intel,
+   uint32_t tex_format = translate_tex_format(brw,
                                               mt->format,
                                               tObj->DepthMode,
                                               sampler->sRGBDecode);
@@ -546,7 +546,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
    /* Render targets can't use IMS layout */
    assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
 
-   assert(brw_render_target_supported(intel, rb));
+   assert(brw_render_target_supported(brw, rb));
    format = brw->render_target_format[rb_format];
    if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
       _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
index 4da37c11a83fc33b2f674352f72fdbab204d4711..646b1f7571793fcdd38c75daa0e55e8b0ccf118b 100644 (file)
@@ -33,7 +33,7 @@
 #include "brw_context.h"
 
 static void
-intel_batchbuffer_reset(struct intel_context *intel);
+intel_batchbuffer_reset(struct brw_context *brw);
 
 struct cached_batch_item {
    struct cached_batch_item *next;
@@ -41,8 +41,10 @@ struct cached_batch_item {
    uint16_t size;
 };
 
-static void clear_cache( struct intel_context *intel )
+static void
+clear_cache(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    struct cached_batch_item *item = intel->batch.cached_items;
 
    while (item) {
@@ -55,9 +57,10 @@ static void clear_cache( struct intel_context *intel )
 }
 
 void
-intel_batchbuffer_init(struct intel_context *intel)
+intel_batchbuffer_init(struct brw_context *brw)
 {
-   intel_batchbuffer_reset(intel);
+   struct intel_context *intel = &brw->intel;
+   intel_batchbuffer_reset(brw);
 
    if (intel->gen >= 6) {
       /* We can't just use brw_state_batch to get a chunk of space for
@@ -76,15 +79,16 @@ intel_batchbuffer_init(struct intel_context *intel)
 }
 
 static void
-intel_batchbuffer_reset(struct intel_context *intel)
+intel_batchbuffer_reset(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    if (intel->batch.last_bo != NULL) {
       drm_intel_bo_unreference(intel->batch.last_bo);
       intel->batch.last_bo = NULL;
    }
    intel->batch.last_bo = intel->batch.bo;
 
-   clear_cache(intel);
+   clear_cache(brw);
 
    intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                        BATCH_SZ, 4096);
@@ -100,16 +104,18 @@ intel_batchbuffer_reset(struct intel_context *intel)
 }
 
 void
-intel_batchbuffer_save_state(struct intel_context *intel)
+intel_batchbuffer_save_state(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    intel->batch.saved.used = intel->batch.used;
    intel->batch.saved.reloc_count =
       drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
 }
 
 void
-intel_batchbuffer_reset_to_saved(struct intel_context *intel)
+intel_batchbuffer_reset_to_saved(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
 
    intel->batch.used = intel->batch.saved.used;
@@ -117,22 +123,24 @@ intel_batchbuffer_reset_to_saved(struct intel_context *intel)
    /* Cached batch state is dead, since we just cleared some unknown part of the
     * batchbuffer.  Assume that the caller resets any other state necessary.
     */
-   clear_cache(intel);
+   clear_cache(brw);
 }
 
 void
-intel_batchbuffer_free(struct intel_context *intel)
+intel_batchbuffer_free(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    free(intel->batch.cpu_map);
    drm_intel_bo_unreference(intel->batch.last_bo);
    drm_intel_bo_unreference(intel->batch.bo);
    drm_intel_bo_unreference(intel->batch.workaround_bo);
-   clear_cache(intel);
+   clear_cache(brw);
 }
 
 static void
-do_batch_dump(struct intel_context *intel)
+do_batch_dump(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    struct drm_intel_decode *decode;
    struct intel_batchbuffer *batch = &intel->batch;
    int ret;
@@ -165,15 +173,16 @@ do_batch_dump(struct intel_context *intel)
    if (ret == 0) {
       drm_intel_bo_unmap(batch->bo);
 
-      brw_debug_batch(intel);
+      brw_debug_batch(brw);
    }
 }
 
 /* TODO: Push this whole function into bufmgr.
  */
 static int
-do_flush_locked(struct intel_context *intel)
+do_flush_locked(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_batchbuffer *batch = &intel->batch;
    int ret = 0;
 
@@ -203,7 +212,7 @@ do_flush_locked(struct intel_context *intel)
 
       if (ret == 0) {
          if (unlikely(INTEL_DEBUG & DEBUG_AUB))
-            brw_annotate_aub(intel);
+            brw_annotate_aub(brw);
         if (intel->hw_ctx == NULL || batch->is_blit) {
            ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
                                        flags);
@@ -215,21 +224,22 @@ do_flush_locked(struct intel_context *intel)
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
-      do_batch_dump(intel);
+      do_batch_dump(brw);
 
    if (ret != 0) {
       fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
       exit(1);
    }
-   intel->vtbl.new_batch(intel);
+   intel->vtbl.new_batch(brw);
 
    return ret;
 }
 
 int
-_intel_batchbuffer_flush(struct intel_context *intel,
+_intel_batchbuffer_flush(struct brw_context *brw,
                         const char *file, int line)
 {
+   struct intel_context *intel = &brw->intel;
    int ret;
 
    if (intel->batch.used == 0)
@@ -247,21 +257,21 @@ _intel_batchbuffer_flush(struct intel_context *intel,
    intel->batch.reserved_space = 0;
 
    if (intel->vtbl.finish_batch)
-      intel->vtbl.finish_batch(intel);
+      intel->vtbl.finish_batch(brw);
 
    /* Mark the end of the buffer. */
-   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+   intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
    if (intel->batch.used & 1) {
       /* Round batchbuffer usage to 2 DWORDs. */
-      intel_batchbuffer_emit_dword(intel, MI_NOOP);
+      intel_batchbuffer_emit_dword(brw, MI_NOOP);
    }
 
-   intel_upload_finish(intel);
+   intel_upload_finish(brw);
 
    /* Check that we didn't just wrap our batchbuffer at a bad time. */
    assert(!intel->no_batch_wrap);
 
-   ret = do_flush_locked(intel);
+   ret = do_flush_locked(brw);
 
    if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
       fprintf(stderr, "waiting for idle\n");
@@ -270,7 +280,7 @@ _intel_batchbuffer_flush(struct intel_context *intel,
 
    /* Reset the buffer:
     */
-   intel_batchbuffer_reset(intel);
+   intel_batchbuffer_reset(brw);
 
    return ret;
 }
@@ -279,11 +289,12 @@ _intel_batchbuffer_flush(struct intel_context *intel,
 /*  This is the only way buffers get added to the validate list.
  */
 bool
-intel_batchbuffer_emit_reloc(struct intel_context *intel,
+intel_batchbuffer_emit_reloc(struct brw_context *brw,
                              drm_intel_bo *buffer,
                              uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
 {
+   struct intel_context *intel = &brw->intel;
    int ret;
 
    ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
@@ -297,18 +308,19 @@ intel_batchbuffer_emit_reloc(struct intel_context *intel,
     * the buffer doesn't move and we can short-circuit the relocation processing
     * in the kernel
     */
-   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+   intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
 
    return true;
 }
 
 bool
-intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
 {
+   struct intel_context *intel = &brw->intel;
    int ret;
 
    ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
@@ -322,24 +334,26 @@ intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
     * be, in case the buffer doesn't move and we can short-circuit the
     * relocation processing in the kernel
     */
-   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+   intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
 
    return true;
 }
 
 void
-intel_batchbuffer_data(struct intel_context *intel,
+intel_batchbuffer_data(struct brw_context *brw,
                        const void *data, GLuint bytes, bool is_blit)
 {
+   struct intel_context *intel = &brw->intel;
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(intel, bytes, is_blit);
+   intel_batchbuffer_require_space(brw, bytes, is_blit);
    __memcpy(intel->batch.map + intel->batch.used, data, bytes);
    intel->batch.used += bytes >> 2;
 }
 
 void
-intel_batchbuffer_cached_advance(struct intel_context *intel)
+intel_batchbuffer_cached_advance(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    struct cached_batch_item **prev = &intel->batch.cached_items, *item;
    uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
    uint32_t *start = intel->batch.map + intel->batch.emit;
@@ -391,8 +405,9 @@ emit:
  * already flushed (e.g., via a preceding MI_FLUSH).
  */
 void
-intel_emit_depth_stall_flushes(struct intel_context *intel)
+intel_emit_depth_stall_flushes(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen >= 6 && intel->gen <= 7);
 
    BEGIN_BATCH(4);
@@ -426,8 +441,9 @@ intel_emit_depth_stall_flushes(struct intel_context *intel)
  *  to be sent before any combination of VS associated 3DSTATE."
  */
 void
-gen7_emit_vs_workaround_flush(struct intel_context *intel)
+gen7_emit_vs_workaround_flush(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen == 7);
 
    BEGIN_BATCH(4);
@@ -477,8 +493,9 @@ gen7_emit_vs_workaround_flush(struct intel_context *intel)
  * really our business.  That leaves only stall at scoreboard.
  */
 void
-intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
+intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    if (!intel->batch.need_workaround_flush)
       return;
 
@@ -508,8 +525,9 @@ intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
  * This is also used for the always_flush_cache driconf debug option.
  */
 void
-intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
+intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    if (intel->gen >= 6) {
       if (intel->batch.is_blit) {
         BEGIN_BATCH_BLT(4);
@@ -526,7 +544,7 @@ intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
             * Flush Enable =1, a PIPE_CONTROL with any non-zero
             * post-sync-op is required.
             */
-           intel_emit_post_sync_nonzero_flush(intel);
+           intel_emit_post_sync_nonzero_flush(brw);
         }
 
         BEGIN_BATCH(4);
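
do_flush_locked() and _intel_batchbuffer_flush() above now hand brw to intel->vtbl.new_batch() and intel->vtbl.finish_batch(), matching the reworked brw_new_batch()/brw_finish_batch() in the brw_vtbl.c hunks earlier; the vtbl members themselves are presumably retyped the same way. A self-contained sketch of changing a function-pointer slot's type together with its call sites (stand-in types and names, not the real vtbl):

   /* Illustrative sketch: a function-pointer table whose members are
    * retyped from intel_context to brw_context along with the call sites. */
   #include <stdio.h>

   struct brw_context;

   struct vtbl_sketch {
      /* was: void (*new_batch)(struct intel_context *intel); */
      void (*new_batch)(struct brw_context *brw);
      void (*finish_batch)(struct brw_context *brw);
   };

   struct intel_context { struct vtbl_sketch vtbl; };
   struct brw_context   { struct intel_context intel; };

   static void new_batch_sketch(struct brw_context *brw)    { (void) brw; puts("new batch"); }
   static void finish_batch_sketch(struct brw_context *brw) { (void) brw; puts("finish batch"); }

   static void
   flush_sketch(struct brw_context *brw)
   {
      struct intel_context *intel = &brw->intel;
      intel->vtbl.finish_batch(brw);   /* call sites hand over brw directly */
      intel->vtbl.new_batch(brw);
   }

   int main(void)
   {
      struct brw_context brw = {
         .intel.vtbl = { .new_batch = new_batch_sketch,
                         .finish_batch = finish_batch_sketch },
      };
      flush_sketch(&brw);
      return 0;
   }
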
index 8f915bf56868f934c449c8d2391b2220884786a6..48439513d58c00f0578cef7563a1adc2ea7b265a 100644 (file)
@@ -24,12 +24,12 @@ extern "C" {
 
 struct intel_batchbuffer;
 
-void intel_batchbuffer_init(struct intel_context *intel);
-void intel_batchbuffer_free(struct intel_context *intel);
-void intel_batchbuffer_save_state(struct intel_context *intel);
-void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
+void intel_batchbuffer_init(struct brw_context *brw);
+void intel_batchbuffer_free(struct brw_context *brw);
+void intel_batchbuffer_save_state(struct brw_context *brw);
+void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
 
-int _intel_batchbuffer_flush(struct intel_context *intel,
+int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);
 
 #define intel_batchbuffer_flush(intel) \
@@ -41,23 +41,23 @@ int _intel_batchbuffer_flush(struct intel_context *intel,
  * Consider it a convenience function wrapping multiple
  * intel_buffer_dword() calls.
  */
-void intel_batchbuffer_data(struct intel_context *intel,
+void intel_batchbuffer_data(struct brw_context *brw,
                             const void *data, GLuint bytes, bool is_blit);
 
-bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
                                        drm_intel_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
-bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
                                              drm_intel_bo *buffer,
                                              uint32_t read_domains,
                                              uint32_t write_domain,
                                              uint32_t offset);
-void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
-void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
-void intel_emit_depth_stall_flushes(struct intel_context *intel);
-void gen7_emit_vs_workaround_flush(struct intel_context *intel);
+void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
+void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
+void intel_emit_depth_stall_flushes(struct brw_context *brw);
+void gen7_emit_vs_workaround_flush(struct brw_context *brw);
 
 static INLINE uint32_t float_as_int(float f)
 {
@@ -76,36 +76,37 @@ static INLINE uint32_t float_as_int(float f)
  * work...
  */
 static INLINE unsigned
-intel_batchbuffer_space(struct intel_context *intel)
+intel_batchbuffer_space(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    return (intel->batch.state_batch_offset - intel->batch.reserved_space)
       - intel->batch.used*4;
 }
 
 
 static INLINE void
-intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
+intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
 {
+   struct intel_context *intel = &brw->intel;
 #ifdef DEBUG
-   assert(intel_batchbuffer_space(intel) >= 4);
+   assert(intel_batchbuffer_space(brw) >= 4);
 #endif
    intel->batch.map[intel->batch.used++] = dword;
 }
 
 static INLINE void
-intel_batchbuffer_emit_float(struct intel_context *intel, float f)
+intel_batchbuffer_emit_float(struct brw_context *brw, float f)
 {
-   intel_batchbuffer_emit_dword(intel, float_as_int(f));
+   intel_batchbuffer_emit_dword(brw, float_as_int(f));
 }
 
 static INLINE void
-intel_batchbuffer_require_space(struct intel_context *intel,
-                                GLuint sz, int is_blit)
+intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
 {
-
+   struct intel_context *intel = &brw->intel;
    if (intel->gen >= 6 &&
        intel->batch.is_blit != is_blit && intel->batch.used) {
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
    }
 
    intel->batch.is_blit = is_blit;
@@ -113,14 +114,15 @@ intel_batchbuffer_require_space(struct intel_context *intel,
 #ifdef DEBUG
    assert(sz < BATCH_SZ - BATCH_RESERVED);
 #endif
-   if (intel_batchbuffer_space(intel) < sz)
-      intel_batchbuffer_flush(intel);
+   if (intel_batchbuffer_space(brw) < sz)
+      intel_batchbuffer_flush(brw);
 }
 
 static INLINE void
-intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
+intel_batchbuffer_begin(struct brw_context *brw, int n, bool is_blit)
 {
-   intel_batchbuffer_require_space(intel, n * 4, is_blit);
+   struct intel_context *intel = &brw->intel;
+   intel_batchbuffer_require_space(brw, n * 4, is_blit);
 
    intel->batch.emit = intel->batch.used;
 #ifdef DEBUG
@@ -129,9 +131,10 @@ intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
 }
 
 static INLINE void
-intel_batchbuffer_advance(struct intel_context *intel)
+intel_batchbuffer_advance(struct brw_context *brw)
 {
 #ifdef DEBUG
+   struct intel_context *intel = &brw->intel;
    struct intel_batchbuffer *batch = &intel->batch;
    unsigned int _n = batch->used - batch->emit;
    assert(batch->total != 0);
@@ -144,27 +147,27 @@ intel_batchbuffer_advance(struct intel_context *intel)
 #endif
 }
 
-void intel_batchbuffer_cached_advance(struct intel_context *intel);
+void intel_batchbuffer_cached_advance(struct brw_context *brw);
 
 /* Here are the crusty old macros, to be removed:
  */
 #define BATCH_LOCALS
 
-#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
-#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
-#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
-#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, false)
+#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, true)
+#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
+#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
 #define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
-   intel_batchbuffer_emit_reloc(intel, buf,                    \
+   intel_batchbuffer_emit_reloc(brw, buf,                      \
                                read_domains, write_domain, delta);     \
 } while (0)
 #define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
-   intel_batchbuffer_emit_reloc_fenced(intel, buf,             \
+   intel_batchbuffer_emit_reloc_fenced(brw, buf,               \
                                       read_domains, write_domain, delta); \
 } while (0)
 
-#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
-#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
+#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
+#define CACHED_BATCH() intel_batchbuffer_cached_advance(brw);
 
 #ifdef __cplusplus
 }
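With these prototypes, a source file that includes the header only needs a brw_context in scope to manage the batch; note that the intel_batchbuffer_flush() wrapper macro keeps its old parameter name but its callers now hand it a brw pointer. A hedged caller sketch (example_reserve_space() is hypothetical):

static void
example_reserve_space(struct brw_context *brw, unsigned dwords)
{
   /* Both helpers now take brw_context; flush if the batch is nearly full. */
   if (intel_batchbuffer_space(brw) < dwords * 4)
      intel_batchbuffer_flush(brw);
}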
index e063062e2ee38010a38e7d7c7bfab0866e934185..7d57d6e998d0acfdca25a0912691622e2c540766 100644
@@ -44,7 +44,7 @@
 #define FILE_DEBUG_FLAG DEBUG_BLIT
 
 static void
-intel_miptree_set_alpha_to_one(struct intel_context *intel,
+intel_miptree_set_alpha_to_one(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                int x, int y, int width, int height);
 
@@ -101,9 +101,10 @@ br13_for_cpp(int cpp)
  * server).
  */
 static void
-set_blitter_tiling(struct intel_context *intel,
+set_blitter_tiling(struct brw_context *brw,
                    bool dst_y_tiled, bool src_y_tiled)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen >= 6);
 
    /* Idle the blitter before we update how tiling is interpreted. */
@@ -122,12 +123,12 @@ set_blitter_tiling(struct intel_context *intel,
 #define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) do {         \
       BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0));     \
       if (dst_y_tiled || src_y_tiled)                                   \
-         set_blitter_tiling(intel, dst_y_tiled, src_y_tiled);           \
+         set_blitter_tiling(brw, dst_y_tiled, src_y_tiled);             \
    } while (0)
 
 #define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) do {              \
       if (dst_y_tiled || src_y_tiled)                                   \
-         set_blitter_tiling(intel, false, false);                       \
+         set_blitter_tiling(brw, false, false);                         \
       ADVANCE_BATCH();                                                  \
    } while (0)
 
@@ -147,7 +148,7 @@ set_blitter_tiling(struct intel_context *intel,
  * renderbuffers/textures.
  */
 bool
-intel_miptree_blit(struct intel_context *intel,
+intel_miptree_blit(struct brw_context *brw,
                    struct intel_mipmap_tree *src_mt,
                    int src_level, int src_slice,
                    uint32_t src_x, uint32_t src_y, bool src_flip,
@@ -157,6 +158,7 @@ intel_miptree_blit(struct intel_context *intel,
                    uint32_t width, uint32_t height,
                    GLenum logicop)
 {
+   struct intel_context *intel = &brw->intel;
    /* No sRGB decode or encode is done by the hardware blitter, which is
     * consistent with what we want in the callers (glCopyTexSubImage(),
     * glBlitFramebuffer(), texture validation, etc.).
@@ -208,10 +210,10 @@ intel_miptree_blit(struct intel_context *intel,
    /* The blitter has no idea about HiZ or fast color clears, so we need to
     * resolve the miptrees before we do anything.
     */
-   intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_slice);
-   intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_slice);
-   intel_miptree_resolve_color(intel, src_mt);
-   intel_miptree_resolve_color(intel, dst_mt);
+   intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_slice);
+   intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_slice);
+   intel_miptree_resolve_color(brw, src_mt);
+   intel_miptree_resolve_color(brw, dst_mt);
 
    if (src_flip)
       src_y = src_mt->level[src_level].height - src_y - height;
@@ -235,7 +237,7 @@ intel_miptree_blit(struct intel_context *intel,
    dst_x += dst_image_x;
    dst_y += dst_image_y;
 
-   if (!intelEmitCopyBlit(intel,
+   if (!intelEmitCopyBlit(brw,
                           src_mt->cpp,
                           src_pitch,
                           src_mt->region->bo, src_mt->offset,
@@ -252,7 +254,7 @@ intel_miptree_blit(struct intel_context *intel,
 
    if (src_mt->format == MESA_FORMAT_XRGB8888 &&
        dst_mt->format == MESA_FORMAT_ARGB8888) {
-      intel_miptree_set_alpha_to_one(intel, dst_mt,
+      intel_miptree_set_alpha_to_one(brw, dst_mt,
                                      dst_x, dst_y,
                                      width, height);
    }
@@ -263,7 +265,7 @@ intel_miptree_blit(struct intel_context *intel,
 /* Copy BitBlt
  */
 bool
-intelEmitCopyBlit(struct intel_context *intel,
+intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  GLshort src_pitch,
                  drm_intel_bo *src_buffer,
@@ -278,6 +280,7 @@ intelEmitCopyBlit(struct intel_context *intel,
                  GLshort w, GLshort h,
                  GLenum logic_op)
 {
+   struct intel_context *intel = &brw->intel;
    GLuint CMD, BR13, pass = 0;
    int dst_y2 = dst_y + h;
    int dst_x2 = dst_x + w;
@@ -304,7 +307,7 @@ intelEmitCopyBlit(struct intel_context *intel,
        aper_array[2] = src_buffer;
 
        if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
-           intel_batchbuffer_flush(intel);
+           intel_batchbuffer_flush(brw);
            pass++;
        } else
            break;
@@ -313,7 +316,7 @@ intelEmitCopyBlit(struct intel_context *intel,
    if (pass >= 2)
       return false;
 
-   intel_batchbuffer_require_space(intel, 8 * 4, true);
+   intel_batchbuffer_require_space(brw, 8 * 4, true);
    DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
        __FUNCTION__,
        src_buffer, src_pitch, src_offset, src_x, src_y,
@@ -390,13 +393,13 @@ intelEmitCopyBlit(struct intel_context *intel,
 
    ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
 
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 
    return true;
 }
 
 bool
-intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,
@@ -429,10 +432,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
        __FUNCTION__,
        dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
 
-   intel_batchbuffer_require_space(intel,
-                                  (8 * 4) +
-                                  (3 * 4) +
-                                  dwords * 4, true);
+   intel_batchbuffer_require_space(brw, (8 * 4) + (3 * 4) + dwords * 4, true);
 
    opcode = XY_SETUP_BLT_CMD;
    if (cpp == 4)
@@ -466,9 +466,9 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
    OUT_BATCH(((y + h) << 16) | (x + w));
    ADVANCE_BATCH();
 
-   intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+   intel_batchbuffer_data(brw, src_bits, dwords * 4, true);
 
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 
    return true;
 }
@@ -478,13 +478,14 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
  * end to cover the last if we need.
  */
 void
-intel_emit_linear_blit(struct intel_context *intel,
+intel_emit_linear_blit(struct brw_context *brw,
                       drm_intel_bo *dst_bo,
                       unsigned int dst_offset,
                       drm_intel_bo *src_bo,
                       unsigned int src_offset,
                       unsigned int size)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    GLuint pitch, height;
    bool ok;
@@ -495,7 +496,7 @@ intel_emit_linear_blit(struct intel_context *intel,
     */
    pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
    height = (pitch == 0) ? 1 : size / pitch;
-   ok = intelEmitCopyBlit(intel, 1,
+   ok = intelEmitCopyBlit(brw, 1,
                          pitch, src_bo, src_offset, I915_TILING_NONE,
                          pitch, dst_bo, dst_offset, I915_TILING_NONE,
                          0, 0, /* src x/y */
@@ -511,7 +512,7 @@ intel_emit_linear_blit(struct intel_context *intel,
    assert (size < (1 << 15));
    pitch = ALIGN(size, 4);
    if (size != 0) {
-      ok = intelEmitCopyBlit(intel, 1,
+      ok = intelEmitCopyBlit(brw, 1,
                             pitch, src_bo, src_offset, I915_TILING_NONE,
                             pitch, dst_bo, dst_offset, I915_TILING_NONE,
                             0, 0, /* src x/y */
@@ -532,10 +533,11 @@ intel_emit_linear_blit(struct intel_context *intel,
  * miptree.
  */
 static void
-intel_miptree_set_alpha_to_one(struct intel_context *intel,
+intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_region *region = mt->region;
    uint32_t BR13, CMD;
    int pitch, cpp;
@@ -564,7 +566,7 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,
 
    if (drm_intel_bufmgr_check_aperture_space(aper_array,
                                             ARRAY_SIZE(aper_array)) != 0) {
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
    }
 
    bool dst_y_tiled = region->tiling == I915_TILING_Y;
@@ -580,5 +582,5 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,
    OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
    ADVANCE_BATCH_TILED(dst_y_tiled, false);
 
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }
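The blit helpers follow the same pattern, so copying raw bytes between two buffer objects now needs only the brw_context. A sketch under that assumption (example_copy_bo() is hypothetical; the trailing MI_FLUSH mirrors the buffer-object paths below):

static void
example_copy_bo(struct brw_context *brw,
                drm_intel_bo *dst_bo, drm_intel_bo *src_bo, unsigned size)
{
   intel_emit_linear_blit(brw,
                          dst_bo, 0,   /* dst offset */
                          src_bo, 0,   /* src offset */
                          size);

   /* Flush so later rendering that reads dst_bo in another cache domain
    * sees the blitted data, as the buffer-object code does below.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}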
index 91f10a87840abaf72b52b90250c7865549b8b535..785f74fa7594670a8d425fc32861b3e3bfe47cf9 100644
@@ -31,7 +31,7 @@
 #include "brw_context.h"
 
 bool
-intelEmitCopyBlit(struct intel_context *intel,
+intelEmitCopyBlit(struct brw_context *brw,
                               GLuint cpp,
                               GLshort src_pitch,
                               drm_intel_bo *src_buffer,
@@ -46,7 +46,7 @@ intelEmitCopyBlit(struct intel_context *intel,
                               GLshort w, GLshort h,
                              GLenum logicop );
 
-bool intel_miptree_blit(struct intel_context *intel,
+bool intel_miptree_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         int src_level, int src_slice,
                         uint32_t src_x, uint32_t src_y, bool src_flip,
@@ -57,7 +57,7 @@ bool intel_miptree_blit(struct intel_context *intel,
                         GLenum logicop);
 
 bool
-intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,
@@ -68,7 +68,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
                                  GLshort x, GLshort y,
                                  GLshort w, GLshort h,
                                  GLenum logic_op);
-void intel_emit_linear_blit(struct intel_context *intel,
+void intel_emit_linear_blit(struct brw_context *brw,
                            drm_intel_bo *dst_bo,
                            unsigned int dst_offset,
                            drm_intel_bo *src_bo,
index d23df75c19638fcb4ff7b4f4fd1bb21659cc78a9..1281c9b4d871b89a8e92f740db0edbdded53e46e 100644
@@ -46,11 +46,10 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
 
 /** Allocates a new drm_intel_bo to store the data for the buffer object. */
 static void
-intel_bufferobj_alloc_buffer(struct intel_context *intel,
+intel_bufferobj_alloc_buffer(struct brw_context *brw,
                             struct intel_buffer_object *intel_obj)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
-
+   struct intel_context *intel = &brw->intel;
    intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
 
@@ -123,7 +122,7 @@ intel_bufferobj_data(struct gl_context * ctx,
                      const GLvoid * data,
                      GLenum usage, struct gl_buffer_object *obj)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
    /* Part of the ABI, but this function doesn't use it.
@@ -139,7 +138,7 @@ intel_bufferobj_data(struct gl_context * ctx,
       release_buffer(intel_obj);
 
    if (size != 0) {
-      intel_bufferobj_alloc_buffer(intel, intel_obj);
+      intel_bufferobj_alloc_buffer(brw, intel_obj);
       if (!intel_obj->buffer)
          return false;
 
@@ -163,6 +162,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
                         GLsizeiptrARB size,
                         const GLvoid * data, struct gl_buffer_object *obj)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
    bool busy;
@@ -180,7 +180,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
       if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
-        intel_bufferobj_alloc_buffer(intel, intel_obj);
+        intel_bufferobj_alloc_buffer(brw, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
       } else {
          perf_debug("Using a blit copy to avoid stalling on %ldb "
@@ -191,7 +191,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
 
         drm_intel_bo_subdata(temp_bo, 0, size, data);
 
-        intel_emit_linear_blit(intel,
+        intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);
@@ -214,11 +214,12 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
                             GLvoid * data, struct gl_buffer_object *obj)
 {
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    assert(intel_obj);
    if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
    }
    drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
 }
@@ -246,6 +247,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
@@ -275,7 +277,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
-           intel_bufferobj_alloc_buffer(intel, intel_obj);
+           intel_bufferobj_alloc_buffer(brw, intel_obj);
         } else {
             perf_debug("Stalling on the GPU for mapping a busy buffer "
                        "object\n");
@@ -284,7 +286,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
-        intel_bufferobj_alloc_buffer(intel, intel_obj);
+        intel_bufferobj_alloc_buffer(brw, intel_obj);
       }
    }
 
@@ -335,6 +337,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
    drm_intel_bo *temp_bo;
@@ -352,7 +355,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
 
    drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
 
-   intel_emit_linear_blit(intel,
+   intel_emit_linear_blit(brw,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);
@@ -367,7 +370,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
 static GLboolean
 intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
    assert(intel_obj);
@@ -378,13 +381,13 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
        * flush.  Once again, we wish for a domain tracker in libdrm to cover
        * usage inside of a batchbuffer.
        */
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
       free(intel_obj->range_map_buffer);
       intel_obj->range_map_buffer = NULL;
    } else if (intel_obj->range_map_bo != NULL) {
       drm_intel_bo_unmap(intel_obj->range_map_bo);
 
-      intel_emit_linear_blit(intel,
+      intel_emit_linear_blit(brw,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);
@@ -394,7 +397,7 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
        * flush.  Once again, we wish for a domain tracker in libdrm to cover
        * usage inside of a batchbuffer.
        */
-      intel_batchbuffer_emit_mi_flush(intel);
+      intel_batchbuffer_emit_mi_flush(brw);
 
       drm_intel_bo_unreference(intel_obj->range_map_bo);
       intel_obj->range_map_bo = NULL;
@@ -409,12 +412,12 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 }
 
 drm_intel_bo *
-intel_bufferobj_buffer(struct intel_context *intel,
+intel_bufferobj_buffer(struct brw_context *brw,
                        struct intel_buffer_object *intel_obj,
                       GLuint flag)
 {
    if (intel_obj->buffer == NULL)
-      intel_bufferobj_alloc_buffer(intel, intel_obj);
+      intel_bufferobj_alloc_buffer(brw, intel_obj);
 
    return intel_obj->buffer;
 }
@@ -422,8 +425,9 @@ intel_bufferobj_buffer(struct intel_context *intel,
 #define INTEL_UPLOAD_SIZE (64*1024)
 
 void
-intel_upload_finish(struct intel_context *intel)
+intel_upload_finish(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    if (!intel->upload.bo)
           return;
 
@@ -439,9 +443,10 @@ intel_upload_finish(struct intel_context *intel)
    intel->upload.bo = NULL;
 }
 
-static void wrap_buffers(struct intel_context *intel, GLuint size)
+static void wrap_buffers(struct brw_context *brw, GLuint size)
 {
-   intel_upload_finish(intel);
+   struct intel_context *intel = &brw->intel;
+   intel_upload_finish(brw);
 
    if (size < INTEL_UPLOAD_SIZE)
       size = INTEL_UPLOAD_SIZE;
@@ -450,16 +455,17 @@ static void wrap_buffers(struct intel_context *intel, GLuint size)
    intel->upload.offset = 0;
 }
 
-void intel_upload_data(struct intel_context *intel,
+void intel_upload_data(struct brw_context *brw,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
 {
+   struct intel_context *intel = &brw->intel;
    GLuint base, delta;
 
    base = (intel->upload.offset + align - 1) / align * align;
    if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
-      wrap_buffers(intel, size);
+      wrap_buffers(brw, size);
       base = 0;
    }
 
@@ -496,14 +502,15 @@ void intel_upload_data(struct intel_context *intel,
    intel->upload.offset = base + size;
 }
 
-void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
+void *intel_upload_map(struct brw_context *brw, GLuint size, GLuint align)
 {
+   struct intel_context *intel = &brw->intel;
    GLuint base, delta;
    char *ptr;
 
    base = (intel->upload.offset + align - 1) / align * align;
    if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
-      wrap_buffers(intel, size);
+      wrap_buffers(brw, size);
       base = 0;
    }
 
@@ -532,11 +539,12 @@ void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
    return ptr;
 }
 
-void intel_upload_unmap(struct intel_context *intel,
+void intel_upload_unmap(struct brw_context *brw,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
 {
+   struct intel_context *intel = &brw->intel;
    GLuint base;
 
    base = (intel->upload.offset + align - 1) / align * align;
@@ -553,7 +561,7 @@ void intel_upload_unmap(struct intel_context *intel,
 }
 
 drm_intel_bo *
-intel_bufferobj_source(struct intel_context *intel,
+intel_bufferobj_source(struct brw_context *brw,
                        struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
 {
@@ -568,7 +576,7 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_buffer_object *intel_src = intel_buffer_object(src);
    struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
    drm_intel_bo *src_bo, *dst_bo;
@@ -577,10 +585,10 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
    if (size == 0)
       return;
 
-   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
-   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
+   dst_bo = intel_bufferobj_buffer(brw, intel_dst, INTEL_WRITE_PART);
+   src_bo = intel_bufferobj_source(brw, intel_src, 64, &src_offset);
 
-   intel_emit_linear_blit(intel,
+   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);
 
@@ -589,7 +597,7 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
     * flush.  Once again, we wish for a domain tracker in libdrm to cover
     * usage inside of a batchbuffer.
     */
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }
 
 static GLenum
@@ -617,8 +625,8 @@ intel_buffer_object_purgeable(struct gl_context * ctx,
       return GL_RELEASED_APPLE;
    } else {
       /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
-      struct intel_context *intel = intel_context(ctx);
-      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);
+      struct brw_context *brw = brw_context(ctx);
+      drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_obj, INTEL_READ);
 
       return intel_buffer_purgeable(bo);
    }
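The GL-facing hooks in this section recover the driver context from the gl_context they are handed, which is the other half of the pattern: brw_context(ctx) at the top, brw passed to everything underneath. A minimal hypothetical hook sketch (example_buffer_hook() is illustrative only):

static void
example_buffer_hook(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Get (allocating on demand) the drm_intel_bo backing this object. */
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_obj, INTEL_READ);
   (void) bo;
}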
index 3c83c068b7662de3e0cc5511e1b11a431bcb660b..d320208ed85765bb537bef58cf02670e6a376cc9 100644
@@ -52,27 +52,27 @@ struct intel_buffer_object
 
 /* Get the bm buffer associated with a GL bufferobject:
  */
-drm_intel_bo *intel_bufferobj_buffer(struct intel_context *intel,
+drm_intel_bo *intel_bufferobj_buffer(struct brw_context *brw,
                                     struct intel_buffer_object *obj,
                                     GLuint flag);
-drm_intel_bo *intel_bufferobj_source(struct intel_context *intel,
+drm_intel_bo *intel_bufferobj_source(struct brw_context *brw,
                                     struct intel_buffer_object *obj,
                                     GLuint align,
                                     GLuint *offset);
 
-void intel_upload_data(struct intel_context *intel,
+void intel_upload_data(struct brw_context *brw,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset);
 
-void *intel_upload_map(struct intel_context *intel,
+void *intel_upload_map(struct brw_context *brw,
                       GLuint size, GLuint align);
-void intel_upload_unmap(struct intel_context *intel,
+void intel_upload_unmap(struct brw_context *brw,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset);
 
-void intel_upload_finish(struct intel_context *intel);
+void intel_upload_finish(struct brw_context *brw);
 
 /* Hook the bufferobject implementation into mesa:
  */
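The upload helpers declared above stream data into a shared upload buffer and return a bo plus offset for the copied bytes. A hedged usage sketch (example_upload() and the 64-byte alignment are assumptions, not taken from this patch):

static void
example_upload(struct brw_context *brw, const void *data, GLuint size)
{
   drm_intel_bo *bo = NULL;
   GLuint offset = 0;

   intel_upload_data(brw, data, size, 64 /* align */, &bo, &offset);

   /* ... emit state or relocations that reference bo + offset ... */

   intel_upload_finish(brw);
}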
index 4608471d8193ec26071ff3c85dc63475ccb8984c..502eba52eb296938152079abbccb1f87d57e246d 100644
@@ -39,8 +39,9 @@
  * If so, set the intel->front_buffer_dirty field to true.
  */
 void
-intel_check_front_buffer_rendering(struct intel_context *intel)
+intel_check_front_buffer_rendering(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
    if (_mesa_is_winsys_fbo(fb)) {
       /* drawing to window system buffer */
index 0ec495e065d99242df495eecac4b168988374c28..4f115f7c2278bbde25a146a11961aa702c3c69d4 100644
@@ -36,7 +36,7 @@
 struct intel_context;
 struct intel_framebuffer;
 
-extern void intel_check_front_buffer_rendering(struct intel_context *intel);
+extern void intel_check_front_buffer_rendering(struct brw_context *brw);
 
 extern void intelInitBufferFuncs(struct dd_function_table *functions);
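intel_check_front_buffer_rendering() keeps its job, per the comment above, of setting intel->front_buffer_dirty when the current draw framebuffer is the window-system buffer; only the context type changes. A hypothetical call-site sketch (example_after_draw() is illustrative):

static void
example_after_draw(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   /* Flag any front-buffer rendering for the later front-buffer flush path. */
   intel_check_front_buffer_rendering(brw);
}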
 
index 358fa07e9f439b684779a904d826d12d69d34033..2baca61a95a233ff49c09f2674cf9416ffc83925 100644
@@ -91,9 +91,10 @@ intelGetString(struct gl_context * ctx, GLenum name)
 }
 
 void
-intel_resolve_for_dri2_flush(struct intel_context *intel,
+intel_resolve_for_dri2_flush(struct brw_context *brw,
                              __DRIdrawable *drawable)
 {
+   struct intel_context *intel = &brw->intel;
    if (intel->gen < 6) {
       /* MSAA and fast color clear are not supported, so don't waste time
        * checking whether a resolve is needed.
@@ -117,15 +118,16 @@ intel_resolve_for_dri2_flush(struct intel_context *intel,
       if (rb == NULL || rb->mt == NULL)
          continue;
       if (rb->mt->num_samples <= 1)
-         intel_miptree_resolve_color(intel, rb->mt);
+         intel_miptree_resolve_color(brw, rb->mt);
       else
-         intel_miptree_downsample(intel, rb->mt);
+         intel_miptree_downsample(brw, rb->mt);
    }
 }
 
 static void
 intel_flush_front(struct gl_context *ctx)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
     __DRIcontext *driContext = intel->driContext;
     __DRIdrawable *driDrawable = driContext->driDrawablePriv;
@@ -143,7 +145,7 @@ intel_flush_front(struct gl_context *ctx)
           * performance. And no one cares about front-buffer render
           * performance.
           */
-         intel_resolve_for_dri2_flush(intel, driDrawable);
+         intel_resolve_for_dri2_flush(brw, driDrawable);
 
          screen->dri2.loader->flushFrontBuffer(driDrawable,
                                                driDrawable->loaderPrivate);
@@ -163,13 +165,13 @@ intel_bits_per_pixel(const struct intel_renderbuffer *rb)
 }
 
 static void
-intel_query_dri2_buffers(struct intel_context *intel,
+intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);
 
 static void
-intel_process_dri2_buffer(struct intel_context *intel,
+intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
@@ -180,7 +182,8 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
 {
    struct gl_framebuffer *fb = drawable->driverPrivate;
    struct intel_renderbuffer *rb;
-   struct intel_context *intel = context->driverPrivate;
+   struct brw_context *brw = context->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    __DRIbuffer *buffers = NULL;
    int i, count;
    const char *region_name;
@@ -193,7 +196,7 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
    if (unlikely(INTEL_DEBUG & DEBUG_DRI))
       fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
 
-   intel_query_dri2_buffers(intel, drawable, &buffers, &count);
+   intel_query_dri2_buffers(brw, drawable, &buffers, &count);
 
    if (buffers == NULL)
       return;
@@ -227,7 +230,7 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
           return;
        }
 
-       intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
+       intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
    }
 
    driUpdateFramebufferSize(&intel->ctx, drawable);
@@ -238,8 +241,9 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
  * state is required.
  */
 void
-intel_prepare_render(struct intel_context *intel)
+intel_prepare_render(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    __DRIcontext *driContext = intel->driContext;
    __DRIdrawable *drawable;
 
@@ -336,10 +340,11 @@ intelInvalidateState(struct gl_context * ctx, GLuint new_state)
 void
 _intel_flush(struct gl_context *ctx, const char *file, int line)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    if (intel->batch.used)
-      _intel_batchbuffer_flush(intel, file, line);
+      _intel_batchbuffer_flush(brw, file, line);
 }
 
 static void
@@ -426,7 +431,7 @@ validate_context_version(struct intel_screen *screen,
 }
 
 bool
-intelInitContext(struct intel_context *intel,
+intelInitContext(struct brw_context *brw,
                  int api,
                  unsigned major_version,
                  unsigned minor_version,
@@ -436,6 +441,7 @@ intelInitContext(struct intel_context *intel,
                  struct dd_function_table *functions,
                  unsigned *dri_ctx_error)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
    __DRIscreen *sPriv = driContextPriv->driScreenPriv;
@@ -569,9 +575,9 @@ intelInitContext(struct intel_context *intel,
    if (INTEL_DEBUG & DEBUG_AUB)
       drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
 
-   intel_batchbuffer_init(intel);
+   intel_batchbuffer_init(brw);
 
-   intel_fbo_init(intel);
+   intel_fbo_init(brw);
 
    if (!driQueryOptionb(&intel->optionCache, "hiz")) {
        intel->has_hiz = false;
@@ -601,21 +607,22 @@ intelInitContext(struct intel_context *intel,
 void
 intelDestroyContext(__DRIcontext * driContextPriv)
 {
-   struct intel_context *intel =
-      (struct intel_context *) driContextPriv->driverPrivate;
+   struct brw_context *brw =
+      (struct brw_context *) driContextPriv->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
 
    assert(intel);               /* should never be null */
    if (intel) {
       /* Dump a final BMP in case the application doesn't call SwapBuffers */
       if (INTEL_DEBUG & DEBUG_AUB) {
-         intel_batchbuffer_flush(intel);
+         intel_batchbuffer_flush(brw);
         aub_dump_bmp(&intel->ctx);
       }
 
       _mesa_meta_free(&intel->ctx);
 
-      intel->vtbl.destroy(intel);
+      intel->vtbl.destroy(brw);
 
       if (ctx->swrast_context) {
          _swsetup_DestroyContext(&intel->ctx);
@@ -626,7 +633,7 @@ intelDestroyContext(__DRIcontext * driContextPriv)
       if (ctx->swrast_context)
          _swrast_DestroyContext(&intel->ctx);
 
-      intel_batchbuffer_free(intel);
+      intel_batchbuffer_free(brw);
 
       drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
       intel->first_post_swapbuffers_batch = NULL;
@@ -679,9 +686,10 @@ intelUnbindContext(__DRIcontext * driContextPriv)
  * yet), we go turn that back off before anyone finds out.
  */
 static void
-intel_gles3_srgb_workaround(struct intel_context *intel,
+intel_gles3_srgb_workaround(struct brw_context *brw,
                             struct gl_framebuffer *fb)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
 
    if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
@@ -704,22 +712,24 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
                  __DRIdrawable * driDrawPriv,
                  __DRIdrawable * driReadPriv)
 {
-   struct intel_context *intel;
+   struct brw_context *brw;
    GET_CURRENT_CONTEXT(curCtx);
 
    if (driContextPriv)
-      intel = (struct intel_context *) driContextPriv->driverPrivate;
+      brw = (struct brw_context *) driContextPriv->driverPrivate;
    else
-      intel = NULL;
+      brw = NULL;
 
    /* According to the glXMakeCurrent() man page: "Pending commands to
     * the previous context, if any, are flushed before it is released."
     * But only flush if we're actually changing contexts.
     */
-   if (intel_context(curCtx) && intel_context(curCtx) != intel) {
+   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
       _mesa_flush(curCtx);
    }
 
+   struct intel_context *intel = &brw->intel;
+
    if (driContextPriv) {
       struct gl_context *ctx = &intel->ctx;
       struct gl_framebuffer *fb, *readFb;
@@ -734,11 +744,11 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
       }
 
-      intel_prepare_render(intel);
+      intel_prepare_render(brw);
       _mesa_make_current(ctx, fb, readFb);
 
-      intel_gles3_srgb_workaround(intel, ctx->WinSysDrawBuffer);
-      intel_gles3_srgb_workaround(intel, ctx->WinSysReadBuffer);
+      intel_gles3_srgb_workaround(brw, ctx->WinSysDrawBuffer);
+      intel_gles3_srgb_workaround(brw, ctx->WinSysReadBuffer);
    }
    else {
       _mesa_make_current(NULL, NULL, NULL);
@@ -765,11 +775,12 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
  * \see DRI2GetBuffersWithFormat()
  */
 static void
-intel_query_dri2_buffers(struct intel_context *intel,
+intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
 {
+   struct intel_context *intel = &brw->intel;
    __DRIscreen *screen = intel->intelScreen->driScrnPriv;
    struct gl_framebuffer *fb = drawable->driverPrivate;
    int i = 0;
@@ -838,12 +849,13 @@ intel_query_dri2_buffers(struct intel_context *intel,
  * \see intel_region_alloc_for_handle()
  */
 static void
-intel_process_dri2_buffer(struct intel_context *intel,
+intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_region *region = NULL;
 
    if (!rb)
@@ -886,7 +898,7 @@ intel_process_dri2_buffer(struct intel_context *intel,
    if (!region)
       return;
 
-   rb->mt = intel_miptree_create_for_dri2_buffer(intel,
+   rb->mt = intel_miptree_create_for_dri2_buffer(brw,
                                                  buffer->attachment,
                                                  intel_rb_format(rb),
                                                  num_samples,
index 9be7fc8a38c9c24f833d5865b509ec19f2c70443..a67c096bd6566fe21e7dc8c2c21bc91953d57bfa 100644
@@ -115,9 +115,9 @@ struct intel_context
 
    struct
    {
-      void (*destroy) (struct intel_context * intel);
-      void (*finish_batch) (struct intel_context * intel);
-      void (*new_batch) (struct intel_context * intel);
+      void (*destroy) (struct brw_context * brw);
+      void (*finish_batch) (struct brw_context * brw);
+      void (*new_batch) (struct brw_context * brw);
 
       void (*update_texture_surface)(struct gl_context *ctx,
                                      unsigned unit,
@@ -372,7 +372,7 @@ extern int INTEL_DEBUG;
  * intel_context.c:
  */
 
-extern bool intelInitContext(struct intel_context *intel,
+extern bool intelInitContext(struct brw_context *brw,
                              int api,
                              unsigned major_version,
                              unsigned minor_version,
@@ -403,10 +403,10 @@ extern int intel_translate_logic_op(GLenum opcode);
 
 void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
-void intel_prepare_render(struct intel_context *intel);
+void intel_prepare_render(struct brw_context *brw);
 
 void
-intel_resolve_for_dri2_flush(struct intel_context *intel,
+intel_resolve_for_dri2_flush(struct brw_context *brw,
                              __DRIdrawable *drawable);
 
 extern void
index 84dbbc1e0324b20ad3ed40af59d14f55f5ec6a25..def162d10d0df32c42f65cf1d6c5c22e6c686b5e 100644
@@ -88,7 +88,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
                       GLubyte **out_map,
                       GLint *out_stride)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    void *map;
@@ -103,7 +103,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
       return;
    }
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle flipped vertically, and
@@ -113,7 +113,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
       y = rb->Height - y - h;
    }
 
-   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+   intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);
 
    if (rb->Name == 0) {
@@ -136,7 +136,7 @@ static void
 intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
 
@@ -149,7 +149,7 @@ intel_unmap_renderbuffer(struct gl_context *ctx,
       return;
    }
 
-   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
+   intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
 }
 
 
@@ -191,6 +191,7 @@ intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer
                                  GLenum internalFormat,
                                  GLuint width, GLuint height)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_screen *screen = intel->intelScreen;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
@@ -235,7 +236,7 @@ intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer
    if (width == 0 || height == 0)
       return true;
 
-   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                    rb->NumSamples);
    if (!irb->mt)
@@ -250,6 +251,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_renderbuffer *irb;
    __DRIscreen *screen;
@@ -264,7 +266,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
    /* __DRIimage is opaque to the core so it has to be checked here */
    switch (image->format) {
    case MESA_FORMAT_RGBA8888_REV:
-      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
+      _mesa_error(ctx, GL_INVALID_OPERATION,
             "glEGLImageTargetRenderbufferStorage(unsupported image format");
       return;
       break;
@@ -274,7 +276,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
 
    irb = intel_renderbuffer(rb);
    intel_miptree_release(&irb->mt);
-   irb->mt = intel_miptree_create_for_bo(intel,
+   irb->mt = intel_miptree_create_for_bo(brw,
                                          image->region->bo,
                                          image->format,
                                          image->offset,
@@ -289,8 +291,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
    rb->Width = image->region->width;
    rb->Height = image->region->height;
    rb->Format = image->format;
-   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
-                                          image->internal_format);
+   rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
    rb->NeedsFinishRenderTexture = true;
 }
 
@@ -409,7 +410,7 @@ intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
 }
 
 static bool
-intel_renderbuffer_update_wrapper(struct intel_context *intel,
+intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                   struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                   uint32_t layer)
@@ -440,8 +441,8 @@ intel_renderbuffer_update_wrapper(struct intel_context *intel,
 
    intel_renderbuffer_set_draw_offset(irb);
 
-   if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(intel, rb->Format)) {
-      intel_miptree_alloc_hiz(intel, mt);
+   if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
+      intel_miptree_alloc_hiz(brw, mt);
       if (!mt->hiz_mt)
         return false;
    }
@@ -475,7 +476,7 @@ intel_render_texture(struct gl_context * ctx,
                      struct gl_framebuffer *fb,
                      struct gl_renderbuffer_attachment *att)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct gl_renderbuffer *rb = att->Renderbuffer;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    struct gl_texture_image *image = rb->TexImage;
@@ -502,7 +503,7 @@ intel_render_texture(struct gl_context * ctx,
 
    intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
 
-   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
+   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
        _swrast_render_texture(ctx, fb, att);
        return;
    }
@@ -520,7 +521,7 @@ intel_render_texture(struct gl_context * ctx,
 static void
 intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
 
    DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
 
@@ -529,7 +530,7 @@ intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
     * batch.  Once again, we wish for a domain tracker in libdrm to cover
     * usage inside of a batchbuffer like GEM does in the kernel.
     */
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }
 
 #define fbo_incomplete(fb, ...) do {                                          \
@@ -550,6 +551,7 @@ intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
 static void
 intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_renderbuffer *depthRb =
       intel_get_renderbuffer(fb, BUFFER_DEPTH);
@@ -641,7 +643,7 @@ intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
         continue;
       }
 
-      if (!brw_render_target_supported(intel, rb)) {
+      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                         "texture/renderbuffer format attached: %s\n",
                         _mesa_get_format_name(intel_rb_format(irb)));
@@ -665,6 +667,7 @@ intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                     GLint dstX1, GLint dstY1,
                                     GLbitfield mask, GLenum filter)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    if (mask & GL_COLOR_BUFFER_BIT) {
@@ -726,7 +729,7 @@ intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
             return mask;
          }
 
-         if (!intel_miptree_blit(intel,
+         if (!intel_miptree_blit(brw,
                                  src_irb->mt,
                                  src_irb->mt_level, src_irb->mt_layer,
                                  srcX0, srcY0, src_rb->Name == 0,
@@ -752,7 +755,7 @@ intel_blit_framebuffer(struct gl_context *ctx,
                        GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                        GLbitfield mask, GLenum filter)
 {
-   mask = brw_blorp_framebuffer(intel_context(ctx),
+   mask = brw_blorp_framebuffer(brw_context(ctx),
                                 srcX0, srcY0, srcX1, srcY1,
                                 dstX0, dstY0, dstX1, dstY1,
                                 mask, filter);
@@ -814,11 +817,11 @@ intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
 }
 
 bool
-intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb)
 {
    if (irb->mt)
-      return intel_miptree_slice_resolve_hiz(intel,
+      return intel_miptree_slice_resolve_hiz(brw,
                                              irb->mt,
                                              irb->mt_level,
                                              irb->mt_layer);
@@ -827,11 +830,11 @@ intel_renderbuffer_resolve_hiz(struct intel_context *intel,
 }
 
 bool
-intel_renderbuffer_resolve_depth(struct intel_context *intel,
+intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb)
 {
    if (irb->mt)
-      return intel_miptree_slice_resolve_depth(intel,
+      return intel_miptree_slice_resolve_depth(brw,
                                                irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
@@ -840,7 +843,7 @@ intel_renderbuffer_resolve_depth(struct intel_context *intel,
 }
 
 void
-intel_renderbuffer_move_to_temp(struct intel_context *intel,
+intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                 struct intel_renderbuffer *irb,
                                 bool invalidate)
 {
@@ -851,7 +854,7 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,
 
    intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
 
-   new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
+   new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                                  intel_image->base.Base.TexFormat,
                                  intel_image->base.Base.Level,
                                  intel_image->base.Base.Level,
@@ -860,11 +863,11 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,
                                  irb->mt->num_samples,
                                  INTEL_MIPTREE_TILING_ANY);
 
-   if (brw_is_hiz_depth_format(intel, new_mt->format)) {
-      intel_miptree_alloc_hiz(intel, new_mt);
+   if (brw_is_hiz_depth_format(brw, new_mt->format)) {
+      intel_miptree_alloc_hiz(brw, new_mt);
    }
 
-   intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
+   intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);
 
    intel_miptree_reference(&irb->mt, intel_image->mt);
    intel_renderbuffer_set_draw_offset(irb);
@@ -876,8 +879,9 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,
  * Hook in device driver functions.
  */
 void
-intel_fbo_init(struct intel_context *intel)
+intel_fbo_init(struct brw_context *brw)
 {
+   struct intel_context *intel = &brw->intel;
    intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
    intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
    intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
index 339879cecb299d0f24b7845e9fc297d8f8c025cc..b3f333701f657a64c3d0b6feea89e2e5aa9b9b9e 100644
@@ -139,7 +139,7 @@ intel_create_wrapped_renderbuffer(struct gl_context * ctx,
                                  gl_format format);
 
 extern void
-intel_fbo_init(struct intel_context *intel);
+intel_fbo_init(struct brw_context *brw);
 
 void
 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb);
@@ -175,7 +175,7 @@ intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb);
  * \return false if no resolve was needed
  */
 bool
-intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb);
 
 /**
@@ -187,10 +187,10 @@ intel_renderbuffer_resolve_hiz(struct intel_context *intel,
  * \return false if no resolve was needed
  */
 bool
-intel_renderbuffer_resolve_depth(struct intel_context *intel,
+intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb);
 
-void intel_renderbuffer_move_to_temp(struct intel_context *intel,
+void intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                      struct intel_renderbuffer *irb,
                                      bool invalidate);
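The resolve helpers above complete the picture for renderbuffers: a caller holding a framebuffer attachment resolves it through brw_context now. A hedged sketch (example_resolve_depth_attachment() is hypothetical; the BUFFER_DEPTH lookup follows intel_validate_framebuffer() above):

static void
example_resolve_depth_attachment(struct brw_context *brw,
                                 struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);

   /* Per the comment above, this returns false if no resolve was needed. */
   if (depth_irb)
      intel_renderbuffer_resolve_depth(brw, depth_irb);
}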
 
index 65b03198e5defb5e0cc0ee04ca90a196f2b3c549..bc2f10c986c82bdf52262bc5241b9b14e3c7a6e4 100644
@@ -69,8 +69,9 @@ target_to_target(GLenum target)
  * created, based on the chip generation and the surface type.
  */
 static enum intel_msaa_layout
-compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
+compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target)
 {
+   struct intel_context *intel = &brw->intel;
    /* Prior to Gen7, all MSAA surfaces used IMS layout. */
    if (intel->gen < 7)
       return INTEL_MSAA_LAYOUT_IMS;
@@ -162,7 +163,7 @@ compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target
  *   by half the block width, and Y coordinates by half the block height.
  */
 void
-intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
+intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  unsigned *width_px, unsigned *height)
 {
@@ -197,10 +198,10 @@ intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
  *       64bpp, and 128bpp.
  */
 bool
-intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
+intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
                                        struct intel_mipmap_tree *mt)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;
 
    /* MCS support does not exist prior to Gen7 */
    if (intel->gen < 7)
@@ -240,7 +241,7 @@ intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
  *        \c stencil_mt.
  */
 struct intel_mipmap_tree *
-intel_miptree_create_layout(struct intel_context *intel,
+intel_miptree_create_layout(struct brw_context *brw,
                             GLenum target,
                             gl_format format,
                             GLuint first_level,
@@ -251,6 +252,7 @@ intel_miptree_create_layout(struct intel_context *intel,
                             bool for_bo,
                             GLuint num_samples)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
    if (!mt)
       return NULL;
@@ -284,7 +286,7 @@ intel_miptree_create_layout(struct intel_context *intel,
 
    if (num_samples > 1) {
       /* Adjust width/height/depth for MSAA */
-      mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
+      mt->msaa_layout = compute_msaa_layout(brw, format, mt->target);
       if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
          /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
           *
@@ -368,8 +370,8 @@ intel_miptree_create_layout(struct intel_context *intel,
        _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
        (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
-        brw_is_hiz_depth_format(intel, format)))) {
-      mt->stencil_mt = intel_miptree_create(intel,
+        brw_is_hiz_depth_format(brw, format)))) {
+      mt->stencil_mt = intel_miptree_create(brw,
                                             mt->target,
                                             MESA_FORMAT_S8,
                                             mt->first_level,
@@ -399,7 +401,7 @@ intel_miptree_create_layout(struct intel_context *intel,
       }
    }
 
-   brw_miptree_layout(intel, mt);
+   brw_miptree_layout(brw, mt);
 
    return mt;
 }
@@ -408,14 +410,14 @@ intel_miptree_create_layout(struct intel_context *intel,
  * \brief Helper function for intel_miptree_create().
  */
 static uint32_t
-intel_miptree_choose_tiling(struct intel_context *intel,
+intel_miptree_choose_tiling(struct brw_context *brw,
                             gl_format format,
                             uint32_t width0,
                             uint32_t num_samples,
                             enum intel_miptree_tiling_mode requested,
                             struct intel_mipmap_tree *mt)
 {
-
+   struct intel_context *intel = &brw->intel;
    if (format == MESA_FORMAT_S8) {
       /* The stencil buffer is W tiled. However, we request from the kernel a
        * non-tiled buffer because the GTT is incapable of W fencing.
@@ -476,7 +478,7 @@ intel_miptree_choose_tiling(struct intel_context *intel,
 }
 
 struct intel_mipmap_tree *
-intel_miptree_create(struct intel_context *intel,
+intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
@@ -488,6 +490,7 @@ intel_miptree_create(struct intel_context *intel,
                      GLuint num_samples,
                      enum intel_miptree_tiling_mode requested_tiling)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_mipmap_tree *mt;
    gl_format tex_format = format;
    gl_format etc_format = MESA_FORMAT_NONE;
@@ -530,7 +533,7 @@ intel_miptree_create(struct intel_context *intel,
 
    etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
 
-   mt = intel_miptree_create_layout(intel, target, format,
+   mt = intel_miptree_create_layout(brw, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples);
@@ -551,7 +554,7 @@ intel_miptree_create(struct intel_context *intel,
       total_height = ALIGN(total_height, 64);
    }
 
-   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
+   uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0,
                                                  num_samples, requested_tiling,
                                                  mt);
    bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
@@ -593,14 +596,14 @@ intel_miptree_create(struct intel_context *intel,
     * Allocation of the MCS miptree will be deferred until the first fast
     * clear actually occurs.
     */
-   if (intel_is_non_msrt_mcs_buffer_supported(intel, mt))
+   if (intel_is_non_msrt_mcs_buffer_supported(brw, mt))
       mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
 
    return mt;
 }
 
 struct intel_mipmap_tree *
-intel_miptree_create_for_bo(struct intel_context *intel,
+intel_miptree_create_for_bo(struct brw_context *brw,
                             drm_intel_bo *bo,
                             gl_format format,
                             uint32_t offset,
@@ -626,7 +629,7 @@ intel_miptree_create_for_bo(struct intel_context *intel,
     */
    assert(pitch >= 0);
 
-   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
+   mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format,
                                     0, 0,
                                     width, height, 1,
                                     true, 0 /* num_samples */);
@@ -657,12 +660,13 @@ intel_miptree_create_for_bo(struct intel_context *intel,
  * singlesample miptree is embedded as a child.
  */
 struct intel_mipmap_tree*
-intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
                                      unsigned dri_attachment,
                                      gl_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_mipmap_tree *singlesample_mt = NULL;
    struct intel_mipmap_tree *multisample_mt = NULL;
 
@@ -675,7 +679,7 @@ intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
    assert(_mesa_get_format_base_format(format) == GL_RGB ||
           _mesa_get_format_base_format(format) == GL_RGBA);
 
-   singlesample_mt = intel_miptree_create_for_bo(intel,
+   singlesample_mt = intel_miptree_create_for_bo(brw,
                                                  region->bo,
                                                  format,
                                                  0,
@@ -692,13 +696,13 @@ intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
     * Allocation of the MCS miptree will be deferred until the first fast
     * clear actually occurs.
     */
-   if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt))
+   if (intel_is_non_msrt_mcs_buffer_supported(brw, singlesample_mt))
       singlesample_mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
 
    if (num_samples == 0)
       return singlesample_mt;
 
-   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+   multisample_mt = intel_miptree_create_for_renderbuffer(brw,
                                                           format,
                                                           region->width,
                                                           region->height,
@@ -714,14 +718,14 @@ intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
    if (intel->is_front_buffer_rendering &&
        (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
         dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
-      intel_miptree_upsample(intel, multisample_mt);
+      intel_miptree_upsample(brw, multisample_mt);
    }
 
    return multisample_mt;
 }
 
 struct intel_mipmap_tree*
-intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                       gl_format format,
                                       uint32_t width,
                                       uint32_t height,
@@ -731,20 +735,20 @@ intel_miptree_create_for_renderbuffer(struct intel_context *intel,
    uint32_t depth = 1;
    bool ok;
 
-   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
+   mt = intel_miptree_create(brw, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                              INTEL_MIPTREE_TILING_ANY);
    if (!mt)
       goto fail;
 
-   if (brw_is_hiz_depth_format(intel, format)) {
-      ok = intel_miptree_alloc_hiz(intel, mt);
+   if (brw_is_hiz_depth_format(brw, format)) {
+      ok = intel_miptree_alloc_hiz(brw, mt);
       if (!ok)
          goto fail;
    }
 
    if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
-      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+      ok = intel_miptree_alloc_mcs(brw, mt, num_samples);
       if (!ok)
          goto fail;
    }
@@ -971,7 +975,7 @@ intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
 }
 
 static void
-intel_miptree_copy_slice_sw(struct intel_context *intel,
+intel_miptree_copy_slice_sw(struct brw_context *brw,
                             struct intel_mipmap_tree *dst_mt,
                             struct intel_mipmap_tree *src_mt,
                             int level,
@@ -983,14 +987,14 @@ intel_miptree_copy_slice_sw(struct intel_context *intel,
    int src_stride, dst_stride;
    int cpp = dst_mt->cpp;
 
-   intel_miptree_map(intel, src_mt,
+   intel_miptree_map(brw, src_mt,
                      level, slice,
                      0, 0,
                      width, height,
                      GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                      &src, &src_stride);
 
-   intel_miptree_map(intel, dst_mt,
+   intel_miptree_map(brw, dst_mt,
                      level, slice,
                      0, 0,
                      width, height,
@@ -1017,8 +1021,8 @@ intel_miptree_copy_slice_sw(struct intel_context *intel,
       }
    }
 
-   intel_miptree_unmap(intel, dst_mt, level, slice);
-   intel_miptree_unmap(intel, src_mt, level, slice);
+   intel_miptree_unmap(brw, dst_mt, level, slice);
+   intel_miptree_unmap(brw, src_mt, level, slice);
 
    /* Don't forget to copy the stencil data over, too.  We could have skipped
     * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
@@ -1027,13 +1031,13 @@ intel_miptree_copy_slice_sw(struct intel_context *intel,
     */
    if (dst_mt->stencil_mt) {
       assert(src_mt->stencil_mt);
-      intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
+      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                   level, slice, width, height);
    }
 }
 
 static void
-intel_miptree_copy_slice(struct intel_context *intel,
+intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
@@ -1041,6 +1045,7 @@ intel_miptree_copy_slice(struct intel_context *intel,
                         int depth)
 
 {
+   struct intel_context *intel = &brw->intel;
    gl_format format = src_mt->format;
    uint32_t width = src_mt->level[level].width;
    uint32_t height = src_mt->level[level].height;
@@ -1064,7 +1069,7 @@ intel_miptree_copy_slice(struct intel_context *intel,
     * stencil's W tiling in the blitter.
     */
    if (src_mt->stencil_mt) {
-      intel_miptree_copy_slice_sw(intel,
+      intel_miptree_copy_slice_sw(brw,
                                   dst_mt, src_mt,
                                   level, slice,
                                   width, height);
@@ -1082,14 +1087,14 @@ intel_miptree_copy_slice(struct intel_context *intel,
        dst_mt, dst_x, dst_y, dst_mt->region->pitch,
        width, height);
 
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            src_mt, level, slice, 0, 0, false,
                            dst_mt, level, slice, 0, 0, false,
                            width, height, GL_COPY)) {
       perf_debug("miptree validate blit for %s failed\n",
                  _mesa_get_format_name(format));
 
-      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
+      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                   width, height);
    }
 }
@@ -1103,7 +1108,7 @@ intel_miptree_copy_slice(struct intel_context *intel,
  * is set to true if we're about to clear the image).
  */
 void
-intel_miptree_copy_teximage(struct intel_context *intel,
+intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                             bool invalidate)
@@ -1117,7 +1122,7 @@ intel_miptree_copy_teximage(struct intel_context *intel,
 
    if (!invalidate) {
       for (int slice = 0; slice < depth; slice++) {
-         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
       }
    }
 
@@ -1126,10 +1131,11 @@ intel_miptree_copy_teximage(struct intel_context *intel,
 }
 
 bool
-intel_miptree_alloc_mcs(struct intel_context *intel,
+intel_miptree_alloc_mcs(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         GLuint num_samples)
 {
+   struct intel_context *intel = &brw->intel;
    assert(intel->gen >= 7); /* MCS only used on Gen7+ */
    assert(mt->mcs_mt == NULL);
 
@@ -1162,7 +1168,7 @@ intel_miptree_alloc_mcs(struct intel_context *intel,
     *     "The MCS surface must be stored as Tile Y."
     */
    mt->mcs_state = INTEL_MCS_STATE_MSAA;
-   mt->mcs_mt = intel_miptree_create(intel,
+   mt->mcs_mt = intel_miptree_create(brw,
                                      mt->target,
                                      format,
                                      mt->first_level,
@@ -1184,16 +1190,16 @@ intel_miptree_alloc_mcs(struct intel_context *intel,
     *
     * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
     */
-   void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
+   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
    memset(data, 0xff, mt->mcs_mt->region->bo->size);
-   intel_miptree_unmap_raw(intel, mt->mcs_mt);
+   intel_miptree_unmap_raw(brw, mt->mcs_mt);
 
    return mt->mcs_mt;
 }
 
 
 bool
-intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
+intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt)
 {
    assert(mt->mcs_mt == NULL);
@@ -1210,7 +1216,7 @@ intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
    const gl_format format = MESA_FORMAT_R_UINT32;
    unsigned block_width_px;
    unsigned block_height;
-   intel_get_non_msrt_mcs_alignment(intel, mt, &block_width_px, &block_height);
+   intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
    unsigned width_divisor = block_width_px * 4;
    unsigned height_divisor = block_height * 8;
    unsigned mcs_width =
@@ -1218,7 +1224,7 @@ intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
    unsigned mcs_height =
       ALIGN(mt->logical_height0, height_divisor) / height_divisor;
    assert(mt->logical_depth0 == 1);
-   mt->mcs_mt = intel_miptree_create(intel,
+   mt->mcs_mt = intel_miptree_create(brw,
                                      mt->target,
                                      format,
                                      mt->first_level,
@@ -1240,11 +1246,12 @@ intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
  * \c has_hiz was set.
  */
 static bool
-intel_miptree_slice_enable_hiz(struct intel_context *intel,
+intel_miptree_slice_enable_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
 {
+   struct intel_context *intel = &brw->intel;
    assert(mt->hiz_mt);
 
    if (intel->is_haswell) {
@@ -1282,11 +1289,11 @@ intel_miptree_slice_enable_hiz(struct intel_context *intel,
 
 
 bool
-intel_miptree_alloc_hiz(struct intel_context *intel,
+intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
 {
    assert(mt->hiz_mt == NULL);
-   mt->hiz_mt = intel_miptree_create(intel,
+   mt->hiz_mt = intel_miptree_create(brw,
                                      mt->target,
                                      mt->format,
                                      mt->first_level,
@@ -1305,7 +1312,7 @@ intel_miptree_alloc_hiz(struct intel_context *intel,
    struct intel_resolve_map *head = &mt->hiz_map;
    for (int level = mt->first_level; level <= mt->last_level; ++level) {
       for (int layer = 0; layer < mt->level[level].depth; ++layer) {
-         if (!intel_miptree_slice_enable_hiz(intel, mt, level, layer))
+         if (!intel_miptree_slice_enable_hiz(brw, mt, level, layer))
             continue;
 
         head->next = malloc(sizeof(*head->next));
@@ -1360,7 +1367,7 @@ intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
 }
 
 static bool
-intel_miptree_slice_resolve(struct intel_context *intel,
+intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
@@ -1374,33 +1381,33 @@ intel_miptree_slice_resolve(struct intel_context *intel,
    if (!item || item->need != need)
       return false;
 
-   intel_hiz_exec(intel, mt, level, layer, need);
+   intel_hiz_exec(brw, mt, level, layer, need);
    intel_resolve_map_remove(item);
    return true;
 }
 
 bool
-intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
 {
-   return intel_miptree_slice_resolve(intel, mt, level, layer,
+   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
 }
 
 bool
-intel_miptree_slice_resolve_depth(struct intel_context *intel,
+intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
 {
-   return intel_miptree_slice_resolve(intel, mt, level, layer,
+   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
 }
 
 static bool
-intel_miptree_all_slices_resolve(struct intel_context *intel,
+intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
 {
@@ -1412,7 +1419,7 @@ intel_miptree_all_slices_resolve(struct intel_context *intel,
       if (i->need != need)
         continue;
 
-      intel_hiz_exec(intel, mt, i->level, i->layer, need);
+      intel_hiz_exec(brw, mt, i->level, i->layer, need);
       intel_resolve_map_remove(i);
       did_resolve = true;
    }
@@ -1421,24 +1428,24 @@ intel_miptree_all_slices_resolve(struct intel_context *intel,
 }
 
 bool
-intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
 {
-   return intel_miptree_all_slices_resolve(intel, mt,
+   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
 }
 
 bool
-intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
 {
-   return intel_miptree_all_slices_resolve(intel, mt,
+   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
 }
 
 
 void
-intel_miptree_resolve_color(struct intel_context *intel,
+intel_miptree_resolve_color(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
 {
    switch (mt->mcs_state) {
@@ -1449,7 +1456,7 @@ intel_miptree_resolve_color(struct intel_context *intel,
       break;
    case INTEL_MCS_STATE_UNRESOLVED:
    case INTEL_MCS_STATE_CLEAR:
-      brw_blorp_resolve_color(intel, mt);
+      brw_blorp_resolve_color(brw, mt);
       break;
    }
 }
@@ -1465,7 +1472,7 @@ intel_miptree_resolve_color(struct intel_context *intel,
  * future.
  */
 void
-intel_miptree_make_shareable(struct intel_context *intel,
+intel_miptree_make_shareable(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
 {
    /* MCS buffers are also used for multisample buffers, but we can't resolve
@@ -1476,7 +1483,7 @@ intel_miptree_make_shareable(struct intel_context *intel,
    assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
 
    if (mt->mcs_mt) {
-      intel_miptree_resolve_color(intel, mt);
+      intel_miptree_resolve_color(brw, mt);
       intel_miptree_release(&mt->mcs_mt);
       mt->mcs_state = INTEL_MCS_STATE_NONE;
    }
@@ -1540,7 +1547,7 @@ intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
 }
 
 static void
-intel_miptree_updownsample(struct intel_context *intel,
+intel_miptree_updownsample(struct brw_context *brw,
                            struct intel_mipmap_tree *src,
                            struct intel_mipmap_tree *dst,
                            unsigned width,
@@ -1551,7 +1558,7 @@ intel_miptree_updownsample(struct intel_context *intel,
    int dst_x0 = 0;
    int dst_y0 = 0;
 
-   brw_blorp_blit_miptrees(intel,
+   brw_blorp_blit_miptrees(brw,
                            src, 0 /* level */, 0 /* layer */,
                            dst, 0 /* level */, 0 /* layer */,
                            src_x0, src_y0,
@@ -1561,7 +1568,7 @@ intel_miptree_updownsample(struct intel_context *intel,
                            false, false /*mirror x, y*/);
 
    if (src->stencil_mt) {
-      brw_blorp_blit_miptrees(intel,
+      brw_blorp_blit_miptrees(brw,
                               src->stencil_mt, 0 /* level */, 0 /* layer */,
                               dst->stencil_mt, 0 /* level */, 0 /* layer */,
                               src_x0, src_y0,
@@ -1586,7 +1593,7 @@ assert_is_flat(struct intel_mipmap_tree *mt)
  * If the miptree needs no downsample, then skip.
  */
 void
-intel_miptree_downsample(struct intel_context *intel,
+intel_miptree_downsample(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
 {
    /* Only flat, renderbuffer-like miptrees are supported. */
@@ -1594,7 +1601,7 @@ intel_miptree_downsample(struct intel_context *intel,
 
    if (!mt->need_downsample)
       return;
-   intel_miptree_updownsample(intel,
+   intel_miptree_updownsample(brw,
                               mt, mt->singlesample_mt,
                               mt->logical_width0,
                               mt->logical_height0);
@@ -1607,26 +1614,28 @@ intel_miptree_downsample(struct intel_context *intel,
  * The upsample is done unconditionally.
  */
 void
-intel_miptree_upsample(struct intel_context *intel,
+intel_miptree_upsample(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
 {
    /* Only flat, renderbuffer-like miptrees are supported. */
    assert_is_flat(mt);
    assert(!mt->need_downsample);
 
-   intel_miptree_updownsample(intel,
+   intel_miptree_updownsample(brw,
                               mt->singlesample_mt, mt,
                               mt->logical_width0,
                               mt->logical_height0);
 }
 
 void *
-intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
+intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
 {
+   struct intel_context *intel = &brw->intel;
+   struct gl_context *ctx = &intel->ctx;
    /* CPU accesses to color buffers don't understand fast color clears, so
     * resolve any pending fast color clears before we map.
     */
-   intel_miptree_resolve_color(intel, mt);
+   intel_miptree_resolve_color(brw, mt);
 
    drm_intel_bo *bo = mt->region->bo;
 
@@ -1636,7 +1645,7 @@ intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
       }
    }
 
-   intel_flush(&intel->ctx);
+   intel_flush(ctx);
 
    if (mt->region->tiling != I915_TILING_NONE)
       drm_intel_gem_bo_map_gtt(bo);
@@ -1647,14 +1656,14 @@ intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
 }
 
 void
-intel_miptree_unmap_raw(struct intel_context *intel,
+intel_miptree_unmap_raw(struct brw_context *brw,
                         struct intel_mipmap_tree *mt)
 {
    drm_intel_bo_unmap(mt->region->bo);
 }
 
 static void
-intel_miptree_map_gtt(struct intel_context *intel,
+intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
@@ -1673,7 +1682,7 @@ intel_miptree_map_gtt(struct intel_context *intel,
    assert(y % bh == 0);
    y /= bh;
 
-   base = intel_miptree_map_raw(intel, mt) + mt->offset;
+   base = intel_miptree_map_raw(brw, mt) + mt->offset;
 
    if (base == NULL)
       map->ptr = NULL;
@@ -1696,22 +1705,22 @@ intel_miptree_map_gtt(struct intel_context *intel,
 }
 
 static void
-intel_miptree_unmap_gtt(struct intel_context *intel,
+intel_miptree_unmap_gtt(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
 {
-   intel_miptree_unmap_raw(intel, mt);
+   intel_miptree_unmap_raw(brw, mt);
 }
 
 static void
-intel_miptree_map_blit(struct intel_context *intel,
+intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
 {
-   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
+   map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                   0, 0,
                                   map->w, map->h, 1,
                                   false, 0,
@@ -1722,7 +1731,7 @@ intel_miptree_map_blit(struct intel_context *intel,
    }
    map->stride = map->mt->region->pitch;
 
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            mt, level, slice,
                            map->x, map->y, false,
                            map->mt, 0, 0,
@@ -1732,8 +1741,8 @@ intel_miptree_map_blit(struct intel_context *intel,
       goto fail;
    }
 
-   intel_batchbuffer_flush(intel);
-   map->ptr = intel_miptree_map_raw(intel, map->mt);
+   intel_batchbuffer_flush(brw);
+   map->ptr = intel_miptree_map_raw(brw, map->mt);
 
    DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
        map->x, map->y, map->w, map->h,
@@ -1749,18 +1758,19 @@ fail:
 }
 
 static void
-intel_miptree_unmap_blit(struct intel_context *intel,
+intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
 
-   intel_miptree_unmap_raw(intel, map->mt);
+   intel_miptree_unmap_raw(brw, map->mt);
 
    if (map->mode & GL_MAP_WRITE_BIT) {
-      bool ok = intel_miptree_blit(intel,
+      bool ok = intel_miptree_blit(brw,
                                    map->mt, 0, 0,
                                    0, 0, false,
                                    mt, level, slice,
@@ -1773,11 +1783,12 @@ intel_miptree_unmap_blit(struct intel_context *intel,
 }
 
 static void
-intel_miptree_map_s8(struct intel_context *intel,
+intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
 {
+   struct intel_context *intel = &brw->intel;
    map->stride = map->w;
    map->buffer = map->ptr = malloc(map->stride * map->h);
    if (!map->buffer)
@@ -1790,7 +1801,7 @@ intel_miptree_map_s8(struct intel_context *intel,
     */
    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
       uint8_t *untiled_s8_map = map->ptr;
-      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
+      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
       unsigned int image_x, image_y;
 
       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
@@ -1805,7 +1816,7 @@ intel_miptree_map_s8(struct intel_context *intel,
         }
       }
 
-      intel_miptree_unmap_raw(intel, mt);
+      intel_miptree_unmap_raw(brw, mt);
 
       DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
@@ -1818,16 +1829,17 @@ intel_miptree_map_s8(struct intel_context *intel,
 }
 
 static void
-intel_miptree_unmap_s8(struct intel_context *intel,
+intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
 {
+   struct intel_context *intel = &brw->intel;
    if (map->mode & GL_MAP_WRITE_BIT) {
       unsigned int image_x, image_y;
       uint8_t *untiled_s8_map = map->ptr;
-      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
+      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
 
       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
 
@@ -1841,14 +1853,14 @@ intel_miptree_unmap_s8(struct intel_context *intel,
         }
       }
 
-      intel_miptree_unmap_raw(intel, mt);
+      intel_miptree_unmap_raw(brw, mt);
    }
 
    free(map->buffer);
 }
 
 static void
-intel_miptree_map_etc(struct intel_context *intel,
+intel_miptree_map_etc(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
@@ -1869,7 +1881,7 @@ intel_miptree_map_etc(struct intel_context *intel,
 }
 
 static void
-intel_miptree_unmap_etc(struct intel_context *intel,
+intel_miptree_unmap_etc(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
@@ -1882,7 +1894,7 @@ intel_miptree_unmap_etc(struct intel_context *intel,
    image_x += map->x;
    image_y += map->y;
 
-   uint8_t *dst = intel_miptree_map_raw(intel, mt)
+   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                 + image_y * mt->region->pitch
                 + image_x * mt->region->cpp;
 
@@ -1895,7 +1907,7 @@ intel_miptree_unmap_etc(struct intel_context *intel,
                                map->ptr, map->stride,
                                map->w, map->h, mt->etc_format);
 
-   intel_miptree_unmap_raw(intel, mt);
+   intel_miptree_unmap_raw(brw, mt);
    free(map->buffer);
 }
 
@@ -1911,11 +1923,12 @@ intel_miptree_unmap_etc(struct intel_context *intel,
  * copying the data between the actual backing store and the temporary.
  */
 static void
-intel_miptree_map_depthstencil(struct intel_context *intel,
+intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_mipmap_tree *z_mt = mt;
    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
@@ -1933,8 +1946,8 @@ intel_miptree_map_depthstencil(struct intel_context *intel,
     */
    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
-      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -1965,8 +1978,8 @@ intel_miptree_map_depthstencil(struct intel_context *intel,
         }
       }
 
-      intel_miptree_unmap_raw(intel, s_mt);
-      intel_miptree_unmap_raw(intel, z_mt);
+      intel_miptree_unmap_raw(brw, s_mt);
+      intel_miptree_unmap_raw(brw, z_mt);
 
       DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
@@ -1982,20 +1995,21 @@ intel_miptree_map_depthstencil(struct intel_context *intel,
 }
 
 static void
-intel_miptree_unmap_depthstencil(struct intel_context *intel,
+intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_mipmap_tree *z_mt = mt;
    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
 
    if (map->mode & GL_MAP_WRITE_BIT) {
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
-      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -2025,8 +2039,8 @@ intel_miptree_unmap_depthstencil(struct intel_context *intel,
         }
       }
 
-      intel_miptree_unmap_raw(intel, s_mt);
-      intel_miptree_unmap_raw(intel, z_mt);
+      intel_miptree_unmap_raw(brw, s_mt);
+      intel_miptree_unmap_raw(brw, z_mt);
 
       DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
@@ -2087,7 +2101,7 @@ intel_miptree_release_map(struct intel_mipmap_tree *mt,
 }
 
 static void
-intel_miptree_map_singlesample(struct intel_context *intel,
+intel_miptree_map_singlesample(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice,
@@ -2099,6 +2113,7 @@ intel_miptree_map_singlesample(struct intel_context *intel,
                                void **out_ptr,
                                int *out_stride)
 {
+   struct intel_context *intel = &brw->intel;
    struct intel_miptree_map *map;
 
    assert(mt->num_samples <= 1);
@@ -2110,18 +2125,18 @@ intel_miptree_map_singlesample(struct intel_context *intel,
       return;
    }
 
-   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
+   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
    if (map->mode & GL_MAP_WRITE_BIT) {
       intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
    }
 
    if (mt->format == MESA_FORMAT_S8) {
-      intel_miptree_map_s8(intel, mt, map, level, slice);
+      intel_miptree_map_s8(brw, mt, map, level, slice);
    } else if (mt->etc_format != MESA_FORMAT_NONE &&
               !(mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_map_etc(intel, mt, map, level, slice);
+      intel_miptree_map_etc(brw, mt, map, level, slice);
    } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
+      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
    }
    /* See intel_miptree_blit() for details on the 32k pitch limit. */
    else if (intel->has_llc &&
@@ -2130,13 +2145,13 @@ intel_miptree_map_singlesample(struct intel_context *intel,
             (mt->region->tiling == I915_TILING_X ||
              (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
             mt->region->pitch < 32768) {
-      intel_miptree_map_blit(intel, mt, map, level, slice);
+      intel_miptree_map_blit(brw, mt, map, level, slice);
    } else if (mt->region->tiling != I915_TILING_NONE &&
               mt->region->bo->size >= intel->max_gtt_map_object_size) {
       assert(mt->region->pitch < 32768);
-      intel_miptree_map_blit(intel, mt, map, level, slice);
+      intel_miptree_map_blit(brw, mt, map, level, slice);
    } else {
-      intel_miptree_map_gtt(intel, mt, map, level, slice);
+      intel_miptree_map_gtt(brw, mt, map, level, slice);
    }
 
    *out_ptr = map->ptr;
@@ -2147,7 +2162,7 @@ intel_miptree_map_singlesample(struct intel_context *intel,
 }
 
 static void
-intel_miptree_unmap_singlesample(struct intel_context *intel,
+intel_miptree_unmap_singlesample(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  unsigned int level,
                                  unsigned int slice)
@@ -2163,23 +2178,23 @@ intel_miptree_unmap_singlesample(struct intel_context *intel,
        mt, _mesa_get_format_name(mt->format), level, slice);
 
    if (mt->format == MESA_FORMAT_S8) {
-      intel_miptree_unmap_s8(intel, mt, map, level, slice);
+      intel_miptree_unmap_s8(brw, mt, map, level, slice);
    } else if (mt->etc_format != MESA_FORMAT_NONE &&
               !(map->mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_unmap_etc(intel, mt, map, level, slice);
+      intel_miptree_unmap_etc(brw, mt, map, level, slice);
    } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
+      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
    } else if (map->mt) {
-      intel_miptree_unmap_blit(intel, mt, map, level, slice);
+      intel_miptree_unmap_blit(brw, mt, map, level, slice);
    } else {
-      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
+      intel_miptree_unmap_gtt(brw, mt, map, level, slice);
    }
 
    intel_miptree_release_map(mt, level, slice);
 }
 
 static void
-intel_miptree_map_multisample(struct intel_context *intel,
+intel_miptree_map_multisample(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
@@ -2191,6 +2206,7 @@ intel_miptree_map_multisample(struct intel_context *intel,
                               void **out_ptr,
                               int *out_stride)
 {
+   struct gl_context *ctx = &brw->intel.ctx;
    struct intel_miptree_map *map;
 
    assert(mt->num_samples > 1);
@@ -2199,7 +2215,7 @@ intel_miptree_map_multisample(struct intel_context *intel,
    if (mt->target != GL_TEXTURE_2D ||
        mt->first_level != 0 ||
        mt->last_level != 0) {
-      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+      _mesa_problem(ctx, "attempt to map a multisample miptree for "
                     "which (target, first_level, last_level != "
                     "(GL_TEXTURE_2D, 0, 0)");
       goto fail;
@@ -2211,7 +2227,7 @@ intel_miptree_map_multisample(struct intel_context *intel,
 
    if (!mt->singlesample_mt) {
       mt->singlesample_mt =
-         intel_miptree_create_for_renderbuffer(intel,
+         intel_miptree_create_for_renderbuffer(brw,
                                                mt->format,
                                                mt->logical_width0,
                                                mt->logical_height0,
@@ -2223,8 +2239,8 @@ intel_miptree_map_multisample(struct intel_context *intel,
       mt->need_downsample = true;
    }
 
-   intel_miptree_downsample(intel, mt);
-   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+   intel_miptree_downsample(brw, mt);
+   intel_miptree_map_singlesample(brw, mt->singlesample_mt,
                                   level, slice,
                                   x, y, w, h,
                                   mode,
@@ -2238,7 +2254,7 @@ fail:
 }
 
 static void
-intel_miptree_unmap_multisample(struct intel_context *intel,
+intel_miptree_unmap_multisample(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
@@ -2250,11 +2266,11 @@ intel_miptree_unmap_multisample(struct intel_context *intel,
    if (!map)
       return;
 
-   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+   intel_miptree_unmap_singlesample(brw, mt->singlesample_mt, level, slice);
 
    mt->need_downsample = false;
    if (map->mode & GL_MAP_WRITE_BIT)
-      intel_miptree_upsample(intel, mt);
+      intel_miptree_upsample(brw, mt);
 
    if (map->singlesample_mt_is_tmp)
       intel_miptree_release(&mt->singlesample_mt);
@@ -2263,7 +2279,7 @@ intel_miptree_unmap_multisample(struct intel_context *intel,
 }
 
 void
-intel_miptree_map(struct intel_context *intel,
+intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
@@ -2276,13 +2292,13 @@ intel_miptree_map(struct intel_context *intel,
                  int *out_stride)
 {
    if (mt->num_samples <= 1)
-      intel_miptree_map_singlesample(intel, mt,
+      intel_miptree_map_singlesample(brw, mt,
                                      level, slice,
                                      x, y, w, h,
                                      mode,
                                      out_ptr, out_stride);
    else
-      intel_miptree_map_multisample(intel, mt,
+      intel_miptree_map_multisample(brw, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
@@ -2290,13 +2306,13 @@ intel_miptree_map(struct intel_context *intel,
 }
 
 void
-intel_miptree_unmap(struct intel_context *intel,
+intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
 {
    if (mt->num_samples <= 1)
-      intel_miptree_unmap_singlesample(intel, mt, level, slice);
+      intel_miptree_unmap_singlesample(brw, mt, level, slice);
    else
-      intel_miptree_unmap_multisample(intel, mt, level, slice);
+      intel_miptree_unmap_multisample(brw, mt, level, slice);
 }
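
For code outside the miptree layer, only the first argument of the map/unmap API changes. A rough usage sketch with the updated signatures (illustration only; mt, level, slice, width, height, cpp, dst and dst_stride are assumed to be provided by the caller):

   /* Illustrative CPU readback of one slice through the updated API. */
   void *ptr;
   int stride;

   intel_miptree_map(brw, mt, level, slice,
                     0, 0, width, height,
                     GL_MAP_READ_BIT, &ptr, &stride);
   if (ptr) {
      for (unsigned y = 0; y < height; y++)
         memcpy(dst + y * dst_stride,
                (const char *)ptr + y * stride,
                width * cpp);
   }
   intel_miptree_unmap(brw, mt, level, slice);
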
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.h b/src/mesa/drivers/dri/i965/intel_mipmap_tree.h
index 456ee9735361f835334df8a99d8040ec83599d78..3304c3ed9b2bf0cdd8ec8ba3e14bd3e91fb512ab 100644
@@ -476,19 +476,19 @@ enum intel_miptree_tiling_mode {
 };
 
 bool
-intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
+intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
                                        struct intel_mipmap_tree *mt);
 
 void
-intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
+intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  unsigned *width_px, unsigned *height);
 
 bool
-intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
+intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt);
 
-struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
+struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
                                                GLenum target,
                                               gl_format format,
                                                GLuint first_level,
@@ -501,7 +501,7 @@ struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
                                                enum intel_miptree_tiling_mode);
 
 struct intel_mipmap_tree *
-intel_miptree_create_layout(struct intel_context *intel,
+intel_miptree_create_layout(struct brw_context *brw,
                             GLenum target,
                             gl_format format,
                             GLuint first_level,
@@ -513,7 +513,7 @@ intel_miptree_create_layout(struct intel_context *intel,
                             GLuint num_samples);
 
 struct intel_mipmap_tree *
-intel_miptree_create_for_bo(struct intel_context *intel,
+intel_miptree_create_for_bo(struct brw_context *brw,
                             drm_intel_bo *bo,
                             gl_format format,
                             uint32_t offset,
@@ -523,7 +523,7 @@ intel_miptree_create_for_bo(struct intel_context *intel,
                             uint32_t tiling);
 
 struct intel_mipmap_tree*
-intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
                                      unsigned dri_attachment,
                                      gl_format format,
                                      uint32_t num_samples,
@@ -537,7 +537,7 @@ intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
  *     - Depth is 1.
  */
 struct intel_mipmap_tree*
-intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                       gl_format format,
                                       uint32_t width,
                                       uint32_t height,
@@ -589,12 +589,12 @@ void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                                     GLuint img, GLuint x, GLuint y);
 
 void
-intel_miptree_copy_teximage(struct intel_context *intel,
+intel_miptree_copy_teximage(struct brw_context *brw,
                             struct intel_texture_image *intelImage,
                             struct intel_mipmap_tree *dst_mt, bool invalidate);
 
 bool
-intel_miptree_alloc_mcs(struct intel_context *intel,
+intel_miptree_alloc_mcs(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         GLuint num_samples);
 
@@ -613,7 +613,7 @@ intel_miptree_alloc_mcs(struct intel_context *intel,
  */
 
 bool
-intel_miptree_alloc_hiz(struct intel_context *intel,
+intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);
 
 bool
@@ -634,7 +634,7 @@ intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int depth);
@@ -643,7 +643,7 @@ intel_miptree_slice_resolve_hiz(struct intel_context *intel,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_slice_resolve_depth(struct intel_context *intel,
+intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  unsigned int level,
                                  unsigned int depth);
@@ -652,14 +652,14 @@ intel_miptree_slice_resolve_depth(struct intel_context *intel,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt);
 
 /**
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt);
 
 /**\}*/
@@ -680,36 +680,31 @@ intel_miptree_used_for_rendering(struct intel_mipmap_tree *mt)
 }
 
 void
-intel_miptree_resolve_color(struct intel_context *intel,
+intel_miptree_resolve_color(struct brw_context *brw,
                             struct intel_mipmap_tree *mt);
 
 void
-intel_miptree_make_shareable(struct intel_context *intel,
+intel_miptree_make_shareable(struct brw_context *brw,
                              struct intel_mipmap_tree *mt);
 
 void
-intel_miptree_downsample(struct intel_context *intel,
+intel_miptree_downsample(struct brw_context *brw,
                          struct intel_mipmap_tree *mt);
 
 void
-intel_miptree_upsample(struct intel_context *intel,
+intel_miptree_upsample(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);
 
-/* i915_mipmap_tree.c:
- */
-void i915_miptree_layout(struct intel_mipmap_tree *mt);
-void i945_miptree_layout(struct intel_mipmap_tree *mt);
-void brw_miptree_layout(struct intel_context *intel,
-                       struct intel_mipmap_tree *mt);
+void brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt);
 
-void *intel_miptree_map_raw(struct intel_context *intel,
+void *intel_miptree_map_raw(struct brw_context *brw,
                             struct intel_mipmap_tree *mt);
 
-void intel_miptree_unmap_raw(struct intel_context *intel,
+void intel_miptree_unmap_raw(struct brw_context *brw,
                              struct intel_mipmap_tree *mt);
 
 void
-intel_miptree_map(struct intel_context *intel,
+intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
@@ -722,13 +717,13 @@ intel_miptree_map(struct intel_context *intel,
                  int *out_stride);
 
 void
-intel_miptree_unmap(struct intel_context *intel,
+intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice);
 
 void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, enum gen6_hiz_op op);
 
 #ifdef __cplusplus
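
The resolve helpers declared above keep their calling sequence; only the context argument changes. A sketch of the discipline intel_miptree_map_singlesample() follows around CPU access to a depth miptree (illustration only; wrote_depth is a hypothetical flag):

   /* Resolve pending HiZ data into the depth buffer before touching it on
    * the CPU, then mark HiZ stale again if the depth data was modified.
    */
   intel_miptree_slice_resolve_depth(brw, mt, level, slice);

   /* ... map, read or write the slice, unmap ... */

   if (wrote_depth)
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
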
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c b/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
index 772959b589ede32900d359066df71b581599e7cd..c7e1512e3b8989715b77d95a80db2f266da819c9 100644
@@ -176,6 +176,7 @@ do_blit_bitmap( struct gl_context *ctx,
                const struct gl_pixelstore_attrib *unpack,
                const GLubyte *bitmap )
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *irb;
@@ -200,7 +201,7 @@ do_blit_bitmap( struct gl_context *ctx,
       return false;
    }
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    if (fb->_NumColorDrawBuffers != 1) {
       perf_debug("accelerated glBitmap() only supports rendering to a "
@@ -258,7 +259,7 @@ do_blit_bitmap( struct gl_context *ctx,
    /* The blitter has no idea about fast color clears, so we need to resolve
     * the miptree before we do anything.
     */
-   intel_miptree_resolve_color(intel, irb->mt);
+   intel_miptree_resolve_color(brw, irb->mt);
 
    /* Chop it all into chunks that can be digested by hardware: */
    for (py = 0; py < height; py += DY) {
@@ -289,7 +290,7 @@ do_blit_bitmap( struct gl_context *ctx,
          if (count == 0)
            continue;
 
-        if (!intelEmitImmediateColorExpandBlit(intel,
+        if (!intelEmitImmediateColorExpandBlit(brw,
                                                irb->mt->cpp,
                                                (GLubyte *)stipple,
                                                sz,
@@ -312,14 +313,14 @@ do_blit_bitmap( struct gl_context *ctx,
 out:
 
    if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (_mesa_is_bufferobj(unpack->BufferObj)) {
       /* done with PBO so unmap it now */
       ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
    }
 
-   intel_check_front_buffer_rendering(intel);
+   intel_check_front_buffer_rendering(brw);
 
    return true;
 }
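
GL-facing hooks such as do_blit_bitmap() above still receive a gl_context and, during the transition, simply derive both wrappers at the top, passing brw to the converted helpers. A minimal sketch of that shape (illustration only; the function name is hypothetical):

   static bool
   example_accel_hook(struct gl_context *ctx)
   {
      struct brw_context *brw = brw_context(ctx);
      struct intel_context *intel = intel_context(ctx);

      intel_prepare_render(brw);     /* converted helpers take brw */

      if (intel->gen < 6)
         return false;               /* legacy fields still live on intel */

      return true;
   }
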
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_copy.c b/src/mesa/drivers/dri/i965/intel_pixel_copy.c
index 79dbeb20cd1cf74bc995661e29513ff39a2f17ad..632b9b045122af41569bb10f07e300adffc9a2dd 100644
@@ -52,6 +52,7 @@ do_blit_copypixels(struct gl_context * ctx,
                    GLsizei width, GLsizei height,
                    GLint dstx, GLint dsty, GLenum type)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct gl_framebuffer *read_fb = ctx->ReadBuffer;
@@ -142,7 +143,7 @@ do_blit_copypixels(struct gl_context * ctx,
       return false;
    }
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    intel_flush(&intel->ctx);
 
@@ -168,7 +169,7 @@ do_blit_copypixels(struct gl_context * ctx,
    dstx += srcx - orig_srcx;
    dsty += srcy - orig_srcy;
 
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
                            srcx, srcy, _mesa_is_winsys_fbo(read_fb),
                            draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
@@ -184,7 +185,7 @@ do_blit_copypixels(struct gl_context * ctx,
       ctx->Query.CurrentOcclusionObject->Result += width * height;
 
 out:
-   intel_check_front_buffer_rendering(intel);
+   intel_check_front_buffer_rendering(brw);
 
    DBG("%s: success\n", __FUNCTION__);
    return true;
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c
index 7ada13763102ddb178bcdc1cc60140b6b431d169..c1d5fb00eee735fdbdb5d2de912979643ff0c087 100644
@@ -75,6 +75,7 @@ do_blit_readpixels(struct gl_context * ctx,
                    GLenum format, GLenum type,
                    const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
    GLuint dst_offset;
@@ -124,25 +125,25 @@ do_blit_readpixels(struct gl_context * ctx,
    }
 
    dirty = intel->front_buffer_dirty;
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
    intel->front_buffer_dirty = dirty;
 
    all = (width * height * irb->mt->cpp == dst->Base.Size &&
          x == 0 && dst_offset == 0);
 
-   dst_buffer = intel_bufferobj_buffer(intel, dst,
+   dst_buffer = intel_bufferobj_buffer(brw, dst,
                                       all ? INTEL_WRITE_FULL :
                                       INTEL_WRITE_PART);
 
    struct intel_mipmap_tree *pbo_mt =
-      intel_miptree_create_for_bo(intel,
+      intel_miptree_create_for_bo(brw,
                                   dst_buffer,
                                   irb->mt->format,
                                   dst_offset,
                                   width, height,
                                   dst_stride, I915_TILING_NONE);
 
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            irb->mt, irb->mt_level, irb->mt_layer,
                            x, y, _mesa_is_winsys_fbo(ctx->ReadBuffer),
                            pbo_mt, 0, 0,
@@ -164,6 +165,7 @@ intelReadPixels(struct gl_context * ctx,
                 GLenum format, GLenum type,
                 const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    bool dirty;
 
@@ -182,7 +184,7 @@ intelReadPixels(struct gl_context * ctx,
    /* glReadPixels() wont dirty the front buffer, so reset the dirty
     * flag after calling intel_prepare_render(). */
    dirty = intel->front_buffer_dirty;
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
    intel->front_buffer_dirty = dirty;
 
    /* Update Mesa state before calling _mesa_readpixels().
index 252d356c18775a44bcc2f8576b5c3f3954588294..c122c46221e6b5918a629dacfca9fd76c7085d73 100644
@@ -157,14 +157,15 @@ intelDRI2Flush(__DRIdrawable *drawable)
 {
    GET_CURRENT_CONTEXT(ctx);
    struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    if (intel == NULL)
       return;
 
-   intel_resolve_for_dri2_flush(intel, drawable);
+   intel_resolve_for_dri2_flush(brw, drawable);
    intel->need_throttle = true;
 
    if (intel->batch.used)
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
 
    if (INTEL_DEBUG & DEBUG_AUB) {
       aub_dump_bmp(ctx);
@@ -283,14 +284,14 @@ intel_allocate_image(int dri_format, void *loaderPrivate)
  * Sets up a DRIImage structure to point to our shared image in a region
  */
 static void
-intel_setup_image_from_mipmap_tree(struct intel_context *intel, __DRIimage *image,
+intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                    struct intel_mipmap_tree *mt, GLuint level,
                                    GLuint zoffset)
 {
    unsigned int draw_x, draw_y;
    uint32_t mask_x, mask_y;
 
-   intel_miptree_make_shareable(intel, mt);
+   intel_miptree_make_shareable(brw, mt);
 
    intel_miptree_check_level_layer(mt, level, zoffset);
 
@@ -376,19 +377,19 @@ intel_create_image_from_renderbuffer(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate)
 {
    __DRIimage *image;
-   struct intel_context *intel = context->driverPrivate;
+   struct brw_context *brw = context->driverPrivate;
+   struct gl_context *ctx = &brw->intel.ctx;
    struct gl_renderbuffer *rb;
    struct intel_renderbuffer *irb;
 
-   rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
    if (!rb) {
-      _mesa_error(&intel->ctx,
-                 GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
       return NULL;
    }
 
    irb = intel_renderbuffer(rb);
-   intel_miptree_make_shareable(intel, irb->mt);
+   intel_miptree_make_shareable(brw, irb->mt);
    image = calloc(1, sizeof *image);
    if (image == NULL)
       return NULL;
@@ -414,7 +415,8 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
                                 void *loaderPrivate)
 {
    __DRIimage *image;
-   struct intel_context *intel = context->driverPrivate;
+   struct brw_context *brw = context->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    struct gl_texture_object *obj;
    struct intel_texture_object *iobj;
    GLuint face = 0;
@@ -453,7 +455,7 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
    image->internal_format = obj->Image[face][level]->InternalFormat;
    image->format = obj->Image[face][level]->TexFormat;
    image->data = loaderPrivate;
-   intel_setup_image_from_mipmap_tree(intel, image, iobj->mt, level, zoffset);
+   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
    image->dri_format = intel_dri_format(image->format);
    image->has_depthstencil = iobj->mt->stencil_mt? true : false;
    if (image->dri_format == MESA_FORMAT_NONE) {
index c9bafbd96eb8459acd4417f5beccff1dc4ac888a..b6dfc5b059e55e1ffb6b077f4daf426b800bdeff 100644
@@ -68,11 +68,12 @@ static void
 intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
               GLenum condition, GLbitfield flags)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_sync_object *sync = (struct intel_sync_object *)s;
 
    assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 
    sync->bo = intel->batch.bo;
    drm_intel_bo_reference(sync->bo);
index 8d07718811774eb52e71470ba362ab8b939930ef..df128e7a759066268bb6563d7e644b18119dc68d 100644
@@ -60,6 +60,7 @@ static GLboolean
 intel_alloc_texture_image_buffer(struct gl_context *ctx,
                                 struct gl_texture_image *image)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(image);
    struct gl_texture_object *texobj = image->TexObject;
@@ -90,7 +91,7 @@ intel_alloc_texture_image_buffer(struct gl_context *ctx,
           __FUNCTION__, texobj, image->Level,
           image->Width, image->Height, image->Depth, intel_texobj->mt);
    } else {
-      intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
+      intel_image->mt = intel_miptree_create_for_teximage(brw, intel_texobj,
                                                           intel_image,
                                                           false);
 
@@ -140,7 +141,7 @@ intel_map_texture_image(struct gl_context *ctx,
                        GLubyte **map,
                        GLint *stride)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(tex_image);
    struct intel_mipmap_tree *mt = intel_image->mt;
 
@@ -157,7 +158,7 @@ intel_map_texture_image(struct gl_context *ctx,
    if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
       slice = tex_image->Face;
 
-   intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
+   intel_miptree_map(brw, mt, tex_image->Level, slice, x, y, w, h, mode,
                     (void **)map, stride);
 }
 
@@ -165,14 +166,14 @@ static void
 intel_unmap_texture_image(struct gl_context *ctx,
                          struct gl_texture_image *tex_image, GLuint slice)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(tex_image);
    struct intel_mipmap_tree *mt = intel_image->mt;
 
    if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
       slice = tex_image->Face;
 
-   intel_miptree_unmap(intel, mt, tex_image->Level, slice);
+   intel_miptree_unmap(brw, mt, tex_image->Level, slice);
 }
 
 void
index 19385dba4a4b79f7a56c7fc894bcdabe7405960b..1a5b0e4dfc4110bfd0a2ff469285488e7610e732 100644
@@ -48,12 +48,12 @@ void intelSetTexBuffer2(__DRIcontext *pDRICtx,
                        GLint target, GLint format, __DRIdrawable *pDraw);
 
 struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
                                  struct intel_texture_object *intelObj,
                                  struct intel_texture_image *intelImage,
                                  bool expect_accelerated_upload);
 
-GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
+GLuint intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit);
 
 bool
 intel_texsubimage_tiled_memcpy(struct gl_context *ctx,
index 72a4431ebd59702f2a72f5206734eafdc438ba78..fece5a9a83031df05bdf8a55967106c8739d9487 100644
@@ -46,7 +46,7 @@
 
 
 static bool
-intel_copy_texsubimage(struct intel_context *intel,
+intel_copy_texsubimage(struct brw_context *brw,
                        struct intel_texture_image *intelImage,
                        GLint dstx, GLint dsty, GLint slice,
                        struct intel_renderbuffer *irb,
@@ -54,7 +54,7 @@ intel_copy_texsubimage(struct intel_context *intel,
 {
    const GLenum internalFormat = intelImage->base.Base.InternalFormat;
 
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
 
    /* glCopyTexSubImage() can be called on a multisampled renderbuffer (if
     * that renderbuffer is associated with the window system framebuffer),
@@ -75,7 +75,7 @@ intel_copy_texsubimage(struct intel_context *intel,
    }
 
    /* blit from src buffer to texture */
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            irb->mt, irb->mt_level, irb->mt_layer,
                            x, y, irb->Base.Base.Name == 0,
                            intelImage->mt, intelImage->base.Base.Level,
@@ -97,15 +97,16 @@ intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
                      GLint x, GLint y,
                      GLsizei width, GLsizei height)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
 
    /* Try BLORP first.  It can handle almost everything. */
-   if (brw_blorp_copytexsubimage(intel, rb, texImage, slice, x, y,
+   if (brw_blorp_copytexsubimage(brw, rb, texImage, slice, x, y,
                                  xoffset, yoffset, width, height))
       return;
 
    /* Next, try the BLT engine. */
-   if (intel_copy_texsubimage(intel,
+   if (intel_copy_texsubimage(brw,
                               intel_texture_image(texImage),
                               xoffset, yoffset, slice,
                               intel_renderbuffer(rb), x, y, width, height)) {
index 92f1ccede95429bff036a89a4739a12f141b1b2a..9e3f02c7497f452852d24b9066d9f55f8586084b 100644
@@ -30,7 +30,7 @@
  * miptree of that size.
  */
 struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
                                  struct intel_texture_object *intelObj,
                                  struct intel_texture_image *intelImage,
                                  bool expect_accelerated_upload)
@@ -91,7 +91,7 @@ intel_miptree_create_for_teximage(struct intel_context *intel,
       }
    }
 
-   return intel_miptree_create(intel,
+   return intel_miptree_create(brw,
                               intelObj->base.Target,
                               intelImage->base.Base.TexFormat,
                               firstLevel,
@@ -114,6 +114,7 @@ try_pbo_upload(struct gl_context *ctx,
 {
    struct intel_texture_image *intelImage = intel_texture_image(image);
    struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
    GLuint src_offset;
    drm_intel_bo *src_buffer;
@@ -150,7 +151,7 @@ try_pbo_upload(struct gl_context *ctx,
       return false;
    }
 
-   src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
+   src_buffer = intel_bufferobj_source(brw, pbo, 64, &src_offset);
    /* note: potential 64-bit ptr to 32-bit int cast */
    src_offset += (GLuint) (unsigned long) pixels;
 
@@ -158,7 +159,7 @@ try_pbo_upload(struct gl_context *ctx,
       _mesa_image_row_stride(unpack, image->Width, format, type);
 
    struct intel_mipmap_tree *pbo_mt =
-      intel_miptree_create_for_bo(intel,
+      intel_miptree_create_for_bo(brw,
                                   src_buffer,
                                   intelImage->mt->format,
                                   src_offset,
@@ -167,7 +168,7 @@ try_pbo_upload(struct gl_context *ctx,
    if (!pbo_mt)
       return false;
 
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            pbo_mt, 0, 0,
                            0, 0, false,
                            intelImage->mt, image->Level, image->Face,
@@ -253,7 +254,7 @@ intel_set_texture_image_region(struct gl_context *ctx,
 
    ctx->Driver.FreeTextureImageBuffer(ctx, image);
 
-   intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
+   intel_image->mt = intel_miptree_create_layout(brw, target, image->TexFormat,
                                                  0, 0,
                                                  width, height, 1,
                                                  true, 0 /* num_samples */);
@@ -294,7 +295,8 @@ intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
                   __DRIdrawable *dPriv)
 {
    struct gl_framebuffer *fb = dPriv->driverPrivate;
-   struct intel_context *intel = pDRICtx->driverPrivate;
+   struct brw_context *brw = pDRICtx->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct intel_texture_object *intelObj;
    struct intel_renderbuffer *rb;
@@ -335,7 +337,7 @@ intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
 
    _mesa_lock_texture(&intel->ctx, texObj);
    texImage = _mesa_get_tex_image(ctx, texObj, target, level);
-   intel_miptree_make_shareable(intel, rb->mt);
+   intel_miptree_make_shareable(brw, rb->mt);
    intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
                                   internalFormat, texFormat, 0,
                                   rb->mt->region->width,
index c18525c66d3a27c2cb15e39c22faadd0f5dcc941..e96c29a14368b630ecbddb885485984aa737d397 100644
@@ -51,6 +51,7 @@ intel_blit_texsubimage(struct gl_context * ctx,
                       GLenum format, GLenum type, const void *pixels,
                       const struct gl_pixelstore_attrib *packing)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *intelImage = intel_texture_image(texImage);
 
@@ -88,14 +89,14 @@ intel_blit_texsubimage(struct gl_context * ctx,
       return false;
 
    struct intel_mipmap_tree *temp_mt =
-      intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat,
+      intel_miptree_create(brw, GL_TEXTURE_2D, texImage->TexFormat,
                            0, 0,
                            width, height, 1,
                            false, 0, INTEL_MIPTREE_TILING_NONE);
    if (!temp_mt)
       goto err;
 
-   GLubyte *dst = intel_miptree_map_raw(intel, temp_mt);
+   GLubyte *dst = intel_miptree_map_raw(brw, temp_mt);
    if (!dst)
       goto err;
 
@@ -108,11 +109,11 @@ intel_blit_texsubimage(struct gl_context * ctx,
       _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
    }
 
-   intel_miptree_unmap_raw(intel, temp_mt);
+   intel_miptree_unmap_raw(brw, temp_mt);
 
    bool ret;
 
-   ret = intel_miptree_blit(intel,
+   ret = intel_miptree_blit(brw,
                             temp_mt, 0, 0,
                             0, 0, false,
                             intelImage->mt, texImage->Level, texImage->Face,
@@ -168,6 +169,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
                                const struct gl_pixelstore_attrib *packing,
                                bool for_glTexImage)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *image = intel_texture_image(texImage);
 
@@ -209,13 +211,13 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
    /* Since we are going to write raw data to the miptree, we need to resolve
     * any pending fast color clears before we start.
     */
-   intel_miptree_resolve_color(intel, image->mt);
+   intel_miptree_resolve_color(brw, image->mt);
 
    bo = image->mt->region->bo;
 
    if (drm_intel_bo_references(intel->batch.bo, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
    }
 
    if (unlikely(intel->perf_debug)) {
index e678d80cfce13049678c6dd5be858f6bea272518..3665119d084ca33a60dedd219bbf8d1dbbdeb74e 100644
@@ -38,8 +38,9 @@ intel_update_max_level(struct intel_texture_object *intelObj,
 /*  
  */
 GLuint
-intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
    struct intel_texture_object *intelObj = intel_texture_object(tObj);
@@ -95,7 +96,7 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
                  _mesa_get_format_name(firstImage->base.Base.TexFormat),
                  width, height, depth, tObj->BaseLevel, intelObj->_MaxLevel);
 
-      intelObj->mt = intel_miptree_create(intel,
+      intelObj->mt = intel_miptree_create(brw,
                                           intelObj->base.Target,
                                          firstImage->base.Base.TexFormat,
                                           tObj->BaseLevel,
@@ -122,7 +123,7 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
                 break;
 
          if (intelObj->mt != intelImage->mt) {
-            intel_miptree_copy_teximage(intel, intelImage, intelObj->mt,
+            intel_miptree_copy_teximage(brw, intelImage, intelObj->mt,
                                         false /* invalidate */);
          }
 
index 41a5ef55dbef1d350917903ae03293272f6cfbea..6259969e37918daf0443b00dec7110b89c38ffc5 100644
@@ -40,9 +40,9 @@ test_compact_instruction(struct brw_compile *p, struct brw_instruction src)
    if (brw_try_compact_instruction(p, &dst, &src)) {
       struct brw_instruction uncompacted;
 
-      brw_uncompact_instruction(intel, &uncompacted, &dst);
+      brw_uncompact_instruction(brw, &uncompacted, &dst);
       if (memcmp(&uncompacted, &src, sizeof(src))) {
-        brw_debug_compact_uncompact(intel, &src, &uncompacted);
+        brw_debug_compact_uncompact(brw, &src, &uncompacted);
         return false;
       }
    } else {