i965/vec4: Don't lose the force_writemask_all flag during CSE.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.h
index fde4177301895824b075ec9e5def20a1b5ce9c41..e025011e2ae7203b8af13aa299127a1dc5e2bbe7 100644 (file)
@@ -341,6 +341,7 @@ struct brw_stage_prog_data {
       uint32_t gather_texture_start;
       uint32_t ubo_start;
       uint32_t abo_start;
+      uint32_t image_start;
       uint32_t shader_time_start;
       /** @} */
    } binding_table;
@@ -621,6 +622,9 @@ struct brw_vs_prog_data {
 /** Max number of atomic counter buffer objects in a shader */
 #define BRW_MAX_ABO 16
 
+/** Max number of image uniforms in a shader */
+#define BRW_MAX_IMAGES 32
+
 /**
  * Max number of binding table entries used for stream output.
  *
@@ -653,6 +657,7 @@ struct brw_vs_prog_data {
                             BRW_MAX_TEX_UNIT * 2 + /* normal, gather */ \
                             12 + /* ubo */                              \
                             BRW_MAX_ABO +                               \
+                            BRW_MAX_IMAGES +                            \
                             2 /* shader time, pull constants */)
 
 #define SURF_INDEX_GEN6_SOL_BINDING(t) (t)
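The new image_start offset slots into the per-stage binding table the same way the existing ubo_start/abo_start blocks do. As a sketch of the intended use (the helper below is hypothetical and not part of this patch; only the struct fields and BRW_MAX_IMAGES come from the diff):

/* Hypothetical helper: map a shader image unit to its binding-table index,
 * following the same (block_start + unit) convention the other blocks use.
 */
static inline uint32_t
image_surf_index(const struct brw_stage_prog_data *prog_data, unsigned unit)
{
   assert(unit < BRW_MAX_IMAGES);
   return prog_data->binding_table.image_start + unit;
}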
@@ -856,7 +861,6 @@ struct intel_batchbuffer {
    drm_intel_bo *last_bo;
    /** BO for post-sync nonzero writes for gen6 workaround. */
    drm_intel_bo *workaround_bo;
-   bool need_workaround_flush;
 
    uint16_t emit, total;
    uint16_t used, reserved_space;
@@ -868,6 +872,8 @@ struct intel_batchbuffer {
    enum brw_gpu_ring ring;
    bool needs_sol_reset;
 
+   uint8_t pipe_controls_since_last_cs_stall;
+
    struct {
       uint16_t used;
       int reloc_count;
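The new counter exists to implement a hardware workaround that requires a CS stall at some interval of PIPE_CONTROL commands. A minimal sketch of how such a counter could be consumed (the helper name, the PIPE_CONTROL_CS_STALL flag test, and the interval of four are assumptions based on the Ivybridge "every 4th PIPE_CONTROL must have a CS stall" workaround, not spelled out in this hunk):

/* Sketch (assumed helper, not in this patch): count PIPE_CONTROLs emitted
 * since the last CS stall and force a stall on every fourth one.
 */
static bool
pipe_control_needs_cs_stall(struct intel_batchbuffer *batch, uint32_t flags)
{
   if (flags & PIPE_CONTROL_CS_STALL) {
      /* This PIPE_CONTROL already stalls; reset the counter. */
      batch->pipe_controls_since_last_cs_stall = 0;
      return false;
   }

   if (++batch->pipe_controls_since_last_cs_stall == 4) {
      batch->pipe_controls_since_last_cs_stall = 0;
      return true;
   }

   return false;
}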
@@ -957,15 +963,7 @@ struct brw_context
                                          struct gl_renderbuffer *rb,
                                          bool layered,
                                          unsigned unit);
-      void (*update_null_renderbuffer_surface)(struct brw_context *brw,
-                                              unsigned unit);
 
-      void (*create_raw_surface)(struct brw_context *brw,
-                                 drm_intel_bo *bo,
-                                 uint32_t offset,
-                                 uint32_t size,
-                                 uint32_t *out_offset,
-                                 bool rw);
       void (*emit_buffer_surface_state)(struct brw_context *brw,
                                         uint32_t *out_offset,
                                         drm_intel_bo *bo,
@@ -973,8 +971,12 @@ struct brw_context
                                         unsigned surface_format,
                                         unsigned buffer_size,
                                         unsigned pitch,
-                                        unsigned mocs,
                                         bool rw);
+      void (*emit_null_surface_state)(struct brw_context *brw,
+                                      unsigned width,
+                                      unsigned height,
+                                      unsigned samples,
+                                      uint32_t *out_offset);
 
       /**
        * Send the appropriate state packets to configure depth, stencil, and
@@ -1028,8 +1030,20 @@ struct brw_context
    bool front_buffer_dirty;
 
    /** Framerate throttling: @{ */
-   drm_intel_bo *first_post_swapbuffers_batch;
-   bool need_throttle;
+   drm_intel_bo *throttle_batch[2];
+
+   /* Limit the number of outstanding SwapBuffers by waiting for an earlier
+    * frame of rendering to complete.  This puts a very precise cap on the
+    * latency between input and output such that rendering never gets more
+    * than a frame behind the user.  (With the caveat that we are technically
+    * not using SwapBuffers itself as the barrier, but the first batch
+    * submitted afterwards, which may be immediately prior to the next
+    * SwapBuffers.)
+    */
+   bool need_swap_throttle;
+
+   /** General throttling, not caught by throttling between SwapBuffers */
+   bool need_flush_throttle;
    /** @} */
 
    GLuint stats_wm;
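Reading the comment together with the new fields, the throttle path presumably looks something like the sketch below (helper name assumed; throttle_batch[0] holds the first batch submitted after the latest SwapBuffers, throttle_batch[1] the equivalent batch from the frame before):

/* Sketch (assumed helper): waiting on the older of the two tracked batches
 * keeps rendering at most one frame ahead of the GPU.
 */
static void
throttle(struct brw_context *brw)
{
   if (brw->need_swap_throttle && brw->throttle_batch[0]) {
      if (brw->throttle_batch[1]) {
         drm_intel_bo_wait_rendering(brw->throttle_batch[1]);
         drm_intel_bo_unreference(brw->throttle_batch[1]);
      }
      brw->throttle_batch[1] = brw->throttle_batch[0];
      brw->throttle_batch[0] = NULL;
      brw->need_swap_throttle = false;
      /* A swap throttle subsumes any pending flush throttle. */
      brw->need_flush_throttle = false;
   }
}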
@@ -1062,6 +1076,10 @@ struct brw_context
 
    int gen;
    int gt;
+   /* GT revision. This will be -1 if the revision couldn't be determined
+    * (e.g., if the kernel doesn't support the query).
+    */
+   int revision;
 
    bool is_g4x;
    bool is_baytrail;
@@ -1184,6 +1202,8 @@ struct brw_context
     * for each pipeline stage.
     */
    int max_vs_threads;
+   int max_hs_threads;
+   int max_ds_threads;
    int max_gs_threads;
    int max_wm_threads;
 
@@ -1199,6 +1219,8 @@ struct brw_context
 
       GLuint min_vs_entries;    /* Minimum number of VS entries */
       GLuint max_vs_entries;   /* Maximum number of VS entries */
+      GLuint max_hs_entries;   /* Maximum number of HS entries */
+      GLuint max_ds_entries;   /* Maximum number of DS entries */
       GLuint max_gs_entries;   /* Maximum number of GS entries */
 
       GLuint nr_vs_entries;
@@ -1324,7 +1346,7 @@ struct brw_context
 
       /**
        * Buffer object used in place of multisampled null render targets on
-       * Gen6.  See brw_update_null_renderbuffer_surface().
+       * Gen6.  See brw_emit_null_surface_state().
        */
       drm_intel_bo *multisampled_null_render_target_bo;
       uint32_t fast_clear_op;
@@ -1381,7 +1403,7 @@ struct brw_context
    } perfmon;
 
    int num_atoms;
-   const struct brw_tracked_state **atoms;
+   const struct brw_tracked_state atoms[57];
 
    /* If (INTEL_DEBUG & DEBUG_BATCH) */
    struct {
@@ -1488,6 +1510,8 @@ void brw_meta_updownsample(struct brw_context *brw,
                            struct intel_mipmap_tree *dst);
 
 void brw_meta_fbo_stencil_blit(struct brw_context *brw,
+                               struct gl_framebuffer *read_fb,
+                               struct gl_framebuffer *draw_fb,
                                GLfloat srcX0, GLfloat srcY0,
                                GLfloat srcX1, GLfloat srcY1,
                                GLfloat dstX0, GLfloat dstY0,
@@ -1599,7 +1623,27 @@ gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx);
 /* brw_draw_upload.c */
 unsigned brw_get_vertex_surface_type(struct brw_context *brw,
                                      const struct gl_client_array *glarray);
-unsigned brw_get_index_type(GLenum type);
+
+static inline unsigned
+brw_get_index_type(GLenum type)
+{
+   assert((type == GL_UNSIGNED_BYTE)
+          || (type == GL_UNSIGNED_SHORT)
+          || (type == GL_UNSIGNED_INT));
+
+   /* The possible values for type are GL_UNSIGNED_BYTE (0x1401),
+    * GL_UNSIGNED_SHORT (0x1403), and GL_UNSIGNED_INT (0x1405) which we want
+    * to map to scale factors of 0, 1, and 2, respectively.  These scale
+    * factors are then left-shifted by 8 to be in the correct position in the
+    * CMD_INDEX_BUFFER packet.
+    *
+    * Subtracting 0x1401 gives 0, 2, and 4.  Shifting left by 7 afterwards
+    * gives 0x00000000, 0x00000100, and 0x00000200.  These just happen to be
+    * the values that need to be written in the CMD_INDEX_BUFFER packet.
+    */
+   return (type - 0x1401) << 7;
+}
+
 void brw_prepare_vertices(struct brw_context *brw);
 
 /* brw_wm_surface_state.c */
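A quick standalone check of the arithmetic in the new inline helper (a hypothetical test, not part of the patch):

#include <assert.h>
#include <GL/gl.h>

/* Verify that the (type - 0x1401) << 7 trick matches scale << 8 for each
 * legal index type. */
static void
check_index_type_encoding(void)
{
   assert(brw_get_index_type(GL_UNSIGNED_BYTE)  == 0 << 8);  /* scale 0 */
   assert(brw_get_index_type(GL_UNSIGNED_SHORT) == 1 << 8);  /* scale 1 */
   assert(brw_get_index_type(GL_UNSIGNED_INT)   == 2 << 8);  /* scale 2 */
}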
@@ -1692,6 +1736,8 @@ gen7_resume_transform_feedback(struct gl_context *ctx,
 /* brw_blorp_blit.cpp */
 GLbitfield
 brw_blorp_framebuffer(struct brw_context *brw,
+                      struct gl_framebuffer *readFb,
+                      struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter);
@@ -1884,33 +1930,6 @@ gen6_upload_push_constants(struct brw_context *brw,
                            struct brw_stage_state *stage_state,
                            enum aub_state_struct_type type);
 
-/* ================================================================
- * From linux kernel i386 header files, copes with odd sizes better
- * than COPY_DWORDS would:
- * XXX Put this in src/mesa/main/imports.h ???
- */
-#if defined(i386) || defined(__i386__)
-static inline void * __memcpy(void * to, const void * from, size_t n)
-{
-   int d0, d1, d2;
-   __asm__ __volatile__(
-      "rep ; movsl\n\t"
-      "testb $2,%b4\n\t"
-      "je 1f\n\t"
-      "movsw\n"
-      "1:\ttestb $1,%b4\n\t"
-      "je 2f\n\t"
-      "movsb\n"
-      "2:"
-      : "=&c" (d0), "=&D" (d1), "=&S" (d2)
-      :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
-      : "memory");
-   return (to);
-}
-#else
-#define __memcpy(a,b,c) memcpy(a,b,c)
-#endif
-
 #ifdef __cplusplus
 }
 #endif