r600g/llvm: Don't feed LLVM output through r600_bytecode_build()
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.h
index 6b7c053760460d2613996326ce104d6a73541a25..bb4e429aeb203ea225f4895263c4764ff1e21a08 100644 (file)
 #include "util/u_blitter.h"
 #include "util/u_slab.h"
 #include "util/u_suballoc.h"
-#include "r600.h"
+#include "util/u_double_list.h"
+#include "util/u_transfer.h"
 #include "r600_llvm.h"
 #include "r600_public.h"
 #include "r600_resource.h"
-#include "evergreen_compute.h"
 
-#define R600_NUM_ATOMS 37
+#define R600_NUM_ATOMS 41
 
-#define R600_TRACE_CS 0
+/* the number of CS dwords for flushing and drawing */
+#define R600_MAX_FLUSH_CS_DWORDS       16
+#define R600_MAX_DRAW_CS_DWORDS                34
+#define R600_TRACE_CS_DWORDS           7
 
-#define R600_MAX_USER_CONST_BUFFERS 1
-#define R600_MAX_DRIVER_CONST_BUFFERS 2
+#define R600_MAX_USER_CONST_BUFFERS 13
+#define R600_MAX_DRIVER_CONST_BUFFERS 3
 #define R600_MAX_CONST_BUFFERS (R600_MAX_USER_CONST_BUFFERS + R600_MAX_DRIVER_CONST_BUFFERS)
 
 /* start driver buffers after user buffers */
 #define R600_UCP_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS)
 #define R600_TXQ_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 1)
+#define R600_BUFFER_INFO_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 2)
 
 #define R600_MAX_CONST_BUFFER_SIZE 4096
 
 
 #define R600_MAP_BUFFER_ALIGNMENT 64
 
+#define R600_ERR(fmt, args...) \
+       fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
+
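For illustration, a hypothetical call site for R600_ERR; the "EE file:line function - " prefix comes from the macro itself, while the message and the "size" variable are made up:

/* Hypothetical usage; "size" is an assumed local variable. */
R600_ERR("failed to map buffer of size %u\n", size);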
+#define R600_CONTEXT_INVAL_READ_CACHES         (1 << 0)
+#define R600_CONTEXT_STREAMOUT_FLUSH           (1 << 1)
+#define R600_CONTEXT_WAIT_3D_IDLE              (1 << 2)
+#define R600_CONTEXT_WAIT_CP_DMA_IDLE          (1 << 3)
+#define R600_CONTEXT_FLUSH_AND_INV             (1 << 4)
+#define R600_CONTEXT_FLUSH_AND_INV_CB_META     (1 << 5)
+#define R600_CONTEXT_PS_PARTIAL_FLUSH          (1 << 6)
+#define R600_CONTEXT_FLUSH_AND_INV_DB_META      (1 << 7)
+
+#define R600_QUERY_DRAW_CALLS          (PIPE_QUERY_DRIVER_SPECIFIC + 0)
+#define R600_QUERY_REQUESTED_VRAM      (PIPE_QUERY_DRIVER_SPECIFIC + 1)
+#define R600_QUERY_REQUESTED_GTT       (PIPE_QUERY_DRIVER_SPECIFIC + 2)
+#define R600_QUERY_BUFFER_WAIT_TIME    (PIPE_QUERY_DRIVER_SPECIFIC + 3)
+
+struct r600_context;
 struct r600_bytecode;
 struct r600_shader_key;
 
@@ -126,10 +148,6 @@ struct r600_vgt_state {
        struct r600_atom atom;
        uint32_t vgt_multi_prim_ib_reset_en;
        uint32_t vgt_multi_prim_ib_reset_indx;
-};
-
-struct r600_vgt2_state {
-       struct r600_atom atom;
        uint32_t vgt_indx_offset;
 };
 
@@ -187,11 +205,6 @@ struct r600_viewport_state {
        struct pipe_viewport_state state;
 };
 
-struct compute_memory_pool;
-void compute_memory_pool_delete(struct compute_memory_pool* pool);
-struct compute_memory_pool* compute_memory_pool_new(
-       struct r600_screen *rscreen);
-
 struct r600_pipe_fences {
        struct r600_resource            *bo;
        unsigned                        *data;
@@ -219,16 +232,57 @@ enum r600_msaa_texture_mode {
        MSAA_TEXTURE_COMPRESSED
 };
 
+typedef boolean (*r600g_dma_blit_t)(struct pipe_context *ctx,
+                               struct pipe_resource *dst,
+                               unsigned dst_level,
+                               unsigned dst_x, unsigned dst_y, unsigned dst_z,
+                               struct pipe_resource *src,
+                               unsigned src_level,
+                               const struct pipe_box *src_box);
+
+/* logging */
+#define DBG_TEX_DEPTH          (1 << 0)
+#define DBG_COMPUTE            (1 << 1)
+#define DBG_VM                 (1 << 2)
+#define DBG_TRACE_CS           (1 << 3)
+/* shaders */
+#define DBG_FS                 (1 << 8)
+#define DBG_VS                 (1 << 9)
+#define DBG_GS                 (1 << 10)
+#define DBG_PS                 (1 << 11)
+#define DBG_CS                 (1 << 12)
+/* features */
+#define DBG_NO_HYPERZ          (1 << 16)
+#define DBG_NO_LLVM            (1 << 17)
+#define DBG_NO_CP_DMA          (1 << 18)
+#define DBG_NO_ASYNC_DMA       (1 << 19)
+#define DBG_NO_DISCARD_RANGE   (1 << 20)
+/* shader backend */
+#define DBG_SB                 (1 << 21)
+#define DBG_SB_CS              (1 << 22)
+#define DBG_SB_DRY_RUN         (1 << 23)
+#define DBG_SB_STAT            (1 << 24)
+#define DBG_SB_DUMP            (1 << 25)
+#define DBG_SB_NO_FALLBACK     (1 << 26)
+#define DBG_SB_DISASM          (1 << 27)
+
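These bits land in the debug_flags field added to struct r600_screen below; a minimal sketch of testing one, assuming an rscreen pointer is in scope:

/* Sketch only: rscreen is an assumed struct r600_screen pointer. */
if (rscreen->debug_flags & DBG_NO_LLVM) {
        /* Skip the LLVM backend and use the default shader path instead. */
}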
+struct r600_tiling_info {
+       unsigned num_channels;
+       unsigned num_banks;
+       unsigned group_bytes;
+};
+
 struct r600_screen {
        struct pipe_screen              screen;
        struct radeon_winsys            *ws;
+       unsigned                        debug_flags;
        unsigned                        family;
        enum chip_class                 chip_class;
        struct radeon_info              info;
        bool                            has_streamout;
        bool                            has_msaa;
+       bool                            has_cp_dma;
        enum r600_msaa_texture_mode     msaa_texture_support;
-       bool                            use_hyperz;
        struct r600_tiling_info         tiling_info;
        struct r600_pipe_fences         fences;
 
@@ -237,11 +291,15 @@ struct r600_screen {
         * XXX: Not sure if this is the best place for global_pool.  Also,
         * it's not thread safe, so it won't work with multiple contexts. */
        struct compute_memory_pool *global_pool;
-#if R600_TRACE_CS
        struct r600_resource            *trace_bo;
        uint32_t                        *trace_ptr;
        unsigned                        cs_count;
-#endif
+       r600g_dma_blit_t                dma_blit;
+
+       /* Auxiliary context. Mainly used to initialize resources.
+        * It must be locked prior to use and flushed before unlocking. */
+       struct pipe_context             *aux_context;
+       pipe_mutex                      aux_context_lock;
 };
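The aux_context comment above describes a lock/use/flush/unlock discipline; a rough sketch of a caller, where the helper name is invented and the exact flush() signature is an assumption for this Mesa version:

/* Sketch only: init_resource_contents() is a made-up helper. */
pipe_mutex_lock(rscreen->aux_context_lock);
init_resource_contents(rscreen->aux_context, res);
rscreen->aux_context->flush(rscreen->aux_context, NULL, 0); /* flush before unlocking */
pipe_mutex_unlock(rscreen->aux_context_lock);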
 
 struct r600_pipe_sampler_view {
@@ -288,7 +346,8 @@ struct r600_dsa_state {
        unsigned                        alpha_ref;
        ubyte                           valuemask[2];
        ubyte                           writemask[2];
-       unsigned                        sx_alpha_test_control;
+       unsigned                        zwritemask;
+       unsigned                        sx_alpha_test_control;
 };
 
 struct r600_pipe_shader;
@@ -330,6 +389,7 @@ struct r600_samplerview_state {
        uint32_t                        compressed_depthtex_mask; /* which textures are depth */
        uint32_t                        compressed_colortex_mask;
        boolean                         dirty_txq_constants;
+       boolean                         dirty_buffer_constants;
 };
 
 struct r600_sampler_states {
@@ -347,6 +407,8 @@ struct r600_textures_info {
 
        /* cube array txq workaround */
        uint32_t                        *txq_constants;
+       /* buffer related workarounds */
+       uint32_t                        *buffer_constants;
 };
 
 struct r600_fence {
@@ -402,13 +464,81 @@ struct r600_fetch_shader {
        unsigned                        offset;
 };
 
+struct r600_shader_state {
+       struct r600_atom                atom;
+       struct r600_pipe_shader_selector *shader;
+};
+
+struct r600_query_buffer {
+       /* The buffer where query results are stored. */
+       struct r600_resource                    *buf;
+       /* Offset of the next free result after current query data */
+       unsigned                                results_end;
+       /* If a query buffer is full, a new buffer is created and the old one
+        * is put in here. When we calculate the result, we sum up the samples
+        * from all buffers. */
+       struct r600_query_buffer                *previous;
+};
+
+struct r600_query {
+       /* The query buffer and how many results are in it. */
+       struct r600_query_buffer                buffer;
+       /* The type of query */
+       unsigned                                type;
+       /* Size of the result in memory for both begin_query and end_query;
+        * this can be one or two numbers, or it could even be the size of a structure. */
+       unsigned                                result_size;
+       /* The number of dwords for begin_query or end_query. */
+       unsigned                                num_cs_dw;
+       /* linked list of queries */
+       struct list_head                        list;
+       /* for custom non-GPU queries */
+       uint64_t begin_result;
+       uint64_t end_result;
+};
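Per the comments above, a full query buffer is chained through previous and the result is summed over the whole chain; a rough sketch of that walk, with the per-buffer decode helper invented:

/* Sketch only: accumulate_buffer_results() is a made-up helper. */
static uint64_t sum_query_results(struct r600_query *query)
{
        struct r600_query_buffer *qbuf;
        uint64_t result = 0;

        for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous)
                result += accumulate_buffer_results(qbuf);
        return result;
}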
+
+struct r600_so_target {
+       struct pipe_stream_output_target b;
+
+       /* The buffer where BUFFER_FILLED_SIZE is stored. */
+       struct r600_resource    *buf_filled_size;
+       unsigned                buf_filled_size_offset;
+
+       unsigned                stride_in_dw;
+       unsigned                so_index;
+};
+
+struct r600_streamout {
+       struct r600_atom                begin_atom;
+       bool                            begin_emitted;
+       unsigned                        num_dw_for_end;
+
+       unsigned                        enabled_mask;
+       unsigned                        num_targets;
+       struct r600_so_target           *targets[PIPE_MAX_SO_BUFFERS];
+
+       unsigned                        append_bitmask;
+       bool                            suspended;
+};
+
+struct r600_ring {
+       struct radeon_winsys_cs         *cs;
+       bool                            flushing;
+       void (*flush)(void *ctx, unsigned flags);
+};
+
+struct r600_rings {
+       struct r600_ring                gfx;
+       struct r600_ring                dma;
+};
+
 struct r600_context {
        struct pipe_context             context;
        struct r600_screen              *screen;
        struct radeon_winsys            *ws;
-       struct radeon_winsys_cs         *cs;
+       struct r600_rings               rings;
        struct blitter_context          *blitter;
-       struct u_upload_mgr             *uploader;
+       struct u_upload_mgr             *uploader;
        struct u_suballocator           *allocator_so_filled_size;
        struct u_suballocator           *allocator_fetch_shader;
        struct util_slab_mempool        pool_transfers;
@@ -423,6 +553,10 @@ struct r600_context {
        unsigned                        backend_mask;
        unsigned                        max_db; /* for OQ */
 
+       /* current unaccounted memory usage */
+       uint64_t                        vram;
+       uint64_t                        gtt;
+
        /* Miscellaneous state objects. */
        void                            *custom_dsa_flush;
        void                            *custom_blend_resolve;
@@ -464,10 +598,11 @@ struct r600_context {
        struct r600_config_state        config_state;
        struct r600_stencil_ref_state   stencil_ref;
        struct r600_vgt_state           vgt_state;
-       struct r600_vgt2_state          vgt2_state;
        struct r600_viewport_state      viewport;
        /* Shaders and shader resources. */
        struct r600_cso_state           vertex_fetch_shader;
+       struct r600_shader_state        vertex_shader;
+       struct r600_shader_state        pixel_shader;
        struct r600_cs_shader_state     cs_shader_state;
        struct r600_constbuf_state      constbuf_state[PIPE_SHADER_TYPES];
        struct r600_textures_info       samplers[PIPE_SHADER_TYPES];
@@ -475,16 +610,18 @@ struct r600_context {
        struct r600_vertexbuf_state     vertex_buffer_state;
        /** Vertex buffers for compute shaders */
        struct r600_vertexbuf_state     cs_vertex_buffer_state;
+       struct r600_streamout           streamout;
 
        /* Additional context states. */
        unsigned                        flags;
        unsigned                        compute_cb_target_mask;
-       struct r600_pipe_shader_selector        *ps_shader;
-       struct r600_pipe_shader_selector        *vs_shader;
+       struct r600_pipe_shader_selector *ps_shader;
+       struct r600_pipe_shader_selector *vs_shader;
        struct r600_rasterizer_state    *rasterizer;
        bool                            alpha_to_one;
        bool                            force_blend_disable;
        boolean                         dual_src_blend;
+       unsigned                        zwritemask;
 
        /* Index buffer. */
        struct pipe_index_buffer        index_buffer;
@@ -496,6 +633,7 @@ struct r600_context {
        /* Queries. */
        /* The list of active queries. Only one query of each type can be active. */
        int                             num_occlusion_queries;
+       int                             num_pipelinestat_queries;
        /* Keep track of non-timer queries, because they should be suspended
         * during context flushing.
         * The timer queries (TIME_ELAPSED) shouldn't be suspended. */
@@ -503,27 +641,15 @@ struct r600_context {
        unsigned                        num_cs_dw_nontimer_queries_suspend;
        /* If queries have been suspended. */
        bool                            nontimer_queries_suspended;
+       unsigned                        num_draw_calls;
 
        /* Render condition. */
        struct pipe_query               *current_render_cond;
        unsigned                        current_render_cond_mode;
        boolean                         predicate_drawing;
 
-       /* Streamout state. */
-       unsigned                        num_cs_dw_streamout_end;
-       unsigned                        num_so_targets;
-       struct r600_so_target           *so_targets[PIPE_MAX_SO_BUFFERS];
-       boolean                         streamout_start;
-       unsigned                        streamout_append_bitmask;
-       bool                            streamout_suspended;
-
-       /* Deprecated state management. */
-       struct r600_range               *range;
-       unsigned                        nblocks;
-       struct r600_block               **blocks;
-       struct list_head                dirty;
-       struct list_head                enable_list;
-       unsigned                        pm4_dirty_cdwords;
+       void                            *sb_context;
+       struct r600_isa         *isa;
 };
 
 static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,
@@ -534,19 +660,15 @@ static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,
        cs->cdw += cb->num_dw;
 }
 
-#if R600_TRACE_CS
 void r600_trace_emit(struct r600_context *rctx);
-#endif
 
 static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
 {
        atom->emit(rctx, atom);
        atom->dirty = false;
-#if R600_TRACE_CS
        if (rctx->screen->trace_bo) {
                r600_trace_emit(rctx);
        }
-#endif
 }
 
 static INLINE void r600_set_cso_state(struct r600_cso_state *state, void *cso)
@@ -563,6 +685,12 @@ static INLINE void r600_set_cso_state_with_cb(struct r600_cso_state *state, void
        r600_set_cso_state(state, cso);
 }
 
+/* compute_memory_pool.c */
+struct compute_memory_pool;
+void compute_memory_pool_delete(struct compute_memory_pool* pool);
+struct compute_memory_pool* compute_memory_pool_new(
+       struct r600_screen *rscreen);
+
 /* evergreen_state.c */
 struct pipe_sampler_view *
 evergreen_create_sampler_view_custom(struct pipe_context *ctx,
@@ -580,8 +708,8 @@ void cayman_init_common_regs(struct r600_command_buffer *cb,
 
 void evergreen_init_state_functions(struct r600_context *rctx);
 void evergreen_init_atom_start_cs(struct r600_context *rctx);
-void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
-void evergreen_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
 void *evergreen_create_db_flush_dsa(struct r600_context *rctx);
 void *evergreen_create_resolve_blend(struct r600_context *rctx);
 void *evergreen_create_decompress_blend(struct r600_context *rctx);
@@ -598,9 +726,10 @@ void evergreen_init_color_surface_rat(struct r600_context *rctx,
 void evergreen_update_db_shader_control(struct r600_context * rctx);
 
 /* r600_blit.c */
-void r600_copy_buffer(struct pipe_context *ctx, struct
-                     pipe_resource *dst, unsigned dstx,
+void r600_copy_buffer(struct pipe_context *ctx, struct pipe_resource *dst, unsigned dstx,
                      struct pipe_resource *src, const struct pipe_box *src_box);
+void r600_screen_clear_buffer(struct r600_screen *rscreen, struct pipe_resource *dst,
+                             unsigned offset, unsigned size, unsigned char value);
 void r600_init_blit_functions(struct r600_context *rctx);
 void r600_blit_decompress_depth(struct pipe_context *ctx,
                struct r600_texture *texture,
@@ -623,8 +752,14 @@ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                         unsigned alignment);
 
 /* r600_pipe.c */
-void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
-               unsigned flags);
+boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
+                                       struct radeon_winsys_cs_handle *buf,
+                                       enum radeon_bo_usage usage);
+void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
+                                       struct r600_resource *resource,
+                                       unsigned usage);
+const char *r600_llvm_gpu_string(enum radeon_family family);
+
 
 /* r600_query.c */
 void r600_init_query_functions(struct r600_context *rctx);
@@ -652,8 +787,8 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
                                unsigned width_first_level, unsigned height_first_level);
 void r600_init_state_functions(struct r600_context *rctx);
 void r600_init_atom_start_cs(struct r600_context *rctx);
-void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
-void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
 void *r600_create_db_flush_dsa(struct r600_context *rctx);
 void *r600_create_resolve_blend(struct r600_context *rctx);
 void *r700_create_resolve_blend(struct r600_context *rctx);
@@ -679,16 +814,71 @@ struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
                                                const struct pipe_surface *templ,
                                                unsigned width, unsigned height);
 
+unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
+                                  const unsigned char *swizzle_view,
+                                  boolean vtx);
+
+/* r600_hw_context.c */
+void r600_get_backend_mask(struct r600_context *ctx);
+void r600_context_flush(struct r600_context *ctx, unsigned flags);
+void r600_begin_new_cs(struct r600_context *ctx);
+void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
+                             unsigned offset, unsigned value);
+void r600_flush_emit(struct r600_context *ctx);
+void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
+void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw);
+void r600_cp_dma_copy_buffer(struct r600_context *rctx,
+                            struct pipe_resource *dst, uint64_t dst_offset,
+                            struct pipe_resource *src, uint64_t src_offset,
+                            unsigned size);
+void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
+                                  struct pipe_resource *dst, uint64_t offset,
+                                  unsigned size, uint32_t clear_value);
+void r600_dma_copy(struct r600_context *rctx,
+               struct pipe_resource *dst,
+               struct pipe_resource *src,
+               uint64_t dst_offset,
+               uint64_t src_offset,
+               uint64_t size);
+boolean r600_dma_blit(struct pipe_context *ctx,
+                       struct pipe_resource *dst,
+                       unsigned dst_level,
+                       unsigned dst_x, unsigned dst_y, unsigned dst_z,
+                       struct pipe_resource *src,
+                       unsigned src_level,
+                       const struct pipe_box *src_box);
+void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom);
+void r600_emit_streamout_end(struct r600_context *ctx);
+
+/*
+ * evergreen_hw_context.c
+ */
+void evergreen_flush_vgt_streamout(struct r600_context *ctx);
+void evergreen_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit);
+void evergreen_dma_copy(struct r600_context *rctx,
+               struct pipe_resource *dst,
+               struct pipe_resource *src,
+               uint64_t dst_offset,
+               uint64_t src_offset,
+               uint64_t size);
+boolean evergreen_dma_blit(struct pipe_context *ctx,
+                       struct pipe_resource *dst,
+                       unsigned dst_level,
+                       unsigned dst_x, unsigned dst_y, unsigned dst_z,
+                       struct pipe_resource *src,
+                       unsigned src_level,
+                       const struct pipe_box *src_box);
+
 /* r600_state_common.c */
 void r600_init_common_state_functions(struct r600_context *rctx);
 void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom);
-void r600_emit_vgt2_state(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom);
 void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom);
+void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a);
 void r600_init_atom(struct r600_context *rctx, struct r600_atom *atom, unsigned id,
                    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
                    unsigned num_dw);
@@ -698,6 +888,7 @@ void r600_sampler_views_dirty(struct r600_context *rctx,
 void r600_sampler_states_dirty(struct r600_context *rctx,
                               struct r600_sampler_states *state);
 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state);
+void r600_streamout_buffers_dirty(struct r600_context *rctx);
 void r600_draw_rectangle(struct blitter_context *blitter,
                         int x1, int y1, int x2, int y2, float depth,
                         enum blitter_attrib_type type, const union pipe_color_union *attrib);
@@ -709,6 +900,21 @@ unsigned r600_tex_mipfilter(unsigned filter);
 unsigned r600_tex_compare(unsigned compare);
 bool sampler_state_needs_border_color(const struct pipe_sampler_state *state);
 
+/* r600_uvd.c */
+struct pipe_video_decoder *r600_uvd_create_decoder(struct pipe_context *context,
+                                                   enum pipe_video_profile profile,
+                                                   enum pipe_video_entrypoint entrypoint,
+                                                   enum pipe_video_chroma_format chroma_format,
+                                                   unsigned width, unsigned height,
+                                                  unsigned max_references, bool expect_chunked_decode);
+
+struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe,
+                                                  const struct pipe_video_buffer *tmpl);
+
+int r600_uvd_get_video_param(struct pipe_screen *screen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param);
+
 /*
  * Helpers for building command buffers
  */
@@ -740,6 +946,13 @@ static INLINE void r600_store_value(struct r600_command_buffer *cb, unsigned val
        cb->buf[cb->num_dw++] = value;
 }
 
+static INLINE void r600_store_array(struct r600_command_buffer *cb, unsigned num, unsigned *ptr)
+{
+       assert(cb->num_dw+num <= cb->max_num_dw);
+       memcpy(&cb->buf[cb->num_dw], ptr, num * sizeof(ptr[0]));
+       cb->num_dw += num;
+}
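A brief sketch of how these store helpers combine when pre-building a command buffer; the register offset and values are placeholders, and r600_store_config_reg_seq() is declared just below:

/* Sketch only: "reg" is assumed to be a CONFIG-range register offset. */
r600_store_config_reg_seq(cb, reg, 2);  /* header for two consecutive registers */
r600_store_value(cb, value0);           /* written to reg */
r600_store_value(cb, value1);           /* written to reg + 4 */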
+
 static INLINE void r600_store_config_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
 {
        assert(reg < R600_CONTEXT_REG_OFFSET);
@@ -828,12 +1041,27 @@ void r600_release_command_buffer(struct r600_command_buffer *cb);
 /*
  * Helpers for emitting state into a command stream directly.
  */
-
-static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
+static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx,
+                                            struct r600_ring *ring,
+                                            struct r600_resource *rbo,
                                             enum radeon_bo_usage usage)
 {
        assert(usage);
-       return ctx->ws->cs_add_reloc(ctx->cs, rbo->cs_buf, usage, rbo->domains) * 4;
+       /* Make sure that all previous uses of the other ring are flushed, so
+        * everything looks serialized from the driver's point of view.
+        */
+       if (!ring->flushing) {
+               if (ring == &ctx->rings.gfx) {
+                       if (ctx->rings.dma.cs) {
+                               /* flush dma ring */
+                               ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+                       }
+               } else {
+                       /* flush gfx ring */
+                       ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+               }
+       }
+       return ctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage, rbo->domains) * 4;
 }
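For context, the relocation index returned above is typically emitted right after a NOP packet; a sketch assuming the PKT3/PKT3_NOP macros and RADEON_USAGE_READ from the r600/winsys headers:

/* Sketch only: typical relocation-emit pattern in r600g-style code. */
r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
r600_write_value(cs, r600_context_bo_reloc(ctx, &ctx->rings.gfx,
                                           rbuffer, RADEON_USAGE_READ));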
 
 static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value)
@@ -897,6 +1125,15 @@ static INLINE void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, u
        r600_write_value(cs, value);
 }
 
+static INLINE void r600_write_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag)
+{
+       if (flag & RADEON_CP_PACKET3_COMPUTE_MODE) {
+               r600_write_compute_context_reg(cs, reg, value);
+       } else {
+               r600_write_context_reg(cs, reg, value);
+       }
+}
+
 static INLINE void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
 {
        r600_write_ctl_const_seq(cs, reg, 1);
@@ -936,19 +1173,27 @@ static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, struct pipe_
        return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
 }
 
-static INLINE unsigned u_max_layer(struct pipe_resource *r, unsigned level)
+static INLINE void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
 {
-       switch (r->target) {
-       case PIPE_TEXTURE_CUBE:
-               return 6 - 1;
-       case PIPE_TEXTURE_3D:
-               return u_minify(r->depth0, level) - 1;
-       case PIPE_TEXTURE_1D_ARRAY:
-       case PIPE_TEXTURE_2D_ARRAY:
-       case PIPE_TEXTURE_CUBE_ARRAY:
-               return r->array_size - 1;
-       default:
-               return 0;
+       struct r600_context *rctx = (struct r600_context *)ctx;
+       struct r600_resource *rr = (struct r600_resource *)r;
+
+       if (r == NULL) {
+               return;
+       }
+
+       /*
+        * The idea is to compute a gross estimate of the memory requirement
+        * of each draw call. After each draw call, memory is accounted
+        * precisely, so the uncertainty only covers the current draw call.
+        * In practice this gives a very good estimate (+/- 10% of the target
+        * memory limit).
+        */
+       if (rr->domains & RADEON_DOMAIN_GTT) {
+               rctx->gtt += rr->buf->size;
+       }
+       if (rr->domains & RADEON_DOMAIN_VRAM) {
+               rctx->vram += rr->buf->size;
        }
 }
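As a usage note, the binding paths would presumably call this on each newly referenced resource so the estimate above stays current; a purely illustrative sketch:

/* Sketch only: illustrative bind path, not actual driver code. */
static void bind_buffer_example(struct pipe_context *ctx, struct pipe_resource *buf)
{
        r600_context_add_resource_size(ctx, buf);
        /* ... reference the resource and update the bound state here ... */
}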