i965: Update brw_wm_debug_recompile() for newer key entries.
[mesa.git] / src/mesa/drivers/dri/i965/intel_batchbuffer.h
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

#ifdef __cplusplus
extern "C" {
#endif

struct intel_batchbuffer;

void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                     enum brw_gpu_ring ring);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)
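
/* Usage sketch (not part of this header): requesting an output fence when
 * flushing, assuming a valid brw pointer is in scope and the usual
 * 0-on-success return convention.  The sync-file fd, if one is returned,
 * belongs to the caller:
 *
 *    int fence_fd = -1;
 *    if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd) == 0 &&
 *        fence_fd >= 0) {
 *       // wait on or hand off fence_fd, then close(fence_fd)
 *    }
 */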

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
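
/* Usage sketch (values are illustrative): copying a small dword-aligned
 * payload into the render ring batch.  `bytes` should be a multiple of 4,
 * since the batch is built in dwords:
 *
 *    static const uint32_t payload[2] = { 0, 0 };   // two MI_NOOP dwords
 *    intel_batchbuffer_data(brw, payload, sizeof(payload), RENDER_RING);
 */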

bool brw_batch_has_aperture_space(struct brw_context *brw,
                                  unsigned extra_space_in_bytes);

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);

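/* Number of dwords emitted into the batch so far.  map and map_next are
 * uint32_t pointers, so their difference is already a dword count.
 */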
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))

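/* Bit-cast a float to a uint32_t through a union, the usual C idiom for
 * type punning without violating strict aliasing.
 */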
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   *batch->map_next++ = dword;
   assert(batch->ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
   intel_batchbuffer_emit_dword(batch, float_as_int(f));
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   return (char *) p >= (char *) batch->state_map &&
          (char *) p < (char *) batch->state_map + batch->state_bo->size;
}

#define BEGIN_BATCH(n) do {                                 \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);          \
   uint32_t *__map = brw->batch.map_next;                   \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                             \
   intel_batchbuffer_begin(brw, (n), BLT_RING);             \
   uint32_t *__map = brw->batch.map_next;                   \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

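/* Emit a 32-bit relocation: brw_batch_reloc() records that the current
 * batch dword references `buf` and returns the presumed address to write.
 */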
#define OUT_RELOC(buf, flags, delta) do {                               \
   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
   uint32_t reloc =                                                     \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc);                                                    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, flags, delta) do {                             \
   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
   uint64_t reloc64 =                                                   \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc64);                                                  \
   OUT_BATCH(reloc64 >> 32);                                            \
} while (0)

#define ADVANCE_BATCH()                          \
   assert(__map == brw->batch.map_next);         \
   intel_batchbuffer_advance(brw);               \
} while (0)
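
/* Putting the macros together (a sketch; the opcode and buffer object are
 * hypothetical, not definitions from this driver): emit a three-dword
 * packet whose second dword is a relocated buffer address.
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_HYPOTHETICAL_OPCODE | (3 - 2));
 *    OUT_RELOC(some_bo, RELOC_WRITE, 0);
 *    OUT_BATCH(0);
 *    ADVANCE_BATCH();
 *
 * On Gen8+, where addresses are 48 bits, a packet reserves two dwords for
 * the address: use OUT_RELOC64() instead and count one extra dword in
 * BEGIN_BATCH().
 */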

#ifdef __cplusplus
}
#endif

#endif