/* [mesa.git] src/mesa/drivers/dri/i915/intel_batchbuffer.h */

#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 */
#define BATCH_RESERVED 24

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);

int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

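/* Wrapper that records the caller's file and line, so a failed flush can be
 * traced back to the code that triggered it.
 */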
#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes);

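/* Emit a relocation entry for 'buffer' into the batch: the dword written is
 * the buffer's presumed address plus 'offset', and the kernel adjusts it at
 * execbuffer time if the buffer ends up elsewhere.  The fenced variant
 * additionally requests a fence register, as needed for tiled buffers on
 * these generations.
 */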
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_batchbuffer_cached_advance(struct intel_context *intel);

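/* Reinterpret a float's bit pattern as a uint32_t through a union, so float
 * immediates can be written into the dword-based batch without violating
 * strict-aliasing rules.
 */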
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
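/* Bytes still available in the batch, after subtracting the reserved tail;
 * batch.used counts dwords, hence the multiply by 4.
 */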
static inline unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.bo->size - intel->batch.reserved_space)
      - intel->batch.used * 4;
}


static inline void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}

static inline void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

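/* Flush if fewer than sz bytes remain.  Note that a flush starts a fresh
 * batch, so callers must not hold offsets into the old batch across this
 * call.
 */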
static inline void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz)
{
#ifdef DEBUG
   assert(sz < intel->maxBatchSize - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}

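/* Record where this packet starts so intel_batchbuffer_advance() can verify
 * (in DEBUG builds) that exactly n dwords were emitted.
 */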
static inline void
intel_batchbuffer_begin(struct intel_context *intel, int n)
{
   intel_batchbuffer_require_space(intel, n * 4);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) intel;
#endif
}

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   intel_batchbuffer_emit_reloc(intel, buf,                             \
                                read_domains, write_domain, delta);     \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {   \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                      \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
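
/* Typical emit sequence (an illustrative sketch; the MI_FLUSH/MI_NOOP pair
 * here just stands in for whatever packet is being built):
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(MI_FLUSH);
 *    OUT_BATCH(MI_NOOP);
 *    ADVANCE_BATCH();
 *
 * BEGIN_BATCH(n) makes sure n dwords fit (flushing first if necessary) and
 * records the packet start; ADVANCE_BATCH() checks, in DEBUG builds, that
 * exactly n dwords were emitted.
 */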

#endif