iris: Allow inlining of require/get_command_space
[mesa.git] / src / gallium / drivers / iris / iris_batch.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BATCH_DOT_H
#define IRIS_BATCH_DOT_H

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include "i915_drm.h"
#include "common/gen_decoder.h"
#include "iris_binder.h"

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* Our target batch size - flush approximately at this point. */
#define BATCH_SZ (20 * 1024)

struct iris_address {
   /** The BO the address lies within, or NULL for an absolute address. */
   struct iris_bo *bo;
   /** Byte offset within the BO (the full address itself if bo is NULL). */
   uint64_t offset;
   /** True if the GPU may write through this address. */
   bool write;
};
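
/*
 * Illustrative sketch (not from the driver): a read-only address 256 bytes
 * into a hypothetical buffer "params_bo" could be described as:
 *
 *    struct iris_address addr = {
 *       .bo = params_bo,
 *       .offset = 256,
 *       .write = false,
 *    };
 */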

struct iris_batch {
   struct iris_screen *screen;
   struct iris_vtable *vtbl;
   struct pipe_debug_callback *dbg;

   /** Current batchbuffer being queued up. */
   struct iris_bo *bo;
   void *map;
   void *map_next;
   /** Size of the primary batch if we've moved on to a secondary. */
   unsigned primary_batch_size;

   /** Last BO submitted to the hardware. Used for glFinish(). */
   struct iris_bo *last_bo;

   uint32_t hw_ctx_id;

   /** Which engine this batch targets - an I915_EXEC_RING_MASK value. */
   uint8_t engine;

   /** The validation list */
   struct drm_i915_gem_exec_object2 *validation_list;
   struct iris_bo **exec_bos;
   int exec_count;
   int exec_array_size;

   /** The amount of aperture space (in bytes) used by all exec_bos */
   int aperture_space;

   /** Binder (containing binding tables) */
   struct iris_binder binder;

   struct {
      /**
       * Set of struct iris_bo * that have been rendered to within this
       * batchbuffer and would need flushing before being used from another
       * cache domain that isn't coherent with it (e.g. the sampler).
       */
      struct hash_table *render;

      /**
       * Set of struct iris_bo * that have been used as a depth buffer
       * within this batchbuffer and would need flushing before being used
       * from another cache domain that isn't coherent with it (e.g. the
       * sampler).
       */
      struct set *depth;
   } cache;

   /** Map from batch offset to iris_alloc_state data (with DEBUG_BATCH) */
   // XXX: unused
   struct hash_table *state_sizes;
   struct gen_batch_decode_ctx decoder;

   /** Have we emitted any draw calls to this batch? */
   bool contains_draw;
};

void iris_init_batch(struct iris_batch *batch,
                     struct iris_screen *screen,
                     struct iris_vtable *vtbl,
                     struct pipe_debug_callback *dbg,
                     uint8_t ring);
void iris_chain_to_new_batch(struct iris_batch *batch);
void iris_batch_free(struct iris_batch *batch);
void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);
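
/*
 * Illustrative sketch (not from the driver): a draw call can pass a
 * conservative worst-case estimate of its command size up front, so its
 * commands are never split across two batch buffers:
 *
 *    iris_batch_maybe_flush(batch, 1500);
 */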

int _iris_batch_flush_fence(struct iris_batch *batch,
                            int in_fence_fd, int *out_fence_fd,
                            const char *file, int line);

#define iris_batch_flush_fence(batch, in_fence_fd, out_fence_fd) \
   _iris_batch_flush_fence((batch), (in_fence_fd), (out_fence_fd), \
                           __FILE__, __LINE__)

#define iris_batch_flush(batch) iris_batch_flush_fence((batch), -1, NULL)

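/*
 * Illustrative sketch (not from the driver): flushing a batch and obtaining
 * a sync file descriptor that signals once the GPU finishes executing it:
 *
 *    int fence_fd = -1;
 *    iris_batch_flush_fence(batch, -1, &fence_fd);
 *    ...wait on or share fence_fd as needed, then close() it...
 */
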
bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE

void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
                        bool writable);

static inline unsigned
iris_batch_bytes_used(struct iris_batch *batch)
{
   return batch->map_next - batch->map;
}
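
/*
 * Note: map and map_next are void pointers, so the subtraction above relies
 * on the GCC/Clang extension that permits byte-wise arithmetic on void *.
 */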

/**
 * Ensure the current command buffer has \p size bytes of space
 * remaining. If not, this creates a secondary batch buffer and emits
 * a jump from the primary batch to the start of the secondary.
 *
 * Most callers want iris_get_command_space() instead.
 */
static inline void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   const unsigned required_bytes = iris_batch_bytes_used(batch) + size;

   if (required_bytes >= BATCH_SZ) {
      iris_chain_to_new_batch(batch);
   }
}

/**
 * Allocate space in the current command buffer, and return a pointer
 * to the mapped area so the caller can write commands there.
 *
 * This should be called whenever emitting commands.
 */
static inline void *
iris_get_command_space(struct iris_batch *batch, unsigned bytes)
{
   iris_require_command_space(batch, bytes);
   void *map = batch->map_next;
   batch->map_next += bytes;
   return map;
}
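
/*
 * Illustrative sketch (not from the driver): reserving space for a
 * four-dword command and filling in the dwords directly:
 *
 *    uint32_t *dw = iris_get_command_space(batch, 4 * sizeof(uint32_t));
 *    dw[0] = ...;   // command header
 *    dw[1] = ...;   // payload
 */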

/**
 * Helper to emit GPU commands - allocates space and copies the given data
 * into it.
 */
static inline void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   void *map = iris_get_command_space(batch, size);
   memcpy(map, data, size);
}
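
/*
 * Illustrative sketch (not from the driver): emitting a single MI_NOOP,
 * which on Intel GPUs is an all-zero dword:
 *
 *    const uint32_t noop = 0;
 *    iris_batch_emit(batch, &noop, sizeof(noop));
 */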

#endif