Revert "winsys/amdgpu: Add R600_DEBUG flag to reserve VMID per ctx."
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H

#include "amdgpu_bo.h"
#include "util/u_memory.h"
#include <amdgpu_drm.h>

struct amdgpu_ctx {
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;
   amdgpu_bo_handle user_fence_bo;
   uint64_t *user_fence_cpu_address_base;
   int refcount;
   unsigned initial_num_total_rejected_cs;
   unsigned num_rejected_cs;
};

struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   union {
      struct {
         uint64_t priority_usage;
      } real;
      struct {
         uint32_t real_idx; /* index of underlying real BO */
      } slab;
   } u;
   enum radeon_bo_usage usage;
};
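
/* Which union member is valid depends on the kind of BO: u.real for real
 * buffers, u.slab for slab-suballocated ones. Illustrative access pattern
 * for a slab entry (a sketch, with 'csc' as a hypothetical
 * amdgpu_cs_context pointer):
 *
 *    struct amdgpu_cs_buffer *buffer = &csc->slab_buffers[i];
 *    struct amdgpu_cs_buffer *real =
 *       &csc->real_buffers[buffer->u.slab.real_idx];
 */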

enum ib_type {
   IB_MAIN,
   IB_NUM,
};

struct amdgpu_ib {
   struct radeon_winsys_cs base;

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer *big_ib_buffer;
   uint8_t *ib_mapped;
   unsigned used_ib_space;
   unsigned max_ib_size;
   uint32_t *ptr_ib_size;
   bool ptr_ib_size_inside_ib;
   enum ib_type ib_type;
};
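
/* New IBs are suballocated from big_ib_buffer: ib_mapped is its CPU
 * mapping, used_ib_space tracks how much has been handed out, and
 * ptr_ib_size points at the dword that is patched with the final IB size
 * at flush time (inside the IB itself when chaining is used, see
 * ptr_ib_size_inside_ib). A minimal sketch of the suballocation step,
 * with a hypothetical size variable (illustrative only):
 *
 *    uint32_t *ib_start = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
 *    ib->used_ib_space += ib_size_in_bytes;   // hypothetical size
 */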

struct amdgpu_cs_context {
   struct drm_amdgpu_cs_chunk_ib ib[IB_NUM];

   /* Buffers. */
   unsigned max_real_buffers;
   unsigned num_real_buffers;
   struct amdgpu_cs_buffer *real_buffers;

   unsigned max_real_submit;
   amdgpu_bo_handle *handles;
   uint8_t *flags;

   unsigned num_slab_buffers;
   unsigned max_slab_buffers;
   struct amdgpu_cs_buffer *slab_buffers;

   unsigned num_sparse_buffers;
   unsigned max_sparse_buffers;
   struct amdgpu_cs_buffer *sparse_buffers;

   int buffer_indices_hashlist[4096];

   struct amdgpu_winsys_bo *last_added_bo;
   unsigned last_added_bo_index;
   unsigned last_added_bo_usage;
   uint64_t last_added_bo_priority_usage;

   struct pipe_fence_handle **fence_dependencies;
   unsigned num_fence_dependencies;
   unsigned max_fence_dependencies;

   struct pipe_fence_handle *fence;

   /* the error returned from cs_flush for non-async submissions */
   int error_code;
};
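
/* buffer_indices_hashlist caches bo -> buffer-index lookups so that
 * amdgpu_lookup_buffer() avoids a linear search in the common case, and
 * the last_added_bo fields short-circuit repeated adds of the same BO.
 * A sketch of the hash probe (illustrative; it assumes each BO carries a
 * unique id, and the real logic lives in amdgpu_cs.c):
 *
 *    unsigned hash = bo->unique_id & (4096 - 1);
 *    int i = csc->buffer_indices_hashlist[hash];
 *    if (i >= 0 && buffers[i].bo == bo)
 *       return i;   // hit; otherwise fall back to a linear search
 */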

struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ctx *ctx;
   enum ring_type ring_type;
   struct drm_amdgpu_cs_chunk_fence fence_chunk;

   /* We flip between these two CS contexts. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The currently-used CS. */
   struct amdgpu_cs_context *csc;
   /* The CS currently owned by the other thread. */
   struct amdgpu_cs_context *cst;

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;

   struct util_queue_fence flush_completed;
   struct pipe_fence_handle *next_fence;
};
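
/* The csc/cst pair implements double buffering of submissions: at flush
 * time the two pointers are swapped, so the driver can record into the
 * idle context while the submit thread consumes the full one. A sketch
 * of the swap (illustrative; a real flush also synchronizes on
 * flush_completed first):
 *
 *    struct amdgpu_cs_context *tmp = cs->csc;
 *    cs->csc = cs->cst;   // start filling the idle context
 *    cs->cst = tmp;       // hand the full one to the submit thread
 */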

struct amdgpu_fence {
   struct pipe_reference reference;
   /* If ctx == NULL, this fence is syncobj-based. */
   uint32_t syncobj;

   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx;  /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* Set while the fence value is still unknown because the IB is still
    * being submitted in the other thread. */
   volatile int submission_in_progress; /* bool (int for atomicity) */
   volatile int signalled; /* bool (int for atomicity) */
};

static inline bool amdgpu_fence_is_syncobj(struct amdgpu_fence *fence)
{
   return fence->ctx == NULL;
}

static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
{
   if (p_atomic_dec_zero(&ctx->refcount)) {
      amdgpu_cs_ctx_free(ctx->ctx);
      amdgpu_bo_free(ctx->user_fence_bo);
      FREE(ctx);
   }
}
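
/* Fences pin their submission context: a sketch of the expected pairing
 * (illustrative, using the same p_atomic helpers as above):
 *
 *    p_atomic_inc(&ctx->refcount);   // when a fence takes the context
 *    ...
 *    amdgpu_ctx_unref(ctx);          // when the fence is destroyed
 */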

static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;

   if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
      struct amdgpu_fence *fence = *rdst;

      if (amdgpu_fence_is_syncobj(fence))
         amdgpu_cs_destroy_syncobj(fence->ws->dev, fence->syncobj);
      else
         amdgpu_ctx_unref(fence->ctx);

      FREE(fence);
   }
   *rdst = rsrc;
}
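
/* Usage sketch (illustrative): since 'reference' is the first member and
 * pipe_reference() accepts NULL on either side, dropping the last
 * reference is simply:
 *
 *    struct pipe_fence_handle *fence = ...;
 *    amdgpu_fence_reference(&fence, NULL);   // frees on last unref
 */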

int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);

static inline struct amdgpu_ib *
amdgpu_ib(struct radeon_winsys_cs *base)
{
   return (struct amdgpu_ib *)base;
}

static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_winsys_cs *base)
{
   assert(amdgpu_ib(base)->ib_type == IB_MAIN);
   return (struct amdgpu_cs*)base;
}

#define get_container(member_ptr, container_type, container_member) \
   (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
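
/* This is the classic container_of idiom: it recovers a pointer to the
 * enclosing struct from a pointer to one of its members. Given the
 * layout of struct amdgpu_cs above:
 *
 *    struct amdgpu_ib *ib = ...;   // known to be the 'main' member
 *    struct amdgpu_cs *cs = get_container(ib, struct amdgpu_cs, main);
 *
 * which is exactly what amdgpu_cs_from_ib() below does for IB_MAIN. */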

static inline struct amdgpu_cs *
amdgpu_cs_from_ib(struct amdgpu_ib *ib)
{
   switch (ib->ib_type) {
   case IB_MAIN:
      return get_container(ib, struct amdgpu_cs, main);
   default:
      unreachable("bad ib_type");
   }
}

static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
                              struct amdgpu_winsys_bo *bo)
{
   int num_refs = bo->num_cs_references;

   /* If the BO is referenced by all CSes in the winsys, it must be
    * referenced by this one too; otherwise do the hashed lookup. */
   return num_refs == bo->ws->num_cs ||
          (num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}

static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
                                         struct amdgpu_winsys_bo *bo,
                                         enum radeon_bo_usage usage)
{
   int index;
   struct amdgpu_cs_buffer *buffer;

   if (!bo->num_cs_references)
      return false;

   index = amdgpu_lookup_buffer(cs->csc, bo);
   if (index == -1)
      return false;

   /* Pick the buffer list matching the BO type: real, sparse, or
    * slab-suballocated. */
   buffer = bo->bo ? &cs->csc->real_buffers[index] :
            bo->sparse ? &cs->csc->sparse_buffers[index] :
            &cs->csc->slab_buffers[index];

   return (buffer->usage & usage) != 0;
}

static inline bool
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
   return bo->num_cs_references != 0;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute);
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences);
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
void amdgpu_cs_submit_ib(void *job, int thread_index);

#endif