gallium/util: import the multithreaded job queue from amdgpu winsys (v2)
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H

#include "amdgpu_bo.h"
#include "util/u_memory.h"

struct amdgpu_ctx {
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;
   amdgpu_bo_handle user_fence_bo;
   uint64_t *user_fence_cpu_address_base;
   int refcount;
};

struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   uint64_t priority_usage;
   enum radeon_bo_usage usage;
   enum radeon_bo_domain domains;
};

enum ib_type {
   IB_CONST_PREAMBLE = 0,
   IB_CONST = 1, /* the const IB must be first */
   IB_MAIN = 2,
   IB_NUM
};

struct amdgpu_ib {
   struct radeon_winsys_cs base;

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer *big_ib_buffer;
   uint8_t *ib_mapped;
   unsigned used_ib_space;
   unsigned max_ib_size;
   uint32_t *ptr_ib_size;
   enum ib_type ib_type;
};
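
/* Illustrative sketch (not part of this header): how the fields above are
 * typically used when a new IB is carved out of big_ib_buffer.  Only names
 * declared in struct amdgpu_ib are real; everything else is an assumption,
 * and the authoritative logic lives in amdgpu_cs.c.
 *
 *    // CPU pointer where the next IB's packets will be written:
 *    uint32_t *ib_start = (uint32_t *)(ib->ib_mapped + ib->used_ib_space);
 *
 *    // Remember how much of the big buffer has been handed out so the
 *    // next IB starts after it:
 *    ib->used_ib_space += ib_size;            // ib_size: hypothetical argument
 *
 *    // ptr_ib_size later receives the final dword count at flush time,
 *    // e.g. *ib->ptr_ib_size = number_of_dwords_written;  (illustrative)
 */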

struct amdgpu_cs_context {
   struct amdgpu_cs_request request;
   struct amdgpu_cs_ib_info ib[IB_NUM];

   /* Buffers. */
   unsigned max_num_buffers;
   unsigned num_buffers;
   amdgpu_bo_handle *handles;
   uint8_t *flags;
   struct amdgpu_cs_buffer *buffers;

   int buffer_indices_hashlist[4096];

   uint64_t used_vram;
   uint64_t used_gart;

   unsigned max_dependencies;

   struct pipe_fence_handle *fence;
};
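
/* A minimal sketch of how buffer_indices_hashlist is meant to accelerate
 * amdgpu_lookup_buffer(): it caches "buffer -> index into buffers[]" guesses
 * keyed by a cheap hash.  The hash and the unique_id field are assumptions
 * made for illustration; the real lookup is implemented in amdgpu_cs.c.
 *
 *    int example_lookup(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
 *    {
 *       unsigned hash = bo->unique_id & (4096 - 1);   // unique_id: assumed field
 *       int i = cs->buffer_indices_hashlist[hash];
 *
 *       // -1 means "no cached entry"; otherwise verify the cached guess.
 *       if (i < 0 || cs->buffers[i].bo == bo)
 *          return i;
 *
 *       // On a collision, fall back to a linear search over buffers[]
 *       // (not shown) and refresh the cache entry before returning.
 *       return -1;
 *    }
 */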

struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ib const_ib; /* optional constant engine IB */
   struct amdgpu_ib const_preamble_ib;
   struct amdgpu_ctx *ctx;
   enum ring_type ring_type;

   /* We flip between these two CS. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The currently-used CS. */
   struct amdgpu_cs_context *csc;
   /* The CS currently owned by the other thread. */
   struct amdgpu_cs_context *cst;

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;

   struct util_queue_fence flush_completed;
};
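
/* A rough sketch, under assumptions, of how the csc/cst pair is meant to be
 * used at flush time: the context that was just filled is handed to the
 * submission thread, the two pointers are swapped, and the new current
 * context is reset for further recording.  The queue and call names below
 * refer to the util_queue imported in this series and are assumptions here.
 *
 *    // Make sure the previous submission of this CS has completed.
 *    util_queue_job_wait(&cs->flush_completed);
 *
 *    // Swap: the freshly filled csc becomes the thread-owned cst.
 *    struct amdgpu_cs_context *tmp = cs->csc;
 *    cs->csc = cs->cst;
 *    cs->cst = tmp;
 *
 *    // Hand cst to the other thread; amdgpu_cs_submit_ib consumes the job.
 *    util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed);
 */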

struct amdgpu_fence {
   struct pipe_reference reference;

   struct amdgpu_ctx *ctx;  /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* Set while the fence contents are still unknown because the IB is
    * still being submitted in the other thread. */
   volatile int submission_in_progress; /* bool (int for atomicity) */
   volatile int signalled; /* bool (int for atomicity) */
};
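
/* Sketch of how the two volatile flags are meant to be consumed by a waiter
 * (an assumption-level outline, not the real amdgpu_fence_wait):
 *
 *    // While the IB is still queued in the other thread, fence and
 *    // user_fence_cpu_address are not valid yet, so wait for submission:
 *    if (fence->submission_in_progress)
 *       wait_until_submission_done(fence);    // hypothetical helper
 *
 *    // Use the cached result before asking the kernel again:
 *    if (fence->signalled)
 *       return true;
 *
 *    // Otherwise query the kernel/user fence and, if it has passed,
 *    // set fence->signalled = true so later waits return immediately.
 */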

static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
{
   if (p_atomic_dec_zero(&ctx->refcount)) {
      amdgpu_cs_ctx_free(ctx->ctx);
      amdgpu_bo_free(ctx->user_fence_bo);
      FREE(ctx);
   }
}
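
/* The matching "take a reference" side is just an atomic increment; a small
 * usage sketch (p_atomic_inc comes from util/u_atomic.h, the same header
 * that provides p_atomic_dec_zero):
 *
 *    p_atomic_inc(&ctx->refcount);   // e.g. when a new fence or CS keeps ctx
 *    ...
 *    amdgpu_ctx_unref(ctx);          // drops it; frees the context at zero
 */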

static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;

   if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
      amdgpu_ctx_unref((*rdst)->ctx);
      FREE(*rdst);
   }
   *rdst = rsrc;
}
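
/* Typical usage follows the standard gallium reference pattern; variable
 * names below are illustrative only:
 *
 *    struct pipe_fence_handle *saved = NULL;
 *    amdgpu_fence_reference(&saved, new_fence);   // addref new, unref old
 *    ...
 *    amdgpu_fence_reference(&saved, NULL);        // release
 *
 * Starting from or assigning NULL works because 'reference' is the first
 * member of struct amdgpu_fence and pipe_reference() tolerates NULL counters.
 */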

int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);

static inline struct amdgpu_ib *
amdgpu_ib(struct radeon_winsys_cs *base)
{
   return (struct amdgpu_ib *)base;
}

static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_winsys_cs *base)
{
   assert(amdgpu_ib(base)->ib_type == IB_MAIN);
   return (struct amdgpu_cs*)base;
}

#define get_container(member_ptr, container_type, container_member) \
   (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))

static inline struct amdgpu_cs *
amdgpu_cs_from_ib(struct amdgpu_ib *ib)
{
   switch (ib->ib_type) {
   case IB_MAIN:
      return get_container(ib, struct amdgpu_cs, main);
   case IB_CONST:
      return get_container(ib, struct amdgpu_cs, const_ib);
   case IB_CONST_PREAMBLE:
      return get_container(ib, struct amdgpu_cs, const_preamble_ib);
   default:
      unreachable("bad ib_type");
   }
}
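
/* get_container() is the usual container_of idiom: given a pointer to a
 * member, step back to the enclosing struct.  For example (illustrative):
 *
 *    struct amdgpu_cs *cs = ...;
 *    struct amdgpu_ib *ib = &cs->const_ib;
 *    assert(amdgpu_cs_from_ib(ib) == cs);   // recovers the parent amdgpu_cs
 */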

static inline boolean
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
                              struct amdgpu_winsys_bo *bo)
{
   int num_refs = bo->num_cs_references;
   return num_refs == bo->ws->num_cs ||
          (num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}

static inline boolean
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
                                         struct amdgpu_winsys_bo *bo,
                                         enum radeon_bo_usage usage)
{
   int index;

   if (!bo->num_cs_references)
      return FALSE;

   index = amdgpu_lookup_buffer(cs->csc, bo);
   if (index == -1)
      return FALSE;

   return (cs->csc->buffers[index].usage & usage) != 0;
}

static inline boolean
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
   return bo->num_cs_references != 0;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute);
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
void amdgpu_cs_submit_ib(void *job);

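/* How these pieces fit the job queue named in the commit title (a sketch
 * under assumptions, not the winsys code itself): amdgpu_cs_submit_ib() has
 * the plain "void (*)(void *job)" shape expected by util_queue, so a flush
 * can push the CS as a job and later block on flush_completed:
 *
 *    util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed);  // async submit
 *    ...
 *    amdgpu_cs_sync_flush(rcs);   // waits for flush_completed when required
 *
 * The queue name ws->cs_queue and the exact util_queue_add_job() signature
 * are assumptions here; see util/u_queue.h for the imported API.
 */
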
#endif