panfrost: Pass size to panfrost_batch_get_scratchpad
[mesa.git] / src / gallium / drivers / panfrost / pan_job.h
1 /*
2 * Copyright (C) 2019 Alyssa Rosenzweig
3 * Copyright (C) 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 */
25
26 #ifndef __PAN_JOB_H__
27 #define __PAN_JOB_H__
28
29 #include "util/u_dynarray.h"
30 #include "pipe/p_state.h"
31 #include "pan_allocate.h"
32 #include "pan_resource.h"
33
/* panfrost_batch_fence is the out fence of a batch that users or other batches
 * might want to wait on. The batch fence lifetime is different from the batch
 * one, as one will certainly want to wait upon the fence after the batch has
 * been submitted (which is when panfrost_batch objects are freed).
 */
struct panfrost_batch_fence {
        /* Refcounting object for the fence. */
        struct pipe_reference reference;

        /* Batch that created this fence object. Will become NULL at batch
         * submission time. This field is mainly here to know whether the
         * batch has been flushed or not.
         */
        struct panfrost_batch *batch;

        /* Context this fence is attached to. We need both ctx and batch, as
         * the batch will go away after it's been submitted, but the fence
         * will stay a bit longer.
         */
        struct panfrost_context *ctx;

        /* Sync object backing this fence. */
        uint32_t syncobj;

        /* Cached value of the signaled state to avoid calling WAIT_SYNCOBJs
         * when we know the fence has already been signaled.
         */
        bool signaled;
};
63
/* Bitmask values for panfrost_batch::requirements */
#define PAN_REQ_MSAA (1 << 0)
#define PAN_REQ_DEPTH_WRITE (1 << 1)
66
/* A panfrost_batch corresponds to a bound FBO we're rendering to,
 * collecting over multiple draws. */

struct panfrost_batch {
        /* Context that owns this batch. */
        struct panfrost_context *ctx;

        /* Framebuffer state this batch renders to; used as the lookup key
         * for finding an existing batch for a given FBO. */
        struct pipe_framebuffer_state key;

        /* Buffers cleared (PIPE_CLEAR_* bitmask) */
        unsigned clear;

        /* Packed clear values, indexed by both render target as well as word.
         * Essentially, a single pixel is packed, with some padding to bring it
         * up to a 32-bit interval; that pixel is then duplicated over to fill
         * all 16-bytes */

        uint32_t clear_color[PIPE_MAX_COLOR_BUFS][4];
        float clear_depth;
        unsigned clear_stencil;

        /* Amount of thread local storage required per thread */
        unsigned stack_size;

        /* Whether this job uses the corresponding requirement (PAN_REQ_*
         * bitmask) */
        unsigned requirements;

        /* The bounding box covered by this job, taking scissors into account.
         * Basically, the bounding box we have to run fragment shaders for */

        unsigned minx, miny;
        unsigned maxx, maxy;

        /* CPU pointers to the job descriptor headers. next_job is only
         * set at submit time (since only then are all the dependencies
         * known). The upshot is that this is append-only.
         *
         * These arrays contain the headers for the "primary batch", our jargon
         * referring to the part of the panfrost_job that actually contains
         * meaningful work. In an OpenGL ES setting, that means the
         * WRITE_VALUE/VERTEX/TILER jobs. Excluded is specifically the FRAGMENT
         * job, which is sent on as a secondary batch containing only a single
         * hardware job. Since there's one and only one FRAGMENT job issued per
         * panfrost_job, there is no need to do any scoreboarding / management;
         * it's easy enough to open-code it and it's not like we can get any
         * better anyway. */
        struct util_dynarray headers;

        /* (And the GPU versions; TODO maybe combine) */
        struct util_dynarray gpu_headers;

        /* The last job in the primary batch */
        struct panfrost_transfer last_job;

        /* The first/last tiler job */
        struct panfrost_transfer first_tiler;
        struct panfrost_transfer last_tiler;

        /* The first vertex job used as the input to a tiler job */
        struct panfrost_transfer first_vertex_for_tiler;

        /* The first job. Notice we've created a linked list */
        struct panfrost_transfer first_job;

        /* The number of jobs in the primary batch, essentially */
        unsigned job_index;

        /* BOs referenced -- will be used for flushing logic */
        struct hash_table *bos;

        /* Current transient BO */
        struct panfrost_bo *transient_bo;

        /* Within the topmost transient BO, how much has been used? */
        unsigned transient_offset;

        /* Polygon list bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *polygon_list;

        /* Scratchpad BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *scratchpad;

        /* Tiler heap BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *tiler_heap;

        /* Dummy tiler BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *tiler_dummy;

        /* Framebuffer descriptor. */
        struct panfrost_transfer framebuffer;

        /* Output sync object. Only valid when submitted is true. */
        struct panfrost_batch_fence *out_sync;

        /* Batch dependencies */
        struct util_dynarray dependencies;
};
163
/* Functions for managing the above */

/* Drops one reference on the fence, freeing it when the refcount hits zero. */
void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence);

/* Takes one reference on the fence. */
void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence);
171
/* Returns the batch associated with the currently bound FBO
 * (presumably creating one if none exists yet -- see pan_job.c). */
struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx);

/* Like panfrost_get_batch_for_fbo, but guarantees a fresh (empty) batch
 * for the bound FBO -- TODO confirm flush semantics against pan_job.c. */
struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx);

/* One-time per-context initialization of the batch machinery. */
void
panfrost_batch_init(struct panfrost_context *ctx);

/* Records a BO reference on the batch (flags presumably PAN_BO_ACCESS_*
 * describing how the batch uses the BO -- verify against callers). */
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags);

/* Adds the BOs backing the batch's framebuffer attachments. */
void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch);

/* Creates a BO of the given size and adds it to the batch's BO set. */
struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags);

/* Submits all pending batches for the context; if wait is true, blocks
 * until they have completed. */
void
panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait);

/* Returns true if any not-yet-flushed batch references the given BO. */
bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo);

/* Flushes only the batches that access the given BO (flags presumably
 * select the access types of interest -- verify against callers). */
void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo, uint32_t flags);

/* Updates batch->requirements (PAN_REQ_* bitmask) from current state. */
void
panfrost_batch_set_requirements(struct panfrost_batch *batch);

/* Returns the batch's scratchpad (thread-local storage) BO, lazily
 * creating it on first use; sized from the per-thread stack shift, the
 * per-thread TLS allocation and the core count. */
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch, unsigned shift, unsigned thread_tls_alloc, unsigned core_count);

/* Returns the GPU address of a polygon list of at least the given size,
 * lazily allocating the backing BO on first use. */
mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size);

/* Returns the batch's tiler heap BO, lazily creating it on first use. */
struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch);

/* Returns the batch's dummy tiler BO, lazily creating it on first use. */
struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch);

/* Records a clear (PIPE_CLEAR_* buffers bitmask) on the batch, packing
 * the given clear values into the batch state. */
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil);

/* Grows the batch's bounding box to include the given rectangle. */
void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy);

/* Shrinks the batch's bounding box to its intersection with the given
 * rectangle. */
void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy);
232
/* Scoreboarding: queueing hardware jobs into the primary batch with
 * correct inter-job dependencies. */

/* Queues a standalone compute job on the batch. */
void
panfrost_scoreboard_queue_compute_job(
                struct panfrost_batch *batch,
                struct panfrost_transfer job);

/* Queues a vertex job; if requires_tiling is set, the job is made
 * available as an input to a subsequent tiler job. */
void
panfrost_scoreboard_queue_vertex_job(
                struct panfrost_batch *batch,
                struct panfrost_transfer vertex,
                bool requires_tiling);

/* Queues a tiler job, depending on previously queued vertex work. */
void
panfrost_scoreboard_queue_tiler_job(
                struct panfrost_batch *batch,
                struct panfrost_transfer tiler);

/* Queues a vertex/tiler pair with the tiler depending on the vertex job. */
void
panfrost_scoreboard_queue_fused_job(
                struct panfrost_batch *batch,
                struct panfrost_transfer vertex,
                struct panfrost_transfer tiler);

/* Like panfrost_scoreboard_queue_fused_job, but prepends the pair to the
 * front of the batch instead of appending it. */
void
panfrost_scoreboard_queue_fused_job_prepend(
                struct panfrost_batch *batch,
                struct panfrost_transfer vertex,
                struct panfrost_transfer tiler);

/* Finalizes the batch's job chain at submit time, linking next_job
 * pointers now that all dependencies are known (see the comment on
 * panfrost_batch::headers). */
void
panfrost_scoreboard_link_batch(struct panfrost_batch *batch);

/* Returns true if the batch renders to a scanout surface. */
bool
panfrost_batch_is_scanout(struct panfrost_batch *batch);
267
268 #endif