/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __PAN_JOB_H__
#define __PAN_JOB_H__

#include "util/u_dynarray.h"
#include "pipe/p_state.h"
#include "pan_pool.h"
#include "pan_resource.h"
#include "pan_scoreboard.h"

/* panfrost_batch_fence is the out fence of a batch that users or other batches
 * might want to wait on. The batch fence lifetime is different from the
 * batch's, as we will certainly want to wait upon the fence after the batch
 * has been submitted (which is when panfrost_batch objects are freed).
 */
struct panfrost_batch_fence {
        /* Refcounting object for the fence. */
        struct pipe_reference reference;

        /* Batch that created this fence object. Will become NULL at batch
         * submission time. This field is mainly here to know whether the
         * batch has been flushed or not.
         */
        struct panfrost_batch *batch;
};
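
/* A rough usage sketch (illustrative only, not taken verbatim from the
 * driver): code that needs the fence to outlive the batch takes its own
 * reference before the batch is flushed, e.g.
 *
 *    struct panfrost_batch_fence *fence = batch->out_sync;
 *    panfrost_batch_fence_reference(fence);
 *    ...flush the batch; fence->batch becomes NULL at submission...
 *    panfrost_batch_fence_unreference(fence);
 *
 * The reference/unreference helpers declared below adjust the embedded
 * pipe_reference count, and the fence is freed once the last reference is
 * dropped. */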

#define PAN_REQ_MSAA            (1 << 0)
#define PAN_REQ_DEPTH_WRITE     (1 << 1)
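
/* A hedged sketch of how these bits are meant to be used (not lifted verbatim
 * from the driver; msaa_enabled stands in for the real rasterizer state
 * check): draw-time validation tags the batch, e.g.
 *
 *    if (msaa_enabled)
 *            batch->requirements |= PAN_REQ_MSAA;
 *
 * and job emission later tests the bits; see panfrost_batch_set_requirements()
 * below, which derives the bitmask from the current context state. */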

/* A panfrost_batch corresponds to a bound FBO we're rendering to,
 * collecting over multiple draws. */

struct panfrost_batch {
        struct panfrost_context *ctx;
        struct pipe_framebuffer_state key;

        /* Buffers cleared (PIPE_CLEAR_* bitmask) */
        unsigned clear;

        /* Buffers drawn */
        unsigned draws;

        /* Packed clear values, indexed by both render target as well as word.
         * Essentially, a single pixel is packed, with some padding to bring it
         * up to a 32-bit interval; that pixel is then duplicated over to fill
         * all 16 bytes */
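        /* For example (an assumption about the common case, not stated in
         * this header): an 8-bit-per-channel colour target packs to a single
         * 32-bit word, so that word is simply repeated across
         * clear_color[rt][0..3]; wider pixels span several words before
         * repeating. */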

        uint32_t clear_color[PIPE_MAX_COLOR_BUFS][4];
        float clear_depth;
        unsigned clear_stencil;

        /* Amount of thread local storage required per thread */
        unsigned stack_size;

        /* Amount of shared memory needed per workgroup (for compute) */
        unsigned shared_size;

        /* Whether this job uses the corresponding requirement (PAN_REQ_*
         * bitmask) */
        unsigned requirements;

        /* The bounding box covered by this job, taking scissors into account.
         * Basically, the bounding box we have to run fragment shaders for */

        unsigned minx, miny;
        unsigned maxx, maxy;

        /* BOs referenced not in the pool */
        struct hash_table *bos;

        /* Pool owned by this batch (released when the batch is released),
         * used for temporary descriptors */
        struct pan_pool pool;

        /* Pool also owned by this batch that is not CPU mapped (created as
         * INVISIBLE) used for private GPU-internal structures, particularly
         * varyings */
        struct pan_pool invisible_pool;

        /* Job scoreboarding state */
        struct pan_scoreboard scoreboard;

        /* Polygon list bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *polygon_list;

        /* Scratchpad BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *scratchpad;

        /* Shared memory BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *shared_memory;

        /* Tiler heap BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *tiler_heap;

        /* Dummy tiler BO bound to the batch, or NULL if none bound yet */
        struct panfrost_bo *tiler_dummy;

        /* Framebuffer descriptor. */
        struct panfrost_transfer framebuffer;

        /* Bifrost tiler meta descriptor. */
        mali_ptr tiler_meta;

        /* Output sync object. Only valid once the batch has been submitted. */
        struct panfrost_batch_fence *out_sync;

        /* Batch dependencies */
        struct util_dynarray dependencies;
};

/* Functions for managing the above */

void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence);

void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence);

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx);

struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx);

void
panfrost_batch_init(struct panfrost_context *ctx);

void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags);

struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags);

void
panfrost_flush_all_batches(struct panfrost_context *ctx, uint32_t out_sync);
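
/* A minimal usage sketch (assumptions flagged; this is not a verbatim excerpt
 * from the driver): a draw path typically grabs the batch for the current FBO,
 * attaches the BOs it reads or writes, and lets a later flush submit
 * everything:
 *
 *    struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
 *    panfrost_batch_add_bo(batch, bo, access_flags);
 *    panfrost_flush_all_batches(ctx, 0);
 *
 * where access_flags stands in for the PAN_BO_ACCESS_* visibility/stage flags
 * and 0 is assumed to mean "no out-sync object requested"; both are
 * assumptions about caller conventions rather than guarantees made by this
 * header. */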

bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo);

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo, bool flush_readers);

void
panfrost_batch_set_requirements(struct panfrost_batch *batch);

void
panfrost_batch_adjust_stack_size(struct panfrost_batch *batch);

struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch, unsigned shift, unsigned thread_tls_alloc, unsigned core_count);

struct panfrost_bo *
panfrost_batch_get_shared_memory(struct panfrost_batch *batch, unsigned size, unsigned workgroup_count);

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size);

struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch);

struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch);

void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil);

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy);

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy);
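
/* Sketch of the intended interplay (an interpretation of the names and of the
 * bounding-box comment above, not a verbatim excerpt): each draw grows the
 * batch's fragment bounding box with panfrost_batch_union_scissor() using the
 * area the draw can touch, while an enabled scissor rectangle clips it back
 * down via panfrost_batch_intersection_scissor(), so batch->minx/miny/maxx/maxy
 * end up covering only the pixels that actually need fragment work. */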

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch);

mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count);

mali_ptr
panfrost_batch_reserve_framebuffer(struct panfrost_batch *batch);

#endif