/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#ifndef FREEDRENO_BATCH_H_
#define FREEDRENO_BATCH_H_

#include "util/u_inlines.h"
#include "util/u_queue.h"
#include "util/list.h"

#include "freedreno_util.h"
struct fd_context;
struct fd_resource;
enum fd_resource_status;
/* Bitmask of stages in rendering that a particular query is
 * active.  Queries will be automatically started/stopped (generating
 * additional fd_hw_sample_period's) on entrance/exit from stages that
 * are applicable to the query.
 *
 * NOTE: set the stage to NULL at end of IB to ensure no query is still
 * active.  Things aren't going to work out the way you want if a query
 * is active across IB's (or between tile IB and draw IB)
 */
enum fd_render_stage {
	FD_STAGE_NULL     = 0x01,
	FD_STAGE_DRAW     = 0x02,
	FD_STAGE_CLEAR    = 0x04,
	/* used for driver internal draws (ie. util_blitter_blit()): */
	FD_STAGE_BLIT     = 0x08,
	FD_STAGE_ALL      = 0xff,
};
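/* Example (illustrative sketch): query-sensitive code brackets work with a
 * stage change so that active queries are paused/resumed at the right
 * points.  fd_batch_set_stage() is a hypothetical helper, not part of this
 * header:
 *
 *    fd_batch_set_stage(batch, FD_STAGE_BLIT);
 *    // ... emit internal blit draws ...
 *    fd_batch_set_stage(batch, FD_STAGE_NULL);
 */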
#define MAX_HW_SAMPLE_PROVIDERS 4
struct fd_hw_sample_provider;
struct fd_hw_sample;
/* A batch tracks everything about a cmdstream batch/submit, including the
 * ringbuffers used for binning, draw, and gmem cmds, list of associated
 * fd_resource-s, etc.
 */
struct fd_batch {
	struct pipe_reference reference;

	struct fd_context *ctx;

	struct util_queue_fence flush_fence;
	/* do we need to mem2gmem before rendering.  We don't, if for example,
	 * there was a glClear() that invalidated the entire previous buffer
	 * contents.  Keep track of which buffer(s) are cleared, or need
	 * restore.  Masks of PIPE_CLEAR_*
	 *
	 * The 'cleared' bits will be set for buffers which are *entirely*
	 * cleared, and 'partial_cleared' bits will be set if you must
	 * check cleared_scissor.
	 */
	enum {
		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
		FD_BUFFER_COLOR   = PIPE_CLEAR_COLOR,
		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
	} cleared, partial_cleared, restore, resolve;
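	/* Example (illustrative sketch): a clear covering the whole surface
	 * would, roughly, mark the buffers as entirely cleared and needing
	 * resolve; 'buffers' stands for the PIPE_CLEAR_* mask passed to the
	 * clear entry-point (hypothetical variable):
	 *
	 *    batch->cleared |= buffers;
	 *    batch->resolve |= buffers;
	 */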
	bool back_blit : 1;	/* only blit so far is resource shadowing back-blit */

	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
	 * to update via RMW:
	 */
	bool needs_wfi : 1;
	/* To decide whether to render to system memory, keep track of the
	 * number of draws, and whether any of them require multisample,
	 * depth_test (or depth write), stencil_test, blending, and
	 * color_logic_op (since those functions are disabled when by-
	 * passing GMEM).
	 */
	enum {
		FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
		FD_GMEM_DEPTH_ENABLED        = 0x02,
		FD_GMEM_STENCIL_ENABLED      = 0x04,

		FD_GMEM_MSAA_ENABLED         = 0x08,
		FD_GMEM_BLEND_ENABLED        = 0x10,
		FD_GMEM_LOGICOP_ENABLED      = 0x20,
	} gmem_reason;
	unsigned num_draws;	/* number of draws in current batch */
	/* Track the maximal bounds of the scissor of all the draws within a
	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
	 * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
	 */
	struct pipe_scissor_state max_scissor;
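	/* Example (illustrative sketch): each draw would union its effective
	 * scissor into max_scissor, e.g. with Mesa's MIN2/MAX2 macros
	 * ('scissor' is a hypothetical per-draw scissor):
	 *
	 *    batch->max_scissor.minx = MIN2(batch->max_scissor.minx, scissor->minx);
	 *    batch->max_scissor.miny = MIN2(batch->max_scissor.miny, scissor->miny);
	 *    batch->max_scissor.maxx = MAX2(batch->max_scissor.maxx, scissor->maxx);
	 *    batch->max_scissor.maxy = MAX2(batch->max_scissor.maxy, scissor->maxy);
	 */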
	/* Track the cleared scissor for color/depth/stencil, so we know
	 * which, if any, tiles need to be restored (mem2gmem).  Only valid
	 * if the corresponding bit in ctx->cleared is set.
	 */
	struct {
		struct pipe_scissor_state color, depth, stencil;
	} cleared_scissor;
	/* Keep track of DRAW initiators that need to be patched up depending
	 * on whether we are using binning or not:
	 */
	struct util_dynarray draw_patches;
	/* Keep track of writes to RB_RENDER_CONTROL which need to be patched
	 * once we know whether or not to use GMEM, and GMEM tile pitch.
	 *
	 * (only for a3xx.. but having gen specific subclasses of fd_batch
	 * seemed overkill for now)
	 */
	struct util_dynarray rbrc_patches;
	struct pipe_framebuffer_state framebuffer;

	/** draw pass cmdstream: */
	struct fd_ringbuffer *draw;
	/** binning pass cmdstream: */
	struct fd_ringbuffer *binning;
	/** tiling/gmem (IB0) cmdstream: */
	struct fd_ringbuffer *gmem;

	/**
	 * hw query related state:
	 */
	/* next sample offset.. incremented for each sample in the batch/
	 * submit, reset to zero on next submit.
	 */
	uint32_t next_sample_offset;

	/* cached samples (in case multiple queries need to reference
	 * the same sample snapshot)
	 */
	struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];

	/* which sample providers were active in the current batch: */
	uint32_t active_providers;

	/* tracking for current stage, to know when to start/stop
	 * any active queries:
	 */
	enum fd_render_stage stage;

	/* list of samples in current batch: */
	struct util_dynarray samples;

	/* current query result bo and tile stride: */
	struct pipe_resource *query_buf;
	uint32_t query_tile_stride;

	/* Set of resources used by currently-unsubmitted batch (read or
	 * write).. does not hold a reference to the resource.
	 */
	struct set *resources;
	/** key in batch-cache (if not null): */
	const void *key;
	uint32_t hash;

	/** set of dependent batches.. holds refs to dependent batches: */
	uint32_t dependents_mask;
};
struct fd_batch * fd_batch_create(struct fd_context *ctx);

void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch, bool sync);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
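/* Example (illustrative sketch): draw-time code would mark each bound
 * resource as used before emitting the draw, then check whether the
 * ringbuffer is getting full.  'vbuf' and 'tex' are hypothetical bound
 * resources, and fd_resource() is assumed to be the driver's cast helper:
 *
 *    fd_batch_resource_used(batch, fd_resource(vbuf), false);
 *    fd_batch_resource_used(batch, fd_resource(tex), false);
 *    batch->num_draws++;
 *    fd_batch_check_size(batch);
 */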
/* not called directly: */
void __fd_batch_describe(char* buf, const struct fd_batch *batch);
void __fd_batch_destroy(struct fd_batch *batch);
/*
 * NOTE the rule is, you need to hold the screen->lock when destroying
 * a batch..  so either use fd_batch_reference() (which grabs the lock
 * for you) if you don't hold the lock, or fd_batch_reference_locked()
 * if you do hold the lock.
 *
 * WARNING the _locked() version can briefly drop the lock.  Without
 * recursive mutexes, I'm not sure there is much else we can do (since
 * __fd_batch_destroy() needs to unref resources)
 */
static inline void
fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;
	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe))
		__fd_batch_destroy(old_batch);
	*ptr = batch;
}
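/* Example (illustrative sketch): dropping the context's reference to the
 * current batch without holding screen->lock:
 *
 *    fd_batch_reference(&ctx->batch, NULL);
 */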
/* fwd-decl prototypes to untangle header dependency :-/ */
static inline void fd_context_assert_locked(struct fd_context *ctx);
static inline void fd_context_lock(struct fd_context *ctx);
static inline void fd_context_unlock(struct fd_context *ctx);
static inline void
fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;

	if (old_batch)
		fd_context_assert_locked(old_batch->ctx);
	else if (batch)
		fd_context_assert_locked(batch->ctx);

	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe)) {
		struct fd_context *ctx = old_batch->ctx;
		fd_context_unlock(ctx);
		__fd_batch_destroy(old_batch);
		fd_context_lock(ctx);
	}
	*ptr = batch;
}
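/* Example (illustrative sketch): re-pointing a reference while already
 * holding screen->lock, e.g. from batch-cache maintenance ('bc->batches[i]'
 * is a hypothetical slot):
 *
 *    // with screen->lock held:
 *    fd_batch_reference_locked(&bc->batches[i], NULL);
 */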
#include "freedreno_context.h"
static inline void
fd_reset_wfi(struct fd_batch *batch)
{
	batch->needs_wfi = true;
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
static inline void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}
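/* Example (illustrative sketch): call fd_wfi() before emitting a register
 * write that requires the GPU to be idle, e.g. an RMW update.  REG_EXAMPLE
 * and 'val' are placeholders, not real register names:
 *
 *    fd_wfi(batch, ring);
 *    OUT_PKT0(ring, REG_EXAMPLE, 1);
 *    OUT_RING(ring, val);
 */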
/* emit a CP_EVENT_WRITE:
 */
static inline void
fd_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum vgt_event_type evt)
{
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, evt);

	fd_reset_wfi(batch);
}
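/* Example (illustrative sketch): emitting a cache flush event at the end of
 * a rendering pass, assuming CACHE_FLUSH from the adreno vgt_event_type
 * enum:
 *
 *    fd_event_write(batch, ring, CACHE_FLUSH);
 */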
#endif /* FREEDRENO_BATCH_H_ */