/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#ifndef FREEDRENO_QUERY_HW_H_
#define FREEDRENO_QUERY_HW_H_

#include "util/list.h"

#include "freedreno_query.h"
#include "freedreno_context.h"
/*
 * HW Queries:
 *
 * See: https://github.com/freedreno/freedreno/wiki/Queries#hardware-queries
 *
 * Hardware queries will be specific to gpu generation, but they need
 * some common infrastructure for triggering start/stop samples at
 * various points (for example, to exclude mem2gmem/gmem2mem or clear)
 * as well as per tile tracking.
 *
 * NOTE: in at least some cases hw writes sample values to memory addr
 * specified in some register.  So we don't really have the option to
 * just sample the same counter multiple times for multiple different
 * queries with the same query_type.  So we cache per sample provider
 * the most recent sample since the last draw.  This way multiple
 * sample periods for multiple queries can reference the same sample.
 *
 * fd_hw_sample_provider:
 *   - one per query type, registered/implemented by gpu generation
 *   - can construct fd_hw_samples on demand
 *   - most recent sample (since last draw) cached so multiple
 *     different queries can ref the same sample
 *
 * fd_hw_sample:
 *   - abstracts one snapshot of counter value(s) across N tiles
 *   - backing object not allocated until submit time when number
 *     of samples and number of tiles is known
 *
 * fd_hw_sample_period:
 *   - consists of start and stop sample
 *   - a query accumulates a list of sample periods
 *   - the query result is the sum of the sample periods
 */
71 struct fd_hw_sample_provider
{
74 /* stages applicable to the query type: */
75 enum fd_render_stage active
;
77 /* Optional hook for enabling a counter. Guaranteed to happen
78 * at least once before the first ->get_sample() in a batch.
80 void (*enable
)(struct fd_context
*ctx
, struct fd_ringbuffer
*ring
);
82 /* when a new sample is required, emit appropriate cmdstream
83 * and return a sample object:
85 struct fd_hw_sample
*(*get_sample
)(struct fd_batch
*batch
,
86 struct fd_ringbuffer
*ring
);
88 /* accumulate the results from specified sample period: */
89 void (*accumulate_result
)(struct fd_context
*ctx
,
90 const void *start
, const void *end
,
91 union pipe_query_result
*result
);
95 struct pipe_reference reference
; /* keep this first */
97 /* offset and size of the sample are know at the time the
98 * sample is constructed.
103 /* backing object, offset/stride/etc are determined not when
104 * the sample is constructed, but when the batch is submitted.
105 * This way we can defer allocation until total # of requested
106 * samples, and total # of tiles, is known.
108 struct pipe_resource
*prsc
;
110 uint32_t tile_stride
;
113 struct fd_hw_sample_period
;
116 struct fd_query base
;
118 const struct fd_hw_sample_provider
*provider
;
120 /* list of fd_hw_sample_periods: */
121 struct list_head periods
;
123 /* if active and not paused, the current sample period (not
124 * yet added to current_periods):
126 struct fd_hw_sample_period
*period
;
128 struct list_head list
; /* list-node in batch->active_queries */
130 int no_wait_cnt
; /* see fd_hw_get_query_result */
/**
 * Downcast a generic fd_query to the hw-query subclass.  Valid only for
 * queries created by fd_hw_create_query() (fd_hw_query embeds fd_query
 * as its first member, so the pointer cast is safe).
 */
static inline struct fd_hw_query *
fd_hw_query(struct fd_query *q)
{
	return (struct fd_hw_query *)q;
}
struct fd_query *fd_hw_create_query(struct fd_context *ctx,
		unsigned query_type, unsigned index);

/* helper for sample providers: */
struct fd_hw_sample *fd_hw_sample_init(struct fd_batch *batch, uint32_t size);

/* don't call directly, use fd_hw_sample_reference()
 * NOTE(review): the double-underscore prefix is a reserved identifier
 * in C, but renaming would break existing callers.
 */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);

/* presumably called at submit time, once the total number of tiles is
 * known (see fd_hw_sample) — per-tile setup follows via _prepare_tile():
 */
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
		struct fd_ringbuffer *ring);

/* track the current render stage so start/stop samples can exclude
 * stages a query type is not active for (see provider->active):
 */
void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd_hw_query_register_provider(struct pipe_context *pctx,
		const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);
void fd_hw_query_fini(struct pipe_context *pctx);
155 fd_hw_sample_reference(struct fd_context
*ctx
,
156 struct fd_hw_sample
**ptr
, struct fd_hw_sample
*samp
)
158 struct fd_hw_sample
*old_samp
= *ptr
;
160 if (pipe_reference(&(*ptr
)->reference
, &samp
->reference
))
161 __fd_hw_sample_destroy(ctx
, old_samp
);
#endif /* FREEDRENO_QUERY_HW_H_ */