/**************************************************************************
 *
 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "util/u_cpu_detect.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_thread.h"

#include <inttypes.h> /* PRIu64 format macros used below */
#include <stdio.h>    /* FILE, fprintf, printf */
/**
 * This function is used to copy an array of pipe_vertex_buffer structures,
 * while properly referencing the pipe_vertex_buffer::buffer member.
 *
 * enabled_buffers is updated such that the bits corresponding to the indices
 * of disabled buffers are set to 0 and the enabled ones are set to 1.
 *
 * \sa util_copy_framebuffer_state
 */
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
                                  uint32_t *enabled_buffers,
                                  const struct pipe_vertex_buffer *src,
                                  unsigned start_slot, unsigned count)
{
   unsigned i;
   uint32_t bitmask = 0;

   dst += start_slot;

   if (src) {
      for (i = 0; i < count; i++) {
         if (src[i].buffer.resource)
            bitmask |= 1 << i;

         pipe_vertex_buffer_unreference(&dst[i]);

         if (!src[i].is_user_buffer)
            pipe_resource_reference(&dst[i].buffer.resource,
                                    src[i].buffer.resource);
      }

      /* Copy over the other members of pipe_vertex_buffer. */
      memcpy(dst, src, count * sizeof(struct pipe_vertex_buffer));

      *enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
      *enabled_buffers |= bitmask << start_slot;
   }
   else {
      /* Unreference the buffers. */
      for (i = 0; i < count; i++)
         pipe_vertex_buffer_unreference(&dst[i]);

      *enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
   }
}
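
/*
 * Usage sketch (illustrative only): a driver's set_vertex_buffers hook can
 * forward to the helper above. "struct my_context", "vertex_buffers", and
 * "enabled_vb_mask" are hypothetical driver fields, not part of this file:
 *
 *    static void
 *    my_set_vertex_buffers(struct pipe_context *pipe, unsigned start_slot,
 *                          unsigned count, const struct pipe_vertex_buffer *vb)
 *    {
 *       struct my_context *mctx = (struct my_context *)pipe;
 *
 *       util_set_vertex_buffers_mask(mctx->vertex_buffers,
 *                                    &mctx->enabled_vb_mask,
 *                                    vb, start_slot, count);
 *    }
 *
 * Note that the helper takes the base of the slot array; it applies
 * start_slot internally (see "dst += start_slot" above).
 */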
/**
 * Same as util_set_vertex_buffers_mask, but it only returns the number
 * of bound buffers.
 */
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
                                   unsigned *dst_count,
                                   const struct pipe_vertex_buffer *src,
                                   unsigned start_slot, unsigned count)
{
   unsigned i;
   uint32_t enabled_buffers = 0;

   for (i = 0; i < *dst_count; i++) {
      if (dst[i].buffer.resource)
         enabled_buffers |= (1ull << i);
   }

   util_set_vertex_buffers_mask(dst, &enabled_buffers, src, start_slot,
                                count);

   *dst_count = util_last_bit(enabled_buffers);
}
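
/*
 * Usage sketch (illustrative only): drivers that track a buffer count instead
 * of a bitmask can call the variant above directly. "num_vertex_buffers" is a
 * hypothetical driver field:
 *
 *    util_set_vertex_buffers_count(mctx->vertex_buffers,
 *                                  &mctx->num_vertex_buffers,
 *                                  vb, start_slot, count);
 */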
/**
 * Given a draw that reads a user index buffer, upload the used range of that
 * buffer and return the uploaded resource and the offset into it.
 */
bool
util_upload_index_buffer(struct pipe_context *pipe,
                         const struct pipe_draw_info *info,
                         struct pipe_resource **out_buffer,
                         unsigned *out_offset)
{
   unsigned start_offset = info->start * info->index_size;

   u_upload_data(pipe->stream_uploader, start_offset,
                 info->count * info->index_size, 4,
                 (char*)info->index.user + start_offset,
                 out_offset, out_buffer);
   u_upload_unmap(pipe->stream_uploader);
   *out_offset -= start_offset;
   return *out_buffer != NULL;
}
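
/*
 * Usage sketch (illustrative only): a draw path that cannot read indices from
 * user memory might upload them first. "ibuf" and "offset" are locals here:
 *
 *    if (info->has_user_indices) {
 *       struct pipe_resource *ibuf = NULL;
 *       unsigned offset;
 *
 *       if (!util_upload_index_buffer(pipe, info, &ibuf, &offset))
 *          return;  // allocation failed
 *       // ... draw using ibuf with "offset" as the buffer offset ...
 *       pipe_resource_reference(&ibuf, NULL);
 *    }
 *
 * Because the function subtracts start_offset from the returned offset, the
 * draw can keep using the unmodified info->start with the new buffer.
 */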
/**
 * Called by MakeCurrent. Used to notify the driver that the application
 * thread may have been changed.
 *
 * The function pins the current thread and driver threads to a group of
 * CPU cores that share the same L3 cache. This is needed for good multi-
 * threading performance on AMD Zen CPUs.
 *
 * \param upper_thread  thread in the state tracker that also needs to be
 *                      pinned.
 */
void
util_context_thread_changed(struct pipe_context *ctx, thrd_t *upper_thread)
{
   thrd_t current = thrd_current();
   int cache = util_get_L3_for_pinned_thread(current,
                                             util_cpu_caps.cores_per_L3);

   /* If the main thread is not pinned, choose the L3 cache. */
   if (cache == -1) {
      unsigned num_caches = util_cpu_caps.nr_cpus /
                            util_cpu_caps.cores_per_L3;
      static unsigned last_cache;

      /* Choose a different L3 cache for each subsequent MakeCurrent. */
      cache = p_atomic_inc_return(&last_cache) % num_caches;
      util_pin_thread_to_L3(current, cache, util_cpu_caps.cores_per_L3);
   }

   /* Tell the driver to pin its threads to the same L3 cache. */
   if (ctx->set_context_param) {
      ctx->set_context_param(ctx, PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
                             cache);
   }

   /* Do the same for the upper level thread if there is any (e.g. glthread) */
   if (upper_thread)
      util_pin_thread_to_L3(*upper_thread, cache, util_cpu_caps.cores_per_L3);
}
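
/*
 * Usage sketch (illustrative only): a state tracker would call this from its
 * MakeCurrent path. "glthread" is a hypothetical worker-thread handle:
 *
 *    util_context_thread_changed(st->pipe,
 *                                glthread ? &glthread->thread : NULL);
 */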
/* This is a helper for hardware bring-up. Don't remove. */
struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx)
{
   struct pipe_query *q =
      ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
   if (!q)
      return NULL;

   ctx->begin_query(ctx, q);
   return q;
}
/* This is a helper for hardware bring-up. Don't remove. */
void
util_end_pipestat_query(struct pipe_context *ctx, struct pipe_query *q,
                        FILE *f)
{
   static unsigned counter;
   struct pipe_query_data_pipeline_statistics stats;

   ctx->end_query(ctx, q);
   ctx->get_query_result(ctx, q, true, (void*)&stats);
   ctx->destroy_query(ctx, q);

   fprintf(f,
           "Draw call %u:\n"
           "    ia_vertices    = %"PRIu64"\n"
           "    ia_primitives  = %"PRIu64"\n"
           "    vs_invocations = %"PRIu64"\n"
           "    gs_invocations = %"PRIu64"\n"
           "    gs_primitives  = %"PRIu64"\n"
           "    c_invocations  = %"PRIu64"\n"
           "    c_primitives   = %"PRIu64"\n"
           "    ps_invocations = %"PRIu64"\n"
           "    hs_invocations = %"PRIu64"\n"
           "    ds_invocations = %"PRIu64"\n"
           "    cs_invocations = %"PRIu64"\n",
           p_atomic_inc_return(&counter),
           stats.ia_vertices,
           stats.ia_primitives,
           stats.vs_invocations,
           stats.gs_invocations,
           stats.gs_primitives,
           stats.c_invocations,
           stats.c_primitives,
           stats.ps_invocations,
           stats.hs_invocations,
           stats.ds_invocations,
           stats.cs_invocations);
}
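
/*
 * Usage sketch (illustrative only): during bring-up, a suspicious draw call
 * can be bracketed with these helpers to dump its pipeline statistics:
 *
 *    struct pipe_query *q = util_begin_pipestat_query(ctx);
 *    ctx->draw_vbo(ctx, &draw_info);
 *    if (q)
 *       util_end_pipestat_query(ctx, q, stderr);
 */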
/* This is a helper for hardware bring-up. Don't remove. */
void
util_wait_for_idle(struct pipe_context *ctx)
{
   struct pipe_fence_handle *fence = NULL;

   ctx->flush(ctx, &fence, 0);
   ctx->screen->fence_finish(ctx->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
}
void
util_throttle_init(struct util_throttle *t, uint64_t max_mem_usage)
{
   t->max_mem_usage = max_mem_usage;
}
void
util_throttle_deinit(struct pipe_screen *screen, struct util_throttle *t)
{
   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++)
      screen->fence_reference(screen, &t->ring[i].fence, NULL);
}
static uint64_t
util_get_throttle_total_memory_usage(struct util_throttle *t)
{
   uint64_t total_usage = 0;

   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++)
      total_usage += t->ring[i].mem_usage;
   return total_usage;
}
static void util_dump_throttle_ring(struct util_throttle *t)
{
   printf("Throttle:\n");
   for (unsigned i = 0; i < ARRAY_SIZE(t->ring); i++) {
      printf("  ring[%u]: fence = %s, mem_usage = %"PRIu64"%s%s\n",
             i, t->ring[i].fence ? "yes" : " no",
             t->ring[i].mem_usage,
             t->flush_index == i ? " [flush]" : "",
             t->wait_index == i ? " [wait]" : "");
   }
}
/**
 * Notify util_throttle that the next operation allocates memory.
 * util_throttle tracks memory usage and waits for fences until its tracked
 * memory usage decreases.
 *
 * Example:
 *   util_throttle_memory_usage(..., w*h*d*Bpp);
 *   TexSubImage(..., w, h, d, ...);
 *
 * This means that TexSubImage can't allocate more memory than the maximum
 * limit set during initialization.
 */
void
util_throttle_memory_usage(struct pipe_context *pipe,
                           struct util_throttle *t, uint64_t memory_size)
{
   (void)util_dump_throttle_ring; /* silence warning */

   if (!t->max_mem_usage)
      return;

   struct pipe_screen *screen = pipe->screen;
   struct pipe_fence_handle **fence = NULL;
   unsigned ring_size = ARRAY_SIZE(t->ring);
   uint64_t total = util_get_throttle_total_memory_usage(t);

   /* If there is not enough memory, walk the list of fences and find
    * the latest one that we need to wait for.
    */
   while (t->wait_index != t->flush_index &&
          total && total + memory_size > t->max_mem_usage) {
      assert(t->ring[t->wait_index].fence);

      /* Release an older fence if we need to wait for a newer one. */
      if (fence)
         screen->fence_reference(screen, fence, NULL);

      fence = &t->ring[t->wait_index].fence;
      t->ring[t->wait_index].mem_usage = 0;
      t->wait_index = (t->wait_index + 1) % ring_size;

      total = util_get_throttle_total_memory_usage(t);
   }

   /* Wait for the fence to decrease memory usage. */
   if (fence) {
      screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
      screen->fence_reference(screen, fence, NULL);
   }

   /* Flush and get a fence if we've exhausted memory usage for the current
    * slot.
    */
   if (t->ring[t->flush_index].mem_usage &&
       t->ring[t->flush_index].mem_usage + memory_size >
       t->max_mem_usage / (ring_size / 2)) {
      struct pipe_fence_handle **fence =
         &t->ring[t->flush_index].fence;

      /* Expect that the current flush slot doesn't have a fence yet. */
      assert(!*fence);

      pipe->flush(pipe, fence, PIPE_FLUSH_ASYNC);
      t->flush_index = (t->flush_index + 1) % ring_size;

      /* Vacate the next slot if it's occupied. This should be rare. */
      if (t->flush_index == t->wait_index) {
         struct pipe_fence_handle **fence =
            &t->ring[t->wait_index].fence;

         t->ring[t->wait_index].mem_usage = 0;
         t->wait_index = (t->wait_index + 1) % ring_size;

         assert(*fence);
         screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
         screen->fence_reference(screen, fence, NULL);
      }

      assert(!t->ring[t->flush_index].mem_usage);
      assert(!t->ring[t->flush_index].fence);
   }

   t->ring[t->flush_index].mem_usage += memory_size;
}
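
/*
 * Usage sketch (illustrative only): a texture upload path might throttle
 * before allocating staging memory. "mctx" and its "throttle" member are
 * hypothetical:
 *
 *    util_throttle_init(&mctx->throttle, 512 * 1024 * 1024);  // 512 MB cap
 *    ...
 *    util_throttle_memory_usage(pipe, &mctx->throttle,
 *                               (uint64_t)width * height * depth * bpp);
 *    // ... allocate and fill the staging buffer, then submit the copy ...
 *
 * The ring of fences lets the throttle overlap several in-flight flushes
 * instead of serializing on every allocation.
 */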