/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
25 #include "r600_pipe_common.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29 #include "radeon/radeon_video.h"
35 static enum pipe_reset_status
r600_get_reset_status(struct pipe_context
*ctx
)
37 struct r600_common_context
*rctx
= (struct r600_common_context
*)ctx
;
38 unsigned latest
= rctx
->ws
->query_value(rctx
->ws
,
39 RADEON_GPU_RESET_COUNTER
);
41 if (rctx
->gpu_reset_counter
== latest
)
44 rctx
->gpu_reset_counter
= latest
;
45 return PIPE_UNKNOWN_CONTEXT_RESET
;
48 static void r600_set_device_reset_callback(struct pipe_context
*ctx
,
49 const struct pipe_device_reset_callback
*cb
)
51 struct r600_common_context
*rctx
= (struct r600_common_context
*)ctx
;
54 rctx
->device_reset_callback
= *cb
;
56 memset(&rctx
->device_reset_callback
, 0,
57 sizeof(rctx
->device_reset_callback
));
60 bool si_check_device_reset(struct r600_common_context
*rctx
)
62 enum pipe_reset_status status
;
64 if (!rctx
->device_reset_callback
.reset
)
67 if (!rctx
->b
.get_device_reset_status
)
70 status
= rctx
->b
.get_device_reset_status(&rctx
->b
);
71 if (status
== PIPE_NO_RESET
)
74 rctx
->device_reset_callback
.reset(rctx
->device_reset_callback
.data
, status
);
78 static bool r600_resource_commit(struct pipe_context
*pctx
,
79 struct pipe_resource
*resource
,
80 unsigned level
, struct pipe_box
*box
,
83 struct si_context
*ctx
= (struct si_context
*)pctx
;
84 struct r600_resource
*res
= r600_resource(resource
);
87 * Since buffer commitment changes cannot be pipelined, we need to
88 * (a) flush any pending commands that refer to the buffer we're about
90 * (b) wait for threaded submit to finish, including those that were
91 * triggered by some other, earlier operation.
93 if (radeon_emitted(ctx
->b
.gfx_cs
, ctx
->b
.initial_gfx_cs_size
) &&
94 ctx
->b
.ws
->cs_is_buffer_referenced(ctx
->b
.gfx_cs
,
95 res
->buf
, RADEON_USAGE_READWRITE
)) {
96 si_flush_gfx_cs(ctx
, PIPE_FLUSH_ASYNC
, NULL
);
98 if (radeon_emitted(ctx
->b
.dma_cs
, 0) &&
99 ctx
->b
.ws
->cs_is_buffer_referenced(ctx
->b
.dma_cs
,
100 res
->buf
, RADEON_USAGE_READWRITE
)) {
101 si_flush_dma_cs(ctx
, PIPE_FLUSH_ASYNC
, NULL
);
104 ctx
->b
.ws
->cs_sync_flush(ctx
->b
.dma_cs
);
105 ctx
->b
.ws
->cs_sync_flush(ctx
->b
.gfx_cs
);
107 assert(resource
->target
== PIPE_BUFFER
);
109 return ctx
->b
.ws
->buffer_commit(res
->buf
, box
->x
, box
->width
, commit
);
112 bool si_common_context_init(struct r600_common_context
*rctx
,
113 struct si_screen
*sscreen
,
114 unsigned context_flags
)
116 struct si_context
*sctx
= (struct si_context
*)rctx
;
118 slab_create_child(&rctx
->pool_transfers
, &sscreen
->pool_transfers
);
119 slab_create_child(&rctx
->pool_transfers_unsync
, &sscreen
->pool_transfers
);
121 rctx
->screen
= sscreen
;
122 rctx
->ws
= sscreen
->ws
;
123 rctx
->family
= sscreen
->info
.family
;
124 rctx
->chip_class
= sscreen
->info
.chip_class
;
126 rctx
->b
.resource_commit
= r600_resource_commit
;
128 if (sscreen
->info
.drm_major
== 2 && sscreen
->info
.drm_minor
>= 43) {
129 rctx
->b
.get_device_reset_status
= r600_get_reset_status
;
130 rctx
->gpu_reset_counter
=
131 rctx
->ws
->query_value(rctx
->ws
,
132 RADEON_GPU_RESET_COUNTER
);
135 rctx
->b
.set_device_reset_callback
= r600_set_device_reset_callback
;
137 si_init_context_texture_functions(sctx
);
138 si_init_query_functions(sctx
);
140 if (rctx
->chip_class
== CIK
||
141 rctx
->chip_class
== VI
||
142 rctx
->chip_class
== GFX9
) {
143 rctx
->eop_bug_scratch
= (struct r600_resource
*)
144 pipe_buffer_create(&sscreen
->b
, 0, PIPE_USAGE_DEFAULT
,
145 16 * sscreen
->info
.num_render_backends
);
146 if (!rctx
->eop_bug_scratch
)
150 rctx
->allocator_zeroed_memory
=
151 u_suballocator_create(&rctx
->b
, sscreen
->info
.gart_page_size
,
152 0, PIPE_USAGE_DEFAULT
, 0, true);
153 if (!rctx
->allocator_zeroed_memory
)
156 rctx
->b
.stream_uploader
= u_upload_create(&rctx
->b
, 1024 * 1024,
157 0, PIPE_USAGE_STREAM
,
158 R600_RESOURCE_FLAG_READ_ONLY
);
159 if (!rctx
->b
.stream_uploader
)
162 rctx
->b
.const_uploader
= u_upload_create(&rctx
->b
, 128 * 1024,
163 0, PIPE_USAGE_DEFAULT
,
164 R600_RESOURCE_FLAG_32BIT
|
165 (sscreen
->cpdma_prefetch_writes_memory
?
166 0 : R600_RESOURCE_FLAG_READ_ONLY
));
167 if (!rctx
->b
.const_uploader
)
170 rctx
->cached_gtt_allocator
= u_upload_create(&rctx
->b
, 16 * 1024,
171 0, PIPE_USAGE_STAGING
, 0);
172 if (!rctx
->cached_gtt_allocator
)
175 rctx
->ctx
= rctx
->ws
->ctx_create(rctx
->ws
);
179 if (sscreen
->info
.num_sdma_rings
&& !(sscreen
->debug_flags
& DBG(NO_ASYNC_DMA
))) {
180 rctx
->dma_cs
= rctx
->ws
->cs_create(rctx
->ctx
, RING_DMA
,
181 (void*)si_flush_dma_cs
,
188 void si_common_context_cleanup(struct r600_common_context
*rctx
)
192 /* Release DCC stats. */
193 for (i
= 0; i
< ARRAY_SIZE(rctx
->dcc_stats
); i
++) {
194 assert(!rctx
->dcc_stats
[i
].query_active
);
196 for (j
= 0; j
< ARRAY_SIZE(rctx
->dcc_stats
[i
].ps_stats
); j
++)
197 if (rctx
->dcc_stats
[i
].ps_stats
[j
])
198 rctx
->b
.destroy_query(&rctx
->b
,
199 rctx
->dcc_stats
[i
].ps_stats
[j
]);
201 r600_texture_reference(&rctx
->dcc_stats
[i
].tex
, NULL
);
204 if (rctx
->query_result_shader
)
205 rctx
->b
.delete_compute_state(&rctx
->b
, rctx
->query_result_shader
);
208 rctx
->ws
->cs_destroy(rctx
->gfx_cs
);
210 rctx
->ws
->cs_destroy(rctx
->dma_cs
);
212 rctx
->ws
->ctx_destroy(rctx
->ctx
);
214 if (rctx
->b
.stream_uploader
)
215 u_upload_destroy(rctx
->b
.stream_uploader
);
216 if (rctx
->b
.const_uploader
)
217 u_upload_destroy(rctx
->b
.const_uploader
);
218 if (rctx
->cached_gtt_allocator
)
219 u_upload_destroy(rctx
->cached_gtt_allocator
);
221 slab_destroy_child(&rctx
->pool_transfers
);
222 slab_destroy_child(&rctx
->pool_transfers_unsync
);
224 if (rctx
->allocator_zeroed_memory
) {
225 u_suballocator_destroy(rctx
->allocator_zeroed_memory
);
227 rctx
->ws
->fence_reference(&rctx
->last_gfx_fence
, NULL
);
228 rctx
->ws
->fence_reference(&rctx
->last_sdma_fence
, NULL
);
229 r600_resource_reference(&rctx
->eop_bug_scratch
, NULL
);