src/gallium/drivers/radeon/r600_pipe_common.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_video.h"

/*
 * pipe_context
 */

/* Report whether the GPU has been reset since this context last asked.
 * The kernel only exposes a global reset counter, so a change tells us that
 * some reset happened, but not whether this context was responsible. */
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	unsigned latest = rctx->ws->query_value(rctx->ws,
						RADEON_GPU_RESET_COUNTER);

	if (rctx->gpu_reset_counter == latest)
		return PIPE_NO_RESET;

	rctx->gpu_reset_counter = latest;
	return PIPE_UNKNOWN_CONTEXT_RESET;
}
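
/*
 * Illustrative sketch (not part of the driver): the frontend-facing side of
 * the hook above. A GL frontend implementing KHR_robustness would query it
 * roughly like this (the gl_ctx/report_reset names are hypothetical):
 *
 *	enum pipe_reset_status status =
 *		pctx->get_device_reset_status(pctx);
 *	if (status != PIPE_NO_RESET)
 *		report_reset(gl_ctx, status);	// e.g. GL_UNKNOWN_CONTEXT_RESET_ARB
 *
 * Because the kernel counter is global, this driver can only ever report
 * PIPE_UNKNOWN_CONTEXT_RESET; it cannot distinguish guilty from innocent
 * contexts.
 */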

/* Store (or clear) the state tracker's device-reset notification callback. */
static void r600_set_device_reset_callback(struct pipe_context *ctx,
					   const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}

/* Poll for a device reset and, if one is detected, notify the state tracker
 * through its registered callback. Returns true if a reset was reported. */
bool si_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}
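
/*
 * Illustrative sketch (not part of the driver): how a state tracker might
 * register the callback that si_check_device_reset() invokes. The app_ctx
 * type and on_device_reset handler are hypothetical; only
 * pipe_device_reset_callback and set_device_reset_callback come from the
 * gallium interface used here.
 *
 *	static void on_device_reset(void *data, enum pipe_reset_status status)
 *	{
 *		struct app_ctx *app = data;
 *		app->device_lost = (status != PIPE_NO_RESET);
 *	}
 *
 *	struct pipe_device_reset_callback cb = {
 *		.reset = on_device_reset,
 *		.data = app,
 *	};
 *	pctx->set_device_reset_callback(pctx, &cb);
 *
 * Once registered, si_check_device_reset() calls on_device_reset() whenever
 * r600_get_reset_status() observes a change in the kernel's reset counter.
 */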

/* pipe_context::resource_commit: commit or de-commit physical memory for a
 * range of a sparse buffer. */
static bool r600_resource_commit(struct pipe_context *pctx,
				 struct pipe_resource *resource,
				 unsigned level, struct pipe_box *box,
				 bool commit)
{
	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 * to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 * triggered by some other, earlier operation.
	 */
	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	}
	if (radeon_emitted(ctx->dma_cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(ctx->dma_cs);
	ctx->ws->cs_sync_flush(ctx->gfx_cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}
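
/*
 * Illustrative sketch (not part of the driver): a state tracker committing
 * one page of a sparse buffer through the hook above. The resource "buf",
 * the offset and the 64 KiB page size are assumptions for the example; the
 * range is expressed as a 1D pipe_box.
 *
 *	struct pipe_box box;
 *	u_box_1d(offset, 65536, &box);				// x = offset, width = 64 KiB
 *	pctx->resource_commit(pctx, buf, 0, &box, true);	// commit backing pages
 *	...
 *	pctx->resource_commit(pctx, buf, 0, &box, false);	// de-commit them later
 *
 * Both calls funnel into r600_resource_commit(), which flushes and syncs any
 * command stream still referencing the buffer before asking the winsys to
 * (de)commit the backing memory.
 */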

/* Initialize the common per-context state: transfer slabs, uploaders, the
 * zeroed-memory suballocator, the winsys context and the optional SDMA
 * command stream. Returns false on allocation failure without undoing
 * earlier allocations. */
bool si_common_context_init(struct r600_common_context *rctx,
			    struct si_screen *sscreen,
			    unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &sscreen->pool_transfers);

	rctx->screen = sscreen;
	rctx->ws = sscreen->ws;
	rctx->family = sscreen->info.family;
	rctx->chip_class = sscreen->info.chip_class;

	rctx->b.resource_commit = r600_resource_commit;

	/* The GPU reset counter query is only available with radeon DRM 2.43+. */
	if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 43) {
		rctx->b.get_device_reset_status = r600_get_reset_status;
		rctx->gpu_reset_counter =
			rctx->ws->query_value(rctx->ws,
					      RADEON_GPU_RESET_COUNTER);
	}

	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	si_init_context_texture_functions(rctx);
	si_init_query_functions(rctx);

	/* Scratch buffer for the EOP (end-of-pipe) event bug workaround;
	 * the event writes 16 bytes per render backend. */
	if (rctx->chip_class == CIK ||
	    rctx->chip_class == VI ||
	    rctx->chip_class == GFX9) {
		rctx->eop_bug_scratch = (struct r600_resource*)
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends);
		if (!rctx->eop_bug_scratch)
			return false;
	}

	rctx->allocator_zeroed_memory =
		u_suballocator_create(&rctx->b, sscreen->info.gart_page_size,
				      0, PIPE_USAGE_DEFAULT, 0, true);
	if (!rctx->allocator_zeroed_memory)
		return false;

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM,
						  R600_RESOURCE_FLAG_READ_ONLY);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 R600_RESOURCE_FLAG_32BIT |
						 (sscreen->cpdma_prefetch_writes_memory ?
							0 : R600_RESOURCE_FLAG_READ_ONLY));
	if (!rctx->b.const_uploader)
		return false;

	rctx->cached_gtt_allocator = u_upload_create(&rctx->b, 16 * 1024,
						     0, PIPE_USAGE_STAGING, 0);
	if (!rctx->cached_gtt_allocator)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
						   si_flush_dma_cs,
						   rctx);
	}

	return true;
}
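
/*
 * Illustrative sketch (not part of the driver): how a context-create path
 * would typically pair the two helpers in this file. The my_create_context
 * and my_context names are hypothetical; the error handling simply reacts
 * to the bool returned by si_common_context_init() above.
 *
 *	static struct pipe_context *my_create_context(struct pipe_screen *screen,
 *						      void *priv, unsigned flags)
 *	{
 *		struct si_screen *sscreen = (struct si_screen *)screen;
 *		struct my_context *ctx = CALLOC_STRUCT(my_context);
 *
 *		if (!ctx)
 *			return NULL;
 *		if (!si_common_context_init(&ctx->b, sscreen, flags)) {
 *			si_common_context_cleanup(&ctx->b);
 *			FREE(ctx);
 *			return NULL;
 *		}
 *		return &ctx->b.b;
 *	}
 *
 * CALLOC_STRUCT/FREE come from util/u_memory.h, which this file already
 * includes.
 */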

/* Tear down everything si_common_context_init() created: DCC stat queries,
 * command streams, uploaders, allocators and the remaining references. */
void si_common_context_cleanup(struct r600_common_context *rctx)
{
	unsigned i, j;

	/* Release DCC stats. */
	for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
		assert(!rctx->dcc_stats[i].query_active);

		for (j = 0; j < ARRAY_SIZE(rctx->dcc_stats[i].ps_stats); j++)
			if (rctx->dcc_stats[i].ps_stats[j])
				rctx->b.destroy_query(&rctx->b,
						      rctx->dcc_stats[i].ps_stats[j]);

		r600_texture_reference(&rctx->dcc_stats[i].tex, NULL);
	}

	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	if (rctx->gfx_cs)
		rctx->ws->cs_destroy(rctx->gfx_cs);
	if (rctx->dma_cs)
		rctx->ws->cs_destroy(rctx->dma_cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->b.stream_uploader)
		u_upload_destroy(rctx->b.stream_uploader);
	if (rctx->b.const_uploader)
		u_upload_destroy(rctx->b.const_uploader);
	if (rctx->cached_gtt_allocator)
		u_upload_destroy(rctx->cached_gtt_allocator);

	slab_destroy_child(&rctx->pool_transfers);
	slab_destroy_child(&rctx->pool_transfers_unsync);

	if (rctx->allocator_zeroed_memory) {
		u_suballocator_destroy(rctx->allocator_zeroed_memory);
	}
	rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}