radeonsi: update copyrights
[mesa.git] / src/gallium/drivers/radeon/r600_pipe_common.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_video.h"

/*
 * pipe_context
 */

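/* Query whether a GPU reset has occurred since this context last checked.
 * The kernel only exposes a global reset counter, so when it changes we
 * cannot tell which context caused the reset and report
 * PIPE_UNKNOWN_CONTEXT_RESET.
 */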
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        unsigned latest = rctx->ws->query_value(rctx->ws,
                                                RADEON_GPU_RESET_COUNTER);

        if (rctx->gpu_reset_counter == latest)
                return PIPE_NO_RESET;

        rctx->gpu_reset_counter = latest;
        return PIPE_UNKNOWN_CONTEXT_RESET;
}

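/* Store or clear the application-provided callback that is invoked when a
 * device reset is detected.
 */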
static void r600_set_device_reset_callback(struct pipe_context *ctx,
                                           const struct pipe_device_reset_callback *cb)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;

        if (cb)
                rctx->device_reset_callback = *cb;
        else
                memset(&rctx->device_reset_callback, 0,
                       sizeof(rctx->device_reset_callback));
}

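/* Poll the reset status and, if a reset has occurred, notify the application
 * through its reset callback. Returns true if a reset was reported.
 */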
bool si_check_device_reset(struct r600_common_context *rctx)
{
        enum pipe_reset_status status;

        if (!rctx->device_reset_callback.reset)
                return false;

        if (!rctx->b.get_device_reset_status)
                return false;

        status = rctx->b.get_device_reset_status(&rctx->b);
        if (status == PIPE_NO_RESET)
                return false;

        rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
        return true;
}

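/* pipe_context::resource_commit for sparse buffers: commit or de-commit the
 * physical memory backing the given range of the buffer.
 */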
static bool r600_resource_commit(struct pipe_context *pctx,
                                 struct pipe_resource *resource,
                                 unsigned level, struct pipe_box *box,
                                 bool commit)
{
        struct si_context *ctx = (struct si_context *)pctx;
        struct r600_resource *res = r600_resource(resource);

        /*
         * Since buffer commitment changes cannot be pipelined, we need to
         * (a) flush any pending commands that refer to the buffer we're about
         * to change, and
         * (b) wait for threaded submit to finish, including those that were
         * triggered by some other, earlier operation.
         */
        if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
            ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
                                               res->buf, RADEON_USAGE_READWRITE)) {
                si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
        if (radeon_emitted(ctx->b.dma_cs, 0) &&
            ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
                                               res->buf, RADEON_USAGE_READWRITE)) {
                si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }

        ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
        ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);

        assert(resource->target == PIPE_BUFFER);

        return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
}

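/* Initialize the context state shared by all radeonsi contexts: transfer
 * pools, device information, GPU reset handling, upload buffers and
 * suballocators, and the winsys submission context.
 */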
bool si_common_context_init(struct r600_common_context *rctx,
                            struct si_screen *sscreen,
                            unsigned context_flags)
{
        struct si_context *sctx = (struct si_context*)rctx;

        slab_create_child(&rctx->pool_transfers, &sscreen->pool_transfers);
        slab_create_child(&rctx->pool_transfers_unsync, &sscreen->pool_transfers);

        rctx->screen = sscreen;
        rctx->ws = sscreen->ws;
        rctx->family = sscreen->info.family;
        rctx->chip_class = sscreen->info.chip_class;

        rctx->b.resource_commit = r600_resource_commit;

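        /* The GPU reset counter query is only available on the radeon kernel
         * driver (DRM major version 2) starting with version 2.43.
         */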
        if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 43) {
                rctx->b.get_device_reset_status = r600_get_reset_status;
                rctx->gpu_reset_counter =
                        rctx->ws->query_value(rctx->ws,
                                              RADEON_GPU_RESET_COUNTER);
        }

        rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

        si_init_context_texture_functions(sctx);
        si_init_query_functions(sctx);

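        /* Scratch memory for the EOP (end-of-pipe) hardware bug workaround;
         * the event writes use 16 bytes per render backend.
         */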
        if (rctx->chip_class == CIK ||
            rctx->chip_class == VI ||
            rctx->chip_class == GFX9) {
                rctx->eop_bug_scratch = (struct r600_resource*)
                        pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
                                           16 * sscreen->info.num_render_backends);
                if (!rctx->eop_bug_scratch)
                        return false;
        }

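        /* Suballocator for allocations that must start out zero-initialized. */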
        rctx->allocator_zeroed_memory =
                u_suballocator_create(&rctx->b, sscreen->info.gart_page_size,
                                      0, PIPE_USAGE_DEFAULT, 0, true);
        if (!rctx->allocator_zeroed_memory)
                return false;

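        /* Upload managers: stream_uploader holds short-lived streamed data,
         * const_uploader holds constant buffers (32-bit addressable, and
         * read-only unless CP DMA prefetch writes memory), and
         * cached_gtt_allocator provides cached GTT staging memory.
         */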
        rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
                                                  0, PIPE_USAGE_STREAM,
                                                  R600_RESOURCE_FLAG_READ_ONLY);
        if (!rctx->b.stream_uploader)
                return false;

        rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
                                                 0, PIPE_USAGE_DEFAULT,
                                                 R600_RESOURCE_FLAG_32BIT |
                                                 (sscreen->cpdma_prefetch_writes_memory ?
                                                          0 : R600_RESOURCE_FLAG_READ_ONLY));
        if (!rctx->b.const_uploader)
                return false;

        rctx->cached_gtt_allocator = u_upload_create(&rctx->b, 16 * 1024,
                                                     0, PIPE_USAGE_STAGING, 0);
        if (!rctx->cached_gtt_allocator)
                return false;

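        /* Winsys submission context; command streams (such as the DMA CS
         * below) are created on top of it.
         */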
        rctx->ctx = rctx->ws->ctx_create(rctx->ws);
        if (!rctx->ctx)
                return false;

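        /* Create the asynchronous SDMA command stream if the device has SDMA
         * rings and async DMA hasn't been disabled with DBG(NO_ASYNC_DMA).
         */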
        if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
                rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
                                                   (void*)si_flush_dma_cs,
                                                   rctx);
        }

        return true;
}

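/* Release everything created by si_common_context_init, along with
 * per-context query and DCC statistics state.
 */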
void si_common_context_cleanup(struct r600_common_context *rctx)
{
        unsigned i, j;

        /* Release DCC stats. */
        for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
                assert(!rctx->dcc_stats[i].query_active);

                for (j = 0; j < ARRAY_SIZE(rctx->dcc_stats[i].ps_stats); j++)
                        if (rctx->dcc_stats[i].ps_stats[j])
                                rctx->b.destroy_query(&rctx->b,
                                                      rctx->dcc_stats[i].ps_stats[j]);

                r600_texture_reference(&rctx->dcc_stats[i].tex, NULL);
        }

        if (rctx->query_result_shader)
                rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

        if (rctx->gfx_cs)
                rctx->ws->cs_destroy(rctx->gfx_cs);
        if (rctx->dma_cs)
                rctx->ws->cs_destroy(rctx->dma_cs);
        if (rctx->ctx)
                rctx->ws->ctx_destroy(rctx->ctx);

        if (rctx->b.stream_uploader)
                u_upload_destroy(rctx->b.stream_uploader);
        if (rctx->b.const_uploader)
                u_upload_destroy(rctx->b.const_uploader);
        if (rctx->cached_gtt_allocator)
                u_upload_destroy(rctx->cached_gtt_allocator);

        slab_destroy_child(&rctx->pool_transfers);
        slab_destroy_child(&rctx->pool_transfers_unsync);

        if (rctx->allocator_zeroed_memory) {
                u_suballocator_destroy(rctx->allocator_zeroed_memory);
        }
        rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
        rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
        r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}