/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_video.h"
34 static enum pipe_reset_status
r600_get_reset_status(struct pipe_context
*ctx
)
36 struct r600_common_context
*rctx
= (struct r600_common_context
*)ctx
;
37 unsigned latest
= rctx
->ws
->query_value(rctx
->ws
,
38 RADEON_GPU_RESET_COUNTER
);
40 if (rctx
->gpu_reset_counter
== latest
)
43 rctx
->gpu_reset_counter
= latest
;
44 return PIPE_UNKNOWN_CONTEXT_RESET
;
47 static void r600_set_device_reset_callback(struct pipe_context
*ctx
,
48 const struct pipe_device_reset_callback
*cb
)
50 struct r600_common_context
*rctx
= (struct r600_common_context
*)ctx
;
53 rctx
->device_reset_callback
= *cb
;
55 memset(&rctx
->device_reset_callback
, 0,
56 sizeof(rctx
->device_reset_callback
));
59 bool si_check_device_reset(struct r600_common_context
*rctx
)
61 enum pipe_reset_status status
;
63 if (!rctx
->device_reset_callback
.reset
)
66 if (!rctx
->b
.get_device_reset_status
)
69 status
= rctx
->b
.get_device_reset_status(&rctx
->b
);
70 if (status
== PIPE_NO_RESET
)
73 rctx
->device_reset_callback
.reset(rctx
->device_reset_callback
.data
, status
);
77 static bool r600_resource_commit(struct pipe_context
*pctx
,
78 struct pipe_resource
*resource
,
79 unsigned level
, struct pipe_box
*box
,
82 struct r600_common_context
*ctx
= (struct r600_common_context
*)pctx
;
83 struct r600_resource
*res
= r600_resource(resource
);
86 * Since buffer commitment changes cannot be pipelined, we need to
87 * (a) flush any pending commands that refer to the buffer we're about
89 * (b) wait for threaded submit to finish, including those that were
90 * triggered by some other, earlier operation.
92 if (radeon_emitted(ctx
->gfx_cs
, ctx
->initial_gfx_cs_size
) &&
93 ctx
->ws
->cs_is_buffer_referenced(ctx
->gfx_cs
,
94 res
->buf
, RADEON_USAGE_READWRITE
)) {
95 si_flush_gfx_cs(ctx
, PIPE_FLUSH_ASYNC
, NULL
);
97 if (radeon_emitted(ctx
->dma_cs
, 0) &&
98 ctx
->ws
->cs_is_buffer_referenced(ctx
->dma_cs
,
99 res
->buf
, RADEON_USAGE_READWRITE
)) {
100 si_flush_dma_cs(ctx
, PIPE_FLUSH_ASYNC
, NULL
);
103 ctx
->ws
->cs_sync_flush(ctx
->dma_cs
);
104 ctx
->ws
->cs_sync_flush(ctx
->gfx_cs
);
106 assert(resource
->target
== PIPE_BUFFER
);
108 return ctx
->ws
->buffer_commit(res
->buf
, box
->x
, box
->width
, commit
);
111 bool si_common_context_init(struct r600_common_context
*rctx
,
112 struct si_screen
*sscreen
,
113 unsigned context_flags
)
115 slab_create_child(&rctx
->pool_transfers
, &sscreen
->pool_transfers
);
116 slab_create_child(&rctx
->pool_transfers_unsync
, &sscreen
->pool_transfers
);
118 rctx
->screen
= sscreen
;
119 rctx
->ws
= sscreen
->ws
;
120 rctx
->family
= sscreen
->info
.family
;
121 rctx
->chip_class
= sscreen
->info
.chip_class
;
123 rctx
->b
.resource_commit
= r600_resource_commit
;
125 if (sscreen
->info
.drm_major
== 2 && sscreen
->info
.drm_minor
>= 43) {
126 rctx
->b
.get_device_reset_status
= r600_get_reset_status
;
127 rctx
->gpu_reset_counter
=
128 rctx
->ws
->query_value(rctx
->ws
,
129 RADEON_GPU_RESET_COUNTER
);
132 rctx
->b
.set_device_reset_callback
= r600_set_device_reset_callback
;
134 si_init_context_texture_functions(rctx
);
135 si_init_query_functions(rctx
);
137 if (rctx
->chip_class
== CIK
||
138 rctx
->chip_class
== VI
||
139 rctx
->chip_class
== GFX9
) {
140 rctx
->eop_bug_scratch
= (struct r600_resource
*)
141 pipe_buffer_create(&sscreen
->b
, 0, PIPE_USAGE_DEFAULT
,
142 16 * sscreen
->info
.num_render_backends
);
143 if (!rctx
->eop_bug_scratch
)
147 rctx
->allocator_zeroed_memory
=
148 u_suballocator_create(&rctx
->b
, sscreen
->info
.gart_page_size
,
149 0, PIPE_USAGE_DEFAULT
, 0, true);
150 if (!rctx
->allocator_zeroed_memory
)
153 rctx
->b
.stream_uploader
= u_upload_create(&rctx
->b
, 1024 * 1024,
154 0, PIPE_USAGE_STREAM
,
155 R600_RESOURCE_FLAG_READ_ONLY
);
156 if (!rctx
->b
.stream_uploader
)
159 rctx
->b
.const_uploader
= u_upload_create(&rctx
->b
, 128 * 1024,
160 0, PIPE_USAGE_DEFAULT
,
161 R600_RESOURCE_FLAG_32BIT
|
162 (sscreen
->cpdma_prefetch_writes_memory
?
163 0 : R600_RESOURCE_FLAG_READ_ONLY
));
164 if (!rctx
->b
.const_uploader
)
167 rctx
->cached_gtt_allocator
= u_upload_create(&rctx
->b
, 16 * 1024,
168 0, PIPE_USAGE_STAGING
, 0);
169 if (!rctx
->cached_gtt_allocator
)
172 rctx
->ctx
= rctx
->ws
->ctx_create(rctx
->ws
);
176 if (sscreen
->info
.num_sdma_rings
&& !(sscreen
->debug_flags
& DBG(NO_ASYNC_DMA
))) {
177 rctx
->dma_cs
= rctx
->ws
->cs_create(rctx
->ctx
, RING_DMA
,
185 void si_common_context_cleanup(struct r600_common_context
*rctx
)
189 /* Release DCC stats. */
190 for (i
= 0; i
< ARRAY_SIZE(rctx
->dcc_stats
); i
++) {
191 assert(!rctx
->dcc_stats
[i
].query_active
);
193 for (j
= 0; j
< ARRAY_SIZE(rctx
->dcc_stats
[i
].ps_stats
); j
++)
194 if (rctx
->dcc_stats
[i
].ps_stats
[j
])
195 rctx
->b
.destroy_query(&rctx
->b
,
196 rctx
->dcc_stats
[i
].ps_stats
[j
]);
198 r600_texture_reference(&rctx
->dcc_stats
[i
].tex
, NULL
);
201 if (rctx
->query_result_shader
)
202 rctx
->b
.delete_compute_state(&rctx
->b
, rctx
->query_result_shader
);
205 rctx
->ws
->cs_destroy(rctx
->gfx_cs
);
207 rctx
->ws
->cs_destroy(rctx
->dma_cs
);
209 rctx
->ws
->ctx_destroy(rctx
->ctx
);
211 if (rctx
->b
.stream_uploader
)
212 u_upload_destroy(rctx
->b
.stream_uploader
);
213 if (rctx
->b
.const_uploader
)
214 u_upload_destroy(rctx
->b
.const_uploader
);
215 if (rctx
->cached_gtt_allocator
)
216 u_upload_destroy(rctx
->cached_gtt_allocator
);
218 slab_destroy_child(&rctx
->pool_transfers
);
219 slab_destroy_child(&rctx
->pool_transfers_unsync
);
221 if (rctx
->allocator_zeroed_memory
) {
222 u_suballocator_destroy(rctx
->allocator_zeroed_memory
);
224 rctx
->ws
->fence_reference(&rctx
->last_gfx_fence
, NULL
);
225 rctx
->ws
->fence_reference(&rctx
->last_sdma_fence
, NULL
);
226 r600_resource_reference(&rctx
->eop_bug_scratch
, NULL
);