r600g: atomize stencil ref state
mesa.git: src/gallium/drivers/r600/r600_buffer.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 */
#include "r600_pipe.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"

/* Release the winsys buffer storage and free the CPU-side resource wrapper. */
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
					       struct pipe_resource *resource,
					       unsigned level,
					       unsigned usage,
					       const struct pipe_box *box)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	assert(box->x + box->width <= resource->width0);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->transfer.data = NULL;
	transfer->staging = NULL;
	transfer->offset = 0;

	/* Note that the strides are zero. That is fine for buffers,
	 * but not for 2D and higher-dimensional textures.
	 */
	return &transfer->transfer;
}

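/* Usage sketch (hypothetical caller, not driver code): state trackers
 * normally reach get_transfer/transfer_map through pipe_buffer_map()
 * from util/u_inlines.h. Assuming a context "ctx", a buffer "buf" from
 * this screen, and user data "data"/"size":
 *
 *	struct pipe_transfer *xfer;
 *	void *map = pipe_buffer_map(ctx, buf, PIPE_TRANSFER_WRITE, &xfer);
 *	if (map) {
 *		memcpy(map, data, size);
 *		pipe_buffer_unmap(ctx, xfer);
 *	}
 */
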
/* Flag any bound constant buffer that aliases "rbuffer" as dirty, for
 * every shader stage, so the binding is re-emitted with the new storage. */
static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
					      struct r600_resource *rbuffer)
{
	unsigned shader;

	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}
}

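/* Illustration (not driver code): u_bit_scan() from util/u_math.h returns
 * the index of the lowest set bit and clears it in the mask, so the loop
 * above visits exactly the enabled slots. For example, with
 * enabled_mask = 0xA (binary 1010) it yields i = 1, then i = 3, after
 * which the mask is zero and the loop terminates.
 */
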
/*
 * Map a buffer for CPU access. When the whole resource is being discarded
 * and the GPU still uses the buffer, the storage is reallocated ("renamed")
 * instead of stalling, and every binding point that referenced the old
 * storage is flagged dirty so it is re-emitted with the new buffer.
 */
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
				      struct pipe_transfer *transfer)
{
	struct r600_resource *rbuffer = r600_resource(transfer->resource);
	struct r600_context *rctx = (struct r600_context*)pipe;
	uint8_t *data;

	if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(transfer->usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
					   rbuffer->b.b.bind, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->num_so_targets; i++) {
				if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b) {
					r600_context_streamout_end(rctx);
					rctx->streamout_start = TRUE;
					rctx->streamout_append_bitmask = ~0;
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, rbuffer);
		}
	}
#if 0 /* this is broken (see Bug 53130) */
	else if ((transfer->usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 rctx->screen->has_streamout &&
		 /* The buffer range must be aligned to 4. */
		 transfer->box.x % 4 == 0 && transfer->box.width % 4 == 0) {
		assert(transfer->usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

			rtransfer->staging = (struct r600_resource*)
				pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
						   PIPE_USAGE_STAGING, transfer->box.width);
			return rctx->ws->buffer_map(rtransfer->staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
		}
	}
#endif

	data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
	if (!data)
		return NULL;

	return (uint8_t*)data + transfer->box.x;
}

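/* Sketch (hypothetical caller): the rename path above is what makes
 * per-frame streaming cheap. A state tracker rewriting a whole VBO:
 *
 *	map = pipe_buffer_map(ctx, vbo,
 *			      PIPE_TRANSFER_WRITE |
 *			      PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE, &xfer);
 *
 * never stalls on the GPU: if the old storage is still referenced by a
 * command stream, fresh storage is swapped in and rebound instead.
 */
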
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
				       struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	/* rtransfer->staging is only set by the (currently disabled)
	 * DISCARD_RANGE path in transfer_map above. */
	if (rtransfer->staging) {
		struct pipe_box box;
		u_box_1d(0, transfer->box.width, &box);

		/* Copy the staging buffer into the original one. */
		r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
				 &rtransfer->staging->b.b, &box);
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}
}

static void r600_transfer_destroy(struct pipe_context *ctx,
				  struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	util_slab_free(&rctx->pool_transfers, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
	u_default_resource_get_handle,		/* get_handle */
	r600_buffer_destroy,			/* resource_destroy */
	r600_get_transfer,			/* get_transfer */
	r600_transfer_destroy,			/* transfer_destroy */
	r600_buffer_transfer_map,		/* transfer_map */
	NULL,					/* transfer_flush_region */
	r600_buffer_transfer_unmap,		/* transfer_unmap */
	NULL					/* transfer_inline_write */
};

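/* Note (assumption about the wiring): r600 installs the generic
 * u_resource helpers (u_transfer_map_vtbl and friends from
 * util/u_transfer.h) as its pipe_context/pipe_screen entry points;
 * those helpers look up this vtbl on the resource and dispatch through
 * it, which is how buffers and textures share the same context hooks.
 */
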
bool r600_init_resource(struct r600_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			unsigned bind, unsigned usage)
{
	uint32_t initial_domain, domains;

	/* Staging resources participate in transfers and blits only
	 * and are used for uploads and downloads from regular
	 * resources. We generate them internally for some transfers.
	 */
	if (usage == PIPE_USAGE_STAGING) {
		domains = RADEON_DOMAIN_GTT;
		initial_domain = RADEON_DOMAIN_GTT;
	} else {
		domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;

		switch(usage) {
		case PIPE_USAGE_DYNAMIC:
		case PIPE_USAGE_STREAM:
		case PIPE_USAGE_STAGING:
			initial_domain = RADEON_DOMAIN_GTT;
			break;
		case PIPE_USAGE_DEFAULT:
		case PIPE_USAGE_STATIC:
		case PIPE_USAGE_IMMUTABLE:
		default:
			initial_domain = RADEON_DOMAIN_VRAM;
			break;
		}
	}

	res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
	if (!res->buf) {
		return false;
	}

	res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
	res->domains = domains;
	return true;
}

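/* Example (sketch, values are illustrative): a static vertex buffer is
 * placed in VRAM, while staging usage pins it to GTT (system memory):
 *
 *	r600_init_resource(rscreen, res, 65536, 4096,
 *			   PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STATIC);
 *
 * gives initial_domain = RADEON_DOMAIN_VRAM, whereas PIPE_USAGE_STAGING
 * forces both domains and initial_domain to RADEON_DOMAIN_GTT.
 */
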
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);
	if (rbuffer == NULL)
		return NULL;

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;

	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}
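
/* Usage sketch: buffer templates reach this function through the screen's
 * resource_create hook; helpers like pipe_buffer_create() from
 * util/u_inlines.h (already used by the staging path above) fill in the
 * template, e.g.:
 *
 *	struct pipe_resource *buf =
 *		pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *				   PIPE_USAGE_STATIC, 4096);
 */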