gallium: remove deprecated PIPE_TRANSFER_DISCARD
[mesa.git] / src/gallium/drivers/r600/r600_buffer.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 */
#include <byteswap.h>

#include "pipe/p_screen.h"
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "r600.h"
#include "r600_pipe.h"

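/* Free a buffer resource: drop the winsys buffer reference and return the
 * wrapper struct to the screen's buffer slab pool. */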
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer = r600_resource(buf);

	pb_reference(&rbuffer->buf, NULL);
	util_slab_free(&rscreen->pool_buffers, rbuffer);
}

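/* Allocate a transfer object from the per-context slab pool and record the
 * mapping parameters. No mapping is done here yet. */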
static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
					       struct pipe_resource *resource,
					       unsigned level,
					       unsigned usage,
					       const struct pipe_box *box)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;
	transfer->data = NULL;

	/* Note: the strides are left at zero. This is fine for buffers,
	 * but not for 2D (and higher) textures.
	 */
	return transfer;
}

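/* Return a CPU pointer to the requested range. User-pointer buffers are
 * addressed directly; GPU buffers are mapped through the winsys. */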
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
				      struct pipe_transfer *transfer)
{
	struct r600_resource *rbuffer = r600_resource(transfer->resource);
	struct r600_pipe_context *rctx = (struct r600_pipe_context*)pipe;
	uint8_t *data;

	if (rbuffer->b.user_ptr)
		return (uint8_t*)rbuffer->b.user_ptr + transfer->box.x;

	data = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs, transfer->usage);
	if (!data)
		return NULL;

	return (uint8_t*)data + transfer->box.x;
}

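/* Unmap a previously mapped buffer. User-pointer buffers were never mapped
 * through the winsys, so there is nothing to do for them. */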
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
				       struct pipe_transfer *transfer)
{
	struct r600_resource *rbuffer = r600_resource(transfer->resource);
	struct r600_pipe_context *rctx = (struct r600_pipe_context*)pipe;

	if (rbuffer->b.user_ptr)
		return;

	rctx->ws->buffer_unmap(rbuffer->buf);
}

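/* No explicit flushing is needed for buffer mappings here, so this is a no-op. */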
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
					      struct pipe_transfer *transfer,
					      const struct pipe_box *box)
{
}

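/* Return a transfer object to the per-context slab pool. */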
static void r600_transfer_destroy(struct pipe_context *ctx,
				  struct pipe_transfer *transfer)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx;
	util_slab_free(&rctx->pool_transfers, transfer);
}

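/* Write a block of data into a buffer without an explicit map/unmap from the
 * caller. The destination range is mapped with PIPE_TRANSFER_DISCARD_RANGE,
 * telling the winsys that its previous contents may be thrown away. */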
static void r600_buffer_transfer_inline_write(struct pipe_context *pipe,
					      struct pipe_resource *resource,
					      unsigned level,
					      unsigned usage,
					      const struct pipe_box *box,
					      const void *data,
					      unsigned stride,
					      unsigned layer_stride)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context*)pipe;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *map = NULL;

	assert(rbuffer->b.user_ptr == NULL);

	map = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs,
				   PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);

	memcpy(map + box->x, data, box->width);

	rctx->ws->buffer_unmap(rbuffer->buf);
}

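/* Callbacks for buffer resources, dispatched through the u_resource vtable. */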
static const struct u_resource_vtbl r600_buffer_vtbl =
{
	u_default_resource_get_handle,		/* get_handle */
	r600_buffer_destroy,			/* resource_destroy */
	r600_get_transfer,			/* get_transfer */
	r600_transfer_destroy,			/* transfer_destroy */
	r600_buffer_transfer_map,		/* transfer_map */
	r600_buffer_transfer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,		/* transfer_unmap */
	r600_buffer_transfer_inline_write	/* transfer_inline_write */
};

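/* Allocate the winsys buffer that backs a resource, choosing the initial
 * memory domain from the usage hint. Returns false on allocation failure. */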
bool r600_init_resource(struct r600_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			unsigned bind, unsigned usage)
{
	uint32_t initial_domain, domains;

	/* Staging resources participate in transfers and blits only
	 * and are used for uploads and downloads from regular
	 * resources. We generate them internally for some transfers.
	 */
	if (usage == PIPE_USAGE_STAGING) {
		domains = RADEON_DOMAIN_GTT;
		initial_domain = RADEON_DOMAIN_GTT;
	} else {
		domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;

		switch(usage) {
		case PIPE_USAGE_DYNAMIC:
		case PIPE_USAGE_STREAM:
		case PIPE_USAGE_STAGING:
			initial_domain = RADEON_DOMAIN_GTT;
			break;
		case PIPE_USAGE_DEFAULT:
		case PIPE_USAGE_STATIC:
		case PIPE_USAGE_IMMUTABLE:
		default:
			initial_domain = RADEON_DOMAIN_VRAM;
			break;
		}
	}

	res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
	if (!res->buf) {
		return false;
	}

	res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
	res->domains = domains;
	return true;
}

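/* Create a GPU-backed buffer from a template. The wrapper comes from the
 * screen's slab pool; the storage comes from r600_init_resource. */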
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer;
	/* XXX We probably want a different alignment for buffers and textures. */
	unsigned alignment = 4096;

	rbuffer = util_slab_alloc(&rscreen->pool_buffers);

	rbuffer->b.b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.b.reference, 1);
	rbuffer->b.b.b.screen = screen;
	rbuffer->b.b.vtbl = &r600_buffer_vtbl;
	rbuffer->b.user_ptr = NULL;

	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
		util_slab_free(&rscreen->pool_buffers, rbuffer);
		return NULL;
	}
	return &rbuffer->b.b.b;
}

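/* Wrap application-owned memory in a buffer resource. No winsys buffer is
 * allocated; rbuffer->buf stays NULL and user_ptr points at the user data. */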
struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
					      void *ptr, unsigned bytes,
					      unsigned bind)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer;

	rbuffer = util_slab_alloc(&rscreen->pool_buffers);

	pipe_reference_init(&rbuffer->b.b.b.reference, 1);
	rbuffer->b.b.vtbl = &r600_buffer_vtbl;
	rbuffer->b.b.b.screen = screen;
	rbuffer->b.b.b.target = PIPE_BUFFER;
	rbuffer->b.b.b.format = PIPE_FORMAT_R8_UNORM;
	rbuffer->b.b.b.usage = PIPE_USAGE_IMMUTABLE;
	rbuffer->b.b.b.bind = bind;
	rbuffer->b.b.b.width0 = bytes;
	rbuffer->b.b.b.height0 = 1;
	rbuffer->b.b.b.depth0 = 1;
	rbuffer->b.b.b.array_size = 1;
	rbuffer->b.b.b.flags = 0;
	rbuffer->b.user_ptr = ptr;
	rbuffer->buf = NULL;
	return &rbuffer->b.b.b;
}

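/* Copy a user-pointer index buffer into the upload manager; ib->buffer and
 * ib->offset are redirected to the uploaded copy. */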
void r600_upload_index_buffer(struct r600_pipe_context *rctx,
			      struct pipe_index_buffer *ib, unsigned count)
{
	struct r600_resource *rbuffer = r600_resource(ib->buffer);

	u_upload_data(rctx->vbuf_mgr->uploader, 0, count * ib->index_size,
		      rbuffer->b.user_ptr, &ib->offset, &ib->buffer);
}

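/* Upload a user-pointer constant buffer through the upload manager, byte-
 * swapping the dwords into a temporary copy first on big-endian hosts.
 * *rbuffer and *const_offset are redirected to the uploaded copy; buffers
 * that already have GPU storage are left alone with offset 0. */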
void r600_upload_const_buffer(struct r600_pipe_context *rctx, struct r600_resource **rbuffer,
			      uint32_t *const_offset)
{
	if ((*rbuffer)->b.user_ptr) {
		uint8_t *ptr = (*rbuffer)->b.user_ptr;
		unsigned size = (*rbuffer)->b.b.b.width0;

		*rbuffer = NULL;

		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = bswap_32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->vbuf_mgr->uploader, 0, size, tmpPtr, const_offset,
				      (struct pipe_resource**)rbuffer);

			free(tmpPtr);
		} else {
			u_upload_data(rctx->vbuf_mgr->uploader, 0, size, ptr, const_offset,
				      (struct pipe_resource**)rbuffer);
		}
	} else {
		*const_offset = 0;
	}
}