nouveau: speed up user buffers.
[mesa.git] / src / mesa / drivers / dri / nouveau_winsys / nouveau_bo.c
1 /*
2 * Copyright 2007 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_drmif.h"
#include "nouveau_dma.h"
#include "nouveau_local.h"
31
32 static void
33 nouveau_mem_free(struct nouveau_device *dev, struct drm_nouveau_mem_alloc *ma,
34 void **map)
35 {
36 struct nouveau_device_priv *nvdev = nouveau_device(dev);
37 struct drm_nouveau_mem_free mf;
38
39 if (map && *map) {
40 drmUnmap(*map, ma->size);
41 *map = NULL;
42 }
43
44 if (ma->size) {
45 mf.offset = ma->offset;
46 mf.flags = ma->flags;
47 drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE,
48 &mf, sizeof(mf));
49 ma->size = 0;
50 }
51 }
52
53 static int
54 nouveau_mem_alloc(struct nouveau_device *dev, unsigned size, unsigned align,
55 uint32_t flags, struct drm_nouveau_mem_alloc *ma, void **map)
56 {
57 struct nouveau_device_priv *nvdev = nouveau_device(dev);
58 int ret;
59
60 ma->alignment = align;
61 ma->size = size;
62 ma->flags = flags;
63 if (map)
64 ma->flags |= NOUVEAU_MEM_MAPPED;
65 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC, ma,
66 sizeof(struct drm_nouveau_mem_alloc));
67 if (ret)
68 return ret;
69
70 if (map) {
71 ret = drmMap(nvdev->fd, ma->map_handle, ma->size, map);
72 if (ret) {
73 *map = NULL;
74 nouveau_mem_free(dev, ma, map);
75 return ret;
76 }
77 }
78
79 return 0;
80 }
81
82 static int
83 nouveau_bo_realloc_gpu(struct nouveau_bo_priv *nvbo, uint32_t flags, int size)
84 {
85 int ret;
86
87 if (nvbo->drm.size && nvbo->drm.size != size) {
88 nouveau_mem_free(nvbo->base.device, &nvbo->drm, &nvbo->map);
89 }
90
91 if (size && !nvbo->drm.size) {
92 if (flags) {
93 nvbo->drm.flags = 0;
94 if (flags & NOUVEAU_BO_VRAM)
95 nvbo->drm.flags |= NOUVEAU_MEM_FB;
96 if (flags & NOUVEAU_BO_GART)
97 nvbo->drm.flags |= (NOUVEAU_MEM_AGP |
98 NOUVEAU_MEM_PCI);
99 nvbo->drm.flags |= NOUVEAU_MEM_MAPPED;
100 }
101
102 ret = nouveau_mem_alloc(nvbo->base.device, size,
103 nvbo->drm.alignment, nvbo->drm.flags,
104 &nvbo->drm, &nvbo->map);
105 if (ret) {
106 assert(0);
107 }
108 }
109
110 return 0;
111 }
112
/* Fence-signal callback for staging-area suballocations: the fence stored
 * in r->priv has signalled, so drop our fence reference and return the
 * suballocation to the heap. */
static void
nouveau_bo_tmp_del(void *priv)
{
	struct nouveau_resource *r = priv;

	nouveau_fence_del((struct nouveau_fence **)&r->priv);
	nouveau_resource_free(&r);
}
121
/* Suballocate 'size' bytes from the device's GART staging area, tagged
 * with 'fence' (or a fresh fence on the channel if none given).  The
 * suballocation is released automatically when the fence signals, via
 * nouveau_bo_tmp_del.
 *
 * NOTE(review): if the heap is full this loops flushing fences until
 * space frees up — presumably bounded by in-flight work, but there is no
 * timeout.  Also, assert(ref) is the only failure handling for fence
 * creation; with NDEBUG a NULL ref would propagate — TODO confirm
 * nouveau_fence_new cannot fail here. */
static struct nouveau_resource *
nouveau_bo_tmp(struct nouveau_channel *chan, unsigned size,
	       struct nouveau_fence *fence)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_resource *r = NULL;
	struct nouveau_fence *ref = NULL;

	if (fence)
		nouveau_fence_ref(fence, &ref);
	else
		nouveau_fence_new(chan, &ref);
	assert(ref);

	/* Wait for older suballocations to retire until ours fits. */
	while (nouveau_resource_alloc(nvdev->sa_heap, size, ref, &r)) {
		nouveau_fence_flush(chan);
	}
	nouveau_fence_signal_cb(ref, nouveau_bo_tmp_del, r);

	return r;
}
143
144 int
145 nouveau_bo_init(struct nouveau_device *dev)
146 {
147 struct nouveau_device_priv *nvdev = nouveau_device(dev);
148 int ret;
149
150 ret = nouveau_mem_alloc(dev, 128*1024, 0, NOUVEAU_MEM_AGP |
151 NOUVEAU_MEM_PCI, &nvdev->sa, &nvdev->sa_map);
152 if (ret)
153 return ret;
154
155 ret = nouveau_resource_init(&nvdev->sa_heap, 0, nvdev->sa.size);
156 if (ret) {
157 nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
158 return ret;
159 }
160
161 return 0;
162 }
163
/* Tear down the staging area allocated by nouveau_bo_init.
 * NOTE(review): the sa_heap resource list is not destroyed here —
 * presumably freed elsewhere or leaked at device teardown; verify. */
void
nouveau_bo_takedown(struct nouveau_device *dev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);

	nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
}
171
172 int
173 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
174 int size, struct nouveau_bo **bo)
175 {
176 struct nouveau_bo_priv *nvbo;
177 int ret;
178
179 if (!dev || !bo || *bo)
180 return -EINVAL;
181
182 nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
183 if (!nvbo)
184 return -ENOMEM;
185 nvbo->base.device = dev;
186 nvbo->drm.alignment = align;
187
188 if (flags & NOUVEAU_BO_PIN) {
189 ret = nouveau_bo_realloc_gpu(nvbo, flags, size);
190 if (ret) {
191 free(nvbo);
192 return ret;
193 }
194 } else {
195 nvbo->sysmem = malloc(size);
196 if (!nvbo->sysmem) {
197 free(nvbo);
198 return -ENOMEM;
199 }
200 }
201
202 nvbo->base.size = size;
203 nvbo->base.offset = nvbo->drm.offset;
204 nvbo->base.handle = bo_to_ptr(nvbo);
205 nvbo->refcount = 1;
206 *bo = &nvbo->base;
207 return 0;
208 }
209
210 int
211 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
212 struct nouveau_bo **bo)
213 {
214 struct nouveau_bo_priv *nvbo;
215
216 if (!dev || !bo || *bo)
217 return -EINVAL;
218
219 nvbo = calloc(1, sizeof(*nvbo));
220 if (!nvbo)
221 return -ENOMEM;
222 nvbo->base.device = dev;
223
224 nvbo->sysmem = ptr;
225 nvbo->user = 1;
226
227 nvbo->base.size = size;
228 nvbo->base.offset = nvbo->drm.offset;
229 nvbo->base.handle = bo_to_ptr(nvbo);
230 nvbo->refcount = 1;
231 *bo = &nvbo->base;
232 return 0;
233 }
234
/* Take an additional reference on the buffer object behind 'handle'.
 * NOTE(review): handle is assumed to be a pointer previously produced by
 * bo_to_ptr(); it is dereferenced without validation, so a stale or
 * foreign handle is undefined behaviour — confirm callers guarantee
 * validity. */
int
nouveau_bo_ref(struct nouveau_device *dev, uint64_t handle,
	       struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo = ptr_to_bo(handle);

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo->refcount++;
	*bo = &nvbo->base;
	return 0;
}
248
249 int
250 nouveau_bo_resize(struct nouveau_bo *bo, int size)
251 {
252 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
253 int ret;
254
255 if (!nvbo || nvbo->user)
256 return -EINVAL;
257
258 if (nvbo->sysmem) {
259 nvbo->sysmem = realloc(nvbo->sysmem, size);
260 if (!nvbo->sysmem)
261 return -ENOMEM;
262 } else {
263 ret = nouveau_bo_realloc_gpu(nvbo, 0, size);
264 if (ret)
265 return ret;
266 }
267
268 nvbo->base.size = size;
269 return 0;
270 }
271
272 void
273 nouveau_bo_del(struct nouveau_bo **bo)
274 {
275 struct nouveau_bo_priv *nvbo;
276
277 if (!bo || !*bo)
278 return;
279 nvbo = nouveau_bo(*bo);
280 *bo = NULL;
281
282 if (--nvbo->refcount)
283 return;
284
285 if (nvbo->fence)
286 nouveau_fence_wait(&nvbo->fence);
287
288 nouveau_bo_realloc_gpu(nvbo, 0, 0);
289 if (nvbo->sysmem && !nvbo->user)
290 free(nvbo->sysmem);
291 free(nvbo);
292 }
293
294 int
295 nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
296 {
297 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
298
299 if (!nvbo)
300 return -EINVAL;
301
302 if (nvbo->fence)
303 nouveau_fence_wait(&nvbo->fence);
304
305 if (nvbo->sysmem)
306 bo->map = nvbo->sysmem;
307 else
308 bo->map = nvbo->map;
309 return 0;
310 }
311
/* End CPU access.  Purely bookkeeping — the underlying drm mapping (or
 * sysmem shadow) stays alive; only the published pointer is cleared. */
void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
	bo->map = NULL;
}
317
/* Copy the sysmem shadow into the GPU-visible mapping, waiting for any
 * pending GPU access first.  Callers must ensure both nvbo->map and
 * nvbo->sysmem are valid (validate_bo allocates the map just before
 * calling this). */
static int
nouveau_bo_upload(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->fence)
		nouveau_fence_wait(&nvbo->fence);
	memcpy(nvbo->map, nvbo->sysmem, nvbo->drm.size);
	return 0;
}
326
327 static int
328 nouveau_bo_validate_user(struct nouveau_channel *chan, struct nouveau_bo *bo,
329 struct nouveau_fence *fence, uint32_t flags)
330 {
331 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
332 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
333 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
334 struct nouveau_resource *r;
335
336 if (nvchan->user_charge + bo->size > nvdev->sa.size)
337 return 1;
338 nvchan->user_charge += bo->size;
339
340 if (!(flags & NOUVEAU_BO_GART))
341 return 1;
342
343 r = nouveau_bo_tmp(chan, bo->size, fence);
344 if (!r)
345 return 1;
346
347 memcpy(nvdev->sa_map + r->start, nvbo->sysmem, bo->size);
348
349 nvbo->base.offset = nvdev->sa.offset + r->start;
350 nvbo->base.flags = NOUVEAU_BO_GART;
351 return 0;
352 }
353
354 static int
355 nouveau_bo_validate_bo(struct nouveau_channel *chan, struct nouveau_bo *bo,
356 struct nouveau_fence *fence, uint32_t flags)
357 {
358 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
359
360 if (!nvbo->drm.size) {
361 nouveau_bo_realloc_gpu(nvbo, flags, nvbo->base.size);
362 nouveau_bo_upload(nvbo);
363 if (!nvbo->user) {
364 free(nvbo->sysmem);
365 nvbo->sysmem = NULL;
366 }
367 } else
368 if (nvbo->user) {
369 nouveau_bo_upload(nvbo);
370 }
371
372 nvbo->base.offset = nvbo->drm.offset;
373 if (nvbo->drm.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
374 nvbo->base.flags = NOUVEAU_BO_GART;
375 else
376 nvbo->base.flags = NOUVEAU_BO_VRAM;
377
378 return 0;
379 }
380
381 int
382 nouveau_bo_validate(struct nouveau_channel *chan, struct nouveau_bo *bo,
383 struct nouveau_fence *fence, uint32_t flags)
384 {
385 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
386 int ret;
387
388 if (nvbo->user) {
389 ret = nouveau_bo_validate_user(chan, bo, fence, flags);
390 if (ret) {
391 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
392 if (ret)
393 return ret;
394 }
395 } else {
396 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
397 if (ret)
398 return ret;
399 }
400
401 if (nvbo->fence)
402 nouveau_fence_del(&nvbo->fence);
403 nouveau_fence_ref(fence, &nvbo->fence);
404
405 return 0;
406 }
407