#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

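/* A buffer's usage score stops being adjusted once it passes SCORE_MIN or
 * SCORE_MAX; a GART buffer whose score rises above SCORE_VRAM_THRESHOLD is
 * migrated to VRAM (see nouveau_buffer_adjust_score() below).
 */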
#define NOUVEAU_BUFFER_SCORE_MIN -25000
#define NOUVEAU_BUFFER_SCORE_MAX 25000
#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000

/* GPU_READING: buffer is being read (or will be read after the next flush)
 * by the GPU
 *
 * GPU_WRITING: buffer was (or will be after the next flush) written to by
 * the GPU and resource->data has not been updated to reflect modified VRAM
 * contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   int16_t score; /* low if mapped very often; a high score allows migration to VRAM */

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;
};

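/* Releases the resource's GPU backing store (the referenced nouveau_bo or
 * memory-manager suballocation). */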
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

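/* Reads the given byte range back from the GPU buffer into resource->data,
 * making GPU writes visible to the CPU
 * (cf. NOUVEAU_BUFFER_STATUS_GPU_WRITING). */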
boolean
nouveau_buffer_download(struct nouveau_context *, struct nv04_resource *,
                        unsigned start, unsigned size);

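/* Moves the buffer's storage into the given domain (NOUVEAU_BO_GART or
 * NOUVEAU_BO_VRAM), e.g. when its usage score favours VRAM placement. */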
boolean
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);

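/* Adjusts the buffer's usage score; callers pass a negative delta for CPU
 * accesses (-250 per CPU map in this header) and a positive one for GPU-side
 * use; a GART buffer with a high enough score is moved to VRAM. */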
static INLINE void
nouveau_buffer_adjust_score(struct nouveau_context *pipe,
                            struct nv04_resource *res, int16_t score)
{
   if (score < 0) {
      if (res->score > NOUVEAU_BUFFER_SCORE_MIN)
         res->score += score;
   } else
   if (score > 0) {
      if (res->score < NOUVEAU_BUFFER_SCORE_MAX)
         res->score += score;
      if (res->domain == NOUVEAU_BO_GART &&
          res->score > NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD)
         nouveau_buffer_migrate(pipe, res, NOUVEAU_BO_VRAM);
   }
}

/* XXX: wait for fence (atm only using this for vertex push) */
static INLINE void *
nouveau_resource_map_offset(struct nouveau_context *pipe,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   void *map;

   nouveau_buffer_adjust_score(pipe, res, -250);

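   /* GPU writes (pending or already executed) mean the VRAM copy is newer
    * than resource->data, so transfer the contents back before CPU access. */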
   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      nouveau_buffer_download(pipe, res, 0, res->base.width0);

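   /* VRAM resources (just refreshed above) as well as plain system-memory
    * and user-memory buffers are accessed through the CPU data pointer. */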
   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

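   /* suballocated buffers share a parent bo; avoid stalling on it here
    * (see the XXX note above: fence waits are not performed in this path) */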
   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0, flags))
      return NULL;

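   /* only the CPU address is kept; the caller relies on the mapping staying
    * valid after nouveau_bo_unmap() */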
   map = res->bo->map;
   nouveau_bo_unmap(res->bo);
   return map;
}

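/* Counterpart to nouveau_resource_map_offset(); nothing to do, since that
 * function does not leave the bo mapped. */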
static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}

static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

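/* Creates a buffer resource (PIPE_BUFFER) from the given template. */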
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

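/* Wraps "bytes" bytes of client memory at "ptr" as a buffer resource without
 * copying; such resources carry NOUVEAU_BUFFER_STATUS_USER_MEMORY. */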
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

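/* Copies the [base, base + size) range of a user buffer into GPU-accessible
 * storage so the GPU can source it. */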
boolean
nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
                           unsigned size);

#endif