1 #ifndef __NOUVEAU_BUFFER_H__
2 #define __NOUVEAU_BUFFER_H__
4 #include "util/u_transfer.h"
5 #include "util/u_double_list.h"
10 #define NOUVEAU_BUFFER_SCORE_MIN -25000
11 #define NOUVEAU_BUFFER_SCORE_MAX 25000
12 #define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000
/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 */
20 #define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
21 #define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
22 #define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
30 struct nv04_resource
{
31 struct pipe_resource base
;
32 const struct u_resource_vtbl
*vtbl
;
35 struct nouveau_bo
*bo
;
41 int16_t score
; /* low if mapped very often, if high can move to VRAM */
43 struct nouveau_fence
*fence
;
44 struct nouveau_fence
*fence_wr
;
46 struct nouveau_mm_allocation
*mm
;
/* Give up the resource's GPU (VRAM/GART) storage.
 * NOTE(review): the return type was missing (implicit int is invalid C99);
 * void assumed — no visible caller consumes a result. Verify.
 */
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);
53 nouveau_buffer_download(struct pipe_context
*, struct nv04_resource
*,
54 unsigned start
, unsigned size
);
57 nouveau_buffer_migrate(struct pipe_context
*,
58 struct nv04_resource
*, unsigned domain
);
61 nouveau_buffer_adjust_score(struct pipe_context
*pipe
,
62 struct nv04_resource
*res
, int16_t score
)
65 if (res
->score
> NOUVEAU_BUFFER_SCORE_MIN
)
69 if (res
->score
< NOUVEAU_BUFFER_SCORE_MAX
)
71 if (res
->domain
== NOUVEAU_BO_GART
&&
72 res
->score
> NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD
)
73 nouveau_buffer_migrate(pipe
, res
, NOUVEAU_BO_VRAM
);
77 /* XXX: wait for fence (atm only using this for vertex push) */
79 nouveau_resource_map_offset(struct pipe_context
*pipe
,
80 struct nv04_resource
*res
, uint32_t offset
,
85 nouveau_buffer_adjust_score(pipe
, res
, -250);
87 if ((res
->domain
== NOUVEAU_BO_VRAM
) &&
88 (res
->status
& NOUVEAU_BUFFER_STATUS_GPU_WRITING
))
89 nouveau_buffer_download(pipe
, res
, 0, res
->base
.width0
);
91 if ((res
->domain
!= NOUVEAU_BO_GART
) ||
92 (res
->status
& NOUVEAU_BUFFER_STATUS_USER_MEMORY
))
93 return res
->data
+ offset
;
96 flags
|= NOUVEAU_BO_NOSYNC
;
98 if (nouveau_bo_map_range(res
->bo
, res
->offset
+ offset
,
99 res
->base
.width0
, flags
))
103 nouveau_bo_unmap(res
->bo
);
108 nouveau_resource_unmap(struct nv04_resource
*res
)
113 static INLINE
struct nv04_resource
*
114 nv04_resource(struct pipe_resource
*resource
)
116 return (struct nv04_resource
*)resource
;
119 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
120 static INLINE boolean
121 nouveau_resource_mapped_by_gpu(struct pipe_resource
*resource
)
123 return nv04_resource(resource
)->domain
!= 0;
/* screen->resource_create() hook for buffers. */
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);
/* Create a buffer wrapping "bytes" bytes of client memory at "ptr"
 * (the resource gets NOUVEAU_BUFFER_STATUS_USER_MEMORY semantics).
 */
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);
135 nouveau_user_buffer_upload(struct nv04_resource
*, unsigned base
,