2 #ifndef __NVC0_RESOURCE_H__
3 #define __NVC0_RESOURCE_H__
5 #include "util/u_transfer.h"
6 #include "util/u_double_list.h"
8 #include "nouveau/nouveau_winsys.h"
11 #include "nvc0_fence.h"
16 /* Resources, if mapped into the GPU's address space, are guaranteed to
17 * have constant virtual addresses.
18 * The address of a resource will lie within the nouveau_bo referenced,
19 * and this bo should be added to the memory manager's validation list.
21 struct nvc0_resource
{
22 struct pipe_resource base
;
23 const struct u_resource_vtbl
*vtbl
;
27 struct nouveau_bo
*bo
;
33 int16_t score
; /* low if mapped very often, if high can move to VRAM */
35 struct nvc0_fence
*fence
;
36 struct nvc0_fence
*fence_wr
;
38 struct nvc0_mm_allocation
*mm
;
41 /* XXX: wait for fence (atm only using this for vertex push) */
43 nvc0_resource_map_offset(struct nvc0_resource
*res
, uint32_t offset
,
49 return res
->data
+ offset
;
51 if (nouveau_bo_map_range(res
->bo
, res
->offset
+ offset
,
52 res
->base
.width0
, flags
| NOUVEAU_BO_NOSYNC
))
55 /* With suballocation, the same bo can be mapped several times, so unmap
56 * immediately. Maps are guaranteed to persist. */
58 nouveau_bo_unmap(res
->bo
);
63 nvc0_resource_unmap(struct nvc0_resource
*res
)
65 if (res
->domain
!= 0 && 0)
66 nouveau_bo_unmap(res
->bo
);
/* Extract one 4-bit tiling exponent from tile mode m
 * (d = 0: pitch/x, d = 1: height/y, d = 2: depth/z).
 * Fix: parenthesize d — "(d * 4)" broke for compound arguments like
 * NVC0_TILE_DIM_SHIFT(m, 1 + 1), which expanded to a shift by 1 + 1*4. */
#define NVC0_TILE_DIM_SHIFT(m, d) (((m) >> ((d) * 4)) & 0xf)

/* Tile dimensions in bytes (x) / rows (y) / layers (z). */
#define NVC0_TILE_PITCH(m)  (64 << NVC0_TILE_DIM_SHIFT(m, 0))
#define NVC0_TILE_HEIGHT(m) ( 8 << NVC0_TILE_DIM_SHIFT(m, 1))
#define NVC0_TILE_DEPTH(m)  ( 1 << NVC0_TILE_DIM_SHIFT(m, 2))

/* Bytes covered by a single z-layer of one tile (base tile is 64x8 bytes). */
#define NVC0_TILE_SIZE_2D(m) (((64 * 8) << \
                               NVC0_TILE_DIM_SHIFT(m, 0)) << \
                              NVC0_TILE_DIM_SHIFT(m, 1))

/* Total bytes covered by one tile, including the z dimension. */
#define NVC0_TILE_SIZE(m) (NVC0_TILE_SIZE_2D(m) << NVC0_TILE_DIM_SHIFT(m, 2))
81 struct nvc0_miptree_level
{
87 #define NVC0_MAX_TEXTURE_LEVELS 16
90 struct nvc0_resource base
;
91 struct nvc0_miptree_level level
[NVC0_MAX_TEXTURE_LEVELS
];
93 uint32_t layer_stride
;
94 boolean layout_3d
; /* TRUE if layer count varies with mip level */
97 static INLINE
struct nvc0_miptree
*
98 nvc0_miptree(struct pipe_resource
*pt
)
100 return (struct nvc0_miptree
*)pt
;
103 static INLINE
struct nvc0_resource
*
104 nvc0_resource(struct pipe_resource
*resource
)
106 return (struct nvc0_resource
*)resource
;
109 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
110 static INLINE boolean
111 nvc0_resource_mapped_by_gpu(struct pipe_resource
*resource
)
113 return nvc0_resource(resource
)->domain
!= 0;
117 nvc0_init_resource_functions(struct pipe_context
*pcontext
);
120 nvc0_screen_init_resource_functions(struct pipe_screen
*pscreen
);
122 /* Internal functions:
124 struct pipe_resource
*
125 nvc0_miptree_create(struct pipe_screen
*pscreen
,
126 const struct pipe_resource
*tmp
);
128 struct pipe_resource
*
129 nvc0_miptree_from_handle(struct pipe_screen
*pscreen
,
130 const struct pipe_resource
*template,
131 struct winsys_handle
*whandle
);
133 struct pipe_resource
*
134 nvc0_buffer_create(struct pipe_screen
*pscreen
,
135 const struct pipe_resource
*templ
);
137 struct pipe_resource
*
138 nvc0_user_buffer_create(struct pipe_screen
*screen
,
144 struct pipe_surface
*
145 nvc0_miptree_surface_new(struct pipe_context
*,
146 struct pipe_resource
*,
147 const struct pipe_surface
*templ
);
150 nvc0_miptree_surface_del(struct pipe_context
*, struct pipe_surface
*);
155 nvc0_buffer_migrate(struct nvc0_context
*,
156 struct nvc0_resource
*, unsigned domain
);
159 nvc0_migrate_vertices(struct nvc0_resource
*buf
, unsigned base
, unsigned size
);