/* src/gallium/drivers/nvfx/nvfx_miptree.c */
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_math.h"

#include "nvfx_context.h"
#include "nvfx_resource.h"
#include "nvfx_transfer.h"
#include "nv04_surface_2d.h"

/* Currently we use separate implementations for buffers and textures,
 * even though gallium has a unified abstraction of these objects.
 * Eventually these should be combined, and mechanisms like transfers
 * should be adapted to work for both buffer and texture uploads.
 */

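/* Compute the pitch of each mipmap level and the byte offset of each
 * face/slice image, and record the total size of the miptree in
 * mt->total_size.
 */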
static void
nvfx_miptree_layout(struct nvfx_miptree *mt)
{
	struct pipe_resource *pt = &mt->base.base;
	uint width = pt->width0;
	uint offset = 0;
	int nr_faces, l, f;
	uint wide_pitch = pt->bind & (PIPE_BIND_SAMPLER_VIEW |
				      PIPE_BIND_DEPTH_STENCIL |
				      PIPE_BIND_RENDER_TARGET |
				      PIPE_BIND_DISPLAY_TARGET |
				      PIPE_BIND_SCANOUT);

	if (pt->target == PIPE_TEXTURE_CUBE) {
		nr_faces = 6;
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		nr_faces = pt->depth0;
	} else {
		nr_faces = 1;
	}

	for (l = 0; l <= pt->last_level; l++) {
		if (wide_pitch && (pt->flags & NVFX_RESOURCE_FLAG_LINEAR))
			mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
		else
			mt->level[l].pitch = util_format_get_stride(pt->format, width);

		mt->level[l].image_offset =
			CALLOC(nr_faces, sizeof(unsigned));

		width = u_minify(width, 1);
	}

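	/* Lay out each face (or 3D slice) consecutively, each one containing
	 * its complete mipmap chain.  Swizzled levels are padded to a 64-byte
	 * boundary, except once the next level shrinks to 1 texel in either
	 * dimension, and except for the last level.
	 */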
	for (f = 0; f < nr_faces; f++) {
		for (l = 0; l < pt->last_level; l++) {
			mt->level[l].image_offset[f] = offset;

			if (!(pt->flags & NVFX_RESOURCE_FLAG_LINEAR) &&
			    u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
				offset += align(mt->level[l].pitch * u_minify(pt->height0, l), 64);
			else
				offset += mt->level[l].pitch * u_minify(pt->height0, l);
		}

		mt->level[l].image_offset[f] = offset;
		offset += mt->level[l].pitch * u_minify(pt->height0, l);
	}

	mt->total_size = offset;
}

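/* Export the miptree's BO and base-level pitch through a winsys handle so
 * that the buffer can be shared with the window system.
 */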
static boolean
nvfx_miptree_get_handle(struct pipe_screen *pscreen,
			struct pipe_resource *ptexture,
			struct winsys_handle *whandle)
{
	struct nvfx_miptree* mt = (struct nvfx_miptree*)ptexture;

	if (!mt || !mt->base.bo)
		return FALSE;

	return nouveau_screen_bo_get_handle(pscreen,
					    mt->base.bo,
					    mt->level[0].pitch,
					    whandle);
}


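/* Free the miptree: release the BO and the per-level image offset arrays. */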
static void
nvfx_miptree_destroy(struct pipe_screen *screen, struct pipe_resource *pt)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	int l;

	nouveau_screen_bo_release(screen, mt->base.bo);

	for (l = 0; l <= pt->last_level; l++) {
		if (mt->level[l].image_offset)
			FREE(mt->level[l].image_offset);
	}

	FREE(mt);
}



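/* Resource vtbl used by the common u_resource dispatch code.  The transfer
 * entry points are declared in nvfx_transfer.h.
 */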
struct u_resource_vtbl nvfx_miptree_vtbl =
{
   nvfx_miptree_get_handle,          /* get_handle */
   nvfx_miptree_destroy,             /* resource_destroy */
   NULL,                             /* is_resource_referenced */
   nvfx_miptree_transfer_new,        /* get_transfer */
   nvfx_miptree_transfer_del,        /* transfer_destroy */
   nvfx_miptree_transfer_map,        /* transfer_map */
   u_default_transfer_flush_region,  /* transfer_flush_region */
   nvfx_miptree_transfer_unmap,      /* transfer_unmap */
   u_default_transfer_inline_write   /* transfer_inline_write */
};


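/* Create a new miptree.  Non-power-of-two, scanout/display/depth-stencil
 * and dynamic textures are kept linear; other textures are swizzled when
 * the format allows it, unless the NOUVEAU_NO_SWIZZLE debug option is set.
 */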
struct pipe_resource *
nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_resource *pt)
{
	struct nvfx_miptree *mt;
	static int no_swizzle = -1;
	if(no_swizzle < 0)
		no_swizzle = debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE);

	mt = CALLOC_STRUCT(nvfx_miptree);
	if (!mt)
		return NULL;

	mt->base.base = *pt;
	mt->base.vtbl = &nvfx_miptree_vtbl;
	pipe_reference_init(&mt->base.base.reference, 1);
	mt->base.base.screen = pscreen;

	/* Swizzled textures must be POT */
	if (pt->width0 & (pt->width0 - 1) ||
	    pt->height0 & (pt->height0 - 1))
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else
	if (pt->bind & (PIPE_BIND_SCANOUT |
			PIPE_BIND_DISPLAY_TARGET |
			PIPE_BIND_DEPTH_STENCIL))
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else
	if (pt->usage == PIPE_USAGE_DYNAMIC)
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else {
		switch (pt->format) {
		case PIPE_FORMAT_B5G6R5_UNORM:
		case PIPE_FORMAT_L8A8_UNORM:
		case PIPE_FORMAT_A8_UNORM:
		case PIPE_FORMAT_L8_UNORM:
		case PIPE_FORMAT_I8_UNORM:
			/* TODO: we can actually swizzle these formats on nv40;
			   we are just preserving the pre-unification behavior.
			   The whole 2D code is going to be rewritten anyway. */
			if(nvfx_screen(pscreen)->is_nv4x) {
				mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
				break;
			}
			/* fall through on pre-nv4x cards */
			/* TODO: Figure out which formats can be swizzled */
		case PIPE_FORMAT_B8G8R8A8_UNORM:
		case PIPE_FORMAT_B8G8R8X8_UNORM:
		case PIPE_FORMAT_R16_SNORM:
		{
			if (no_swizzle)
				mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
			break;
		}
		default:
			mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
		}
	}

	/* Apparently we cannot render to swizzled surfaces smaller than 64 bytes,
	 * so make them linear.  If the user did not ask for a render target, they
	 * can still render to the texture, but it will cost them an extra copy.
	 * This also happens for small mipmaps of large textures. */
	if (pt->bind & PIPE_BIND_RENDER_TARGET &&
	    util_format_get_stride(pt->format, pt->width0) < 64)
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;

	nvfx_miptree_layout(mt);

	mt->base.bo = nouveau_screen_bo_new(pscreen, 256,
					    pt->usage, pt->bind, mt->total_size);
	if (!mt->base.bo) {
		FREE(mt);
		return NULL;
	}
	return &mt->base.base;
}



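/* Wrap a BO imported through a winsys handle (e.g. a shared scanout buffer)
 * in a miptree.  Only single-level, linear 2D images are supported; the
 * pitch is taken from the winsys.
 */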
struct pipe_resource *
nvfx_miptree_from_handle(struct pipe_screen *pscreen,
			 const struct pipe_resource *template,
			 struct winsys_handle *whandle)
{
	struct nvfx_miptree *mt;
	unsigned stride;

	/* Only supports 2D, non-mipmapped textures for the moment */
	if (template->target != PIPE_TEXTURE_2D ||
	    template->last_level != 0 ||
	    template->depth0 != 1)
		return NULL;

	mt = CALLOC_STRUCT(nvfx_miptree);
	if (!mt)
		return NULL;

	mt->base.bo = nouveau_screen_bo_from_handle(pscreen, whandle, &stride);
	if (mt->base.bo == NULL) {
		FREE(mt);
		return NULL;
	}

	mt->base.base = *template;
	mt->base.vtbl = &nvfx_miptree_vtbl;
	pipe_reference_init(&mt->base.base.reference, 1);
	mt->base.base.screen = pscreen;
	mt->level[0].pitch = stride;
	mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));

	/* Assume whoever created this buffer expects it to be linear for now */
	mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;

	/* XXX: Need to adjust bo refcount??
	 */
	/* nouveau_bo_ref(bo, &mt->base.bo); */
	return &mt->base.base;
}



/* Surface helpers, not strictly required to implement the resource vtbl:
 */
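/* Create a pipe_surface for one image (face/level/zslice) of the miptree.
 * Swizzled surfaces that are too narrow to be rendered to directly are
 * wrapped in a linear temporary by nv04_surface_wrap_for_render().
 */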
struct pipe_surface *
nvfx_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_resource *pt,
			 unsigned face, unsigned level, unsigned zslice,
			 unsigned flags)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	struct nv04_surface *ns;

	ns = CALLOC_STRUCT(nv04_surface);
	if (!ns)
		return NULL;
	pipe_resource_reference(&ns->base.texture, pt);
	ns->base.format = pt->format;
	ns->base.width = u_minify(pt->width0, level);
	ns->base.height = u_minify(pt->height0, level);
	ns->base.usage = flags;
	pipe_reference_init(&ns->base.reference, 1);
	ns->base.face = face;
	ns->base.level = level;
	ns->base.zslice = zslice;
	ns->pitch = mt->level[level].pitch;

	if (pt->target == PIPE_TEXTURE_CUBE) {
		ns->base.offset = mt->level[level].image_offset[face];
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		ns->base.offset = mt->level[level].image_offset[zslice];
	} else {
		ns->base.offset = mt->level[level].image_offset[0];
	}

	/* create a linear temporary that we can render into if
	 * necessary.
	 *
	 * Note that ns->pitch is always a multiple of 64 for linear
	 * surfaces and swizzled surfaces are POT, so ns->pitch & 63
	 * is equivalent to (ns->pitch < 64 && swizzled)
	 */

	if ((ns->pitch & 63) &&
	    (ns->base.usage & PIPE_BIND_RENDER_TARGET))
	{
		struct nv04_surface_2d* eng2d =
			((struct nvfx_screen*)pscreen)->eng2d;

		ns = nv04_surface_wrap_for_render(pscreen, eng2d, ns);
	}

	return &ns->base;
}

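/* Destroy a surface.  If it wraps another surface for rendering (see
 * nvfx_miptree_surface_new), the contents are copied back into the backing
 * surface before it is released.
 */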
void
nvfx_miptree_surface_del(struct pipe_surface *ps)
{
	struct nv04_surface* ns = (struct nv04_surface*)ps;
	if(ns->backing)
	{
		struct nvfx_screen* screen = (struct nvfx_screen*)ps->texture->screen;
		if(ns->backing->base.usage & PIPE_BIND_BLIT_DESTINATION)
			screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0, ps, 0, 0, ns->base.width, ns->base.height);
		nvfx_miptree_surface_del(&ns->backing->base);
	}

	pipe_resource_reference(&ps->texture, NULL);
	FREE(ps);
}