/* Merge commit 'origin/7.8'
 * mesa.git: src/gallium/drivers/nvfx/nvfx_miptree.c */
1 #include "pipe/p_state.h"
2 #include "pipe/p_defines.h"
3 #include "util/u_inlines.h"
4 #include "util/u_format.h"
5 #include "util/u_math.h"
6
7 #include "nvfx_context.h"
8 #include "nvfx_resource.h"
9 #include "nvfx_transfer.h"
10 #include "nv04_surface_2d.h"
11
12 #include "nouveau/nouveau_util.h"
13
14 /* Currently using separate implementations for buffers and textures,
15 * even though gallium has a unified abstraction of these objects.
16 * Eventually these should be combined, and mechanisms like transfers
17 * be adapted to work for both buffer and texture uploads.
18 */
19
/* Compute the memory layout of a miptree: the row pitch of every mipmap
 * level and, for each face (cube) or depth slice (3D), the byte offset of
 * each level's image inside the backing BO.  Fills mt->level[] and
 * mt->total_size; allocates each level's image_offset array.
 */
static void
nvfx_miptree_layout(struct nvfx_miptree *mt)
{
	struct pipe_resource *pt = &mt->base.base;
	uint width = pt->width0;
	uint offset = 0;
	int nr_faces, l, f;
	/* Non-zero when the texture can be sampled, rendered to or scanned
	 * out; such linear surfaces get a 64-byte-aligned pitch below. */
	uint wide_pitch = pt->bind & (PIPE_BIND_SAMPLER_VIEW |
		              PIPE_BIND_DEPTH_STENCIL |
		              PIPE_BIND_RENDER_TARGET |
		              PIPE_BIND_DISPLAY_TARGET |
		              PIPE_BIND_SCANOUT);

	if (pt->target == PIPE_TEXTURE_CUBE) {
		nr_faces = 6;
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		/* 3D textures are laid out as depth0 independent slices. */
		nr_faces = pt->depth0;
	} else {
		nr_faces = 1;
	}

	for (l = 0; l <= pt->last_level; l++) {
		if (wide_pitch && (pt->flags & NVFX_RESOURCE_FLAG_LINEAR))
			/* Linear wide-pitch textures use a single constant
			 * pitch derived from the level-0 width for every
			 * level, rounded up to 64 bytes. */
			mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
		else
			mt->level[l].pitch = util_format_get_stride(pt->format, width);

		mt->level[l].image_offset =
			CALLOC(nr_faces, sizeof(unsigned));
		/* NOTE(review): CALLOC result is not checked here — a failed
		 * allocation would be dereferenced below; confirm callers
		 * tolerate OOM or add a check. */

		width = u_minify(width, 1);
	}

	for (f = 0; f < nr_faces; f++) {
		/* Inner loop covers levels 0..last_level-1 only; on exit
		 * l == pt->last_level, which the two statements after the
		 * loop rely on to place the final level. */
		for (l = 0; l < pt->last_level; l++) {
			mt->level[l].image_offset[f] = offset;

			/* Swizzled images larger than 1x1 at the next level
			 * are padded to a 64-byte boundary; linear images
			 * and the tail of the chain are packed tightly. */
			if (!(pt->flags & NVFX_RESOURCE_FLAG_LINEAR) &&
			    u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
				offset += align(mt->level[l].pitch * u_minify(pt->height0, l), 64);
			else
				offset += mt->level[l].pitch * u_minify(pt->height0, l);
		}

		/* Last level (l == pt->last_level): no alignment padding. */
		mt->level[l].image_offset[f] = offset;
		offset += mt->level[l].pitch * u_minify(pt->height0, l);
	}

	mt->total_size = offset;
}
71
72 static boolean
73 nvfx_miptree_get_handle(struct pipe_screen *pscreen,
74 struct pipe_resource *ptexture,
75 struct winsys_handle *whandle)
76 {
77 struct nvfx_miptree* mt = (struct nvfx_miptree*)ptexture;
78
79 if (!mt || !mt->base.bo)
80 return FALSE;
81
82 return nouveau_screen_bo_get_handle(pscreen,
83 mt->base.bo,
84 mt->level[0].pitch,
85 whandle);
86 }
87
88
89 static void
90 nvfx_miptree_destroy(struct pipe_screen *screen, struct pipe_resource *pt)
91 {
92 struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
93 int l;
94
95 nouveau_screen_bo_release(screen, mt->base.bo);
96
97 for (l = 0; l <= pt->last_level; l++) {
98 if (mt->level[l].image_offset)
99 FREE(mt->level[l].image_offset);
100 }
101
102 FREE(mt);
103 }
104
105
106
107
/* Resource vtbl for nvfx textures; buffers use a separate implementation
 * (see the note at the top of this file).  Unimplemented slots fall back
 * to NULL or the u_default_* helpers. */
struct u_resource_vtbl nvfx_miptree_vtbl =
{
   nvfx_miptree_get_handle,	      /* get_handle */
   nvfx_miptree_destroy,	      /* resource_destroy */
   NULL,			      /* is_resource_referenced */
   nvfx_miptree_transfer_new,	      /* get_transfer */
   nvfx_miptree_transfer_del,	      /* transfer_destroy */
   nvfx_miptree_transfer_map,	      /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   nvfx_miptree_transfer_unmap,	      /* transfer_unmap */
   u_default_transfer_inline_write    /* transfer_inline_write */
};
120
121
122
/* Create a miptree texture: decide between linear and swizzled layout,
 * compute the layout, and allocate a 256-byte-aligned BO of total_size.
 * Returns NULL on allocation failure. */
struct pipe_resource *
nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_resource *pt)
{
	struct nvfx_miptree *mt;
	/* Cached NOUVEAU_NO_SWIZZLE debug option; read once on first call.
	 * NOTE(review): the lazy init is not thread-safe — presumably screen
	 * creation is single-threaded here; confirm. */
	static int no_swizzle = -1;
	if(no_swizzle < 0)
		no_swizzle = debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE);

	mt = CALLOC_STRUCT(nvfx_miptree);
	if (!mt)
		return NULL;

	mt->base.base = *pt;
	mt->base.vtbl = &nvfx_miptree_vtbl;
	pipe_reference_init(&mt->base.base.reference, 1);
	mt->base.base.screen = pscreen;

	/* Swizzled textures must be POT */
	if (pt->width0 & (pt->width0 - 1) ||
	    pt->height0 & (pt->height0 - 1))
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else
	/* Scanout/display/depth-stencil surfaces stay linear. */
	if (pt->bind & (PIPE_BIND_SCANOUT |
			PIPE_BIND_DISPLAY_TARGET |
			PIPE_BIND_DEPTH_STENCIL))
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else
	/* Frequently-updated textures are cheaper to keep linear. */
	if (pt->_usage == PIPE_USAGE_DYNAMIC)
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
	else {
		/* Otherwise, swizzle only formats known to support it;
		 * everything else falls back to linear. */
		switch (pt->format) {
		case PIPE_FORMAT_B5G6R5_UNORM:
		case PIPE_FORMAT_L8A8_UNORM:
		case PIPE_FORMAT_A8_UNORM:
		case PIPE_FORMAT_L8_UNORM:
		case PIPE_FORMAT_I8_UNORM:
			/* TODO: we can actually swizzle these formats on nv40, we
			   are just preserving the pre-unification behavior.
			   The whole 2D code is going to be rewritten anyway. */
			if(nvfx_screen(pscreen)->is_nv4x) {
				mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
				break;
			}
			/* fallthrough: on non-nv4x these join the swizzlable set */
			/* TODO: Figure out which formats can be swizzled */
		case PIPE_FORMAT_B8G8R8A8_UNORM:
		case PIPE_FORMAT_B8G8R8X8_UNORM:
		case PIPE_FORMAT_R16_SNORM:
		{
			/* Swizzlable, unless disabled via NOUVEAU_NO_SWIZZLE. */
			if (no_swizzle)
				mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
			break;
		}
		default:
			mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
		}
	}

	/* apparently we can't render to swizzled surfaces smaller than 64 bytes, so make them linear.
	 * If the user did not ask for a render target, they can still render to it, but it will cost them an extra copy.
	 * This also happens for small mipmaps of large textures. */
	if (pt->bind & PIPE_BIND_RENDER_TARGET &&
	    util_format_get_stride(pt->format, pt->width0) < 64)
		mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;

	nvfx_miptree_layout(mt);

	mt->base.bo = nouveau_screen_bo_new(pscreen, 256,
					    pt->_usage, pt->bind, mt->total_size);
	if (!mt->base.bo) {
		FREE(mt);
		return NULL;
	}
	return &mt->base.base;
}
197
198
199
200
201 struct pipe_resource *
202 nvfx_miptree_from_handle(struct pipe_screen *pscreen,
203 const struct pipe_resource *template,
204 struct winsys_handle *whandle)
205 {
206 struct nvfx_miptree *mt;
207 unsigned stride;
208
209 /* Only supports 2D, non-mipmapped textures for the moment */
210 if (template->target != PIPE_TEXTURE_2D ||
211 template->last_level != 0 ||
212 template->depth0 != 1)
213 return NULL;
214
215 mt = CALLOC_STRUCT(nvfx_miptree);
216 if (!mt)
217 return NULL;
218
219 mt->base.bo = nouveau_screen_bo_from_handle(pscreen, whandle, &stride);
220 if (mt->base.bo == NULL) {
221 FREE(mt);
222 return NULL;
223 }
224
225 mt->base.base = *template;
226 pipe_reference_init(&mt->base.base.reference, 1);
227 mt->base.base.screen = pscreen;
228 mt->level[0].pitch = stride;
229 mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
230
231 /* Assume whoever created this buffer expects it to be linear for now */
232 mt->base.base.flags |= NVFX_RESOURCE_FLAG_LINEAR;
233
234 /* XXX: Need to adjust bo refcount??
235 */
236 /* nouveau_bo_ref(bo, &mt->base.bo); */
237 return &mt->base.base;
238 }
239
240
241
242
243
244 /* Surface helpers, not strictly required to implement the resource vtbl:
245 */
246 struct pipe_surface *
247 nvfx_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_resource *pt,
248 unsigned face, unsigned level, unsigned zslice,
249 unsigned flags)
250 {
251 struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
252 struct nv04_surface *ns;
253
254 ns = CALLOC_STRUCT(nv04_surface);
255 if (!ns)
256 return NULL;
257 pipe_resource_reference(&ns->base.texture, pt);
258 ns->base.format = pt->format;
259 ns->base.width = u_minify(pt->width0, level);
260 ns->base.height = u_minify(pt->height0, level);
261 ns->base.usage = flags;
262 pipe_reference_init(&ns->base.reference, 1);
263 ns->base.face = face;
264 ns->base.level = level;
265 ns->base.zslice = zslice;
266 ns->pitch = mt->level[level].pitch;
267
268 if (pt->target == PIPE_TEXTURE_CUBE) {
269 ns->base.offset = mt->level[level].image_offset[face];
270 } else
271 if (pt->target == PIPE_TEXTURE_3D) {
272 ns->base.offset = mt->level[level].image_offset[zslice];
273 } else {
274 ns->base.offset = mt->level[level].image_offset[0];
275 }
276
277 /* create a linear temporary that we can render into if
278 * necessary.
279 *
280 * Note that ns->pitch is always a multiple of 64 for linear
281 * surfaces and swizzled surfaces are POT, so ns->pitch & 63
282 * is equivalent to (ns->pitch < 64 && swizzled)
283 */
284
285 if ((ns->pitch & 63) &&
286 (ns->base.usage & PIPE_BIND_RENDER_TARGET))
287 {
288 struct nv04_surface_2d* eng2d =
289 ((struct nvfx_screen*)pscreen)->eng2d;
290
291 ns = nv04_surface_wrap_for_render(pscreen, eng2d, ns);
292 }
293
294 return &ns->base;
295 }
296
297 void
298 nvfx_miptree_surface_del(struct pipe_surface *ps)
299 {
300 struct nv04_surface* ns = (struct nv04_surface*)ps;
301 if(ns->backing)
302 {
303 struct nvfx_screen* screen = (struct nvfx_screen*)ps->texture->screen;
304 if(ns->backing->base.usage & PIPE_BIND_RENDER_TARGET)
305 screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0, ps, 0, 0, ns->base.width, ns->base.height);
306 nvfx_miptree_surface_del(&ns->backing->base);
307 }
308
309 pipe_resource_reference(&ps->texture, NULL);
310 FREE(ps);
311 }