/* src/gallium/drivers/nv30/nv30_miptree.c */
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"
#include "util/u_format.h"
#include "util/u_math.h"

#include "nv30_context.h"
#include "../nv04/nv04_surface_2d.h"

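/* Lay out the miptree in memory: compute a per-level pitch, a per-face
 * (or per-slice, for 3D targets) byte offset for every mipmap level, and
 * the total buffer size.  Linear textures that may be sampled, rendered
 * to or scanned out keep a 64-byte-aligned pitch derived from the level-0
 * width; swizzled levels are packed back to back, padded to a 64-byte
 * boundary for all but the smallest levels.
 */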
static void
nv30_miptree_layout(struct nv30_miptree *nv30mt)
{
        struct pipe_texture *pt = &nv30mt->base;
        uint width = pt->width0;
        uint offset = 0;
        int nr_faces, l, f;
        uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
                                           PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
                                           PIPE_TEXTURE_USAGE_RENDER_TARGET |
                                           PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
                                           PIPE_TEXTURE_USAGE_PRIMARY);

        if (pt->target == PIPE_TEXTURE_CUBE) {
                nr_faces = 6;
        } else
        if (pt->target == PIPE_TEXTURE_3D) {
                nr_faces = pt->depth0;
        } else {
                nr_faces = 1;
        }

        for (l = 0; l <= pt->last_level; l++) {
                if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
                        nv30mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
                else
                        nv30mt->level[l].pitch = util_format_get_stride(pt->format, width);

                nv30mt->level[l].image_offset =
                        CALLOC(nr_faces, sizeof(unsigned));

                width = u_minify(width, 1);
        }

        for (f = 0; f < nr_faces; f++) {
                for (l = 0; l < pt->last_level; l++) {
                        nv30mt->level[l].image_offset[f] = offset;

                        if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
                            u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
                                offset += align(nv30mt->level[l].pitch * u_minify(pt->height0, l), 64);
                        else
                                offset += nv30mt->level[l].pitch * u_minify(pt->height0, l);
                }

                nv30mt->level[l].image_offset[f] = offset;
                offset += nv30mt->level[l].pitch * u_minify(pt->height0, l);
        }

        nv30mt->total_size = offset;
}

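/* Create a texture and allocate its backing buffer.  The usage flags and
 * format decide whether the texture is stored swizzled or linear:
 * non-power-of-two, scanout, depth-stencil and dynamic textures are forced
 * linear, as are formats not known to be safe to swizzle.
 */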
static struct pipe_texture *
nv30_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
{
        struct nv30_miptree *mt;
        unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
                             NOUVEAU_BUFFER_USAGE_TEXTURE;

        mt = MALLOC(sizeof(struct nv30_miptree));
        if (!mt)
                return NULL;
        mt->base = *pt;
        pipe_reference_init(&mt->base.reference, 1);
        mt->base.screen = pscreen;

        /* Swizzled textures must be POT */
        if (pt->width0 & (pt->width0 - 1) ||
            pt->height0 & (pt->height0 - 1))
                mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
        else
        if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
                             PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
                             PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
                mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
        else
        if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
                mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
        else {
                switch (pt->format) {
                /* TODO: Figure out which formats can be swizzled */
                case PIPE_FORMAT_A8R8G8B8_UNORM:
                case PIPE_FORMAT_X8R8G8B8_UNORM:
                case PIPE_FORMAT_R16_SNORM:
                case PIPE_FORMAT_R5G6B5_UNORM:
                case PIPE_FORMAT_A8L8_UNORM:
                case PIPE_FORMAT_A8_UNORM:
                case PIPE_FORMAT_L8_UNORM:
                case PIPE_FORMAT_I8_UNORM:
                {
                        if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
                                mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
                        break;
                }
                default:
                        mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
                }
        }

        if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
                buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

        /* Apparently we can't render to swizzled surfaces smaller than 64 bytes,
         * so make them linear.  If the user did not ask for a render target,
         * they can still render to it, but it will cost them an extra copy.
         * This also happens for small mipmaps of large textures. */
        if (pt->tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET &&
            util_format_get_stride(pt->format, pt->width0) < 64)
                mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

        nv30_miptree_layout(mt);

        mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage,
                                            mt->total_size);
        if (!mt->buffer) {
                FREE(mt);
                return NULL;
        }
        mt->bo = nouveau_bo(mt->buffer);

        return &mt->base;
}

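/* Wrap an existing pipe_buffer provided by the caller (e.g. a buffer handed
 * to us by the winsys) in a texture without allocating new storage.  Only
 * simple 2D, non-mipmapped textures are accepted, and the contents are
 * assumed to be linear.
 */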
static struct pipe_texture *
nv30_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
                     const unsigned *stride, struct pipe_buffer *pb)
{
        struct nv30_miptree *mt;

        /* Only supports 2D, non-mipmapped textures for the moment */
        if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
            pt->depth0 != 1)
                return NULL;

        mt = CALLOC_STRUCT(nv30_miptree);
        if (!mt)
                return NULL;

        mt->base = *pt;
        pipe_reference_init(&mt->base.reference, 1);
        mt->base.screen = pscreen;
        mt->level[0].pitch = stride[0];
        mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));

        /* Assume whoever created this buffer expects it to be linear for now */
        mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

        pipe_buffer_reference(&mt->buffer, pb);
        mt->bo = nouveau_bo(mt->buffer);
        return &mt->base;
}

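/* Release the per-level image offset arrays and drop the reference on the
 * backing buffer.
 */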
static void
nv30_miptree_destroy(struct pipe_texture *pt)
{
        struct nv30_miptree *mt = (struct nv30_miptree *)pt;
        int l;

        pipe_buffer_reference(&mt->buffer, NULL);
        for (l = 0; l <= pt->last_level; l++) {
                if (mt->level[l].image_offset)
                        FREE(mt->level[l].image_offset);
        }

        FREE(mt);
}

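/* Create a pipe_surface view of one face/level/slice of the miptree.  If
 * the caller wants to render to a swizzled surface whose pitch is not a
 * multiple of 64 bytes, the surface is wrapped in a linear temporary via
 * nv04_surface_wrap_for_render() and copied back when it is destroyed.
 */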
static struct pipe_surface *
nv30_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
                         unsigned face, unsigned level, unsigned zslice,
                         unsigned flags)
{
        struct nv30_miptree *nv30mt = (struct nv30_miptree *)pt;
        struct nv04_surface *ns;

        ns = CALLOC_STRUCT(nv04_surface);
        if (!ns)
                return NULL;
        pipe_texture_reference(&ns->base.texture, pt);
        ns->base.format = pt->format;
        ns->base.width = u_minify(pt->width0, level);
        ns->base.height = u_minify(pt->height0, level);
        ns->base.usage = flags;
        pipe_reference_init(&ns->base.reference, 1);
        ns->base.face = face;
        ns->base.level = level;
        ns->base.zslice = zslice;
        ns->pitch = nv30mt->level[level].pitch;

        if (pt->target == PIPE_TEXTURE_CUBE) {
                ns->base.offset = nv30mt->level[level].image_offset[face];
        } else
        if (pt->target == PIPE_TEXTURE_3D) {
                ns->base.offset = nv30mt->level[level].image_offset[zslice];
        } else {
                ns->base.offset = nv30mt->level[level].image_offset[0];
        }

        /* Create a linear temporary that we can render into if necessary.
         * Note that ns->pitch is always a multiple of 64 for linear surfaces
         * and swizzled surfaces are POT, so ns->pitch & 63 is equivalent to
         * (ns->pitch < 64 && swizzled). */
        if ((ns->pitch & 63) &&
            (ns->base.usage & (PIPE_BUFFER_USAGE_GPU_WRITE |
                               NOUVEAU_BUFFER_USAGE_NO_RENDER)) == PIPE_BUFFER_USAGE_GPU_WRITE)
                return &nv04_surface_wrap_for_render(pscreen,
                                ((struct nv30_screen *)pscreen)->eng2d, ns)->base;

        return &ns->base;
}

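/* Destroy a surface.  If it is a linear temporary created by
 * nv04_surface_wrap_for_render(), its rendered contents are first copied
 * back into the swizzled surface it wraps, which is then released as well.
 */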
static void
nv30_miptree_surface_del(struct pipe_surface *ps)
{
        struct nv04_surface *ns = (struct nv04_surface *)ps;

        if (ns->backing) {
                struct nv30_screen *screen = (struct nv30_screen *)ps->texture->screen;

                if (ns->backing->base.usage & PIPE_BUFFER_USAGE_GPU_WRITE)
                        screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0,
                                            ps, 0, 0, ns->base.width, ns->base.height);
                nv30_miptree_surface_del(&ns->backing->base);
        }

        pipe_texture_reference(&ps->texture, NULL);
        FREE(ps);
}

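/* Plug the miptree implementation into the screen's texture and surface
 * hooks.
 */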
void
nv30_screen_init_miptree_functions(struct pipe_screen *pscreen)
{
        pscreen->texture_create = nv30_miptree_create;
        pscreen->texture_blanket = nv30_miptree_blanket;
        pscreen->texture_destroy = nv30_miptree_destroy;
        pscreen->get_tex_surface = nv30_miptree_surface_new;
        pscreen->tex_surface_destroy = nv30_miptree_surface_del;
}