2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Corbin Simpson <MostAwesomeDude@gmail.com>
27 #include <pipe/p_screen.h>
28 #include <util/u_format.h>
29 #include <util/u_math.h>
30 #include <util/u_inlines.h>
31 #include <util/u_memory.h>
32 #include <util/u_upload_mgr.h>
33 #include "state_tracker/drm_driver.h"
34 #include "r600_screen.h"
35 #include "r600_context.h"
36 #include "r600_resource.h"
/* Resource vtable for buffers; defined at the bottom of this file. */
extern struct u_resource_vtbl r600_buffer_vtbl;
40 u32
r600_domain_from_usage(unsigned usage
)
42 u32 domain
= RADEON_GEM_DOMAIN_GTT
;
44 if (usage
& PIPE_BIND_RENDER_TARGET
) {
45 domain
|= RADEON_GEM_DOMAIN_VRAM
;
47 if (usage
& PIPE_BIND_DEPTH_STENCIL
) {
48 domain
|= RADEON_GEM_DOMAIN_VRAM
;
50 if (usage
& PIPE_BIND_SAMPLER_VIEW
) {
51 domain
|= RADEON_GEM_DOMAIN_VRAM
;
53 /* also need BIND_BLIT_SOURCE/DESTINATION ? */
54 if (usage
& PIPE_BIND_VERTEX_BUFFER
) {
55 domain
|= RADEON_GEM_DOMAIN_GTT
;
57 if (usage
& PIPE_BIND_INDEX_BUFFER
) {
58 domain
|= RADEON_GEM_DOMAIN_GTT
;
60 if (usage
& PIPE_BIND_CONSTANT_BUFFER
) {
61 domain
|= RADEON_GEM_DOMAIN_VRAM
;
67 struct pipe_resource
*r600_buffer_create(struct pipe_screen
*screen
,
68 const struct pipe_resource
*templ
)
70 struct r600_screen
*rscreen
= r600_screen(screen
);
71 struct r600_resource_buffer
*rbuffer
;
72 struct radeon_ws_bo
*bo
;
73 /* XXX We probably want a different alignment for buffers and textures. */
74 unsigned alignment
= 4096;
76 rbuffer
= CALLOC_STRUCT(r600_resource_buffer
);
80 rbuffer
->magic
= R600_BUFFER_MAGIC
;
81 rbuffer
->user_buffer
= NULL
;
82 rbuffer
->num_ranges
= 0;
83 rbuffer
->r
.base
.b
= *templ
;
84 pipe_reference_init(&rbuffer
->r
.base
.b
.reference
, 1);
85 rbuffer
->r
.base
.b
.screen
= screen
;
86 rbuffer
->r
.base
.vtbl
= &r600_buffer_vtbl
;
87 rbuffer
->r
.size
= rbuffer
->r
.base
.b
.width0
;
88 rbuffer
->r
.domain
= r600_domain_from_usage(rbuffer
->r
.base
.b
.bind
);
89 bo
= radeon_ws_bo(rscreen
->rw
, rbuffer
->r
.base
.b
.width0
, alignment
, rbuffer
->r
.base
.b
.bind
);
95 return &rbuffer
->r
.base
.b
;
98 struct pipe_resource
*r600_user_buffer_create(struct pipe_screen
*screen
,
99 void *ptr
, unsigned bytes
,
102 struct r600_resource_buffer
*rbuffer
;
104 rbuffer
= CALLOC_STRUCT(r600_resource_buffer
);
108 rbuffer
->magic
= R600_BUFFER_MAGIC
;
109 pipe_reference_init(&rbuffer
->r
.base
.b
.reference
, 1);
110 rbuffer
->r
.base
.vtbl
= &r600_buffer_vtbl
;
111 rbuffer
->r
.base
.b
.screen
= screen
;
112 rbuffer
->r
.base
.b
.target
= PIPE_BUFFER
;
113 rbuffer
->r
.base
.b
.format
= PIPE_FORMAT_R8_UNORM
;
114 rbuffer
->r
.base
.b
.usage
= PIPE_USAGE_IMMUTABLE
;
115 rbuffer
->r
.base
.b
.bind
= bind
;
116 rbuffer
->r
.base
.b
.width0
= bytes
;
117 rbuffer
->r
.base
.b
.height0
= 1;
118 rbuffer
->r
.base
.b
.depth0
= 1;
119 rbuffer
->r
.base
.b
.flags
= 0;
120 rbuffer
->num_ranges
= 0;
121 rbuffer
->r
.bo
= NULL
;
122 rbuffer
->user_buffer
= ptr
;
123 return &rbuffer
->r
.base
.b
;
126 static void r600_buffer_destroy(struct pipe_screen
*screen
,
127 struct pipe_resource
*buf
)
129 struct r600_resource_buffer
*rbuffer
= r600_buffer(buf
);
130 struct r600_screen
*rscreen
= r600_screen(screen
);
133 radeon_ws_bo_reference(rscreen
->rw
, &rbuffer
->r
.bo
, NULL
);
138 static void *r600_buffer_transfer_map(struct pipe_context
*pipe
,
139 struct pipe_transfer
*transfer
)
141 struct r600_context
*rctx
= r600_context(pipe
);
142 struct r600_resource_buffer
*rbuffer
= r600_buffer(transfer
->resource
);
143 struct r600_screen
*rscreen
= r600_screen(pipe
->screen
);
147 boolean flush
= FALSE
;
149 if (rbuffer
->user_buffer
)
150 return (uint8_t*)rbuffer
->user_buffer
+ transfer
->box
.x
;
152 if (transfer
->usage
& PIPE_TRANSFER_DISCARD
) {
153 for (i
= 0; i
< rbuffer
->num_ranges
; i
++) {
154 if ((transfer
->box
.x
>= rbuffer
->ranges
[i
].start
) &&
155 (transfer
->box
.x
< rbuffer
->ranges
[i
].end
))
159 radeon_ws_bo_reference(rscreen
->rw
, &rbuffer
->r
.bo
, NULL
);
160 rbuffer
->num_ranges
= 0;
161 rbuffer
->r
.bo
= radeon_ws_bo(rscreen
->rw
,
162 rbuffer
->r
.base
.b
.width0
, 0,
163 rbuffer
->r
.base
.b
.bind
);
168 if (transfer
->usage
& PIPE_TRANSFER_DONTBLOCK
) {
171 if (transfer
->usage
& PIPE_TRANSFER_WRITE
) {
174 data
= radeon_ws_bo_map(rscreen
->rw
, rbuffer
->r
.bo
, transfer
->usage
, rctx
);
178 return (uint8_t*)data
+ transfer
->box
.x
;
181 static void r600_buffer_transfer_unmap(struct pipe_context
*pipe
,
182 struct pipe_transfer
*transfer
)
184 struct r600_resource_buffer
*rbuffer
= r600_buffer(transfer
->resource
);
185 struct r600_screen
*rscreen
= r600_screen(pipe
->screen
);
188 radeon_ws_bo_unmap(rscreen
->rw
, rbuffer
->r
.bo
);
191 static void r600_buffer_transfer_flush_region(struct pipe_context
*pipe
,
192 struct pipe_transfer
*transfer
,
193 const struct pipe_box
*box
)
195 struct r600_resource_buffer
*rbuffer
= r600_buffer(transfer
->resource
);
197 unsigned offset
= transfer
->box
.x
+ box
->x
;
198 unsigned length
= box
->width
;
200 assert(box
->x
+ box
->width
<= transfer
->box
.width
);
202 if (rbuffer
->user_buffer
)
205 /* mark the range as used */
206 for(i
= 0; i
< rbuffer
->num_ranges
; ++i
) {
207 if(offset
<= rbuffer
->ranges
[i
].end
&& rbuffer
->ranges
[i
].start
<= (offset
+box
->width
)) {
208 rbuffer
->ranges
[i
].start
= MIN2(rbuffer
->ranges
[i
].start
, offset
);
209 rbuffer
->ranges
[i
].end
= MAX2(rbuffer
->ranges
[i
].end
, (offset
+length
));
214 rbuffer
->ranges
[rbuffer
->num_ranges
].start
= offset
;
215 rbuffer
->ranges
[rbuffer
->num_ranges
].end
= offset
+length
;
216 rbuffer
->num_ranges
++;
219 unsigned r600_buffer_is_referenced_by_cs(struct pipe_context
*context
,
220 struct pipe_resource
*buf
,
221 unsigned face
, unsigned level
)
224 return PIPE_REFERENCED_FOR_READ
| PIPE_REFERENCED_FOR_WRITE
;
227 struct pipe_resource
*r600_buffer_from_handle(struct pipe_screen
*screen
,
228 struct winsys_handle
*whandle
)
230 struct radeon
*rw
= (struct radeon
*)screen
->winsys
;
231 struct r600_resource
*rbuffer
;
232 struct radeon_ws_bo
*bo
= NULL
;
234 bo
= radeon_ws_bo_handle(rw
, whandle
->handle
);
239 rbuffer
= CALLOC_STRUCT(r600_resource
);
240 if (rbuffer
== NULL
) {
241 radeon_ws_bo_reference(rw
, &bo
, NULL
);
245 pipe_reference_init(&rbuffer
->base
.b
.reference
, 1);
246 rbuffer
->base
.b
.target
= PIPE_BUFFER
;
247 rbuffer
->base
.b
.screen
= screen
;
248 rbuffer
->base
.vtbl
= &r600_buffer_vtbl
;
250 return &rbuffer
->base
.b
;
253 struct u_resource_vtbl r600_buffer_vtbl
=
255 u_default_resource_get_handle
, /* get_handle */
256 r600_buffer_destroy
, /* resource_destroy */
257 r600_buffer_is_referenced_by_cs
, /* is_buffer_referenced */
258 u_default_get_transfer
, /* get_transfer */
259 u_default_transfer_destroy
, /* transfer_destroy */
260 r600_buffer_transfer_map
, /* transfer_map */
261 r600_buffer_transfer_flush_region
, /* transfer_flush_region */
262 r600_buffer_transfer_unmap
, /* transfer_unmap */
263 u_default_transfer_inline_write
/* transfer_inline_write */
266 int r600_upload_index_buffer(struct r600_context
*rctx
,
267 struct r600_draw
*draw
)
269 struct pipe_resource
*upload_buffer
= NULL
;
270 unsigned index_offset
= draw
->index_buffer_offset
;
273 if (r600_buffer_is_user_buffer(draw
->index_buffer
)) {
274 ret
= u_upload_buffer(rctx
->upload_ib
,
276 draw
->count
* draw
->index_size
,
283 draw
->index_buffer_offset
= index_offset
;
284 draw
->index_buffer
= upload_buffer
;
291 int r600_upload_user_buffers(struct r600_context
*rctx
)
293 enum pipe_error ret
= PIPE_OK
;
296 nr
= rctx
->vertex_elements
->count
;
298 for (i
= 0; i
< nr
; i
++) {
299 struct pipe_vertex_buffer
*vb
=
300 &rctx
->vertex_buffer
[rctx
->vertex_elements
->elements
[i
].vertex_buffer_index
];
302 if (r600_buffer_is_user_buffer(vb
->buffer
)) {
303 struct pipe_resource
*upload_buffer
= NULL
;
304 unsigned offset
= 0; /*vb->buffer_offset * 4;*/
305 unsigned size
= vb
->buffer
->width0
;
306 unsigned upload_offset
;
307 ret
= u_upload_buffer(rctx
->upload_vb
,
310 &upload_offset
, &upload_buffer
);
314 pipe_resource_reference(&vb
->buffer
, NULL
);
315 vb
->buffer
= upload_buffer
;
316 vb
->buffer_offset
= upload_offset
;