/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23 #include "radeon_drm_buffer.h"
25 #include "util/u_memory.h"
26 #include "pipebuffer/pb_bufmgr.h"
28 #include "radeon_cs_gem.h"
29 #include "state_tracker/drm_driver.h"
31 static unsigned get_pb_usage_from_create_flags(unsigned bind
, unsigned usage
,
32 enum r300_buffer_domain domain
)
36 if (bind
& (PIPE_BIND_DEPTH_STENCIL
| PIPE_BIND_RENDER_TARGET
|
37 PIPE_BIND_DISPLAY_TARGET
| PIPE_BIND_SCANOUT
))
38 res
|= PB_USAGE_GPU_WRITE
;
40 if (bind
& PIPE_BIND_SAMPLER_VIEW
)
41 res
|= PB_USAGE_GPU_READ
| PB_USAGE_GPU_WRITE
;
43 if (bind
& (PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
))
44 res
|= PB_USAGE_GPU_READ
;
46 if (bind
& PIPE_BIND_TRANSFER_WRITE
)
47 res
|= PB_USAGE_CPU_WRITE
;
49 if (bind
& PIPE_BIND_TRANSFER_READ
)
50 res
|= PB_USAGE_CPU_READ
;
52 /* Is usage of any use for us? Probably not. */
54 /* Now add driver-specific usage flags. */
55 if (bind
& (PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
))
56 res
|= RADEON_PB_USAGE_VERTEX
;
58 if (domain
& R300_DOMAIN_GTT
)
59 res
|= RADEON_PB_USAGE_DOMAIN_GTT
;
61 if (domain
& R300_DOMAIN_VRAM
)
62 res
|= RADEON_PB_USAGE_DOMAIN_VRAM
;
67 static struct r300_winsys_buffer
*
68 radeon_r300_winsys_buffer_create(struct r300_winsys_screen
*rws
,
73 enum r300_buffer_domain domain
)
75 struct radeon_drm_winsys
*ws
= radeon_drm_winsys(rws
);
77 struct pb_manager
*provider
;
78 struct pb_buffer
*buffer
;
80 memset(&desc
, 0, sizeof(desc
));
81 desc
.alignment
= alignment
;
82 desc
.usage
= get_pb_usage_from_create_flags(bind
, usage
, domain
);
84 /* Assign a buffer manager. */
85 if (bind
& (PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
))
90 buffer
= provider
->create_buffer(provider
, size
, &desc
);
94 return (struct r300_winsys_buffer
*)buffer
;
/* Reference counting for winsys buffers.
 *
 * Makes *pdst point at src, adjusting reference counts via pb_reference():
 * src gains a reference, the buffer previously held in *pdst loses one
 * (and is destroyed when its count reaches zero).
 */
static void radeon_r300_winsys_buffer_reference(struct r300_winsys_screen *rws,
                                                struct r300_winsys_buffer **pdst,
                                                struct r300_winsys_buffer *src)
{
    struct pb_buffer *_src = radeon_pb_buffer(src);
    struct pb_buffer *_dst = radeon_pb_buffer(*pdst);

    pb_reference(&_dst, _src);

    *pdst = (struct r300_winsys_buffer*)_dst;
}
109 static struct r300_winsys_buffer
*radeon_r300_winsys_buffer_from_handle(struct r300_winsys_screen
*rws
,
110 struct winsys_handle
*whandle
,
114 struct radeon_drm_winsys
*ws
= radeon_drm_winsys(rws
);
115 struct pb_buffer
*_buf
;
117 _buf
= radeon_drm_bufmgr_create_buffer_from_handle(ws
->kman
, whandle
->handle
);
120 *stride
= whandle
->stride
;
122 *size
= _buf
->base
.size
;
124 return (struct r300_winsys_buffer
*)_buf
;
127 static boolean
radeon_r300_winsys_buffer_get_handle(struct r300_winsys_screen
*rws
,
128 struct r300_winsys_buffer
*buffer
,
130 struct winsys_handle
*whandle
)
132 struct pb_buffer
*_buf
= radeon_pb_buffer(buffer
);
133 whandle
->stride
= stride
;
134 return radeon_drm_bufmgr_get_handle(_buf
, whandle
);
137 static void radeon_r300_winsys_cs_set_flush(struct r300_winsys_cs
*rcs
,
138 void (*flush
)(void *),
141 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
142 cs
->flush_cs
= flush
;
143 cs
->flush_data
= user
;
144 radeon_cs_space_set_flush(cs
->cs
, flush
, user
);
147 static boolean
radeon_r300_winsys_cs_validate(struct r300_winsys_cs
*rcs
)
149 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
151 return radeon_cs_space_check(cs
->cs
) >= 0;
154 static void radeon_r300_winsys_cs_reset_buffers(struct r300_winsys_cs
*rcs
)
156 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
157 radeon_cs_space_reset_bos(cs
->cs
);
160 static void radeon_r300_winsys_cs_flush(struct r300_winsys_cs
*rcs
)
162 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
165 /* Don't flush a zero-sized CS. */
170 cs
->cs
->cdw
= cs
->base
.cdw
;
172 radeon_drm_bufmgr_flush_maps(cs
->ws
->kman
);
175 retval
= radeon_cs_emit(cs
->cs
);
177 if (debug_get_bool_option("RADEON_DUMP_CS", FALSE
)) {
178 fprintf(stderr
, "radeon: The kernel rejected CS, dumping...\n");
179 radeon_cs_print(cs
->cs
, stderr
);
181 fprintf(stderr
, "radeon: The kernel rejected CS, "
182 "see dmesg for more information.\n");
187 * Someday, when we care about performance, we should really find a way
188 * to rotate between two or three CS objects so that the GPU can be
189 * spinning through one CS while another one is being filled. */
190 radeon_cs_erase(cs
->cs
);
192 cs
->base
.buf
= cs
->cs
->packets
;
193 cs
->base
.cdw
= cs
->cs
->cdw
;
196 static uint32_t radeon_get_value(struct r300_winsys_screen
*rws
,
197 enum r300_value_id id
)
199 struct radeon_drm_winsys
*ws
= (struct radeon_drm_winsys
*)rws
;
202 case R300_VID_PCI_ID
:
204 case R300_VID_GB_PIPES
:
206 case R300_VID_Z_PIPES
:
208 case R300_VID_SQUARE_TILING_SUPPORT
:
209 return ws
->squaretiling
;
210 case R300_VID_DRM_2_3_0
:
211 return ws
->drm_2_3_0
;
212 case R300_VID_DRM_2_6_0
:
213 return ws
->drm_2_6_0
;
214 case R300_VID_DRM_2_8_0
:
215 return ws
->drm_2_8_0
;
216 case R300_CAN_HYPERZ
:
222 static struct r300_winsys_cs
*radeon_r300_winsys_cs_create(struct r300_winsys_screen
*rws
)
224 struct radeon_drm_winsys
*ws
= radeon_drm_winsys(rws
);
225 struct radeon_drm_cs
*cs
= CALLOC_STRUCT(radeon_drm_cs
);
230 /* Size limit on IBs is 64 kibibytes. */
231 cs
->cs
= radeon_cs_create(ws
->csm
, 1024 * 64 / 4);
237 radeon_cs_set_limit(cs
->cs
,
238 RADEON_GEM_DOMAIN_GTT
, ws
->gart_size
);
239 radeon_cs_set_limit(cs
->cs
,
240 RADEON_GEM_DOMAIN_VRAM
, ws
->vram_size
);
243 cs
->base
.buf
= cs
->cs
->packets
;
244 cs
->base
.cdw
= cs
->cs
->cdw
;
248 static void radeon_r300_winsys_cs_destroy(struct r300_winsys_cs
*rcs
)
250 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
251 radeon_cs_destroy(cs
->cs
);
255 void radeon_winsys_init_functions(struct radeon_drm_winsys
*ws
)
257 ws
->base
.get_value
= radeon_get_value
;
258 ws
->base
.buffer_create
= radeon_r300_winsys_buffer_create
;
259 ws
->base
.buffer_reference
= radeon_r300_winsys_buffer_reference
;
260 ws
->base
.buffer_from_handle
= radeon_r300_winsys_buffer_from_handle
;
261 ws
->base
.buffer_get_handle
= radeon_r300_winsys_buffer_get_handle
;
262 ws
->base
.cs_create
= radeon_r300_winsys_cs_create
;
263 ws
->base
.cs_destroy
= radeon_r300_winsys_cs_destroy
;
264 ws
->base
.cs_validate
= radeon_r300_winsys_cs_validate
;
265 ws
->base
.cs_flush
= radeon_r300_winsys_cs_flush
;
266 ws
->base
.cs_reset_buffers
= radeon_r300_winsys_cs_reset_buffers
;
267 ws
->base
.cs_set_flush
= radeon_r300_winsys_cs_set_flush
;