while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) == -EBUSY);
} else*/ {
- struct drm_radeon_gem_wait_idle args = {};
+ struct drm_radeon_gem_wait_idle args;
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
&args, sizeof(args)) == -EBUSY);
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) != 0;
} else*/ {
- struct drm_radeon_gem_busy args = {};
+ struct drm_radeon_gem_busy args;
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
&args, sizeof(args)) != 0;
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
- struct drm_gem_close args = {};
+ struct drm_gem_close args;
+
+ memset(&args, 0, sizeof(args));
if (bo->name) {
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
{
struct radeon_bo *bo = radeon_bo(_buf);
struct radeon_drm_cs *cs = flush_ctx;
- struct drm_radeon_gem_mmap args = {};
+ struct drm_radeon_gem_mmap args;
void *ptr;
+ memset(&args, 0, sizeof(args));
+
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
struct radeon_drm_winsys *rws = mgr->rws;
struct radeon_bo *bo;
- struct drm_radeon_gem_create args = {};
+ struct drm_radeon_gem_create args;
struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
+ memset(&args, 0, sizeof(args));
+
assert(rdesc->initial_domains && rdesc->reloc_domains);
assert((rdesc->initial_domains &
~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
enum radeon_bo_layout *macrotiled)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
- struct drm_radeon_gem_set_tiling args = {};
+ struct drm_radeon_gem_set_tiling args;
+
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
- struct drm_radeon_gem_set_tiling args = {};
+ struct drm_radeon_gem_set_tiling args;
+
+ memset(&args, 0, sizeof(args));
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
- struct drm_gem_open open_arg = {};
+ struct drm_gem_open open_arg;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+
/* We must maintain a list of pairs <handle, bo>, so that we always return
* the same BO for one particular handle. If we didn't do that and created
* more than one BO for the same handle and then relocated them in a CS,
unsigned stride,
struct winsys_handle *whandle)
{
- struct drm_gem_flink flink = {};
+ struct drm_gem_flink flink;
struct radeon_bo *bo = get_radeon_bo(buffer);
+ memset(&flink, 0, sizeof(flink));
+
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
if (!bo->flinked) {
flink.handle = bo->handle;
pipe_mutex *mutex,
unsigned request, boolean enable)
{
- struct drm_radeon_info info = {0};
+ struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
+ memset(&info, 0, sizeof(info));
+
pipe_mutex_lock(*mutex);
/* Early exit if we are sure the request will fail. */
static boolean radeon_get_drm_value(int fd, unsigned request,
const char *errname, uint32_t *out)
{
- struct drm_radeon_info info = {0};
+ struct drm_radeon_info info;
int retval;
+ memset(&info, 0, sizeof(info));
+
info.value = (unsigned long)out;
info.request = request;
/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
- struct drm_radeon_gem_info gem_info = {0};
+ struct drm_radeon_gem_info gem_info;
int retval;
drmVersionPtr version;
+ memset(&gem_info, 0, sizeof(gem_info));
+
/* We do things in a specific order here.
*
* DRM version first. We need to be sure we're running on a KMS chipset.