svga: Add a limit to the maximum surface size
src/gallium/winsys/svga/drm/vmw_context.c
/**********************************************************
 * Copyright 2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum mob memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR 2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR 2


struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   boolean is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         SVGAMobId *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   boolean referenced;
};

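/**
 * The vmw-specific context, private to this winsys. It wraps the
 * svga_winsys_context base with the batched command buffer, the
 * surface/shader/region relocation lists, the buffer validation state
 * and the per-flush memory accounting used to trigger preemptive flushes.
 */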
struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct util_hash_table *hash;

#ifdef DEBUG
   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referenced by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because
    * it ran out of command space, but because a substantial amount of GMR
    * memory is referenced by the batched commands.
    */
   boolean preemptive_flush;
};


static INLINE struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


static INLINE unsigned
vmw_translate_to_pb_flags(unsigned flags)
{
   unsigned f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

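/**
 * vmw_swc_flush - Validate all referenced buffers, apply the staged
 * relocations and submit the batched commands to the kernel.
 *
 * @swc: Pointer to an svga_winsys_context
 * @pfence: If non-NULL, returns a reference to the fence of the submitted
 * command buffer.
 *
 * On return, the command buffer, the relocation lists and the per-flush
 * memory accounting are reset.
 */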
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if(pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}


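/**
 * vmw_swc_reserve - Reserve space for commands and relocations.
 *
 * @swc: Pointer to an svga_winsys_context
 * @nr_bytes: Number of command bytes to reserve
 * @nr_relocs: Number of relocations the command may stage. The same count
 * is reserved in each of the surface, shader and region relocation lists.
 *
 * Returns a pointer into the command buffer, or NULL if the caller needs
 * to flush first, either because the command buffer or a relocation list
 * would overflow, or because a preemptive flush is pending. A sketch of
 * the expected caller-side cycle (illustrative, not code from this file):
 *
 *    cmd = swc->reserve(swc, nr_bytes, nr_relocs);
 *    if (!cmd) {
 *       swc->flush(swc, NULL);
 *       cmd = swc->reserve(swc, nr_bytes, nr_relocs);
 *    }
 *    ... fill in the command and stage its relocations ...
 *    swc->commit(swc);
 */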
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs )
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Check if somebody forgot to check the previous failure */
   if(vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   assert(nr_bytes <= vswc->command.size);
   if(nr_bytes > vswc->command.size)
      return NULL;

   if(vswc->preemptive_flush ||
      vswc->command.used + nr_bytes > vswc->command.size ||
      vswc->surface.used + nr_relocs > vswc->surface.size ||
      vswc->shader.used + nr_relocs > vswc->shader.size ||
      vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}

static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}

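/**
 * vmw_swc_add_validate_buffer - Add a buffer to the validate list unless
 * it is already present.
 *
 * Returns TRUE iff the buffer was newly added, so that the caller can
 * account its size against the per-flush memory totals exactly once.
 */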
static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   enum pipe_error ret;
   unsigned translated_flags;

   /*
    * TODO: Update pb_validate to provide a similar functionality
    * (Check buffer already present before adding)
    */
   if (util_hash_table_get(vswc->hash, pb_buf) != pb_buf) {
      translated_flags = vmw_translate_to_pb_flags(flags);
      ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags);
      /* TODO: Update pipebuffer to reserve buffers and not fail here */
      assert(ret == PIPE_OK);
      (void)ret;
      (void)util_hash_table_set(vswc->hash, pb_buf, pb_buf);
      return TRUE;
   }

   return FALSE;
}

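/**
 * vmw_swc_region_relocation - Stage a GMR relocation and schedule the
 * backing buffer for validation. The SVGAGuestPtr pointed to by @where
 * is patched with the actual region id and offset at flush time.
 */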
static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->size;
      if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}

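/**
 * vmw_swc_mob_relocation - Stage a MOB relocation and schedule the
 * backing buffer for validation. The mob id and, if non-NULL, the offset
 * into the mob are patched at flush time. MOB relocations share the
 * region relocation list.
 */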
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->mob.id = id;
   reloc->mob.offset_into_mob = offset_into_mob;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = TRUE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_mobs += reloc->buffer->size;
      /* Flush preemptively once a substantial fraction of the max mob
       * memory is referenced; see VMW_MAX_MOB_MEM_FACTOR above. */
      if (vswc->seen_mobs >=
          vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}


/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc: Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 * we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}

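/**
 * vmw_swc_surface_only_relocation - Relocate a surface id without
 * touching a backing MOB. The surface is added to the validate list and
 * the context hash table on its first relocation within the current
 * batch; later relocations of the same surface are found via the hash.
 */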
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;
      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      /* Flush preemptively once a substantial fraction of the max surface
       * memory is referenced; see VMW_MAX_SURF_MEM_FACTOR above. */
      if (vswc->seen_surfaces >=
          vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   *where = vsurf->sid;
}

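/**
 * vmw_swc_surface_relocation - Relocate a surface id and, for
 * guest-backed surfaces with a backup buffer, also relocate the backing
 * MOB so that the buffer ends up fenced with the command batch.
 */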
static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if(!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */

      pipe_mutex_lock(vsurf->mutex);
      assert(vsurf->buf != NULL);

      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      pipe_mutex_unlock(vsurf->mutex);
   }
}

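/**
 * vmw_swc_shader_relocation - Relocate a guest-backed shader id and, if
 * the shader has a backing buffer, its MOB.
 */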
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if(!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   assert(vswc->shader.staged < vswc->shader.reserved);
   vshader = vmw_svga_winsys_shader(shader);
   ishader = util_hash_table_get(vswc->hash, vshader);

   if (ishader == NULL) {
      ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
      vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
      ishader->referenced = FALSE;
      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vshader, ishader);
      ++vswc->shader.staged;
   }

   if (!ishader->referenced) {
      ishader->referenced = TRUE;
      p_atomic_inc(&vshader->validated);
   }

   *shid = vshader->shid;

   if (mobid != NULL && vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

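/**
 * vmw_swc_commit - Commit the current reservation, accounting the
 * reserved command bytes as used and moving the staged relocations of
 * all three lists to their used portions.
 */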
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.reserved);
   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}


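/**
 * vmw_swc_destroy - Tear down the context: drop references to any
 * still-tracked surfaces and shaders, destroy the hash table and the
 * validate list, and destroy the kernel context.
 */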
static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for(i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}

static unsigned vmw_hash_ptr(void *p)
{
   return (unsigned)(unsigned long)p;
}

static int vmw_ptr_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}

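/**
 * vmw_svga_winsys_context_create - Allocate and initialize an
 * svga_winsys_context, creating the corresponding kernel context and
 * the auxiliary validation and hashing state.
 */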
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if(!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   vswc->base.cid = vmw_ioctl_context_create(vws);
   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
   if (!vswc->hash)
      goto out_no_hash;

#ifdef DEBUG
   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif

   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   FREE(vswc);
   return NULL;
}