/**********************************************************
 * Copyright 2009-2015 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
#include "vmw_query.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum MOB memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR 2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR 2


struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   boolean is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         SVGAMobId *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   boolean referenced;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct util_hash_table *hash;

#ifdef DEBUG
   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referenced by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because
    * it ran out of command space, but because a substantial amount of GMR
    * memory is already referenced.
    */
   boolean preemptive_flush;
};


static inline struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


static inline unsigned
vmw_translate_to_pb_flags(unsigned flags)
{
   unsigned f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

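/**
 * vmw_swc_flush - The winsys flush callback
 *
 * @swc: The winsys context.
 * @pfence: If non-NULL, returns a ref-counted fence for the submission.
 *
 * Validates all buffers on the validation list, applies the staged region
 * and MOB relocations, submits the batched commands to the kernel and
 * fences the validated buffers. Finally, all per-flush state (relocation
 * lists, hash table and seen-memory counters) is reset.
 */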
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if (ret == PIPE_OK) {

      /* Apply relocations */
      for (i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if (!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for (i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for (i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if (pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}


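/**
 * vmw_swc_reserve - The winsys reserve callback
 *
 * @swc: The winsys context.
 * @nr_bytes: Number of command bytes to reserve.
 * @nr_relocs: Number of relocations the reserved commands may stage, in
 * each of the surface, shader and region relocation lists.
 *
 * Returns a pointer into the command buffer where the caller may write
 * nr_bytes of commands, or NULL if the caller must flush first, either
 * because a buffer would overflow or because a preemptive flush has been
 * requested.
 */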
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Check if somebody forgot to check the previous failure */
   if (vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   assert(nr_bytes <= vswc->command.size);
   if (nr_bytes > vswc->command.size)
      return NULL;

   if (vswc->preemptive_flush ||
       vswc->command.used + nr_bytes > vswc->command.size ||
       vswc->surface.used + nr_relocs > vswc->surface.size ||
       vswc->shader.used + nr_relocs > vswc->shader.size ||
       vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}

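/**
 * vmw_swc_context_relocation - Write the context id into the command stream
 *
 * @swc: The winsys context.
 * @cid: Command stream location where the context id should be written.
 */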
static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}

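/**
 * vmw_swc_add_validate_buffer - Add a buffer to the validation list
 *
 * @vswc: The winsys context.
 * @pb_buf: The pipebuffer to validate.
 * @flags: SVGA relocation flags, translated to pipebuffer usage flags.
 *
 * Adds the buffer to the context's validation list unless it is already
 * present in the hash table. Returns TRUE iff the buffer was newly added.
 */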
static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   enum pipe_error ret;
   unsigned translated_flags;

   /*
    * TODO: Update pb_validate to provide a similar functionality
    * (Check buffer already present before adding)
    */
   if (util_hash_table_get(vswc->hash, pb_buf) != pb_buf) {
      translated_flags = vmw_translate_to_pb_flags(flags);
      ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags);
      /* TODO: Update pipebuffer to reserve buffers and not fail here */
      assert(ret == PIPE_OK);
      (void)ret;
      (void)util_hash_table_set(vswc->hash, pb_buf, pb_buf);
      return TRUE;
   }

   return FALSE;
}

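/**
 * vmw_swc_region_relocation - Stage a GMR region relocation
 *
 * @swc: The winsys context.
 * @where: Command stream location that will receive the guest pointer.
 * @buffer: The winsys buffer the relocation points into.
 * @offset: Offset into the buffer.
 * @flags: SVGA relocation flags.
 *
 * Also accounts the referenced GMR memory and schedules a preemptive flush
 * once a fifth of the GMR pool size has been referenced.
 */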
static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->size;
      if (vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}

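/**
 * vmw_swc_mob_relocation - Stage a MOB relocation
 *
 * @swc: The winsys context.
 * @id: If non-NULL, command stream location that receives the MOB id.
 * @offset_into_mob: If non-NULL, receives the offset into the MOB.
 * @buffer: The winsys buffer backing the MOB.
 * @offset: Offset into the buffer.
 * @flags: SVGA relocation flags.
 *
 * Also accounts the referenced MOB memory and schedules a preemptive flush
 * once a substantial fraction of the maximum MOB memory is referenced.
 */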
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;
   struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);

   if (id) {
      assert(vswc->region.staged < vswc->region.reserved);

      reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      reloc->mob.id = id;
      reloc->mob.offset_into_mob = offset_into_mob;

      /*
       * pb_validate holds a refcount to the buffer, so no need to
       * refcount it again in the relocation.
       */
      reloc->buffer = pb_buffer;
      reloc->offset = offset;
      reloc->is_mob = TRUE;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
      vswc->seen_mobs += pb_buffer->size;
      /* Flush preemptively once a substantial fraction of the maximum MOB
       * memory is referenced; see VMW_MAX_MOB_MEM_FACTOR above.
       */
      if (vswc->seen_mobs >=
          vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}


/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc: Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 * we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}

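/**
 * vmw_swc_surface_only_relocation - Stage a surface id relocation
 *
 * @swc: The winsys context.
 * @where: If non-NULL, command stream location that receives the surface id.
 * @vsurf: The surface to reference.
 * @flags: SVGA relocation flags.
 *
 * Adds the surface to the validation item list if it isn't already there,
 * accounts the referenced surface memory, and schedules a preemptive flush
 * once a substantial fraction of the maximum surface memory is referenced.
 */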
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;
      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      /* Flush preemptively once a substantial fraction of the maximum
       * surface memory is referenced; see VMW_MAX_SURF_MEM_FACTOR above.
       */
      if (vswc->seen_surfaces >=
          vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}

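/**
 * vmw_swc_surface_relocation - The winsys surface_relocation callback
 *
 * @swc: The winsys context.
 * @where: If non-NULL, command stream location that receives the surface id.
 * @mobid: If non-NULL, command stream location that receives the id of the
 * surface's backing MOB.
 * @surface: The surface to reference, or NULL to emit SVGA3D_INVALID_ID.
 * @flags: SVGA relocation flags.
 *
 * For guest-backed surfaces with a backup buffer, also stages a MOB
 * relocation so that the backup buffer ends up fenced.
 */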
static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if (!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */

      pipe_mutex_lock(vsurf->mutex);
      assert(vsurf->buf != NULL);

      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      pipe_mutex_unlock(vsurf->mutex);
   }
}

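/**
 * vmw_swc_shader_relocation - The winsys shader_relocation callback
 *
 * @swc: The winsys context.
 * @shid: If non-NULL, command stream location that receives the shader id.
 * @mobid: If non-NULL, receives the id of the shader's backing MOB.
 * @offset: If non-NULL, receives the offset into the backing MOB.
 * @shader: The shader to reference, or NULL to emit SVGA3D_INVALID_ID.
 * @flags: SVGA relocation flags.
 *
 * For pre-vgpu10 devices, the shader is also tracked on the context's
 * validation item list. If the shader has a backing buffer, a MOB
 * relocation is staged for it.
 */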
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;
         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return code.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

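/**
 * vmw_swc_query_relocation - The winsys query_relocation callback
 *
 * @swc: The winsys context.
 * @id: Command stream location that receives the query buffer's MOB id.
 * @query: The guest-backed query object.
 */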
static void
vmw_swc_query_relocation(struct svga_winsys_context *swc,
                         SVGAMobId *id,
                         struct svga_winsys_gb_query *query)
{
   /* Queries are backed by one big MOB */
   vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
                          SVGA_RELOC_READ | SVGA_RELOC_WRITE);
}

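/**
 * vmw_swc_commit - The winsys commit callback
 *
 * @swc: The winsys context.
 *
 * Commits the reserved command bytes and moves the staged relocation items
 * to the used state, making them visible to the next flush.
 */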
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}


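/**
 * vmw_swc_destroy - The winsys context destroy callback
 *
 * @swc: The winsys context.
 *
 * Releases all validation item references, destroys the hash table, the
 * validation list and the kernel context, and frees the structure.
 */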
static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for (i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for (i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}

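/*
 * Key hashing and comparison callbacks for the context's pointer
 * hash table.
 */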
static unsigned vmw_hash_ptr(void *p)
{
   return (unsigned)(unsigned long)p;
}

static int vmw_ptr_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}


/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode.
 * @bytecodeLen: The length of the bytecode.
 *
 * Creates an svga_winsys_gb_shader structure, allocates a buffer for the
 * shader code and copies the shader code into the buffer. Shader
 * resource creation is not done.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *shader;
   struct svga_winsys_gb_shader *gb_shader =
      vmw_svga_winsys_shader_create(&vswc->vws->base, shaderType, bytecode,
                                    bytecodeLen);
   if (!gb_shader)
      return NULL;

   shader = vmw_svga_winsys_shader(gb_shader);
   shader->shid = shaderId;

   return gb_shader;
}

/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Frees the shader structure and the buffer holding the shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
}

/**
 * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
 *
 * @swc: The winsys context.
 * @surface: The surface to be referenced.
 * @shader: The shader to be referenced.
 * @flags: Relocation flags.
 *
 * This callback is needed because shader backing buffers are sub-allocated,
 * and hence the kernel fencing is not sufficient. The buffers need to be put
 * on the context's validation list and fenced after command submission to
 * avoid reuse of busy shader buffers. In addition, surfaces need to be put
 * on the validation list in order for the driver to regard them as
 * referenced by the command stream.
 */
static enum pipe_error
vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
                                struct svga_winsys_surface *surface,
                                struct svga_winsys_gb_shader *shader,
                                unsigned flags)
{
   /*
    * Need to reserve one validation item for either the surface or
    * the shader.
    */
   if (!vmw_swc_reserve(swc, 0, 1))
      return PIPE_ERROR_OUT_OF_MEMORY;

   if (surface)
      vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
   else if (shader)
      vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);

   vmw_swc_commit(swc);

   return PIPE_OK;
}

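/**
 * vmw_svga_winsys_context_create - Create an svga_winsys_context
 *
 * @sws: The winsys screen.
 *
 * Allocates and initializes a new winsys context, creates the kernel
 * context (extended if vgpu10 is available), and sets up the validation
 * list, the hash table and, in debug builds, the debug-flush context.
 * Returns NULL on failure.
 */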
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if (!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.query_relocation = vmw_swc_query_relocation;
   vswc->base.query_bind = vmw_swc_query_bind;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
   vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;

   vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;

   if (sws->have_vgpu10)
      vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
   else
      vswc->base.cid = vmw_ioctl_context_create(vws);

   if (vswc->base.cid == -1)
      goto out_no_context;

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if (!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
   if (!vswc->hash)
      goto out_no_hash;

#ifdef DEBUG
   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif

   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   vmw_ioctl_context_destroy(vws, vswc->base.cid);
out_no_context:
   FREE(vswc);
   return NULL;
}