2 * Copyright 2011 Joakim Sindholt <opensource@zhasha.com>
3 * Copyright 2013 Christoph Bumiller
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
27 #include "swapchain9.h"
28 #include "basetexture9.h"
30 #include "indexbuffer9.h"
32 #include "vertexbuffer9.h"
33 #include "vertexdeclaration9.h"
34 #include "vertexshader9.h"
35 #include "pixelshader9.h"
36 #include "nine_pipe.h"
38 #include "nine_limits.h"
39 #include "pipe/p_context.h"
40 #include "pipe/p_state.h"
41 #include "cso_cache/cso_context.h"
42 #include "util/u_atomic.h"
43 #include "util/u_upload_mgr.h"
44 #include "util/u_math.h"
45 #include "util/u_box.h"
46 #include "util/u_simple_shaders.h"
47 #include "util/u_gen_mipmap.h"
50 #include "nine_queue.h"
51 #include "nine_csmt_helper.h"
52 #include "os/os_thread.h"
54 #define DBG_CHANNEL DBG_DEVICE
58 struct csmt_instruction
{
59 int (* func
)(struct NineDevice9
*This
, struct csmt_instruction
*instr
);
64 struct nine_queue_pool
* pool
;
66 cnd_t event_processed
;
67 mtx_t mutex_processed
;
68 struct NineDevice9
*device
;
76 /* Wait for instruction to be processed.
77 * Caller has to ensure that only one thread waits at time.
80 nine_csmt_wait_processed(struct csmt_context
*ctx
)
82 mtx_lock(&ctx
->mutex_processed
);
83 while (!p_atomic_read(&ctx
->processed
)) {
84 cnd_wait(&ctx
->event_processed
, &ctx
->mutex_processed
);
86 mtx_unlock(&ctx
->mutex_processed
);
89 /* CSMT worker thread */
92 nine_csmt_worker(void *arg
)
94 struct csmt_context
*ctx
= arg
;
95 struct csmt_instruction
*instr
;
96 DBG("CSMT worker spawned\n");
98 u_thread_setname("CSMT-Worker");
101 nine_queue_wait_flush(ctx
->pool
);
102 mtx_lock(&ctx
->thread_running
);
104 /* Get instruction. NULL on empty cmdbuf. */
105 while (!p_atomic_read(&ctx
->terminate
) &&
106 (instr
= (struct csmt_instruction
*)nine_queue_get(ctx
->pool
))) {
109 if (instr
->func(ctx
->device
, instr
)) {
110 mtx_lock(&ctx
->mutex_processed
);
111 p_atomic_set(&ctx
->processed
, TRUE
);
112 cnd_signal(&ctx
->event_processed
);
113 mtx_unlock(&ctx
->mutex_processed
);
115 if (p_atomic_read(&ctx
->toPause
)) {
116 mtx_unlock(&ctx
->thread_running
);
117 /* will wait here the thread can be resumed */
118 mtx_lock(&ctx
->thread_resume
);
119 mtx_lock(&ctx
->thread_running
);
120 mtx_unlock(&ctx
->thread_resume
);
124 mtx_unlock(&ctx
->thread_running
);
125 if (p_atomic_read(&ctx
->terminate
)) {
126 mtx_lock(&ctx
->mutex_processed
);
127 p_atomic_set(&ctx
->processed
, TRUE
);
128 cnd_signal(&ctx
->event_processed
);
129 mtx_unlock(&ctx
->mutex_processed
);
134 DBG("CSMT worker destroyed\n");
138 /* Create a CSMT context.
139 * Spawns a worker thread.
141 struct csmt_context
*
142 nine_csmt_create( struct NineDevice9
*This
)
144 struct csmt_context
*ctx
;
146 ctx
= CALLOC_STRUCT(csmt_context
);
150 ctx
->pool
= nine_queue_create();
155 cnd_init(&ctx
->event_processed
);
156 (void) mtx_init(&ctx
->mutex_processed
, mtx_plain
);
157 (void) mtx_init(&ctx
->thread_running
, mtx_plain
);
158 (void) mtx_init(&ctx
->thread_resume
, mtx_plain
);
161 u_thread_setname("Main thread");
166 ctx
->worker
= u_thread_create(nine_csmt_worker
, ctx
);
168 nine_queue_delete(ctx
->pool
);
173 DBG("Returning context %p\n", ctx
);
/* Do-nothing instruction used as a synchronization point: returning
 * nonzero makes the worker signal 'processed' (see nine_csmt_worker). */
static int
nop_func( struct NineDevice9 *This, struct csmt_instruction *instr )
{
    (void) This;
    (void) instr;
    return 1;
}
187 /* Push nop instruction and flush the queue.
188 * Waits for the worker to complete. */
190 nine_csmt_process( struct NineDevice9
*device
)
192 struct csmt_instruction
* instr
;
193 struct csmt_context
*ctx
= device
->csmt_ctx
;
195 if (!device
->csmt_active
)
198 if (nine_queue_isempty(ctx
->pool
))
201 DBG("device=%p\n", device
);
204 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
206 instr
->func
= nop_func
;
208 p_atomic_set(&ctx
->processed
, FALSE
);
209 nine_queue_flush(ctx
->pool
);
211 nine_csmt_wait_processed(ctx
);
214 /* Destroys a CSMT context.
215 * Waits for the worker thread to terminate.
218 nine_csmt_destroy( struct NineDevice9
*device
, struct csmt_context
*ctx
)
220 struct csmt_instruction
* instr
;
221 thrd_t render_thread
= ctx
->worker
;
223 DBG("device=%p ctx=%p\n", device
, ctx
);
225 /* Push nop and flush the queue. */
226 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
228 instr
->func
= nop_func
;
230 p_atomic_set(&ctx
->processed
, FALSE
);
231 /* Signal worker to terminate. */
232 p_atomic_set(&ctx
->terminate
, TRUE
);
233 nine_queue_flush(ctx
->pool
);
235 nine_csmt_wait_processed(ctx
);
236 nine_queue_delete(ctx
->pool
);
237 mtx_destroy(&ctx
->mutex_processed
);
241 thrd_join(render_thread
, NULL
);
245 nine_csmt_pause( struct NineDevice9
*device
)
247 struct csmt_context
*ctx
= device
->csmt_ctx
;
249 if (!device
->csmt_active
)
252 /* No need to pause the thread */
253 if (nine_queue_no_flushed_work(ctx
->pool
))
256 mtx_lock(&ctx
->thread_resume
);
257 p_atomic_set(&ctx
->toPause
, TRUE
);
259 /* Wait the thread is paused */
260 mtx_lock(&ctx
->thread_running
);
261 ctx
->hasPaused
= TRUE
;
262 p_atomic_set(&ctx
->toPause
, FALSE
);
266 nine_csmt_resume( struct NineDevice9
*device
)
268 struct csmt_context
*ctx
= device
->csmt_ctx
;
270 if (!device
->csmt_active
)
276 ctx
->hasPaused
= FALSE
;
277 mtx_unlock(&ctx
->thread_running
);
278 mtx_unlock(&ctx
->thread_resume
);
281 struct pipe_context
*
282 nine_context_get_pipe( struct NineDevice9
*device
)
284 nine_csmt_process(device
);
285 return device
->context
.pipe
;
288 struct pipe_context
*
289 nine_context_get_pipe_multithread( struct NineDevice9
*device
)
291 struct csmt_context
*ctx
= device
->csmt_ctx
;
293 if (!device
->csmt_active
)
294 return device
->context
.pipe
;
296 if (!u_thread_is_self(ctx
->worker
))
297 nine_csmt_process(device
);
299 return device
->context
.pipe
;
302 struct pipe_context
*
303 nine_context_get_pipe_acquire( struct NineDevice9
*device
)
305 nine_csmt_pause(device
);
306 return device
->context
.pipe
;
/* Release the pipe acquired via nine_context_get_pipe_acquire by
 * resuming the CSMT worker. */
void
nine_context_get_pipe_release( struct NineDevice9 *device )
{
    nine_csmt_resume(device);
}
315 /* Nine state functions */
317 /* Check if some states need to be set dirty */
320 check_multisample(struct NineDevice9
*device
)
322 DWORD
*rs
= device
->context
.rs
;
323 DWORD new_value
= (rs
[D3DRS_ZENABLE
] || rs
[D3DRS_STENCILENABLE
]) &&
324 device
->context
.rt
[0]->desc
.MultiSampleType
>= 1 &&
325 rs
[D3DRS_MULTISAMPLEANTIALIAS
];
326 if (rs
[NINED3DRS_MULTISAMPLE
] != new_value
) {
327 rs
[NINED3DRS_MULTISAMPLE
] = new_value
;
328 return NINE_STATE_RASTERIZER
;
333 /* State preparation only */
336 prepare_blend(struct NineDevice9
*device
)
338 nine_convert_blend_state(&device
->context
.pipe_data
.blend
, device
->context
.rs
);
339 device
->context
.commit
|= NINE_STATE_COMMIT_BLEND
;
343 prepare_dsa(struct NineDevice9
*device
)
345 nine_convert_dsa_state(&device
->context
.pipe_data
.dsa
, device
->context
.rs
);
346 device
->context
.commit
|= NINE_STATE_COMMIT_DSA
;
350 prepare_rasterizer(struct NineDevice9
*device
)
352 nine_convert_rasterizer_state(device
, &device
->context
.pipe_data
.rast
, device
->context
.rs
);
353 device
->context
.commit
|= NINE_STATE_COMMIT_RASTERIZER
;
357 prepare_vs_constants_userbuf_swvp(struct NineDevice9
*device
)
359 struct nine_context
*context
= &device
->context
;
361 if (context
->changed
.vs_const_f
|| context
->changed
.group
& NINE_STATE_SWVP
) {
362 struct pipe_constant_buffer cb
;
364 cb
.buffer_offset
= 0;
365 cb
.buffer_size
= 4096 * sizeof(float[4]);
366 cb
.user_buffer
= context
->vs_const_f_swvp
;
368 if (context
->vs
->lconstf
.ranges
) {
369 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
370 const struct nine_range
*r
= lconstf
->ranges
;
372 float *dst
= context
->vs_lconstf_temp
;
373 float *src
= (float *)cb
.user_buffer
;
374 memcpy(dst
, src
, cb
.buffer_size
);
377 unsigned c
= r
->end
- r
->bgn
;
378 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
382 cb
.user_buffer
= dst
;
385 /* Do not erase the buffer field.
386 * It is either NULL (user_cbufs), or a resource.
387 * u_upload_data will do the proper refcount */
388 context
->pipe_data
.cb0_swvp
.buffer_offset
= cb
.buffer_offset
;
389 context
->pipe_data
.cb0_swvp
.buffer_size
= cb
.buffer_size
;
390 context
->pipe_data
.cb0_swvp
.user_buffer
= cb
.user_buffer
;
392 cb
.user_buffer
= (char *)cb
.user_buffer
+ 4096 * sizeof(float[4]);
393 context
->pipe_data
.cb1_swvp
.buffer_offset
= cb
.buffer_offset
;
394 context
->pipe_data
.cb1_swvp
.buffer_size
= cb
.buffer_size
;
395 context
->pipe_data
.cb1_swvp
.user_buffer
= cb
.user_buffer
;
397 context
->changed
.vs_const_f
= 0;
400 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
401 struct pipe_constant_buffer cb
;
403 cb
.buffer_offset
= 0;
404 cb
.buffer_size
= 2048 * sizeof(float[4]);
405 cb
.user_buffer
= context
->vs_const_i
;
407 context
->pipe_data
.cb2_swvp
.buffer_offset
= cb
.buffer_offset
;
408 context
->pipe_data
.cb2_swvp
.buffer_size
= cb
.buffer_size
;
409 context
->pipe_data
.cb2_swvp
.user_buffer
= cb
.user_buffer
;
410 context
->changed
.vs_const_i
= 0;
413 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
414 struct pipe_constant_buffer cb
;
416 cb
.buffer_offset
= 0;
417 cb
.buffer_size
= 512 * sizeof(float[4]);
418 cb
.user_buffer
= context
->vs_const_b
;
420 context
->pipe_data
.cb3_swvp
.buffer_offset
= cb
.buffer_offset
;
421 context
->pipe_data
.cb3_swvp
.buffer_size
= cb
.buffer_size
;
422 context
->pipe_data
.cb3_swvp
.user_buffer
= cb
.user_buffer
;
423 context
->changed
.vs_const_b
= 0;
426 if (!device
->driver_caps
.user_cbufs
) {
427 struct pipe_constant_buffer
*cb
= &(context
->pipe_data
.cb0_swvp
);
428 u_upload_data(device
->context
.pipe
->const_uploader
,
431 device
->constbuf_alignment
,
433 &(cb
->buffer_offset
),
435 u_upload_unmap(device
->context
.pipe
->const_uploader
);
436 cb
->user_buffer
= NULL
;
438 cb
= &(context
->pipe_data
.cb1_swvp
);
439 u_upload_data(device
->context
.pipe
->const_uploader
,
442 device
->constbuf_alignment
,
444 &(cb
->buffer_offset
),
446 u_upload_unmap(device
->context
.pipe
->const_uploader
);
447 cb
->user_buffer
= NULL
;
449 cb
= &(context
->pipe_data
.cb2_swvp
);
450 u_upload_data(device
->context
.pipe
->const_uploader
,
453 device
->constbuf_alignment
,
455 &(cb
->buffer_offset
),
457 u_upload_unmap(device
->context
.pipe
->const_uploader
);
458 cb
->user_buffer
= NULL
;
460 cb
= &(context
->pipe_data
.cb3_swvp
);
461 u_upload_data(device
->context
.pipe
->const_uploader
,
464 device
->constbuf_alignment
,
466 &(cb
->buffer_offset
),
468 u_upload_unmap(device
->context
.pipe
->const_uploader
);
469 cb
->user_buffer
= NULL
;
472 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
473 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
477 prepare_vs_constants_userbuf(struct NineDevice9
*device
)
479 struct nine_context
*context
= &device
->context
;
480 struct pipe_constant_buffer cb
;
482 cb
.buffer_offset
= 0;
483 cb
.buffer_size
= context
->vs
->const_used_size
;
484 cb
.user_buffer
= context
->vs_const_f
;
487 prepare_vs_constants_userbuf_swvp(device
);
491 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
492 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
493 memcpy(idst
, context
->vs_const_i
, NINE_MAX_CONST_I
* sizeof(int[4]));
494 context
->changed
.vs_const_i
= 0;
497 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
498 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
499 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
500 memcpy(bdst
, context
->vs_const_b
, NINE_MAX_CONST_B
* sizeof(BOOL
));
501 context
->changed
.vs_const_b
= 0;
507 if (context
->vs
->lconstf
.ranges
) {
508 /* TODO: Can we make it so that we don't have to copy everything ? */
509 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
510 const struct nine_range
*r
= lconstf
->ranges
;
512 float *dst
= context
->vs_lconstf_temp
;
513 float *src
= (float *)cb
.user_buffer
;
514 memcpy(dst
, src
, cb
.buffer_size
);
517 unsigned c
= r
->end
- r
->bgn
;
518 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
522 cb
.user_buffer
= dst
;
525 if (!device
->driver_caps
.user_cbufs
) {
526 context
->pipe_data
.cb_vs
.buffer_size
= cb
.buffer_size
;
527 u_upload_data(device
->context
.pipe
->const_uploader
,
530 device
->constbuf_alignment
,
532 &context
->pipe_data
.cb_vs
.buffer_offset
,
533 &context
->pipe_data
.cb_vs
.buffer
);
534 u_upload_unmap(device
->context
.pipe
->const_uploader
);
535 context
->pipe_data
.cb_vs
.user_buffer
= NULL
;
537 context
->pipe_data
.cb_vs
= cb
;
539 context
->changed
.vs_const_f
= 0;
541 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
542 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
546 prepare_ps_constants_userbuf(struct NineDevice9
*device
)
548 struct nine_context
*context
= &device
->context
;
549 struct pipe_constant_buffer cb
;
551 cb
.buffer_offset
= 0;
552 cb
.buffer_size
= context
->ps
->const_used_size
;
553 cb
.user_buffer
= context
->ps_const_f
;
555 if (context
->changed
.ps_const_i
) {
556 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
557 memcpy(idst
, context
->ps_const_i
, sizeof(context
->ps_const_i
));
558 context
->changed
.ps_const_i
= 0;
560 if (context
->changed
.ps_const_b
) {
561 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
562 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
563 memcpy(bdst
, context
->ps_const_b
, sizeof(context
->ps_const_b
));
564 context
->changed
.ps_const_b
= 0;
567 /* Upload special constants needed to implement PS1.x instructions like TEXBEM,TEXBEML and BEM */
568 if (context
->ps
->bumpenvmat_needed
) {
569 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, cb
.buffer_size
);
570 memcpy(&context
->ps_lconstf_temp
[4 * 8], &device
->context
.bumpmap_vars
, sizeof(device
->context
.bumpmap_vars
));
572 cb
.user_buffer
= context
->ps_lconstf_temp
;
575 if (context
->ps
->byte_code
.version
< 0x30 &&
576 context
->rs
[D3DRS_FOGENABLE
]) {
577 float *dst
= &context
->ps_lconstf_temp
[4 * 32];
578 if (cb
.user_buffer
!= context
->ps_lconstf_temp
) {
579 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, cb
.buffer_size
);
580 cb
.user_buffer
= context
->ps_lconstf_temp
;
583 d3dcolor_to_rgba(dst
, context
->rs
[D3DRS_FOGCOLOR
]);
584 if (context
->rs
[D3DRS_FOGTABLEMODE
] == D3DFOG_LINEAR
) {
585 dst
[4] = asfloat(context
->rs
[D3DRS_FOGEND
]);
586 dst
[5] = 1.0f
/ (asfloat(context
->rs
[D3DRS_FOGEND
]) - asfloat(context
->rs
[D3DRS_FOGSTART
]));
587 } else if (context
->rs
[D3DRS_FOGTABLEMODE
] != D3DFOG_NONE
) {
588 dst
[4] = asfloat(context
->rs
[D3DRS_FOGDENSITY
]);
590 cb
.buffer_size
= 4 * 4 * 34;
596 if (!device
->driver_caps
.user_cbufs
) {
597 context
->pipe_data
.cb_ps
.buffer_size
= cb
.buffer_size
;
598 u_upload_data(device
->context
.pipe
->const_uploader
,
601 device
->constbuf_alignment
,
603 &context
->pipe_data
.cb_ps
.buffer_offset
,
604 &context
->pipe_data
.cb_ps
.buffer
);
605 u_upload_unmap(device
->context
.pipe
->const_uploader
);
606 context
->pipe_data
.cb_ps
.user_buffer
= NULL
;
608 context
->pipe_data
.cb_ps
= cb
;
610 context
->changed
.ps_const_f
= 0;
612 context
->changed
.group
&= ~NINE_STATE_PS_CONST
;
613 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
616 static inline uint32_t
617 prepare_vs(struct NineDevice9
*device
, uint8_t shader_changed
)
619 struct nine_context
*context
= &device
->context
;
620 struct NineVertexShader9
*vs
= context
->vs
;
621 uint32_t changed_group
= 0;
622 int has_key_changed
= 0;
624 if (likely(context
->programmable_vs
))
625 has_key_changed
= NineVertexShader9_UpdateKey(vs
, device
);
627 if (!shader_changed
&& !has_key_changed
)
630 /* likely because we dislike FF */
631 if (likely(context
->programmable_vs
)) {
632 context
->cso_shader
.vs
= NineVertexShader9_GetVariant(vs
);
635 context
->cso_shader
.vs
= vs
->ff_cso
;
638 if (context
->rs
[NINED3DRS_VSPOINTSIZE
] != vs
->point_size
) {
639 context
->rs
[NINED3DRS_VSPOINTSIZE
] = vs
->point_size
;
640 changed_group
|= NINE_STATE_RASTERIZER
;
643 if ((context
->bound_samplers_mask_vs
& vs
->sampler_mask
) != vs
->sampler_mask
)
644 /* Bound dummy sampler. */
645 changed_group
|= NINE_STATE_SAMPLER
;
647 context
->commit
|= NINE_STATE_COMMIT_VS
;
648 return changed_group
;
651 static inline uint32_t
652 prepare_ps(struct NineDevice9
*device
, uint8_t shader_changed
)
654 struct nine_context
*context
= &device
->context
;
655 struct NinePixelShader9
*ps
= context
->ps
;
656 uint32_t changed_group
= 0;
657 int has_key_changed
= 0;
660 has_key_changed
= NinePixelShader9_UpdateKey(ps
, context
);
662 if (!shader_changed
&& !has_key_changed
)
666 context
->cso_shader
.ps
= NinePixelShader9_GetVariant(ps
);
669 context
->cso_shader
.ps
= ps
->ff_cso
;
672 if ((context
->bound_samplers_mask_ps
& ps
->sampler_mask
) != ps
->sampler_mask
)
673 /* Bound dummy sampler. */
674 changed_group
|= NINE_STATE_SAMPLER
;
676 context
->commit
|= NINE_STATE_COMMIT_PS
;
677 return changed_group
;
680 /* State preparation incremental */
682 /* State preparation + State commit */
685 update_framebuffer(struct NineDevice9
*device
, bool is_clear
)
687 struct nine_context
*context
= &device
->context
;
688 struct pipe_context
*pipe
= context
->pipe
;
689 struct pipe_framebuffer_state
*fb
= &context
->pipe_data
.fb
;
691 struct NineSurface9
*rt0
= context
->rt
[0];
692 unsigned w
= rt0
->desc
.Width
;
693 unsigned h
= rt0
->desc
.Height
;
694 unsigned nr_samples
= rt0
->base
.info
.nr_samples
;
695 unsigned ps_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
696 unsigned mask
= is_clear
? 0xf : ps_mask
;
697 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
701 context
->rt_mask
= 0x0;
704 /* all render targets must have the same size and the depth buffer must be
705 * bigger. Multisample has to match, according to spec. But some apps do
706 * things wrong there, and no error is returned. The behaviour they get
707 * apparently is that depth buffer is disabled if it doesn't match.
708 * Surely the same for render targets. */
710 /* Special case: D3DFMT_NULL is used to bound no real render target,
711 * but render to depth buffer. We have to not take into account the render
712 * target info. TODO: know what should happen when there are several render targers
713 * and the first one is D3DFMT_NULL */
714 if (rt0
->desc
.Format
== D3DFMT_NULL
&& context
->ds
) {
715 w
= context
->ds
->desc
.Width
;
716 h
= context
->ds
->desc
.Height
;
717 nr_samples
= context
->ds
->base
.info
.nr_samples
;
720 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
721 struct NineSurface9
*rt
= context
->rt
[i
];
723 if (rt
&& rt
->desc
.Format
!= D3DFMT_NULL
&& (mask
& (1 << i
)) &&
724 rt
->desc
.Width
== w
&& rt
->desc
.Height
== h
&&
725 rt
->base
.info
.nr_samples
== nr_samples
) {
726 fb
->cbufs
[i
] = NineSurface9_GetSurface(rt
, sRGB
);
727 context
->rt_mask
|= 1 << i
;
728 fb
->nr_cbufs
= i
+ 1;
730 /* Color outputs must match RT slot,
731 * drivers will have to handle NULL entries for GL, too.
737 if (context
->ds
&& context
->ds
->desc
.Width
>= w
&&
738 context
->ds
->desc
.Height
>= h
&&
739 context
->ds
->base
.info
.nr_samples
== nr_samples
) {
740 fb
->zsbuf
= NineSurface9_GetSurface(context
->ds
, 0);
748 pipe
->set_framebuffer_state(pipe
, fb
); /* XXX: cso ? */
750 if (is_clear
&& context
->rt_mask
== ps_mask
)
751 context
->changed
.group
&= ~NINE_STATE_FB
;
755 update_viewport(struct NineDevice9
*device
)
757 struct nine_context
*context
= &device
->context
;
758 const D3DVIEWPORT9
*vport
= &context
->viewport
;
759 struct pipe_viewport_state pvport
;
761 /* D3D coordinates are:
762 * -1 .. +1 for X,Y and
763 * 0 .. +1 for Z (we use pipe_rasterizer_state.clip_halfz)
765 pvport
.scale
[0] = (float)vport
->Width
* 0.5f
;
766 pvport
.scale
[1] = (float)vport
->Height
* -0.5f
;
767 pvport
.scale
[2] = vport
->MaxZ
- vport
->MinZ
;
768 pvport
.translate
[0] = (float)vport
->Width
* 0.5f
+ (float)vport
->X
;
769 pvport
.translate
[1] = (float)vport
->Height
* 0.5f
+ (float)vport
->Y
;
770 pvport
.translate
[2] = vport
->MinZ
;
772 /* We found R600 and SI cards have some imprecision
773 * on the barycentric coordinates used for interpolation.
774 * Some shaders rely on having something precise.
775 * We found that the proprietary driver has the imprecision issue,
776 * except when the render target width and height are powers of two.
777 * It is using some sort of workaround for these cases
778 * which covers likely all the cases the applications rely
779 * on something precise.
780 * We haven't found the workaround, but it seems like it's better
781 * for applications if the imprecision is biased towards infinity
782 * instead of -infinity (which is what measured). So shift slightly
783 * the viewport: not enough to change rasterization result (in particular
784 * for multisampling), but enough to make the imprecision biased
785 * towards infinity. We do this shift only if render target width and
786 * height are powers of two.
787 * Solves 'red shadows' bug on UE3 games.
789 if (device
->driver_bugs
.buggy_barycentrics
&&
790 ((vport
->Width
& (vport
->Width
-1)) == 0) &&
791 ((vport
->Height
& (vport
->Height
-1)) == 0)) {
792 pvport
.translate
[0] -= 1.0f
/ 128.0f
;
793 pvport
.translate
[1] -= 1.0f
/ 128.0f
;
796 cso_set_viewport(context
->cso
, &pvport
);
799 /* Loop through VS inputs and pick the vertex elements with the declared
800 * usage from the vertex declaration, then insert the instance divisor from
801 * the stream source frequency setting.
804 update_vertex_elements(struct NineDevice9
*device
)
806 struct nine_context
*context
= &device
->context
;
807 const struct NineVertexDeclaration9
*vdecl
= device
->context
.vdecl
;
808 const struct NineVertexShader9
*vs
;
811 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
812 char used_streams
[device
->caps
.MaxStreams
];
813 int dummy_vbo_stream
= -1;
814 BOOL need_dummy_vbo
= FALSE
;
815 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
817 context
->stream_usage_mask
= 0;
818 memset(vdecl_index_map
, -1, 16);
819 memset(used_streams
, 0, device
->caps
.MaxStreams
);
820 vs
= context
->programmable_vs
? context
->vs
: device
->ff
.vs
;
823 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
824 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
825 n
, vs
->input_map
[n
].ndecl
, vdecl
);
827 for (i
= 0; i
< vdecl
->nelems
; i
++) {
828 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
829 vdecl_index_map
[n
] = i
;
830 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
834 if (vdecl_index_map
[n
] < 0)
835 need_dummy_vbo
= TRUE
;
838 /* No vertex declaration. Likely will never happen in practice,
839 * but we need not crash on this */
840 need_dummy_vbo
= TRUE
;
843 if (need_dummy_vbo
) {
844 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
845 if (!used_streams
[i
]) {
846 dummy_vbo_stream
= i
;
851 /* there are less vertex shader inputs than stream slots,
852 * so if we need a slot for the dummy vbo, we should have found one */
853 assert (!need_dummy_vbo
|| dummy_vbo_stream
!= -1);
855 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
856 index
= vdecl_index_map
[n
];
858 ve
[n
] = vdecl
->elems
[index
];
859 b
= ve
[n
].vertex_buffer_index
;
860 context
->stream_usage_mask
|= 1 << b
;
861 /* XXX wine just uses 1 here: */
862 if (context
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
863 ve
[n
].instance_divisor
= context
->stream_freq
[b
] & 0x7FFFFF;
865 /* if the vertex declaration is incomplete compared to what the
866 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
867 * This is not precised by the spec, but is the behaviour
869 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
870 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
871 ve
[n
].src_offset
= 0;
872 ve
[n
].instance_divisor
= 0;
876 if (context
->dummy_vbo_bound_at
!= dummy_vbo_stream
) {
877 if (context
->dummy_vbo_bound_at
>= 0)
878 context
->changed
.vtxbuf
|= 1 << context
->dummy_vbo_bound_at
;
879 if (dummy_vbo_stream
>= 0) {
880 context
->changed
.vtxbuf
|= 1 << dummy_vbo_stream
;
881 context
->vbo_bound_done
= FALSE
;
883 context
->dummy_vbo_bound_at
= dummy_vbo_stream
;
886 cso_set_vertex_elements(context
->cso
, vs
->num_inputs
, ve
);
890 update_vertex_buffers(struct NineDevice9
*device
)
892 struct nine_context
*context
= &device
->context
;
893 struct pipe_context
*pipe
= context
->pipe
;
894 struct pipe_vertex_buffer dummy_vtxbuf
;
895 uint32_t mask
= context
->changed
.vtxbuf
;
898 DBG("mask=%x\n", mask
);
900 if (context
->dummy_vbo_bound_at
>= 0) {
901 if (!context
->vbo_bound_done
) {
902 dummy_vtxbuf
.buffer
.resource
= device
->dummy_vbo
;
903 dummy_vtxbuf
.stride
= 0;
904 dummy_vtxbuf
.is_user_buffer
= false;
905 dummy_vtxbuf
.buffer_offset
= 0;
906 pipe
->set_vertex_buffers(pipe
, context
->dummy_vbo_bound_at
,
908 context
->vbo_bound_done
= TRUE
;
910 mask
&= ~(1 << context
->dummy_vbo_bound_at
);
913 for (i
= 0; mask
; mask
>>= 1, ++i
) {
915 if (context
->vtxbuf
[i
].buffer
.resource
)
916 pipe
->set_vertex_buffers(pipe
, i
, 1, &context
->vtxbuf
[i
]);
918 pipe
->set_vertex_buffers(pipe
, i
, 1, NULL
);
922 context
->changed
.vtxbuf
= 0;
925 static inline boolean
926 update_sampler_derived(struct nine_context
*context
, unsigned s
)
928 boolean changed
= FALSE
;
930 if (context
->samp
[s
][NINED3DSAMP_SHADOW
] != context
->texture
[s
].shadow
) {
932 context
->samp
[s
][NINED3DSAMP_SHADOW
] = context
->texture
[s
].shadow
;
935 if (context
->samp
[s
][NINED3DSAMP_CUBETEX
] !=
936 (context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
)) {
938 context
->samp
[s
][NINED3DSAMP_CUBETEX
] =
939 context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
;
942 if (context
->samp
[s
][D3DSAMP_MIPFILTER
] != D3DTEXF_NONE
) {
943 int lod
= context
->samp
[s
][D3DSAMP_MAXMIPLEVEL
] - context
->texture
[s
].lod
;
946 if (context
->samp
[s
][NINED3DSAMP_MINLOD
] != lod
) {
948 context
->samp
[s
][NINED3DSAMP_MINLOD
] = lod
;
951 context
->changed
.sampler
[s
] &= ~0x300; /* lod changes irrelevant */
957 /* TODO: add sRGB override to pipe_sampler_state ? */
959 update_textures_and_samplers(struct NineDevice9
*device
)
961 struct nine_context
*context
= &device
->context
;
962 struct pipe_sampler_view
*view
[NINE_MAX_SAMPLERS
];
963 unsigned num_textures
;
965 boolean commit_samplers
;
966 uint16_t sampler_mask
= context
->ps
? context
->ps
->sampler_mask
:
967 device
->ff
.ps
->sampler_mask
;
969 /* TODO: Can we reduce iterations here ? */
971 commit_samplers
= FALSE
;
972 context
->bound_samplers_mask_ps
= 0;
973 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_PS
; ++i
) {
974 const unsigned s
= NINE_SAMPLER_PS(i
);
977 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
982 if (context
->texture
[s
].enabled
) {
983 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
985 view
[i
] = context
->texture
[s
].view
[sRGB
];
986 num_textures
= i
+ 1;
988 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
989 context
->changed
.sampler
[s
] = 0;
990 commit_samplers
= TRUE
;
991 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
994 /* Bind dummy sampler. We do not bind dummy sampler when
995 * it is not needed because it could add overhead. The
996 * dummy sampler should have r=g=b=0 and a=1. We do not
997 * unbind dummy sampler directly when they are not needed
998 * anymore, but they're going to be removed as long as texture
999 * or sampler states are changed. */
1000 view
[i
] = device
->dummy_sampler_view
;
1001 num_textures
= i
+ 1;
1003 cso_single_sampler(context
->cso
, PIPE_SHADER_FRAGMENT
,
1004 s
- NINE_SAMPLER_PS(0), &device
->dummy_sampler_state
);
1006 commit_samplers
= TRUE
;
1007 context
->changed
.sampler
[s
] = ~0;
1010 context
->bound_samplers_mask_ps
|= (1 << s
);
1013 cso_set_sampler_views(context
->cso
, PIPE_SHADER_FRAGMENT
, num_textures
, view
);
1015 if (commit_samplers
)
1016 cso_single_sampler_done(context
->cso
, PIPE_SHADER_FRAGMENT
);
1018 commit_samplers
= FALSE
;
1019 sampler_mask
= context
->programmable_vs
? context
->vs
->sampler_mask
: 0;
1020 context
->bound_samplers_mask_vs
= 0;
1021 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_VS
; ++i
) {
1022 const unsigned s
= NINE_SAMPLER_VS(i
);
1025 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
1030 if (context
->texture
[s
].enabled
) {
1031 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
1033 view
[i
] = context
->texture
[s
].view
[sRGB
];
1034 num_textures
= i
+ 1;
1036 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
1037 context
->changed
.sampler
[s
] = 0;
1038 commit_samplers
= TRUE
;
1039 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
1042 /* Bind dummy sampler. We do not bind dummy sampler when
1043 * it is not needed because it could add overhead. The
1044 * dummy sampler should have r=g=b=0 and a=1. We do not
1045 * unbind dummy sampler directly when they are not needed
1046 * anymore, but they're going to be removed as long as texture
1047 * or sampler states are changed. */
1048 view
[i
] = device
->dummy_sampler_view
;
1049 num_textures
= i
+ 1;
1051 cso_single_sampler(context
->cso
, PIPE_SHADER_VERTEX
,
1052 s
- NINE_SAMPLER_VS(0), &device
->dummy_sampler_state
);
1054 commit_samplers
= TRUE
;
1055 context
->changed
.sampler
[s
] = ~0;
1058 context
->bound_samplers_mask_vs
|= (1 << s
);
1061 cso_set_sampler_views(context
->cso
, PIPE_SHADER_VERTEX
, num_textures
, view
);
1063 if (commit_samplers
)
1064 cso_single_sampler_done(context
->cso
, PIPE_SHADER_VERTEX
);
1067 /* State commit only */
1070 commit_blend(struct NineDevice9
*device
)
1072 struct nine_context
*context
= &device
->context
;
1074 cso_set_blend(context
->cso
, &context
->pipe_data
.blend
);
1078 commit_dsa(struct NineDevice9
*device
)
1080 struct nine_context
*context
= &device
->context
;
1082 cso_set_depth_stencil_alpha(context
->cso
, &context
->pipe_data
.dsa
);
1086 commit_scissor(struct NineDevice9
*device
)
1088 struct nine_context
*context
= &device
->context
;
1089 struct pipe_context
*pipe
= context
->pipe
;
1091 pipe
->set_scissor_states(pipe
, 0, 1, &context
->scissor
);
1095 commit_rasterizer(struct NineDevice9
*device
)
1097 struct nine_context
*context
= &device
->context
;
1099 cso_set_rasterizer(context
->cso
, &context
->pipe_data
.rast
);
1103 commit_vs_constants(struct NineDevice9
*device
)
1105 struct nine_context
*context
= &device
->context
;
1106 struct pipe_context
*pipe
= context
->pipe
;
1108 if (unlikely(!context
->programmable_vs
))
1109 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs_ff
);
1111 if (context
->swvp
) {
1112 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb0_swvp
);
1113 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 1, &context
->pipe_data
.cb1_swvp
);
1114 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 2, &context
->pipe_data
.cb2_swvp
);
1115 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 3, &context
->pipe_data
.cb3_swvp
);
1117 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs
);
1123 commit_ps_constants(struct NineDevice9
*device
)
1125 struct nine_context
*context
= &device
->context
;
1126 struct pipe_context
*pipe
= context
->pipe
;
1128 if (unlikely(!context
->ps
))
1129 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps_ff
);
1131 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps
);
1135 commit_vs(struct NineDevice9
*device
)
1137 struct nine_context
*context
= &device
->context
;
1139 context
->pipe
->bind_vs_state(context
->pipe
, context
->cso_shader
.vs
);
1144 commit_ps(struct NineDevice9
*device
)
1146 struct nine_context
*context
= &device
->context
;
1148 context
->pipe
->bind_fs_state(context
->pipe
, context
->cso_shader
.ps
);
/* State-group masks used by nine_update_state to batch related dirty bits.
 * NOTE(review): some member lines were lost in extraction; reconstructed from
 * the visible members and the groups nine_update_state tests — confirm against
 * the original file. */
#define NINE_STATE_SHADER_CHANGE_VS \
   (NINE_STATE_VS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_FOG_SHADER | \
    NINE_STATE_POINTSIZE_SHADER | \
    NINE_STATE_SWVP)

#define NINE_STATE_SHADER_CHANGE_PS \
   (NINE_STATE_PS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_FOG_SHADER | \
    NINE_STATE_PS1X_SHADER)

#define NINE_STATE_FREQUENT \
   (NINE_STATE_RASTERIZER | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_SAMPLER | \
    NINE_STATE_VS_CONST | \
    NINE_STATE_PS_CONST | \
    NINE_STATE_MULTISAMPLE)

#define NINE_STATE_COMMON \
   (NINE_STATE_FB | \
    NINE_STATE_BLEND | \
    NINE_STATE_DSA | \
    NINE_STATE_VIEWPORT | \
    NINE_STATE_VDECL | \
    NINE_STATE_IDXBUF | \
    NINE_STATE_STREAMFREQ)

#define NINE_STATE_RARE \
   (NINE_STATE_SCISSOR | \
    NINE_STATE_BLEND_COLOR | \
    NINE_STATE_STENCIL_REF | \
    NINE_STATE_SAMPLE_MASK)
1189 nine_update_state(struct NineDevice9
*device
)
1191 struct nine_context
*context
= &device
->context
;
1192 struct pipe_context
*pipe
= context
->pipe
;
1195 DBG("changed state groups: %x\n", context
->changed
.group
);
1197 /* NOTE: We may want to use the cso cache for everything, or let
1198 * NineDevice9.RestoreNonCSOState actually set the states, then we wouldn't
1199 * have to care about state being clobbered here and could merge this back
1200 * into update_textures. Except, we also need to re-validate textures that
1201 * may be dirty anyway, even if no texture bindings changed.
1204 /* ff_update may change VS/PS dirty bits */
1205 if (unlikely(!context
->programmable_vs
|| !context
->ps
))
1206 nine_ff_update(device
);
1207 group
= context
->changed
.group
;
1209 if (group
& (NINE_STATE_SHADER_CHANGE_VS
| NINE_STATE_SHADER_CHANGE_PS
)) {
1210 if (group
& NINE_STATE_SHADER_CHANGE_VS
)
1211 group
|= prepare_vs(device
, (group
& NINE_STATE_VS
) != 0); /* may set NINE_STATE_RASTERIZER and NINE_STATE_SAMPLER*/
1212 if (group
& NINE_STATE_SHADER_CHANGE_PS
)
1213 group
|= prepare_ps(device
, (group
& NINE_STATE_PS
) != 0);
1216 if (group
& (NINE_STATE_COMMON
| NINE_STATE_VS
)) {
1217 if (group
& NINE_STATE_FB
)
1218 update_framebuffer(device
, FALSE
);
1219 if (group
& NINE_STATE_BLEND
)
1220 prepare_blend(device
);
1221 if (group
& NINE_STATE_DSA
)
1222 prepare_dsa(device
);
1223 if (group
& NINE_STATE_VIEWPORT
)
1224 update_viewport(device
);
1225 if (group
& (NINE_STATE_VDECL
| NINE_STATE_VS
| NINE_STATE_STREAMFREQ
))
1226 update_vertex_elements(device
);
1229 if (likely(group
& (NINE_STATE_FREQUENT
| NINE_STATE_VS
| NINE_STATE_PS
| NINE_STATE_SWVP
))) {
1230 if (group
& NINE_STATE_MULTISAMPLE
)
1231 group
|= check_multisample(device
);
1232 if (group
& NINE_STATE_RASTERIZER
)
1233 prepare_rasterizer(device
);
1234 if (group
& (NINE_STATE_TEXTURE
| NINE_STATE_SAMPLER
))
1235 update_textures_and_samplers(device
);
1236 if ((group
& (NINE_STATE_VS_CONST
| NINE_STATE_VS
| NINE_STATE_SWVP
)) && context
->programmable_vs
)
1237 prepare_vs_constants_userbuf(device
);
1238 if ((group
& (NINE_STATE_PS_CONST
| NINE_STATE_PS
)) && context
->ps
)
1239 prepare_ps_constants_userbuf(device
);
1242 if (context
->changed
.vtxbuf
)
1243 update_vertex_buffers(device
);
1245 if (context
->commit
& NINE_STATE_COMMIT_BLEND
)
1246 commit_blend(device
);
1247 if (context
->commit
& NINE_STATE_COMMIT_DSA
)
1249 if (context
->commit
& NINE_STATE_COMMIT_RASTERIZER
)
1250 commit_rasterizer(device
);
1251 if (context
->commit
& NINE_STATE_COMMIT_CONST_VS
)
1252 commit_vs_constants(device
);
1253 if (context
->commit
& NINE_STATE_COMMIT_CONST_PS
)
1254 commit_ps_constants(device
);
1255 if (context
->commit
& NINE_STATE_COMMIT_VS
)
1257 if (context
->commit
& NINE_STATE_COMMIT_PS
)
1260 context
->commit
= 0;
1262 if (unlikely(context
->changed
.ucp
)) {
1263 pipe
->set_clip_state(pipe
, &context
->clip
);
1264 context
->changed
.ucp
= FALSE
;
1267 if (unlikely(group
& NINE_STATE_RARE
)) {
1268 if (group
& NINE_STATE_SCISSOR
)
1269 commit_scissor(device
);
1270 if (group
& NINE_STATE_BLEND_COLOR
) {
1271 struct pipe_blend_color color
;
1272 d3dcolor_to_rgba(&color
.color
[0], context
->rs
[D3DRS_BLENDFACTOR
]);
1273 pipe
->set_blend_color(pipe
, &color
);
1275 if (group
& NINE_STATE_SAMPLE_MASK
) {
1276 if (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) {
1277 pipe
->set_sample_mask(pipe
, ~0);
1279 pipe
->set_sample_mask(pipe
, context
->rs
[D3DRS_MULTISAMPLEMASK
]);
1282 if (group
& NINE_STATE_STENCIL_REF
) {
1283 struct pipe_stencil_ref ref
;
1284 ref
.ref_value
[0] = context
->rs
[D3DRS_STENCILREF
];
1285 ref
.ref_value
[1] = ref
.ref_value
[0];
1286 pipe
->set_stencil_ref(pipe
, &ref
);
1290 context
->changed
.group
&=
1291 (NINE_STATE_FF
| NINE_STATE_VS_CONST
| NINE_STATE_PS_CONST
);
1296 #define RESZ_CODE 0x7fa05000
1299 NineDevice9_ResolveZ( struct NineDevice9
*device
)
1301 struct nine_context
*context
= &device
->context
;
1302 const struct util_format_description
*desc
;
1303 struct NineSurface9
*source
= context
->ds
;
1304 struct pipe_resource
*src
, *dst
;
1305 struct pipe_blit_info blit
;
1307 DBG("RESZ resolve\n");
1309 if (!source
|| !context
->texture
[0].enabled
||
1310 context
->texture
[0].type
!= D3DRTYPE_TEXTURE
)
1313 src
= source
->base
.resource
;
1314 dst
= context
->texture
[0].resource
;
1319 /* check dst is depth format. we know already for src */
1320 desc
= util_format_description(dst
->format
);
1321 if (desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_ZS
)
1324 memset(&blit
, 0, sizeof(blit
));
1325 blit
.src
.resource
= src
;
1327 blit
.src
.format
= src
->format
;
1329 blit
.src
.box
.depth
= 1;
1332 blit
.src
.box
.width
= src
->width0
;
1333 blit
.src
.box
.height
= src
->height0
;
1335 blit
.dst
.resource
= dst
;
1337 blit
.dst
.format
= dst
->format
;
1339 blit
.dst
.box
.depth
= 1;
1342 blit
.dst
.box
.width
= dst
->width0
;
1343 blit
.dst
.box
.height
= dst
->height0
;
1345 blit
.mask
= PIPE_MASK_ZS
;
1346 blit
.filter
= PIPE_TEX_FILTER_NEAREST
;
1347 blit
.scissor_enable
= FALSE
;
1349 context
->pipe
->blit(context
->pipe
, &blit
);
/* AMD fourcc codes written to D3DRS_POINTSIZE to toggle alpha-to-coverage. */
#define ALPHA_TO_COVERAGE_ENABLE MAKEFOURCC('A', '2', 'M', '1')
#define ALPHA_TO_COVERAGE_DISABLE MAKEFOURCC('A', '2', 'M', '0')
1355 /* Nine_context functions.
1356 * Serialized through CSMT macros.
1360 nine_context_set_texture_apply(struct NineDevice9
*device
,
1365 D3DRESOURCETYPE type
,
1367 struct pipe_resource
*res
,
1368 struct pipe_sampler_view
*view0
,
1369 struct pipe_sampler_view
*view1
);
1371 nine_context_set_stream_source_apply(struct NineDevice9
*device
,
1373 struct pipe_resource
*res
,
1378 nine_context_set_indices_apply(struct NineDevice9
*device
,
1379 struct pipe_resource
*res
,
1381 UINT OffsetInBytes
);
1384 nine_context_set_pixel_shader_constant_i_transformed(struct NineDevice9
*device
,
1386 const int *pConstantData
,
1387 unsigned pConstantData_size
,
1388 UINT Vector4iCount
);
1390 CSMT_ITEM_NO_WAIT(nine_context_set_render_state
,
1391 ARG_VAL(D3DRENDERSTATETYPE
, State
),
1392 ARG_VAL(DWORD
, Value
))
1394 struct nine_context
*context
= &device
->context
;
1396 /* Amd hacks (equivalent to GL extensions) */
1397 if (unlikely(State
== D3DRS_POINTSIZE
)) {
1398 if (Value
== RESZ_CODE
) {
1399 NineDevice9_ResolveZ(device
);
1403 if (Value
== ALPHA_TO_COVERAGE_ENABLE
||
1404 Value
== ALPHA_TO_COVERAGE_DISABLE
) {
1405 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== ALPHA_TO_COVERAGE_ENABLE
);
1406 context
->changed
.group
|= NINE_STATE_BLEND
;
1412 if (unlikely(State
== D3DRS_ADAPTIVETESS_Y
)) {
1413 if (Value
== D3DFMT_ATOC
|| (Value
== D3DFMT_UNKNOWN
&& context
->rs
[NINED3DRS_ALPHACOVERAGE
])) {
1414 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== D3DFMT_ATOC
) ? 3 : 0;
1415 context
->rs
[NINED3DRS_ALPHACOVERAGE
] &= context
->rs
[D3DRS_ALPHATESTENABLE
] ? 3 : 2;
1416 context
->changed
.group
|= NINE_STATE_BLEND
;
1420 if (unlikely(State
== D3DRS_ALPHATESTENABLE
&& (context
->rs
[NINED3DRS_ALPHACOVERAGE
] & 2))) {
1421 DWORD alphacoverage_prev
= context
->rs
[NINED3DRS_ALPHACOVERAGE
];
1422 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
? 3 : 2);
1423 if (context
->rs
[NINED3DRS_ALPHACOVERAGE
] != alphacoverage_prev
)
1424 context
->changed
.group
|= NINE_STATE_BLEND
;
1427 context
->rs
[State
] = nine_fix_render_state_value(State
, Value
);
1428 context
->changed
.group
|= nine_render_state_group
[State
];
1431 CSMT_ITEM_NO_WAIT(nine_context_set_texture_apply
,
1432 ARG_VAL(DWORD
, stage
),
1433 ARG_VAL(BOOL
, enabled
),
1434 ARG_VAL(BOOL
, shadow
),
1435 ARG_VAL(DWORD
, lod
),
1436 ARG_VAL(D3DRESOURCETYPE
, type
),
1437 ARG_VAL(uint8_t, pstype
),
1438 ARG_BIND_RES(struct pipe_resource
, res
),
1439 ARG_BIND_VIEW(struct pipe_sampler_view
, view0
),
1440 ARG_BIND_VIEW(struct pipe_sampler_view
, view1
))
1442 struct nine_context
*context
= &device
->context
;
1444 context
->texture
[stage
].enabled
= enabled
;
1445 context
->samplers_shadow
&= ~(1 << stage
);
1446 context
->samplers_shadow
|= shadow
<< stage
;
1447 context
->texture
[stage
].shadow
= shadow
;
1448 context
->texture
[stage
].lod
= lod
;
1449 context
->texture
[stage
].type
= type
;
1450 context
->texture
[stage
].pstype
= pstype
;
1451 pipe_resource_reference(&context
->texture
[stage
].resource
, res
);
1452 pipe_sampler_view_reference(&context
->texture
[stage
].view
[0], view0
);
1453 pipe_sampler_view_reference(&context
->texture
[stage
].view
[1], view1
);
1455 context
->changed
.group
|= NINE_STATE_TEXTURE
;
1459 nine_context_set_texture(struct NineDevice9
*device
,
1461 struct NineBaseTexture9
*tex
)
1463 BOOL enabled
= FALSE
;
1464 BOOL shadow
= FALSE
;
1466 D3DRESOURCETYPE type
= D3DRTYPE_TEXTURE
;
1468 struct pipe_resource
*res
= NULL
;
1469 struct pipe_sampler_view
*view0
= NULL
, *view1
= NULL
;
1471 /* For managed pool, the data can be initially incomplete.
1472 * In that case, the texture is rebound later
1473 * (in NineBaseTexture9_Validate/NineBaseTexture9_UploadSelf). */
1474 if (tex
&& tex
->base
.resource
) {
1476 shadow
= tex
->shadow
;
1477 lod
= tex
->managed
.lod
;
1478 type
= tex
->base
.type
;
1479 pstype
= tex
->pstype
;
1480 res
= tex
->base
.resource
;
1481 view0
= NineBaseTexture9_GetSamplerView(tex
, 0);
1482 view1
= NineBaseTexture9_GetSamplerView(tex
, 1);
1485 nine_context_set_texture_apply(device
, Stage
, enabled
,
1486 shadow
, lod
, type
, pstype
,
1490 CSMT_ITEM_NO_WAIT(nine_context_set_sampler_state
,
1491 ARG_VAL(DWORD
, Sampler
),
1492 ARG_VAL(D3DSAMPLERSTATETYPE
, Type
),
1493 ARG_VAL(DWORD
, Value
))
1495 struct nine_context
*context
= &device
->context
;
1497 if (unlikely(!nine_check_sampler_state_value(Type
, Value
)))
1500 context
->samp
[Sampler
][Type
] = Value
;
1501 context
->changed
.group
|= NINE_STATE_SAMPLER
;
1502 context
->changed
.sampler
[Sampler
] |= 1 << Type
;
1505 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_apply
,
1506 ARG_VAL(UINT
, StreamNumber
),
1507 ARG_BIND_RES(struct pipe_resource
, res
),
1508 ARG_VAL(UINT
, OffsetInBytes
),
1509 ARG_VAL(UINT
, Stride
))
1511 struct nine_context
*context
= &device
->context
;
1512 const unsigned i
= StreamNumber
;
1514 context
->vtxbuf
[i
].stride
= Stride
;
1515 context
->vtxbuf
[i
].buffer_offset
= OffsetInBytes
;
1516 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
.resource
, res
);
1518 context
->changed
.vtxbuf
|= 1 << StreamNumber
;
1522 nine_context_set_stream_source(struct NineDevice9
*device
,
1524 struct NineVertexBuffer9
*pVBuf9
,
1528 struct pipe_resource
*res
= NULL
;
1529 unsigned offset
= 0;
1532 res
= NineVertexBuffer9_GetResource(pVBuf9
, &offset
);
1533 /* in the future when there is internal offset, add it
1534 * to OffsetInBytes */
1536 nine_context_set_stream_source_apply(device
, StreamNumber
,
1537 res
, offset
+ OffsetInBytes
,
1541 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_freq
,
1542 ARG_VAL(UINT
, StreamNumber
),
1543 ARG_VAL(UINT
, Setting
))
1545 struct nine_context
*context
= &device
->context
;
1547 context
->stream_freq
[StreamNumber
] = Setting
;
1549 if (Setting
& D3DSTREAMSOURCE_INSTANCEDATA
)
1550 context
->stream_instancedata_mask
|= 1 << StreamNumber
;
1552 context
->stream_instancedata_mask
&= ~(1 << StreamNumber
);
1554 if (StreamNumber
!= 0)
1555 context
->changed
.group
|= NINE_STATE_STREAMFREQ
;
1558 CSMT_ITEM_NO_WAIT(nine_context_set_indices_apply
,
1559 ARG_BIND_RES(struct pipe_resource
, res
),
1560 ARG_VAL(UINT
, IndexSize
),
1561 ARG_VAL(UINT
, OffsetInBytes
))
1563 struct nine_context
*context
= &device
->context
;
1565 context
->index_size
= IndexSize
;
1566 context
->index_offset
= OffsetInBytes
;
1567 pipe_resource_reference(&context
->idxbuf
, res
);
1569 context
->changed
.group
|= NINE_STATE_IDXBUF
;
1573 nine_context_set_indices(struct NineDevice9
*device
,
1574 struct NineIndexBuffer9
*idxbuf
)
1576 struct pipe_resource
*res
= NULL
;
1578 unsigned OffsetInBytes
= 0;
1581 res
= NineIndexBuffer9_GetBuffer(idxbuf
, &OffsetInBytes
);
1582 IndexSize
= idxbuf
->index_size
;
1585 nine_context_set_indices_apply(device
, res
, IndexSize
, OffsetInBytes
);
1588 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_declaration
,
1589 ARG_BIND_REF(struct NineVertexDeclaration9
, vdecl
))
1591 struct nine_context
*context
= &device
->context
;
1592 BOOL was_programmable_vs
= context
->programmable_vs
;
1594 nine_bind(&context
->vdecl
, vdecl
);
1596 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1597 if (was_programmable_vs
!= context
->programmable_vs
) {
1598 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1599 context
->changed
.group
|= NINE_STATE_VS
;
1602 context
->changed
.group
|= NINE_STATE_VDECL
;
1605 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader
,
1606 ARG_BIND_REF(struct NineVertexShader9
, pShader
))
1608 struct nine_context
*context
= &device
->context
;
1609 BOOL was_programmable_vs
= context
->programmable_vs
;
1611 nine_bind(&context
->vs
, pShader
);
1613 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1615 /* ff -> non-ff: commit back non-ff constants */
1616 if (!was_programmable_vs
&& context
->programmable_vs
)
1617 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1619 context
->changed
.group
|= NINE_STATE_VS
;
1622 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_f
,
1623 ARG_VAL(UINT
, StartRegister
),
1624 ARG_MEM(float, pConstantData
),
1625 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1626 ARG_VAL(UINT
, Vector4fCount
))
1628 struct nine_context
*context
= &device
->context
;
1629 float *vs_const_f
= device
->may_swvp
? context
->vs_const_f_swvp
: context
->vs_const_f
;
1631 memcpy(&vs_const_f
[StartRegister
* 4],
1633 pConstantData_size
);
1635 if (device
->may_swvp
) {
1636 Vector4fCount
= MIN2(StartRegister
+ Vector4fCount
, NINE_MAX_CONST_F
) - StartRegister
;
1637 if (StartRegister
< NINE_MAX_CONST_F
)
1638 memcpy(&context
->vs_const_f
[StartRegister
* 4],
1640 Vector4fCount
* 4 * sizeof(context
->vs_const_f
[0]));
1643 context
->changed
.vs_const_f
= TRUE
;
1644 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1647 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_i
,
1648 ARG_VAL(UINT
, StartRegister
),
1649 ARG_MEM(int, pConstantData
),
1650 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1651 ARG_VAL(UINT
, Vector4iCount
))
1653 struct nine_context
*context
= &device
->context
;
1656 if (device
->driver_caps
.vs_integer
) {
1657 memcpy(&context
->vs_const_i
[4 * StartRegister
],
1659 pConstantData_size
);
1661 for (i
= 0; i
< Vector4iCount
; i
++) {
1662 context
->vs_const_i
[4 * (StartRegister
+ i
)] = fui((float)(pConstantData
[4 * i
]));
1663 context
->vs_const_i
[4 * (StartRegister
+ i
) + 1] = fui((float)(pConstantData
[4 * i
+ 1]));
1664 context
->vs_const_i
[4 * (StartRegister
+ i
) + 2] = fui((float)(pConstantData
[4 * i
+ 2]));
1665 context
->vs_const_i
[4 * (StartRegister
+ i
) + 3] = fui((float)(pConstantData
[4 * i
+ 3]));
1669 context
->changed
.vs_const_i
= TRUE
;
1670 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1673 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_b
,
1674 ARG_VAL(UINT
, StartRegister
),
1675 ARG_MEM(BOOL
, pConstantData
),
1676 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1677 ARG_VAL(UINT
, BoolCount
))
1679 struct nine_context
*context
= &device
->context
;
1681 uint32_t bool_true
= device
->driver_caps
.vs_integer
? 0xFFFFFFFF : fui(1.0f
);
1683 (void) pConstantData_size
;
1685 for (i
= 0; i
< BoolCount
; i
++)
1686 context
->vs_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1688 context
->changed
.vs_const_b
= TRUE
;
1689 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1692 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader
,
1693 ARG_BIND_REF(struct NinePixelShader9
, ps
))
1695 struct nine_context
*context
= &device
->context
;
1696 unsigned old_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1699 /* ff -> non-ff: commit back non-ff constants */
1700 if (!context
->ps
&& ps
)
1701 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
1703 nine_bind(&context
->ps
, ps
);
1705 context
->changed
.group
|= NINE_STATE_PS
;
1707 mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1708 /* We need to update cbufs if the pixel shader would
1709 * write to different render targets */
1710 if (mask
!= old_mask
)
1711 context
->changed
.group
|= NINE_STATE_FB
;
1714 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_f
,
1715 ARG_VAL(UINT
, StartRegister
),
1716 ARG_MEM(float, pConstantData
),
1717 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1718 ARG_VAL(UINT
, Vector4fCount
))
1720 struct nine_context
*context
= &device
->context
;
1722 memcpy(&context
->ps_const_f
[StartRegister
* 4],
1724 pConstantData_size
);
1726 context
->changed
.ps_const_f
= TRUE
;
1727 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1730 /* For stateblocks */
1731 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i_transformed
,
1732 ARG_VAL(UINT
, StartRegister
),
1733 ARG_MEM(int, pConstantData
),
1734 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1735 ARG_VAL(UINT
, Vector4iCount
))
1737 struct nine_context
*context
= &device
->context
;
1739 memcpy(&context
->ps_const_i
[StartRegister
][0],
1741 Vector4iCount
* sizeof(context
->ps_const_i
[0]));
1743 context
->changed
.ps_const_i
= TRUE
;
1744 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1747 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i
,
1748 ARG_VAL(UINT
, StartRegister
),
1749 ARG_MEM(int, pConstantData
),
1750 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1751 ARG_VAL(UINT
, Vector4iCount
))
1753 struct nine_context
*context
= &device
->context
;
1756 if (device
->driver_caps
.ps_integer
) {
1757 memcpy(&context
->ps_const_i
[StartRegister
][0],
1759 pConstantData_size
);
1761 for (i
= 0; i
< Vector4iCount
; i
++) {
1762 context
->ps_const_i
[StartRegister
+i
][0] = fui((float)(pConstantData
[4*i
]));
1763 context
->ps_const_i
[StartRegister
+i
][1] = fui((float)(pConstantData
[4*i
+1]));
1764 context
->ps_const_i
[StartRegister
+i
][2] = fui((float)(pConstantData
[4*i
+2]));
1765 context
->ps_const_i
[StartRegister
+i
][3] = fui((float)(pConstantData
[4*i
+3]));
1768 context
->changed
.ps_const_i
= TRUE
;
1769 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1772 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_b
,
1773 ARG_VAL(UINT
, StartRegister
),
1774 ARG_MEM(BOOL
, pConstantData
),
1775 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1776 ARG_VAL(UINT
, BoolCount
))
1778 struct nine_context
*context
= &device
->context
;
1780 uint32_t bool_true
= device
->driver_caps
.ps_integer
? 0xFFFFFFFF : fui(1.0f
);
1782 (void) pConstantData_size
;
1784 for (i
= 0; i
< BoolCount
; i
++)
1785 context
->ps_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1787 context
->changed
.ps_const_b
= TRUE
;
1788 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1791 /* XXX: use resource, as resource might change */
1792 CSMT_ITEM_NO_WAIT(nine_context_set_render_target
,
1793 ARG_VAL(DWORD
, RenderTargetIndex
),
1794 ARG_BIND_REF(struct NineSurface9
, rt
))
1796 struct nine_context
*context
= &device
->context
;
1797 const unsigned i
= RenderTargetIndex
;
1800 context
->viewport
.X
= 0;
1801 context
->viewport
.Y
= 0;
1802 context
->viewport
.Width
= rt
->desc
.Width
;
1803 context
->viewport
.Height
= rt
->desc
.Height
;
1804 context
->viewport
.MinZ
= 0.0f
;
1805 context
->viewport
.MaxZ
= 1.0f
;
1807 context
->scissor
.minx
= 0;
1808 context
->scissor
.miny
= 0;
1809 context
->scissor
.maxx
= rt
->desc
.Width
;
1810 context
->scissor
.maxy
= rt
->desc
.Height
;
1812 context
->changed
.group
|= NINE_STATE_VIEWPORT
| NINE_STATE_SCISSOR
| NINE_STATE_MULTISAMPLE
;
1814 if (context
->rt
[0] &&
1815 (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) !=
1816 (rt
->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
))
1817 context
->changed
.group
|= NINE_STATE_SAMPLE_MASK
;
1820 if (context
->rt
[i
] != rt
) {
1821 nine_bind(&context
->rt
[i
], rt
);
1822 context
->changed
.group
|= NINE_STATE_FB
;
1826 /* XXX: use resource instead of ds, as resource might change */
1827 CSMT_ITEM_NO_WAIT(nine_context_set_depth_stencil
,
1828 ARG_BIND_REF(struct NineSurface9
, ds
))
1830 struct nine_context
*context
= &device
->context
;
1832 nine_bind(&context
->ds
, ds
);
1833 context
->changed
.group
|= NINE_STATE_FB
;
1836 CSMT_ITEM_NO_WAIT(nine_context_set_viewport
,
1837 ARG_COPY_REF(D3DVIEWPORT9
, viewport
))
1839 struct nine_context
*context
= &device
->context
;
1841 context
->viewport
= *viewport
;
1842 context
->changed
.group
|= NINE_STATE_VIEWPORT
;
1845 CSMT_ITEM_NO_WAIT(nine_context_set_scissor
,
1846 ARG_COPY_REF(struct pipe_scissor_state
, scissor
))
1848 struct nine_context
*context
= &device
->context
;
1850 context
->scissor
= *scissor
;
1851 context
->changed
.group
|= NINE_STATE_SCISSOR
;
1854 CSMT_ITEM_NO_WAIT(nine_context_set_transform
,
1855 ARG_VAL(D3DTRANSFORMSTATETYPE
, State
),
1856 ARG_COPY_REF(D3DMATRIX
, pMatrix
))
1858 struct nine_context
*context
= &device
->context
;
1859 D3DMATRIX
*M
= nine_state_access_transform(&context
->ff
, State
, TRUE
);
1862 context
->ff
.changed
.transform
[State
/ 32] |= 1 << (State
% 32);
1863 context
->changed
.group
|= NINE_STATE_FF
;
1866 CSMT_ITEM_NO_WAIT(nine_context_set_material
,
1867 ARG_COPY_REF(D3DMATERIAL9
, pMaterial
))
1869 struct nine_context
*context
= &device
->context
;
1871 context
->ff
.material
= *pMaterial
;
1872 context
->changed
.group
|= NINE_STATE_FF_MATERIAL
;
1875 CSMT_ITEM_NO_WAIT(nine_context_set_light
,
1876 ARG_VAL(DWORD
, Index
),
1877 ARG_COPY_REF(D3DLIGHT9
, pLight
))
1879 struct nine_context
*context
= &device
->context
;
1881 (void)nine_state_set_light(&context
->ff
, Index
, pLight
);
1882 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1886 /* For stateblocks */
1888 nine_context_light_enable_stateblock(struct NineDevice9
*device
,
1889 const uint16_t active_light
[NINE_MAX_LIGHTS_ACTIVE
], /* TODO: use pointer that convey size for csmt */
1890 unsigned int num_lights_active
)
1892 struct nine_context
*context
= &device
->context
;
1894 /* TODO: Use CSMT_* to avoid calling nine_csmt_process */
1895 nine_csmt_process(device
);
1896 memcpy(context
->ff
.active_light
, active_light
, NINE_MAX_LIGHTS_ACTIVE
* sizeof(context
->ff
.active_light
[0]));
1897 context
->ff
.num_lights_active
= num_lights_active
;
1898 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1901 CSMT_ITEM_NO_WAIT(nine_context_light_enable
,
1902 ARG_VAL(DWORD
, Index
),
1903 ARG_VAL(BOOL
, Enable
))
1905 struct nine_context
*context
= &device
->context
;
1907 nine_state_light_enable(&context
->ff
, &context
->changed
.group
, Index
, Enable
);
1910 CSMT_ITEM_NO_WAIT(nine_context_set_texture_stage_state
,
1911 ARG_VAL(DWORD
, Stage
),
1912 ARG_VAL(D3DTEXTURESTAGESTATETYPE
, Type
),
1913 ARG_VAL(DWORD
, Value
))
1915 struct nine_context
*context
= &device
->context
;
1916 int bumpmap_index
= -1;
1918 context
->ff
.tex_stage
[Stage
][Type
] = Value
;
1920 case D3DTSS_BUMPENVMAT00
:
1921 bumpmap_index
= 4 * Stage
;
1923 case D3DTSS_BUMPENVMAT01
:
1924 bumpmap_index
= 4 * Stage
+ 1;
1926 case D3DTSS_BUMPENVMAT10
:
1927 bumpmap_index
= 4 * Stage
+ 2;
1929 case D3DTSS_BUMPENVMAT11
:
1930 bumpmap_index
= 4 * Stage
+ 3;
1932 case D3DTSS_BUMPENVLSCALE
:
1933 bumpmap_index
= 4 * 8 + 2 * Stage
;
1935 case D3DTSS_BUMPENVLOFFSET
:
1936 bumpmap_index
= 4 * 8 + 2 * Stage
+ 1;
1938 case D3DTSS_TEXTURETRANSFORMFLAGS
:
1939 context
->changed
.group
|= NINE_STATE_PS1X_SHADER
;
1945 if (bumpmap_index
>= 0) {
1946 context
->bumpmap_vars
[bumpmap_index
] = Value
;
1947 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1950 context
->changed
.group
|= NINE_STATE_FF_PSSTAGES
;
1951 context
->ff
.changed
.tex_stage
[Stage
][Type
/ 32] |= 1 << (Type
% 32);
1954 CSMT_ITEM_NO_WAIT(nine_context_set_clip_plane
,
1955 ARG_VAL(DWORD
, Index
),
1956 ARG_COPY_REF(struct nine_clipplane
, pPlane
))
1958 struct nine_context
*context
= &device
->context
;
1960 memcpy(&context
->clip
.ucp
[Index
][0], pPlane
, sizeof(context
->clip
.ucp
[0]));
1961 context
->changed
.ucp
= TRUE
;
1964 CSMT_ITEM_NO_WAIT(nine_context_set_swvp
,
1965 ARG_VAL(boolean
, swvp
))
1967 struct nine_context
*context
= &device
->context
;
1969 context
->swvp
= swvp
;
1970 context
->changed
.group
|= NINE_STATE_SWVP
;
1976 nine_context_apply_stateblock(struct NineDevice9
*device
,
1977 const struct nine_state
*src
)
1979 struct nine_context
*context
= &device
->context
;
1982 context
->changed
.group
|= src
->changed
.group
;
1984 for (i
= 0; i
< ARRAY_SIZE(src
->changed
.rs
); ++i
) {
1985 uint32_t m
= src
->changed
.rs
[i
];
1987 const int r
= ffs(m
) - 1;
1989 context
->rs
[i
* 32 + r
] = nine_fix_render_state_value(i
* 32 + r
, src
->rs_advertised
[i
* 32 + r
]);
1994 if (src
->changed
.texture
) {
1995 uint32_t m
= src
->changed
.texture
;
1998 for (s
= 0; m
; ++s
, m
>>= 1) {
1999 struct NineBaseTexture9
*tex
= src
->texture
[s
];
2002 nine_context_set_texture(device
, s
, tex
);
2007 if (src
->changed
.group
& NINE_STATE_SAMPLER
) {
2010 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2011 uint32_t m
= src
->changed
.sampler
[s
];
2013 const int i
= ffs(m
) - 1;
2015 if (nine_check_sampler_state_value(i
, src
->samp_advertised
[s
][i
]))
2016 context
->samp
[s
][i
] = src
->samp_advertised
[s
][i
];
2018 context
->changed
.sampler
[s
] |= src
->changed
.sampler
[s
];
2022 /* Vertex buffers */
2023 if (src
->changed
.vtxbuf
| src
->changed
.stream_freq
) {
2024 uint32_t m
= src
->changed
.vtxbuf
| src
->changed
.stream_freq
;
2025 for (i
= 0; m
; ++i
, m
>>= 1) {
2026 if (src
->changed
.vtxbuf
& (1 << i
)) {
2027 if (src
->stream
[i
]) {
2028 unsigned offset
= 0;
2029 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
,
2030 src
->stream
[i
] ? NineVertexBuffer9_GetResource(src
->stream
[i
], &offset
) : NULL
);
2031 context
->vtxbuf
[i
].buffer_offset
= src
->vtxbuf
[i
].buffer_offset
+ offset
;
2032 context
->vtxbuf
[i
].stride
= src
->vtxbuf
[i
].stride
;
2035 if (src
->changed
.stream_freq
& (1 << i
)) {
2036 context
->stream_freq
[i
] = src
->stream_freq
[i
];
2037 if (src
->stream_freq
[i
] & D3DSTREAMSOURCE_INSTANCEDATA
)
2038 context
->stream_instancedata_mask
|= 1 << i
;
2040 context
->stream_instancedata_mask
&= ~(1 << i
);
2043 context
->changed
.vtxbuf
|= src
->changed
.vtxbuf
;
2047 if (src
->changed
.group
& NINE_STATE_IDXBUF
)
2048 nine_context_set_indices(device
, src
->idxbuf
);
2050 /* Vertex declaration */
2051 if ((src
->changed
.group
& NINE_STATE_VDECL
) && src
->vdecl
)
2052 nine_context_set_vertex_declaration(device
, src
->vdecl
);
2055 if (src
->changed
.group
& NINE_STATE_VS
)
2056 nine_bind(&context
->vs
, src
->vs
);
2058 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
2061 if (src
->changed
.group
& NINE_STATE_PS
)
2062 nine_bind(&context
->ps
, src
->ps
);
2064 /* Vertex constants */
2065 if (src
->changed
.group
& NINE_STATE_VS_CONST
) {
2066 struct nine_range
*r
;
2067 if (device
->may_swvp
) {
2068 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
) {
2071 memcpy(&context
->vs_const_f_swvp
[bgn
* 4],
2072 &src
->vs_const_f
[bgn
* 4],
2073 (end
- bgn
) * 4 * sizeof(float));
2074 if (bgn
< device
->max_vs_const_f
) {
2075 end
= MIN2(end
, device
->max_vs_const_f
);
2076 memcpy(&context
->vs_const_f
[bgn
* 4],
2077 &src
->vs_const_f
[bgn
* 4],
2078 (end
- bgn
) * 4 * sizeof(float));
2082 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
) {
2083 memcpy(&context
->vs_const_f
[r
->bgn
* 4],
2084 &src
->vs_const_f
[r
->bgn
* 4],
2085 (r
->end
- r
->bgn
) * 4 * sizeof(float));
2088 for (r
= src
->changed
.vs_const_i
; r
; r
= r
->next
) {
2089 memcpy(&context
->vs_const_i
[r
->bgn
* 4],
2090 &src
->vs_const_i
[r
->bgn
* 4],
2091 (r
->end
- r
->bgn
) * 4 * sizeof(int));
2093 for (r
= src
->changed
.vs_const_b
; r
; r
= r
->next
) {
2094 memcpy(&context
->vs_const_b
[r
->bgn
],
2095 &src
->vs_const_b
[r
->bgn
],
2096 (r
->end
- r
->bgn
) * sizeof(int));
2098 context
->changed
.vs_const_f
= !!src
->changed
.vs_const_f
;
2099 context
->changed
.vs_const_i
= !!src
->changed
.vs_const_i
;
2100 context
->changed
.vs_const_b
= !!src
->changed
.vs_const_b
;
2103 /* Pixel constants */
2104 if (src
->changed
.group
& NINE_STATE_PS_CONST
) {
2105 struct nine_range
*r
;
2106 for (r
= src
->changed
.ps_const_f
; r
; r
= r
->next
) {
2107 memcpy(&context
->ps_const_f
[r
->bgn
* 4],
2108 &src
->ps_const_f
[r
->bgn
* 4],
2109 (r
->end
- r
->bgn
) * 4 * sizeof(float));
2111 if (src
->changed
.ps_const_i
) {
2112 uint16_t m
= src
->changed
.ps_const_i
;
2113 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2115 memcpy(context
->ps_const_i
[i
], src
->ps_const_i
[i
], 4 * sizeof(int));
2117 if (src
->changed
.ps_const_b
) {
2118 uint16_t m
= src
->changed
.ps_const_b
;
2119 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2121 context
->ps_const_b
[i
] = src
->ps_const_b
[i
];
2123 context
->changed
.ps_const_f
= !!src
->changed
.ps_const_f
;
2124 context
->changed
.ps_const_i
= !!src
->changed
.ps_const_i
;
2125 context
->changed
.ps_const_b
= !!src
->changed
.ps_const_b
;
2129 if (src
->changed
.group
& NINE_STATE_VIEWPORT
)
2130 context
->viewport
= src
->viewport
;
2133 if (src
->changed
.group
& NINE_STATE_SCISSOR
)
2134 context
->scissor
= src
->scissor
;
2136 /* User Clip Planes */
2137 if (src
->changed
.ucp
) {
2138 for (i
= 0; i
< PIPE_MAX_CLIP_PLANES
; ++i
)
2139 if (src
->changed
.ucp
& (1 << i
))
2140 memcpy(context
->clip
.ucp
[i
],
2141 src
->clip
.ucp
[i
], sizeof(src
->clip
.ucp
[0]));
2142 context
->changed
.ucp
= TRUE
;
2145 if (!(src
->changed
.group
& NINE_STATE_FF
))
2148 /* Fixed function state. */
2150 if (src
->changed
.group
& NINE_STATE_FF_MATERIAL
)
2151 context
->ff
.material
= src
->ff
.material
;
2153 if (src
->changed
.group
& NINE_STATE_FF_PSSTAGES
) {
2155 for (s
= 0; s
< NINE_MAX_TEXTURE_STAGES
; ++s
) {
2156 for (i
= 0; i
< NINED3DTSS_COUNT
; ++i
)
2157 if (src
->ff
.changed
.tex_stage
[s
][i
/ 32] & (1 << (i
% 32)))
2158 context
->ff
.tex_stage
[s
][i
] = src
->ff
.tex_stage
[s
][i
];
2161 if (src
->changed
.group
& NINE_STATE_FF_LIGHTING
) {
2162 unsigned num_lights
= MAX2(context
->ff
.num_lights
, src
->ff
.num_lights
);
2163 /* Can happen if the stateblock had recorded the creation of
2165 if (context
->ff
.num_lights
< num_lights
) {
2166 context
->ff
.light
= REALLOC(context
->ff
.light
,
2167 context
->ff
.num_lights
* sizeof(D3DLIGHT9
),
2168 num_lights
* sizeof(D3DLIGHT9
));
2169 memset(&context
->ff
.light
[context
->ff
.num_lights
], 0, (num_lights
- context
->ff
.num_lights
) * sizeof(D3DLIGHT9
));
2170 for (i
= context
->ff
.num_lights
; i
< num_lights
; ++i
)
2171 context
->ff
.light
[i
].Type
= (D3DLIGHTTYPE
)NINED3DLIGHT_INVALID
;
2172 context
->ff
.num_lights
= num_lights
;
2174 /* src->ff.num_lights < num_lights has been handled before */
2175 assert (src
->ff
.num_lights
== num_lights
);
2177 for (i
= 0; i
< num_lights
; ++i
)
2178 if (src
->ff
.light
[i
].Type
!= NINED3DLIGHT_INVALID
)
2179 context
->ff
.light
[i
] = src
->ff
.light
[i
];
2181 memcpy(context
->ff
.active_light
, src
->ff
.active_light
, sizeof(src
->ff
.active_light
) );
2182 context
->ff
.num_lights_active
= src
->ff
.num_lights_active
;
2184 if (src
->changed
.group
& NINE_STATE_FF_VSTRANSF
) {
2185 for (i
= 0; i
< ARRAY_SIZE(src
->ff
.changed
.transform
); ++i
) {
2187 if (!src
->ff
.changed
.transform
[i
])
2189 for (s
= i
* 32; s
< (i
* 32 + 32); ++s
) {
2190 if (!(src
->ff
.changed
.transform
[i
] & (1 << (s
% 32))))
2192 *nine_state_access_transform(&context
->ff
, s
, TRUE
) =
2193 *nine_state_access_transform( /* const because !alloc */
2194 (struct nine_ff_state
*)&src
->ff
, s
, FALSE
);
2196 context
->ff
.changed
.transform
[i
] |= src
->ff
.changed
.transform
[i
];
2203 /* Do not write to nine_context directly. Slower,
2204 * but works with csmt. TODO: write a special csmt version that
2205 * would record the list of commands as much as possible,
2206 * and use the version above else.
2209 nine_context_apply_stateblock(struct NineDevice9
*device
,
2210 const struct nine_state
*src
)
2214 /* No need to apply src->changed.group, since all calls do
2215 * set context->changed.group */
2217 for (i
= 0; i
< ARRAY_SIZE(src
->changed
.rs
); ++i
) {
2218 uint32_t m
= src
->changed
.rs
[i
];
2220 const int r
= ffs(m
) - 1;
2222 nine_context_set_render_state(device
, i
* 32 + r
, src
->rs_advertised
[i
* 32 + r
]);
2227 if (src
->changed
.texture
) {
2228 uint32_t m
= src
->changed
.texture
;
2231 for (s
= 0; m
; ++s
, m
>>= 1) {
2232 struct NineBaseTexture9
*tex
= src
->texture
[s
];
2235 nine_context_set_texture(device
, s
, tex
);
2240 if (src
->changed
.group
& NINE_STATE_SAMPLER
) {
2243 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2244 uint32_t m
= src
->changed
.sampler
[s
];
2246 const int i
= ffs(m
) - 1;
2248 nine_context_set_sampler_state(device
, s
, i
, src
->samp_advertised
[s
][i
]);
2253 /* Vertex buffers */
2254 if (src
->changed
.vtxbuf
| src
->changed
.stream_freq
) {
2255 uint32_t m
= src
->changed
.vtxbuf
| src
->changed
.stream_freq
;
2256 for (i
= 0; m
; ++i
, m
>>= 1) {
2257 if (src
->changed
.vtxbuf
& (1 << i
))
2258 nine_context_set_stream_source(device
, i
, src
->stream
[i
], src
->vtxbuf
[i
].buffer_offset
, src
->vtxbuf
[i
].stride
);
2259 if (src
->changed
.stream_freq
& (1 << i
))
2260 nine_context_set_stream_source_freq(device
, i
, src
->stream_freq
[i
]);
2265 if (src
->changed
.group
& NINE_STATE_IDXBUF
)
2266 nine_context_set_indices(device
, src
->idxbuf
);
2268 /* Vertex declaration */
2269 if ((src
->changed
.group
& NINE_STATE_VDECL
) && src
->vdecl
)
2270 nine_context_set_vertex_declaration(device
, src
->vdecl
);
2273 if (src
->changed
.group
& NINE_STATE_VS
)
2274 nine_context_set_vertex_shader(device
, src
->vs
);
2277 if (src
->changed
.group
& NINE_STATE_PS
)
2278 nine_context_set_pixel_shader(device
, src
->ps
);
2280 /* Vertex constants */
2281 if (src
->changed
.group
& NINE_STATE_VS_CONST
) {
2282 struct nine_range
*r
;
2283 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
)
2284 nine_context_set_vertex_shader_constant_f(device
, r
->bgn
,
2285 &src
->vs_const_f
[r
->bgn
* 4],
2286 sizeof(float[4]) * (r
->end
- r
->bgn
),
2288 for (r
= src
->changed
.vs_const_i
; r
; r
= r
->next
)
2289 nine_context_set_vertex_shader_constant_i(device
, r
->bgn
,
2290 &src
->vs_const_i
[r
->bgn
* 4],
2291 sizeof(int[4]) * (r
->end
- r
->bgn
),
2293 for (r
= src
->changed
.vs_const_b
; r
; r
= r
->next
)
2294 nine_context_set_vertex_shader_constant_b(device
, r
->bgn
,
2295 &src
->vs_const_b
[r
->bgn
* 4],
2296 sizeof(BOOL
) * (r
->end
- r
->bgn
),
2300 /* Pixel constants */
2301 if (src
->changed
.group
& NINE_STATE_PS_CONST
) {
2302 struct nine_range
*r
;
2303 for (r
= src
->changed
.ps_const_f
; r
; r
= r
->next
)
2304 nine_context_set_pixel_shader_constant_f(device
, r
->bgn
,
2305 &src
->ps_const_f
[r
->bgn
* 4],
2306 sizeof(float[4]) * (r
->end
- r
->bgn
),
2308 if (src
->changed
.ps_const_i
) {
2309 uint16_t m
= src
->changed
.ps_const_i
;
2310 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2312 nine_context_set_pixel_shader_constant_i_transformed(device
, i
,
2313 src
->ps_const_i
[i
], sizeof(int[4]), 1);
2315 if (src
->changed
.ps_const_b
) {
2316 uint16_t m
= src
->changed
.ps_const_b
;
2317 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2319 nine_context_set_pixel_shader_constant_b(device
, i
,
2320 &src
->ps_const_b
[i
], sizeof(BOOL
), 1);
2325 if (src
->changed
.group
& NINE_STATE_VIEWPORT
)
2326 nine_context_set_viewport(device
, &src
->viewport
);
2329 if (src
->changed
.group
& NINE_STATE_SCISSOR
)
2330 nine_context_set_scissor(device
, &src
->scissor
);
2332 /* User Clip Planes */
2333 if (src
->changed
.ucp
)
2334 for (i
= 0; i
< PIPE_MAX_CLIP_PLANES
; ++i
)
2335 if (src
->changed
.ucp
& (1 << i
))
2336 nine_context_set_clip_plane(device
, i
, (struct nine_clipplane
*)&src
->clip
.ucp
[i
][0]);
2338 if (!(src
->changed
.group
& NINE_STATE_FF
))
2341 /* Fixed function state. */
2343 if (src
->changed
.group
& NINE_STATE_FF_MATERIAL
)
2344 nine_context_set_material(device
, &src
->ff
.material
);
2346 if (src
->changed
.group
& NINE_STATE_FF_PSSTAGES
) {
2348 for (s
= 0; s
< NINE_MAX_TEXTURE_STAGES
; ++s
) {
2349 for (i
= 0; i
< NINED3DTSS_COUNT
; ++i
)
2350 if (src
->ff
.changed
.tex_stage
[s
][i
/ 32] & (1 << (i
% 32)))
2351 nine_context_set_texture_stage_state(device
, s
, i
, src
->ff
.tex_stage
[s
][i
]);
2354 if (src
->changed
.group
& NINE_STATE_FF_LIGHTING
) {
2355 for (i
= 0; i
< src
->ff
.num_lights
; ++i
)
2356 if (src
->ff
.light
[i
].Type
!= NINED3DLIGHT_INVALID
)
2357 nine_context_set_light(device
, i
, &src
->ff
.light
[i
]);
2359 nine_context_light_enable_stateblock(device
, src
->ff
.active_light
, src
->ff
.num_lights_active
);
2361 if (src
->changed
.group
& NINE_STATE_FF_VSTRANSF
) {
2362 for (i
= 0; i
< ARRAY_SIZE(src
->ff
.changed
.transform
); ++i
) {
2364 if (!src
->ff
.changed
.transform
[i
])
2366 for (s
= i
* 32; s
< (i
* 32 + 32); ++s
) {
2367 if (!(src
->ff
.changed
.transform
[i
] & (1 << (s
% 32))))
2369 nine_context_set_transform(device
, s
,
2370 nine_state_access_transform(
2371 (struct nine_ff_state
*)&src
->ff
,
2379 nine_update_state_framebuffer_clear(struct NineDevice9
*device
)
2381 struct nine_context
*context
= &device
->context
;
2383 if (context
->changed
.group
& NINE_STATE_FB
)
2384 update_framebuffer(device
, TRUE
);
2387 CSMT_ITEM_NO_WAIT(nine_context_clear_fb
,
2388 ARG_VAL(DWORD
, Count
),
2389 ARG_COPY_REF(D3DRECT
, pRects
),
2390 ARG_VAL(DWORD
, Flags
),
2391 ARG_VAL(D3DCOLOR
, Color
),
2393 ARG_VAL(DWORD
, Stencil
))
2395 struct nine_context
*context
= &device
->context
;
2396 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
2397 struct pipe_surface
*cbuf
, *zsbuf
;
2398 struct pipe_context
*pipe
= context
->pipe
;
2399 struct NineSurface9
*zsbuf_surf
= context
->ds
;
2400 struct NineSurface9
*rt
;
2403 union pipe_color_union rgba
;
2404 unsigned rt_mask
= 0;
2407 nine_update_state_framebuffer_clear(device
);
2409 if (Flags
& D3DCLEAR_TARGET
) bufs
|= PIPE_CLEAR_COLOR
;
2410 /* Ignore Z buffer if not bound */
2411 if (context
->pipe_data
.fb
.zsbuf
!= NULL
) {
2412 if (Flags
& D3DCLEAR_ZBUFFER
) bufs
|= PIPE_CLEAR_DEPTH
;
2413 if (Flags
& D3DCLEAR_STENCIL
) bufs
|= PIPE_CLEAR_STENCIL
;
2417 d3dcolor_to_pipe_color_union(&rgba
, Color
);
2419 rect
.x1
= context
->viewport
.X
;
2420 rect
.y1
= context
->viewport
.Y
;
2421 rect
.x2
= context
->viewport
.Width
+ rect
.x1
;
2422 rect
.y2
= context
->viewport
.Height
+ rect
.y1
;
2424 /* Both rectangles apply, which is weird, but that's D3D9. */
2425 if (context
->rs
[D3DRS_SCISSORTESTENABLE
]) {
2426 rect
.x1
= MAX2(rect
.x1
, context
->scissor
.minx
);
2427 rect
.y1
= MAX2(rect
.y1
, context
->scissor
.miny
);
2428 rect
.x2
= MIN2(rect
.x2
, context
->scissor
.maxx
);
2429 rect
.y2
= MIN2(rect
.y2
, context
->scissor
.maxy
);
2433 /* Maybe apps like to specify a large rect ? */
2434 if (pRects
[0].x1
<= rect
.x1
&& pRects
[0].x2
>= rect
.x2
&&
2435 pRects
[0].y1
<= rect
.y1
&& pRects
[0].y2
>= rect
.y2
) {
2436 DBG("First rect covers viewport.\n");
2442 if (rect
.x1
>= context
->pipe_data
.fb
.width
|| rect
.y1
>= context
->pipe_data
.fb
.height
)
2445 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2446 if (context
->rt
[i
] && context
->rt
[i
]->desc
.Format
!= D3DFMT_NULL
)
2450 /* fast path, clears everything at once */
2452 (!(bufs
& PIPE_CLEAR_COLOR
) || (rt_mask
== context
->rt_mask
)) &&
2453 rect
.x1
== 0 && rect
.y1
== 0 &&
2454 /* Case we clear only render target. Check clear region vs rt. */
2455 ((!(bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2456 rect
.x2
>= context
->pipe_data
.fb
.width
&&
2457 rect
.y2
>= context
->pipe_data
.fb
.height
) ||
2458 /* Case we clear depth buffer (and eventually rt too).
2459 * depth buffer size is always >= rt size. Compare to clear region */
2460 ((bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2461 rect
.x2
>= zsbuf_surf
->desc
.Width
&&
2462 rect
.y2
>= zsbuf_surf
->desc
.Height
))) {
2463 DBG("Clear fast path\n");
2464 pipe
->clear(pipe
, bufs
, &rgba
, Z
, Stencil
);
2473 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2474 rt
= context
->rt
[i
];
2475 if (!rt
|| rt
->desc
.Format
== D3DFMT_NULL
||
2476 !(bufs
& PIPE_CLEAR_COLOR
))
2477 continue; /* save space, compiler should hoist this */
2478 cbuf
= NineSurface9_GetSurface(rt
, sRGB
);
2479 for (r
= 0; r
< Count
; ++r
) {
2480 /* Don't trust users to pass these in the right order. */
2481 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2482 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2483 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2484 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2486 /* Drop negative rectangles (like wine expects). */
2487 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2488 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2491 x1
= MAX2(x1
, rect
.x1
);
2492 y1
= MAX2(y1
, rect
.y1
);
2493 x2
= MIN3(x2
, rect
.x2
, rt
->desc
.Width
);
2494 y2
= MIN3(y2
, rect
.y2
, rt
->desc
.Height
);
2496 DBG("Clearing (%u..%u)x(%u..%u)\n", x1
, x2
, y1
, y2
);
2497 pipe
->clear_render_target(pipe
, cbuf
, &rgba
,
2498 x1
, y1
, x2
- x1
, y2
- y1
, false);
2501 if (!(bufs
& PIPE_CLEAR_DEPTHSTENCIL
))
2504 bufs
&= PIPE_CLEAR_DEPTHSTENCIL
;
2506 for (r
= 0; r
< Count
; ++r
) {
2507 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2508 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2509 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2510 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2512 /* Drop negative rectangles. */
2513 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2514 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2517 x1
= MIN2(x1
, rect
.x1
);
2518 y1
= MIN2(y1
, rect
.y1
);
2519 x2
= MIN3(x2
, rect
.x2
, zsbuf_surf
->desc
.Width
);
2520 y2
= MIN3(y2
, rect
.y2
, zsbuf_surf
->desc
.Height
);
2522 zsbuf
= NineSurface9_GetSurface(zsbuf_surf
, 0);
2524 pipe
->clear_depth_stencil(pipe
, zsbuf
, bufs
, Z
, Stencil
,
2525 x1
, y1
, x2
- x1
, y2
- y1
, false);
2532 init_draw_info(struct pipe_draw_info
*info
,
2533 struct NineDevice9
*dev
, D3DPRIMITIVETYPE type
, UINT count
)
2535 info
->mode
= d3dprimitivetype_to_pipe_prim(type
);
2536 info
->count
= prim_count_to_vertex_count(type
, count
);
2537 info
->start_instance
= 0;
2538 info
->instance_count
= 1;
2539 if (dev
->context
.stream_instancedata_mask
& dev
->context
.stream_usage_mask
)
2540 info
->instance_count
= MAX2(dev
->context
.stream_freq
[0] & 0x7FFFFF, 1);
2541 info
->primitive_restart
= FALSE
;
2542 info
->has_user_indices
= FALSE
;
2543 info
->restart_index
= 0;
2544 info
->count_from_stream_output
= NULL
;
2545 info
->indirect
= NULL
;
2548 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive
,
2549 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2550 ARG_VAL(UINT
, StartVertex
),
2551 ARG_VAL(UINT
, PrimitiveCount
))
2553 struct nine_context
*context
= &device
->context
;
2554 struct pipe_draw_info info
;
2556 nine_update_state(device
);
2558 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2559 info
.index_size
= 0;
2560 info
.start
= StartVertex
;
2561 info
.index_bias
= 0;
2562 info
.min_index
= info
.start
;
2563 info
.max_index
= info
.count
- 1;
2564 info
.index
.resource
= NULL
;
2566 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2569 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive
,
2570 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2571 ARG_VAL(INT
, BaseVertexIndex
),
2572 ARG_VAL(UINT
, MinVertexIndex
),
2573 ARG_VAL(UINT
, NumVertices
),
2574 ARG_VAL(UINT
, StartIndex
),
2575 ARG_VAL(UINT
, PrimitiveCount
))
2577 struct nine_context
*context
= &device
->context
;
2578 struct pipe_draw_info info
;
2580 nine_update_state(device
);
2582 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2583 info
.index_size
= context
->index_size
;
2584 info
.start
= context
->index_offset
/ context
->index_size
+ StartIndex
;
2585 info
.index_bias
= BaseVertexIndex
;
2586 /* These don't include index bias: */
2587 info
.min_index
= MinVertexIndex
;
2588 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2589 info
.index
.resource
= context
->idxbuf
;
2591 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2594 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf
,
2595 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2596 ARG_VAL(UINT
, PrimitiveCount
),
2597 ARG_BIND_VBUF(struct pipe_vertex_buffer
, vtxbuf
))
2599 struct nine_context
*context
= &device
->context
;
2600 struct pipe_draw_info info
;
2602 nine_update_state(device
);
2604 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2605 info
.index_size
= 0;
2607 info
.index_bias
= 0;
2609 info
.max_index
= info
.count
- 1;
2610 info
.index
.resource
= NULL
;
2612 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vtxbuf
);
2614 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2617 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf
,
2618 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2619 ARG_VAL(UINT
, MinVertexIndex
),
2620 ARG_VAL(UINT
, NumVertices
),
2621 ARG_VAL(UINT
, PrimitiveCount
),
2622 ARG_BIND_VBUF(struct pipe_vertex_buffer
, vbuf
),
2623 ARG_BIND_RES(struct pipe_resource
, ibuf
),
2624 ARG_VAL(void *, user_ibuf
),
2625 ARG_VAL(UINT
, index_offset
),
2626 ARG_VAL(UINT
, index_size
))
2628 struct nine_context
*context
= &device
->context
;
2629 struct pipe_draw_info info
;
2631 nine_update_state(device
);
2633 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2634 info
.index_size
= index_size
;
2635 info
.start
= index_offset
/ info
.index_size
;
2636 info
.index_bias
= 0;
2637 info
.min_index
= MinVertexIndex
;
2638 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2639 info
.has_user_indices
= ibuf
== NULL
;
2641 info
.index
.resource
= ibuf
;
2643 info
.index
.user
= user_ibuf
;
2645 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vbuf
);
2647 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2650 CSMT_ITEM_NO_WAIT(nine_context_resource_copy_region
,
2651 ARG_BIND_REF(struct NineUnknown
, dst
),
2652 ARG_BIND_REF(struct NineUnknown
, src
),
2653 ARG_BIND_RES(struct pipe_resource
, dst_res
),
2654 ARG_VAL(unsigned, dst_level
),
2655 ARG_COPY_REF(struct pipe_box
, dst_box
),
2656 ARG_BIND_RES(struct pipe_resource
, src_res
),
2657 ARG_VAL(unsigned, src_level
),
2658 ARG_COPY_REF(struct pipe_box
, src_box
))
2660 struct nine_context
*context
= &device
->context
;
2665 context
->pipe
->resource_copy_region(context
->pipe
,
2667 dst_box
->x
, dst_box
->y
, dst_box
->z
,
2672 CSMT_ITEM_NO_WAIT(nine_context_blit
,
2673 ARG_BIND_REF(struct NineUnknown
, dst
),
2674 ARG_BIND_REF(struct NineUnknown
, src
),
2675 ARG_BIND_BLIT(struct pipe_blit_info
, blit
))
2677 struct nine_context
*context
= &device
->context
;
2682 context
->pipe
->blit(context
->pipe
, blit
);
2685 CSMT_ITEM_NO_WAIT(nine_context_clear_render_target
,
2686 ARG_BIND_REF(struct NineSurface9
, surface
),
2687 ARG_VAL(D3DCOLOR
, color
),
2690 ARG_VAL(UINT
, width
),
2691 ARG_VAL(UINT
, height
))
2693 struct nine_context
*context
= &device
->context
;
2694 struct pipe_surface
*surf
;
2695 union pipe_color_union rgba
;
2697 d3dcolor_to_pipe_color_union(&rgba
, color
);
2698 surf
= NineSurface9_GetSurface(surface
, 0);
2699 context
->pipe
->clear_render_target(context
->pipe
, surf
, &rgba
, x
, y
, width
, height
, false);
2702 CSMT_ITEM_NO_WAIT(nine_context_gen_mipmap
,
2703 ARG_BIND_REF(struct NineUnknown
, dst
),
2704 ARG_BIND_RES(struct pipe_resource
, res
),
2705 ARG_VAL(UINT
, base_level
),
2706 ARG_VAL(UINT
, last_level
),
2707 ARG_VAL(UINT
, first_layer
),
2708 ARG_VAL(UINT
, last_layer
),
2709 ARG_VAL(UINT
, filter
))
2711 struct nine_context
*context
= &device
->context
;
2713 /* We just bind dst for the bind count */
2716 util_gen_mipmap(context
->pipe
, res
, res
->format
, base_level
,
2717 last_level
, first_layer
, last_layer
, filter
);
2720 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_range_upload
,
2721 ARG_BIND_RES(struct pipe_resource
, res
),
2722 ARG_VAL(unsigned, offset
),
2723 ARG_VAL(unsigned, size
),
2724 ARG_VAL(const void *, data
))
2726 struct nine_context
*context
= &device
->context
;
2728 context
->pipe
->buffer_subdata(context
->pipe
, res
, 0, offset
, size
, data
);
2731 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_box_upload
,
2732 ARG_BIND_REF(struct NineUnknown
, dst
),
2733 ARG_BIND_RES(struct pipe_resource
, res
),
2734 ARG_VAL(unsigned, level
),
2735 ARG_COPY_REF(struct pipe_box
, dst_box
),
2736 ARG_VAL(enum pipe_format
, src_format
),
2737 ARG_VAL(const void *, src
),
2738 ARG_VAL(unsigned, src_stride
),
2739 ARG_VAL(unsigned, src_layer_stride
),
2740 ARG_COPY_REF(struct pipe_box
, src_box
))
2742 struct nine_context
*context
= &device
->context
;
2743 struct pipe_context
*pipe
= context
->pipe
;
2744 struct pipe_transfer
*transfer
= NULL
;
2747 /* We just bind dst for the bind count */
2750 map
= pipe
->transfer_map(pipe
,
2753 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
,
2754 dst_box
, &transfer
);
2758 /* Note: if formats are the sames, it will revert
2759 * to normal memcpy */
2760 (void) util_format_translate_3d(res
->format
,
2761 map
, transfer
->stride
,
2762 transfer
->layer_stride
,
2767 src_box
->x
, src_box
->y
, src_box
->z
,
2768 dst_box
->width
, dst_box
->height
,
2771 pipe_transfer_unmap(pipe
, transfer
);
2775 nine_context_create_query(struct NineDevice9
*device
, unsigned query_type
)
2777 struct pipe_context
*pipe
;
2778 struct pipe_query
*res
;
2780 pipe
= nine_context_get_pipe_acquire(device
);
2781 res
= pipe
->create_query(pipe
, query_type
, 0);
2782 nine_context_get_pipe_release(device
);
2786 CSMT_ITEM_DO_WAIT(nine_context_destroy_query
,
2787 ARG_REF(struct pipe_query
, query
))
2789 struct nine_context
*context
= &device
->context
;
2791 context
->pipe
->destroy_query(context
->pipe
, query
);
2794 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_begin_query
,
2795 ARG_REF(struct pipe_query
, query
))
2797 struct nine_context
*context
= &device
->context
;
2799 (void) context
->pipe
->begin_query(context
->pipe
, query
);
2802 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_end_query
,
2803 ARG_REF(struct pipe_query
, query
))
2805 struct nine_context
*context
= &device
->context
;
2807 (void) context
->pipe
->end_query(context
->pipe
, query
);
2811 nine_context_get_query_result(struct NineDevice9
*device
, struct pipe_query
*query
,
2812 unsigned *counter
, boolean flush
, boolean wait
,
2813 union pipe_query_result
*result
)
2815 struct pipe_context
*pipe
;
2819 nine_csmt_process(device
);
2820 else if (p_atomic_read(counter
) > 0) {
2821 if (flush
&& device
->csmt_active
)
2822 nine_queue_flush(device
->csmt_ctx
->pool
);
2823 DBG("Pending begin/end. Returning\n");
2827 pipe
= nine_context_get_pipe_acquire(device
);
2828 ret
= pipe
->get_query_result(pipe
, query
, wait
, result
);
2829 nine_context_get_pipe_release(device
);
2831 DBG("Query result %s\n", ret
? "found" : "not yet available");
2835 /* State defaults */
2837 static const DWORD nine_render_state_defaults
[NINED3DRS_LAST
+ 1] =
2839 /* [D3DRS_ZENABLE] = D3DZB_TRUE; wine: auto_depth_stencil */
2840 [D3DRS_ZENABLE
] = D3DZB_FALSE
,
2841 [D3DRS_FILLMODE
] = D3DFILL_SOLID
,
2842 [D3DRS_SHADEMODE
] = D3DSHADE_GOURAUD
,
2843 /* [D3DRS_LINEPATTERN] = 0x00000000, */
2844 [D3DRS_ZWRITEENABLE
] = TRUE
,
2845 [D3DRS_ALPHATESTENABLE
] = FALSE
,
2846 [D3DRS_LASTPIXEL
] = TRUE
,
2847 [D3DRS_SRCBLEND
] = D3DBLEND_ONE
,
2848 [D3DRS_DESTBLEND
] = D3DBLEND_ZERO
,
2849 [D3DRS_CULLMODE
] = D3DCULL_CCW
,
2850 [D3DRS_ZFUNC
] = D3DCMP_LESSEQUAL
,
2851 [D3DRS_ALPHAFUNC
] = D3DCMP_ALWAYS
,
2852 [D3DRS_ALPHAREF
] = 0,
2853 [D3DRS_DITHERENABLE
] = FALSE
,
2854 [D3DRS_ALPHABLENDENABLE
] = FALSE
,
2855 [D3DRS_FOGENABLE
] = FALSE
,
2856 [D3DRS_SPECULARENABLE
] = FALSE
,
2857 /* [D3DRS_ZVISIBLE] = 0, */
2858 [D3DRS_FOGCOLOR
] = 0,
2859 [D3DRS_FOGTABLEMODE
] = D3DFOG_NONE
,
2860 [D3DRS_FOGSTART
] = 0x00000000,
2861 [D3DRS_FOGEND
] = 0x3F800000,
2862 [D3DRS_FOGDENSITY
] = 0x3F800000,
2863 /* [D3DRS_EDGEANTIALIAS] = FALSE, */
2864 [D3DRS_RANGEFOGENABLE
] = FALSE
,
2865 [D3DRS_STENCILENABLE
] = FALSE
,
2866 [D3DRS_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2867 [D3DRS_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2868 [D3DRS_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2869 [D3DRS_STENCILREF
] = 0,
2870 [D3DRS_STENCILMASK
] = 0xFFFFFFFF,
2871 [D3DRS_STENCILFUNC
] = D3DCMP_ALWAYS
,
2872 [D3DRS_STENCILWRITEMASK
] = 0xFFFFFFFF,
2873 [D3DRS_TEXTUREFACTOR
] = 0xFFFFFFFF,
2882 [D3DRS_CLIPPING
] = TRUE
,
2883 [D3DRS_LIGHTING
] = TRUE
,
2884 [D3DRS_AMBIENT
] = 0,
2885 [D3DRS_FOGVERTEXMODE
] = D3DFOG_NONE
,
2886 [D3DRS_COLORVERTEX
] = TRUE
,
2887 [D3DRS_LOCALVIEWER
] = TRUE
,
2888 [D3DRS_NORMALIZENORMALS
] = FALSE
,
2889 [D3DRS_DIFFUSEMATERIALSOURCE
] = D3DMCS_COLOR1
,
2890 [D3DRS_SPECULARMATERIALSOURCE
] = D3DMCS_COLOR2
,
2891 [D3DRS_AMBIENTMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2892 [D3DRS_EMISSIVEMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2893 [D3DRS_VERTEXBLEND
] = D3DVBF_DISABLE
,
2894 [D3DRS_CLIPPLANEENABLE
] = 0,
2895 /* [D3DRS_SOFTWAREVERTEXPROCESSING] = FALSE, */
2896 [D3DRS_POINTSIZE
] = 0x3F800000,
2897 [D3DRS_POINTSIZE_MIN
] = 0x3F800000,
2898 [D3DRS_POINTSPRITEENABLE
] = FALSE
,
2899 [D3DRS_POINTSCALEENABLE
] = FALSE
,
2900 [D3DRS_POINTSCALE_A
] = 0x3F800000,
2901 [D3DRS_POINTSCALE_B
] = 0x00000000,
2902 [D3DRS_POINTSCALE_C
] = 0x00000000,
2903 [D3DRS_MULTISAMPLEANTIALIAS
] = TRUE
,
2904 [D3DRS_MULTISAMPLEMASK
] = 0xFFFFFFFF,
2905 [D3DRS_PATCHEDGESTYLE
] = D3DPATCHEDGE_DISCRETE
,
2906 /* [D3DRS_PATCHSEGMENTS] = 0x3F800000, */
2907 [D3DRS_DEBUGMONITORTOKEN
] = 0xDEADCAFE,
2908 [D3DRS_POINTSIZE_MAX
] = 0x3F800000, /* depends on cap */
2909 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = FALSE
,
2910 [D3DRS_COLORWRITEENABLE
] = 0x0000000f,
2911 [D3DRS_TWEENFACTOR
] = 0x00000000,
2912 [D3DRS_BLENDOP
] = D3DBLENDOP_ADD
,
2913 [D3DRS_POSITIONDEGREE
] = D3DDEGREE_CUBIC
,
2914 [D3DRS_NORMALDEGREE
] = D3DDEGREE_LINEAR
,
2915 [D3DRS_SCISSORTESTENABLE
] = FALSE
,
2916 [D3DRS_SLOPESCALEDEPTHBIAS
] = 0,
2917 [D3DRS_MINTESSELLATIONLEVEL
] = 0x3F800000,
2918 [D3DRS_MAXTESSELLATIONLEVEL
] = 0x3F800000,
2919 [D3DRS_ANTIALIASEDLINEENABLE
] = FALSE
,
2920 [D3DRS_ADAPTIVETESS_X
] = 0x00000000,
2921 [D3DRS_ADAPTIVETESS_Y
] = 0x00000000,
2922 [D3DRS_ADAPTIVETESS_Z
] = 0x3F800000,
2923 [D3DRS_ADAPTIVETESS_W
] = 0x00000000,
2924 [D3DRS_ENABLEADAPTIVETESSELLATION
] = FALSE
,
2925 [D3DRS_TWOSIDEDSTENCILMODE
] = FALSE
,
2926 [D3DRS_CCW_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2927 [D3DRS_CCW_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2928 [D3DRS_CCW_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2929 [D3DRS_CCW_STENCILFUNC
] = D3DCMP_ALWAYS
,
2930 [D3DRS_COLORWRITEENABLE1
] = 0x0000000F,
2931 [D3DRS_COLORWRITEENABLE2
] = 0x0000000F,
2932 [D3DRS_COLORWRITEENABLE3
] = 0x0000000F,
2933 [D3DRS_BLENDFACTOR
] = 0xFFFFFFFF,
2934 [D3DRS_SRGBWRITEENABLE
] = 0,
2935 [D3DRS_DEPTHBIAS
] = 0,
2944 [D3DRS_SEPARATEALPHABLENDENABLE
] = FALSE
,
2945 [D3DRS_SRCBLENDALPHA
] = D3DBLEND_ONE
,
2946 [D3DRS_DESTBLENDALPHA
] = D3DBLEND_ZERO
,
2947 [D3DRS_BLENDOPALPHA
] = D3DBLENDOP_ADD
,
2948 [NINED3DRS_VSPOINTSIZE
] = FALSE
,
2949 [NINED3DRS_RTMASK
] = 0xf,
2950 [NINED3DRS_ALPHACOVERAGE
] = FALSE
,
2951 [NINED3DRS_MULTISAMPLE
] = FALSE
2953 static const DWORD nine_tex_stage_state_defaults
[NINED3DTSS_LAST
+ 1] =
2955 [D3DTSS_COLOROP
] = D3DTOP_DISABLE
,
2956 [D3DTSS_ALPHAOP
] = D3DTOP_DISABLE
,
2957 [D3DTSS_COLORARG1
] = D3DTA_TEXTURE
,
2958 [D3DTSS_COLORARG2
] = D3DTA_CURRENT
,
2959 [D3DTSS_COLORARG0
] = D3DTA_CURRENT
,
2960 [D3DTSS_ALPHAARG1
] = D3DTA_TEXTURE
,
2961 [D3DTSS_ALPHAARG2
] = D3DTA_CURRENT
,
2962 [D3DTSS_ALPHAARG0
] = D3DTA_CURRENT
,
2963 [D3DTSS_RESULTARG
] = D3DTA_CURRENT
,
2964 [D3DTSS_BUMPENVMAT00
] = 0,
2965 [D3DTSS_BUMPENVMAT01
] = 0,
2966 [D3DTSS_BUMPENVMAT10
] = 0,
2967 [D3DTSS_BUMPENVMAT11
] = 0,
2968 [D3DTSS_BUMPENVLSCALE
] = 0,
2969 [D3DTSS_BUMPENVLOFFSET
] = 0,
2970 [D3DTSS_TEXCOORDINDEX
] = 0,
2971 [D3DTSS_TEXTURETRANSFORMFLAGS
] = D3DTTFF_DISABLE
,
2973 static const DWORD nine_samp_state_defaults
[NINED3DSAMP_LAST
+ 1] =
2975 [D3DSAMP_ADDRESSU
] = D3DTADDRESS_WRAP
,
2976 [D3DSAMP_ADDRESSV
] = D3DTADDRESS_WRAP
,
2977 [D3DSAMP_ADDRESSW
] = D3DTADDRESS_WRAP
,
2978 [D3DSAMP_BORDERCOLOR
] = 0,
2979 [D3DSAMP_MAGFILTER
] = D3DTEXF_POINT
,
2980 [D3DSAMP_MINFILTER
] = D3DTEXF_POINT
,
2981 [D3DSAMP_MIPFILTER
] = D3DTEXF_NONE
,
2982 [D3DSAMP_MIPMAPLODBIAS
] = 0,
2983 [D3DSAMP_MAXMIPLEVEL
] = 0,
2984 [D3DSAMP_MAXANISOTROPY
] = 1,
2985 [D3DSAMP_SRGBTEXTURE
] = 0,
2986 [D3DSAMP_ELEMENTINDEX
] = 0,
2987 [D3DSAMP_DMAPOFFSET
] = 0,
2988 [NINED3DSAMP_MINLOD
] = 0,
2989 [NINED3DSAMP_SHADOW
] = 0,
2990 [NINED3DSAMP_CUBETEX
] = 0
2993 /* Note: The following 4 functions assume there is no
2994 * pending commands */
2996 void nine_state_restore_non_cso(struct NineDevice9
*device
)
2998 struct nine_context
*context
= &device
->context
;
3000 context
->changed
.group
= NINE_STATE_ALL
;
3001 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
3002 context
->changed
.ucp
= TRUE
;
3003 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
| NINE_STATE_COMMIT_CONST_PS
;
3007 nine_state_set_defaults(struct NineDevice9
*device
, const D3DCAPS9
*caps
,
3010 struct nine_state
*state
= &device
->state
;
3011 struct nine_context
*context
= &device
->context
;
3014 /* Initialize defaults.
3016 memcpy(context
->rs
, nine_render_state_defaults
, sizeof(context
->rs
));
3018 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
) {
3019 memcpy(&state
->ff
.tex_stage
[s
], nine_tex_stage_state_defaults
,
3020 sizeof(state
->ff
.tex_stage
[s
]));
3021 state
->ff
.tex_stage
[s
][D3DTSS_TEXCOORDINDEX
] = s
;
3023 state
->ff
.tex_stage
[0][D3DTSS_COLOROP
] = D3DTOP_MODULATE
;
3024 state
->ff
.tex_stage
[0][D3DTSS_ALPHAOP
] = D3DTOP_SELECTARG1
;
3026 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
)
3027 memcpy(&context
->ff
.tex_stage
[s
], state
->ff
.tex_stage
[s
],
3028 sizeof(state
->ff
.tex_stage
[s
]));
3030 memset(&context
->bumpmap_vars
, 0, sizeof(context
->bumpmap_vars
));
3032 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
3033 memcpy(&context
->samp
[s
], nine_samp_state_defaults
,
3034 sizeof(context
->samp
[s
]));
3035 memcpy(&state
->samp_advertised
[s
], nine_samp_state_defaults
,
3036 sizeof(state
->samp_advertised
[s
]));
3039 memset(state
->vs_const_f
, 0, VS_CONST_F_SIZE(device
));
3040 memset(context
->vs_const_f
, 0, device
->vs_const_size
);
3041 if (context
->vs_const_f_swvp
)
3042 memset(context
->vs_const_f_swvp
, 0, NINE_MAX_CONST_F_SWVP
* sizeof(float[4]));
3043 memset(state
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
3044 memset(context
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
3045 memset(state
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
3046 memset(context
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
3047 memset(state
->ps_const_f
, 0, device
->ps_const_size
);
3048 memset(context
->ps_const_f
, 0, device
->ps_const_size
);
3049 memset(state
->ps_const_i
, 0, sizeof(state
->ps_const_i
));
3050 memset(context
->ps_const_i
, 0, sizeof(context
->ps_const_i
));
3051 memset(state
->ps_const_b
, 0, sizeof(state
->ps_const_b
));
3052 memset(context
->ps_const_b
, 0, sizeof(context
->ps_const_b
));
3054 /* Cap dependent initial state:
3056 context
->rs
[D3DRS_POINTSIZE_MAX
] = fui(caps
->MaxPointSize
);
3058 memcpy(state
->rs_advertised
, context
->rs
, sizeof(context
->rs
));
3060 /* Set changed flags to initialize driver.
3062 context
->changed
.group
= NINE_STATE_ALL
;
3063 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
3064 context
->changed
.ucp
= TRUE
;
3066 context
->ff
.changed
.transform
[0] = ~0;
3067 context
->ff
.changed
.transform
[D3DTS_WORLD
/ 32] |= 1 << (D3DTS_WORLD
% 32);
3070 state
->viewport
.MinZ
= context
->viewport
.MinZ
= 0.0f
;
3071 state
->viewport
.MaxZ
= context
->viewport
.MaxZ
= 1.0f
;
3074 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
)
3075 context
->changed
.sampler
[s
] = ~0;
3078 context
->dummy_vbo_bound_at
= -1;
3079 context
->vbo_bound_done
= FALSE
;
3084 nine_state_clear(struct nine_state
*state
, const boolean device
)
3088 for (i
= 0; i
< ARRAY_SIZE(state
->rt
); ++i
)
3089 nine_bind(&state
->rt
[i
], NULL
);
3090 nine_bind(&state
->ds
, NULL
);
3091 nine_bind(&state
->vs
, NULL
);
3092 nine_bind(&state
->ps
, NULL
);
3093 nine_bind(&state
->vdecl
, NULL
);
3094 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
3095 nine_bind(&state
->stream
[i
], NULL
);
3097 nine_bind(&state
->idxbuf
, NULL
);
3098 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
) {
3100 state
->texture
[i
] &&
3101 --state
->texture
[i
]->bind_count
== 0)
3102 list_delinit(&state
->texture
[i
]->list
);
3103 nine_bind(&state
->texture
[i
], NULL
);
3108 nine_context_clear(struct NineDevice9
*device
)
3110 struct nine_context
*context
= &device
->context
;
3111 struct pipe_context
*pipe
= context
->pipe
;
3112 struct cso_context
*cso
= context
->cso
;
3115 /* Early device ctor failure. Nothing to do */
3119 pipe
->bind_vs_state(pipe
, NULL
);
3120 pipe
->bind_fs_state(pipe
, NULL
);
3122 /* Don't unbind constant buffers, they're device-private and
3123 * do not change on Reset.
3126 cso_set_samplers(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
3127 cso_set_samplers(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
3129 cso_set_sampler_views(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
3130 cso_set_sampler_views(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
3132 pipe
->set_vertex_buffers(pipe
, 0, device
->caps
.MaxStreams
, NULL
);
3134 for (i
= 0; i
< ARRAY_SIZE(context
->rt
); ++i
)
3135 nine_bind(&context
->rt
[i
], NULL
);
3136 nine_bind(&context
->ds
, NULL
);
3137 nine_bind(&context
->vs
, NULL
);
3138 nine_bind(&context
->ps
, NULL
);
3139 nine_bind(&context
->vdecl
, NULL
);
3140 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
3141 pipe_vertex_buffer_unreference(&context
->vtxbuf
[i
]);
3142 pipe_resource_reference(&context
->idxbuf
, NULL
);
3144 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
) {
3145 context
->texture
[i
].enabled
= FALSE
;
3146 pipe_resource_reference(&context
->texture
[i
].resource
,
3148 pipe_sampler_view_reference(&context
->texture
[i
].view
[0],
3150 pipe_sampler_view_reference(&context
->texture
[i
].view
[1],
3156 nine_state_init_sw(struct NineDevice9
*device
)
3158 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3159 struct pipe_rasterizer_state rast
;
3160 struct pipe_blend_state blend
;
3161 struct pipe_depth_stencil_alpha_state dsa
;
3162 struct pipe_framebuffer_state fb
;
3164 /* Only used with Streamout */
3165 memset(&rast
, 0, sizeof(rast
));
3166 rast
.rasterizer_discard
= true;
3167 rast
.point_quad_rasterization
= 1; /* to make llvmpipe happy */
3168 cso_set_rasterizer(device
->cso_sw
, &rast
);
3170 /* dummy settings */
3171 memset(&blend
, 0, sizeof(blend
));
3172 memset(&dsa
, 0, sizeof(dsa
));
3173 memset(&fb
, 0, sizeof(fb
));
3174 cso_set_blend(device
->cso_sw
, &blend
);
3175 cso_set_depth_stencil_alpha(device
->cso_sw
, &dsa
);
3176 cso_set_framebuffer(device
->cso_sw
, &fb
);
3177 cso_set_viewport_dims(device
->cso_sw
, 1.0, 1.0, false);
3178 cso_set_fragment_shader_handle(device
->cso_sw
, util_make_empty_fragment_shader(pipe_sw
));
3181 /* There is duplication with update_vertex_elements.
3182 * TODO: Share the code */
3185 update_vertex_elements_sw(struct NineDevice9
*device
)
3187 struct nine_state
*state
= &device
->state
;
3188 const struct NineVertexDeclaration9
*vdecl
= device
->state
.vdecl
;
3189 const struct NineVertexShader9
*vs
;
3192 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
3193 char used_streams
[device
->caps
.MaxStreams
];
3194 int dummy_vbo_stream
= -1;
3195 BOOL need_dummy_vbo
= FALSE
;
3196 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
3197 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
3199 memset(vdecl_index_map
, -1, 16);
3200 memset(used_streams
, 0, device
->caps
.MaxStreams
);
3201 vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
3204 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
3205 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
3206 n
, vs
->input_map
[n
].ndecl
, vdecl
);
3208 for (i
= 0; i
< vdecl
->nelems
; i
++) {
3209 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
3210 vdecl_index_map
[n
] = i
;
3211 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
3215 if (vdecl_index_map
[n
] < 0)
3216 need_dummy_vbo
= TRUE
;
3219 /* No vertex declaration. Likely will never happen in practice,
3220 * but we need not crash on this */
3221 need_dummy_vbo
= TRUE
;
3224 if (need_dummy_vbo
) {
3225 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
3226 if (!used_streams
[i
]) {
3227 dummy_vbo_stream
= i
;
3232 /* TODO handle dummy_vbo */
3233 assert (!need_dummy_vbo
);
3235 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
3236 index
= vdecl_index_map
[n
];
3238 ve
[n
] = vdecl
->elems
[index
];
3239 b
= ve
[n
].vertex_buffer_index
;
3240 /* XXX wine just uses 1 here: */
3241 if (state
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
3242 ve
[n
].instance_divisor
= state
->stream_freq
[b
] & 0x7FFFFF;
3244 /* if the vertex declaration is incomplete compared to what the
3245 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
3246 * This is not precised by the spec, but is the behaviour
3248 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
3249 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
3250 ve
[n
].src_offset
= 0;
3251 ve
[n
].instance_divisor
= 0;
3255 cso_set_vertex_elements(device
->cso_sw
, vs
->num_inputs
, ve
);
3259 update_vertex_buffers_sw(struct NineDevice9
*device
, int start_vertice
, int num_vertices
)
3261 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3262 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3263 struct nine_state
*state
= &device
->state
;
3264 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3265 struct pipe_vertex_buffer vtxbuf
;
3266 uint32_t mask
= 0xf;
3269 DBG("mask=%x\n", mask
);
3271 /* TODO: handle dummy_vbo_bound_at */
3273 for (i
= 0; mask
; mask
>>= 1, ++i
) {
3275 if (state
->stream
[i
]) {
3277 struct pipe_resource
*buf
;
3278 struct pipe_box box
;
3281 vtxbuf
= state
->vtxbuf
[i
];
3282 buf
= NineVertexBuffer9_GetResource(state
->stream
[i
], &offset
);
3284 DBG("Locking %p (offset %d, length %d)\n", buf
,
3285 vtxbuf
.buffer_offset
, num_vertices
* vtxbuf
.stride
);
3287 u_box_1d(vtxbuf
.buffer_offset
+ offset
+ start_vertice
* vtxbuf
.stride
,
3288 num_vertices
* vtxbuf
.stride
, &box
);
3290 userbuf
= pipe
->transfer_map(pipe
, buf
, 0, PIPE_TRANSFER_READ
, &box
,
3291 &(sw_internal
->transfers_so
[i
]));
3292 vtxbuf
.is_user_buffer
= true;
3293 vtxbuf
.buffer
.user
= userbuf
;
3295 if (!device
->driver_caps
.user_sw_vbufs
) {
3296 vtxbuf
.buffer
.resource
= NULL
;
3297 vtxbuf
.is_user_buffer
= false;
3298 u_upload_data(device
->pipe_sw
->stream_uploader
,
3303 &(vtxbuf
.buffer_offset
),
3304 &(vtxbuf
.buffer
.resource
));
3305 u_upload_unmap(device
->pipe_sw
->stream_uploader
);
3307 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, &vtxbuf
);
3308 pipe_vertex_buffer_unreference(&vtxbuf
);
3310 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3313 nine_context_get_pipe_release(device
);
3317 update_vs_constants_sw(struct NineDevice9
*device
)
3319 struct nine_state
*state
= &device
->state
;
3320 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3325 struct pipe_constant_buffer cb
;
3329 cb
.buffer_offset
= 0;
3330 cb
.buffer_size
= 4096 * sizeof(float[4]);
3331 cb
.user_buffer
= state
->vs_const_f
;
3333 if (state
->vs
->lconstf
.ranges
) {
3334 const struct nine_lconstf
*lconstf
= &device
->state
.vs
->lconstf
;
3335 const struct nine_range
*r
= lconstf
->ranges
;
3337 float *dst
= device
->state
.vs_lconstf_temp
;
3338 float *src
= (float *)cb
.user_buffer
;
3339 memcpy(dst
, src
, 8192 * sizeof(float[4]));
3341 unsigned p
= r
->bgn
;
3342 unsigned c
= r
->end
- r
->bgn
;
3343 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
3347 cb
.user_buffer
= dst
;
3350 buf
= cb
.user_buffer
;
3351 if (!device
->driver_caps
.user_sw_cbufs
) {
3352 u_upload_data(device
->pipe_sw
->const_uploader
,
3357 &(cb
.buffer_offset
),
3359 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3360 cb
.user_buffer
= NULL
;
3363 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 0, &cb
);
3365 pipe_resource_reference(&cb
.buffer
, NULL
);
3367 cb
.user_buffer
= (char *)buf
+ 4096 * sizeof(float[4]);
3368 if (!device
->driver_caps
.user_sw_cbufs
) {
3369 u_upload_data(device
->pipe_sw
->const_uploader
,
3374 &(cb
.buffer_offset
),
3376 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3377 cb
.user_buffer
= NULL
;
3380 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 1, &cb
);
3382 pipe_resource_reference(&cb
.buffer
, NULL
);
3386 struct pipe_constant_buffer cb
;
3389 cb
.buffer_offset
= 0;
3390 cb
.buffer_size
= 2048 * sizeof(float[4]);
3391 cb
.user_buffer
= state
->vs_const_i
;
3393 if (!device
->driver_caps
.user_sw_cbufs
) {
3394 u_upload_data(device
->pipe_sw
->const_uploader
,
3399 &(cb
.buffer_offset
),
3401 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3402 cb
.user_buffer
= NULL
;
3405 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 2, &cb
);
3407 pipe_resource_reference(&cb
.buffer
, NULL
);
3411 struct pipe_constant_buffer cb
;
3414 cb
.buffer_offset
= 0;
3415 cb
.buffer_size
= 512 * sizeof(float[4]);
3416 cb
.user_buffer
= state
->vs_const_b
;
3418 if (!device
->driver_caps
.user_sw_cbufs
) {
3419 u_upload_data(device
->pipe_sw
->const_uploader
,
3424 &(cb
.buffer_offset
),
3426 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3427 cb
.user_buffer
= NULL
;
3430 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 3, &cb
);
3432 pipe_resource_reference(&cb
.buffer
, NULL
);
3436 struct pipe_constant_buffer cb
;
3437 const D3DVIEWPORT9
*vport
= &device
->state
.viewport
;
3438 float viewport_data
[8] = {(float)vport
->Width
* 0.5f
,
3439 (float)vport
->Height
* -0.5f
, vport
->MaxZ
- vport
->MinZ
, 0.f
,
3440 (float)vport
->Width
* 0.5f
+ (float)vport
->X
,
3441 (float)vport
->Height
* 0.5f
+ (float)vport
->Y
,
3445 cb
.buffer_offset
= 0;
3446 cb
.buffer_size
= 2 * sizeof(float[4]);
3447 cb
.user_buffer
= viewport_data
;
3450 u_upload_data(device
->pipe_sw
->const_uploader
,
3455 &(cb
.buffer_offset
),
3457 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3458 cb
.user_buffer
= NULL
;
3461 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 4, &cb
);
3463 pipe_resource_reference(&cb
.buffer
, NULL
);
3469 nine_state_prepare_draw_sw(struct NineDevice9
*device
, struct NineVertexDeclaration9
*vdecl_out
,
3470 int start_vertice
, int num_vertices
, struct pipe_stream_output_info
*so
)
3472 struct nine_state
*state
= &device
->state
;
3473 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
3474 struct NineVertexShader9
*vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
3476 assert(programmable_vs
);
3478 DBG("Preparing draw\n");
3479 cso_set_vertex_shader_handle(device
->cso_sw
,
3480 NineVertexShader9_GetVariantProcessVertices(vs
, vdecl_out
, so
));
3481 update_vertex_elements_sw(device
);
3482 update_vertex_buffers_sw(device
, start_vertice
, num_vertices
);
3483 update_vs_constants_sw(device
);
3484 DBG("Preparation succeeded\n");
3488 nine_state_after_draw_sw(struct NineDevice9
*device
)
3490 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3491 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3492 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3495 for (i
= 0; i
< 4; i
++) {
3496 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3497 if (sw_internal
->transfers_so
[i
])
3498 pipe
->transfer_unmap(pipe
, sw_internal
->transfers_so
[i
]);
3499 sw_internal
->transfers_so
[i
] = NULL
;
3501 nine_context_get_pipe_release(device
);
/* Tear down software-pipeline state.  Intentionally empty: all CSOs bound
 * on cso_sw are owned and destroyed by the cso context itself. */
void
nine_state_destroy_sw(struct NineDevice9 *device)
{
    (void) device;
    /* Everything destroyed with cso */
}
3512 static const DWORD nine_render_states_pixel[] =
3514 D3DRS_ALPHABLENDENABLE,
3517 D3DRS_ALPHATESTENABLE,
3518 D3DRS_ANTIALIASEDLINEENABLE,
3522 D3DRS_CCW_STENCILFAIL,
3523 D3DRS_CCW_STENCILPASS,
3524 D3DRS_CCW_STENCILZFAIL,
3525 D3DRS_COLORWRITEENABLE,
3526 D3DRS_COLORWRITEENABLE1,
3527 D3DRS_COLORWRITEENABLE2,
3528 D3DRS_COLORWRITEENABLE3,
3531 D3DRS_DESTBLENDALPHA,
3538 D3DRS_SCISSORTESTENABLE,
3539 D3DRS_SEPARATEALPHABLENDENABLE,
3541 D3DRS_SLOPESCALEDEPTHBIAS,
3543 D3DRS_SRCBLENDALPHA,
3544 D3DRS_SRGBWRITEENABLE,
3545 D3DRS_STENCILENABLE,
3551 D3DRS_STENCILWRITEMASK,
3553 D3DRS_TEXTUREFACTOR,
3554 D3DRS_TWOSIDEDSTENCILMODE,
3576 const uint32_t nine_render_states_pixel
[(NINED3DRS_LAST
+ 31) / 32] =
3578 0x0f99c380, 0x1ff00070, 0x00000000, 0x00000000,
3579 0x000000ff, 0xde01c900, 0x0003ffcf
3583 static const DWORD nine_render_states_vertex[] =
3585 D3DRS_ADAPTIVETESS_W,
3586 D3DRS_ADAPTIVETESS_X,
3587 D3DRS_ADAPTIVETESS_Y,
3588 D3DRS_ADAPTIVETESS_Z,
3590 D3DRS_AMBIENTMATERIALSOURCE,
3592 D3DRS_CLIPPLANEENABLE,
3595 D3DRS_DIFFUSEMATERIALSOURCE,
3596 D3DRS_EMISSIVEMATERIALSOURCE,
3597 D3DRS_ENABLEADAPTIVETESSELLATION,
3604 D3DRS_FOGVERTEXMODE,
3605 D3DRS_INDEXEDVERTEXBLENDENABLE,
3608 D3DRS_MAXTESSELLATIONLEVEL,
3609 D3DRS_MINTESSELLATIONLEVEL,
3610 D3DRS_MULTISAMPLEANTIALIAS,
3611 D3DRS_MULTISAMPLEMASK,
3613 D3DRS_NORMALIZENORMALS,
3614 D3DRS_PATCHEDGESTYLE,
3618 D3DRS_POINTSCALEENABLE,
3620 D3DRS_POINTSIZE_MAX,
3621 D3DRS_POINTSIZE_MIN,
3622 D3DRS_POINTSPRITEENABLE,
3623 D3DRS_POSITIONDEGREE,
3624 D3DRS_RANGEFOGENABLE,
3626 D3DRS_SPECULARENABLE,
3627 D3DRS_SPECULARMATERIALSOURCE,
3632 const uint32_t nine_render_states_vertex
[(NINED3DRS_LAST
+ 31) / 32] =
3634 0x30400200, 0x0001007c, 0x00000000, 0x00000000,
3635 0xfd9efb00, 0x01fc34cf, 0x00000000
3638 /* TODO: put in the right values */
3639 const uint32_t nine_render_state_group
[NINED3DRS_LAST
+ 1] =
3641 [D3DRS_ZENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3642 [D3DRS_FILLMODE
] = NINE_STATE_RASTERIZER
,
3643 [D3DRS_SHADEMODE
] = NINE_STATE_RASTERIZER
,
3644 [D3DRS_ZWRITEENABLE
] = NINE_STATE_DSA
,
3645 [D3DRS_ALPHATESTENABLE
] = NINE_STATE_DSA
,
3646 [D3DRS_LASTPIXEL
] = NINE_STATE_RASTERIZER
,
3647 [D3DRS_SRCBLEND
] = NINE_STATE_BLEND
,
3648 [D3DRS_DESTBLEND
] = NINE_STATE_BLEND
,
3649 [D3DRS_CULLMODE
] = NINE_STATE_RASTERIZER
,
3650 [D3DRS_ZFUNC
] = NINE_STATE_DSA
,
3651 [D3DRS_ALPHAREF
] = NINE_STATE_DSA
,
3652 [D3DRS_ALPHAFUNC
] = NINE_STATE_DSA
,
3653 [D3DRS_DITHERENABLE
] = NINE_STATE_BLEND
,
3654 [D3DRS_ALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3655 [D3DRS_FOGENABLE
] = NINE_STATE_FF_OTHER
| NINE_STATE_FOG_SHADER
| NINE_STATE_PS_CONST
,
3656 [D3DRS_SPECULARENABLE
] = NINE_STATE_FF_LIGHTING
,
3657 [D3DRS_FOGCOLOR
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3658 [D3DRS_FOGTABLEMODE
] = NINE_STATE_FF_OTHER
| NINE_STATE_FOG_SHADER
| NINE_STATE_PS_CONST
,
3659 [D3DRS_FOGSTART
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3660 [D3DRS_FOGEND
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3661 [D3DRS_FOGDENSITY
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3662 [D3DRS_RANGEFOGENABLE
] = NINE_STATE_FF_OTHER
,
3663 [D3DRS_STENCILENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3664 [D3DRS_STENCILFAIL
] = NINE_STATE_DSA
,
3665 [D3DRS_STENCILZFAIL
] = NINE_STATE_DSA
,
3666 [D3DRS_STENCILPASS
] = NINE_STATE_DSA
,
3667 [D3DRS_STENCILFUNC
] = NINE_STATE_DSA
,
3668 [D3DRS_STENCILREF
] = NINE_STATE_STENCIL_REF
,
3669 [D3DRS_STENCILMASK
] = NINE_STATE_DSA
,
3670 [D3DRS_STENCILWRITEMASK
] = NINE_STATE_DSA
,
3671 [D3DRS_TEXTUREFACTOR
] = NINE_STATE_FF_PSSTAGES
,
3672 [D3DRS_WRAP0
] = NINE_STATE_UNHANDLED
, /* cylindrical wrap is crazy */
3673 [D3DRS_WRAP1
] = NINE_STATE_UNHANDLED
,
3674 [D3DRS_WRAP2
] = NINE_STATE_UNHANDLED
,
3675 [D3DRS_WRAP3
] = NINE_STATE_UNHANDLED
,
3676 [D3DRS_WRAP4
] = NINE_STATE_UNHANDLED
,
3677 [D3DRS_WRAP5
] = NINE_STATE_UNHANDLED
,
3678 [D3DRS_WRAP6
] = NINE_STATE_UNHANDLED
,
3679 [D3DRS_WRAP7
] = NINE_STATE_UNHANDLED
,
3680 [D3DRS_CLIPPING
] = 0, /* software vertex processing only */
3681 [D3DRS_LIGHTING
] = NINE_STATE_FF_LIGHTING
,
3682 [D3DRS_AMBIENT
] = NINE_STATE_FF_LIGHTING
| NINE_STATE_FF_MATERIAL
,
3683 [D3DRS_FOGVERTEXMODE
] = NINE_STATE_FF_OTHER
,
3684 [D3DRS_COLORVERTEX
] = NINE_STATE_FF_LIGHTING
,
3685 [D3DRS_LOCALVIEWER
] = NINE_STATE_FF_LIGHTING
,
3686 [D3DRS_NORMALIZENORMALS
] = NINE_STATE_FF_OTHER
,
3687 [D3DRS_DIFFUSEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3688 [D3DRS_SPECULARMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3689 [D3DRS_AMBIENTMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3690 [D3DRS_EMISSIVEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3691 [D3DRS_VERTEXBLEND
] = NINE_STATE_FF_OTHER
,
3692 [D3DRS_CLIPPLANEENABLE
] = NINE_STATE_RASTERIZER
,
3693 [D3DRS_POINTSIZE
] = NINE_STATE_RASTERIZER
,
3694 [D3DRS_POINTSIZE_MIN
] = NINE_STATE_RASTERIZER
| NINE_STATE_POINTSIZE_SHADER
,
3695 [D3DRS_POINTSPRITEENABLE
] = NINE_STATE_RASTERIZER
,
3696 [D3DRS_POINTSCALEENABLE
] = NINE_STATE_FF_OTHER
,
3697 [D3DRS_POINTSCALE_A
] = NINE_STATE_FF_OTHER
,
3698 [D3DRS_POINTSCALE_B
] = NINE_STATE_FF_OTHER
,
3699 [D3DRS_POINTSCALE_C
] = NINE_STATE_FF_OTHER
,
3700 [D3DRS_MULTISAMPLEANTIALIAS
] = NINE_STATE_MULTISAMPLE
,
3701 [D3DRS_MULTISAMPLEMASK
] = NINE_STATE_SAMPLE_MASK
,
3702 [D3DRS_PATCHEDGESTYLE
] = NINE_STATE_UNHANDLED
,
3703 [D3DRS_DEBUGMONITORTOKEN
] = NINE_STATE_UNHANDLED
,
3704 [D3DRS_POINTSIZE_MAX
] = NINE_STATE_RASTERIZER
| NINE_STATE_POINTSIZE_SHADER
,
3705 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = NINE_STATE_FF_OTHER
,
3706 [D3DRS_COLORWRITEENABLE
] = NINE_STATE_BLEND
,
3707 [D3DRS_TWEENFACTOR
] = NINE_STATE_FF_OTHER
,
3708 [D3DRS_BLENDOP
] = NINE_STATE_BLEND
,
3709 [D3DRS_POSITIONDEGREE
] = NINE_STATE_UNHANDLED
,
3710 [D3DRS_NORMALDEGREE
] = NINE_STATE_UNHANDLED
,
3711 [D3DRS_SCISSORTESTENABLE
] = NINE_STATE_RASTERIZER
,
3712 [D3DRS_SLOPESCALEDEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3713 [D3DRS_ANTIALIASEDLINEENABLE
] = NINE_STATE_RASTERIZER
,
3714 [D3DRS_MINTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3715 [D3DRS_MAXTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3716 [D3DRS_ADAPTIVETESS_X
] = NINE_STATE_UNHANDLED
,
3717 [D3DRS_ADAPTIVETESS_Y
] = NINE_STATE_UNHANDLED
,
3718 [D3DRS_ADAPTIVETESS_Z
] = NINE_STATE_UNHANDLED
,
3719 [D3DRS_ADAPTIVETESS_W
] = NINE_STATE_UNHANDLED
,
3720 [D3DRS_ENABLEADAPTIVETESSELLATION
] = NINE_STATE_UNHANDLED
,
3721 [D3DRS_TWOSIDEDSTENCILMODE
] = NINE_STATE_DSA
,
3722 [D3DRS_CCW_STENCILFAIL
] = NINE_STATE_DSA
,
3723 [D3DRS_CCW_STENCILZFAIL
] = NINE_STATE_DSA
,
3724 [D3DRS_CCW_STENCILPASS
] = NINE_STATE_DSA
,
3725 [D3DRS_CCW_STENCILFUNC
] = NINE_STATE_DSA
,
3726 [D3DRS_COLORWRITEENABLE1
] = NINE_STATE_BLEND
,
3727 [D3DRS_COLORWRITEENABLE2
] = NINE_STATE_BLEND
,
3728 [D3DRS_COLORWRITEENABLE3
] = NINE_STATE_BLEND
,
3729 [D3DRS_BLENDFACTOR
] = NINE_STATE_BLEND_COLOR
,
3730 [D3DRS_SRGBWRITEENABLE
] = NINE_STATE_FB
,
3731 [D3DRS_DEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3732 [D3DRS_WRAP8
] = NINE_STATE_UNHANDLED
, /* cylwrap has to be done via GP */
3733 [D3DRS_WRAP9
] = NINE_STATE_UNHANDLED
,
3734 [D3DRS_WRAP10
] = NINE_STATE_UNHANDLED
,
3735 [D3DRS_WRAP11
] = NINE_STATE_UNHANDLED
,
3736 [D3DRS_WRAP12
] = NINE_STATE_UNHANDLED
,
3737 [D3DRS_WRAP13
] = NINE_STATE_UNHANDLED
,
3738 [D3DRS_WRAP14
] = NINE_STATE_UNHANDLED
,
3739 [D3DRS_WRAP15
] = NINE_STATE_UNHANDLED
,
3740 [D3DRS_SEPARATEALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3741 [D3DRS_SRCBLENDALPHA
] = NINE_STATE_BLEND
,
3742 [D3DRS_DESTBLENDALPHA
] = NINE_STATE_BLEND
,
3743 [D3DRS_BLENDOPALPHA
] = NINE_STATE_BLEND
3749 nine_state_access_transform(struct nine_ff_state
*ff_state
, D3DTRANSFORMSTATETYPE t
,
3752 static D3DMATRIX Identity
= { .m
[0] = { 1, 0, 0, 0 },
3753 .m
[1] = { 0, 1, 0, 0 },
3754 .m
[2] = { 0, 0, 1, 0 },
3755 .m
[3] = { 0, 0, 0, 1 } };
3759 case D3DTS_VIEW
: index
= 0; break;
3760 case D3DTS_PROJECTION
: index
= 1; break;
3761 case D3DTS_TEXTURE0
: index
= 2; break;
3762 case D3DTS_TEXTURE1
: index
= 3; break;
3763 case D3DTS_TEXTURE2
: index
= 4; break;
3764 case D3DTS_TEXTURE3
: index
= 5; break;
3765 case D3DTS_TEXTURE4
: index
= 6; break;
3766 case D3DTS_TEXTURE5
: index
= 7; break;
3767 case D3DTS_TEXTURE6
: index
= 8; break;
3768 case D3DTS_TEXTURE7
: index
= 9; break;
3770 if (!(t
>= D3DTS_WORLDMATRIX(0) && t
<= D3DTS_WORLDMATRIX(255)))
3772 index
= 10 + (t
- D3DTS_WORLDMATRIX(0));
3776 if (index
>= ff_state
->num_transforms
) {
3777 unsigned N
= index
+ 1;
3778 unsigned n
= ff_state
->num_transforms
;
3782 ff_state
->transform
= REALLOC(ff_state
->transform
,
3783 n
* sizeof(D3DMATRIX
),
3784 N
* sizeof(D3DMATRIX
));
3786 ff_state
->transform
[n
] = Identity
;
3787 ff_state
->num_transforms
= N
;
3789 return &ff_state
->transform
[index
];
3793 nine_state_set_light(struct nine_ff_state
*ff_state
, DWORD Index
,
3794 const D3DLIGHT9
*pLight
)
3796 if (Index
>= ff_state
->num_lights
) {
3797 unsigned n
= ff_state
->num_lights
;
3798 unsigned N
= Index
+ 1;
3800 ff_state
->light
= REALLOC(ff_state
->light
, n
* sizeof(D3DLIGHT9
),
3801 N
* sizeof(D3DLIGHT9
));
3802 if (!ff_state
->light
)
3803 return E_OUTOFMEMORY
;
3804 ff_state
->num_lights
= N
;
3806 for (; n
< Index
; ++n
) {
3807 memset(&ff_state
->light
[n
], 0, sizeof(D3DLIGHT9
));
3808 ff_state
->light
[n
].Type
= (D3DLIGHTTYPE
)NINED3DLIGHT_INVALID
;
3811 ff_state
->light
[Index
] = *pLight
;
3813 if (pLight
->Type
== D3DLIGHT_SPOT
&& pLight
->Theta
>= pLight
->Phi
) {
3814 DBG("Warning: clamping D3DLIGHT9.Theta\n");
3815 ff_state
->light
[Index
].Theta
= ff_state
->light
[Index
].Phi
;
3821 nine_state_light_enable(struct nine_ff_state
*ff_state
, uint32_t *change_group
,
3822 DWORD Index
, BOOL Enable
)
3826 user_assert(Index
< ff_state
->num_lights
, D3DERR_INVALIDCALL
);
3828 for (i
= 0; i
< ff_state
->num_lights_active
; ++i
) {
3829 if (ff_state
->active_light
[i
] == Index
)
3834 if (i
< ff_state
->num_lights_active
)
3836 /* XXX wine thinks this should still succeed:
3838 user_assert(i
< NINE_MAX_LIGHTS_ACTIVE
, D3DERR_INVALIDCALL
);
3840 ff_state
->active_light
[i
] = Index
;
3841 ff_state
->num_lights_active
++;
3843 if (i
== ff_state
->num_lights_active
)
3845 --ff_state
->num_lights_active
;
3846 for (; i
< ff_state
->num_lights_active
; ++i
)
3847 ff_state
->active_light
[i
] = ff_state
->active_light
[i
+ 1];
3850 *change_group
|= NINE_STATE_FF_LIGHTING
;
3855 #define D3DRS_TO_STRING_CASE(n) case D3DRS_##n: return "D3DRS_"#n
3856 const char *nine_d3drs_to_string(DWORD State
)
3859 D3DRS_TO_STRING_CASE(ZENABLE
);
3860 D3DRS_TO_STRING_CASE(FILLMODE
);
3861 D3DRS_TO_STRING_CASE(SHADEMODE
);
3862 D3DRS_TO_STRING_CASE(ZWRITEENABLE
);
3863 D3DRS_TO_STRING_CASE(ALPHATESTENABLE
);
3864 D3DRS_TO_STRING_CASE(LASTPIXEL
);
3865 D3DRS_TO_STRING_CASE(SRCBLEND
);
3866 D3DRS_TO_STRING_CASE(DESTBLEND
);
3867 D3DRS_TO_STRING_CASE(CULLMODE
);
3868 D3DRS_TO_STRING_CASE(ZFUNC
);
3869 D3DRS_TO_STRING_CASE(ALPHAREF
);
3870 D3DRS_TO_STRING_CASE(ALPHAFUNC
);
3871 D3DRS_TO_STRING_CASE(DITHERENABLE
);
3872 D3DRS_TO_STRING_CASE(ALPHABLENDENABLE
);
3873 D3DRS_TO_STRING_CASE(FOGENABLE
);
3874 D3DRS_TO_STRING_CASE(SPECULARENABLE
);
3875 D3DRS_TO_STRING_CASE(FOGCOLOR
);
3876 D3DRS_TO_STRING_CASE(FOGTABLEMODE
);
3877 D3DRS_TO_STRING_CASE(FOGSTART
);
3878 D3DRS_TO_STRING_CASE(FOGEND
);
3879 D3DRS_TO_STRING_CASE(FOGDENSITY
);
3880 D3DRS_TO_STRING_CASE(RANGEFOGENABLE
);
3881 D3DRS_TO_STRING_CASE(STENCILENABLE
);
3882 D3DRS_TO_STRING_CASE(STENCILFAIL
);
3883 D3DRS_TO_STRING_CASE(STENCILZFAIL
);
3884 D3DRS_TO_STRING_CASE(STENCILPASS
);
3885 D3DRS_TO_STRING_CASE(STENCILFUNC
);
3886 D3DRS_TO_STRING_CASE(STENCILREF
);
3887 D3DRS_TO_STRING_CASE(STENCILMASK
);
3888 D3DRS_TO_STRING_CASE(STENCILWRITEMASK
);
3889 D3DRS_TO_STRING_CASE(TEXTUREFACTOR
);
3890 D3DRS_TO_STRING_CASE(WRAP0
);
3891 D3DRS_TO_STRING_CASE(WRAP1
);
3892 D3DRS_TO_STRING_CASE(WRAP2
);
3893 D3DRS_TO_STRING_CASE(WRAP3
);
3894 D3DRS_TO_STRING_CASE(WRAP4
);
3895 D3DRS_TO_STRING_CASE(WRAP5
);
3896 D3DRS_TO_STRING_CASE(WRAP6
);
3897 D3DRS_TO_STRING_CASE(WRAP7
);
3898 D3DRS_TO_STRING_CASE(CLIPPING
);
3899 D3DRS_TO_STRING_CASE(LIGHTING
);
3900 D3DRS_TO_STRING_CASE(AMBIENT
);
3901 D3DRS_TO_STRING_CASE(FOGVERTEXMODE
);
3902 D3DRS_TO_STRING_CASE(COLORVERTEX
);
3903 D3DRS_TO_STRING_CASE(LOCALVIEWER
);
3904 D3DRS_TO_STRING_CASE(NORMALIZENORMALS
);
3905 D3DRS_TO_STRING_CASE(DIFFUSEMATERIALSOURCE
);
3906 D3DRS_TO_STRING_CASE(SPECULARMATERIALSOURCE
);
3907 D3DRS_TO_STRING_CASE(AMBIENTMATERIALSOURCE
);
3908 D3DRS_TO_STRING_CASE(EMISSIVEMATERIALSOURCE
);
3909 D3DRS_TO_STRING_CASE(VERTEXBLEND
);
3910 D3DRS_TO_STRING_CASE(CLIPPLANEENABLE
);
3911 D3DRS_TO_STRING_CASE(POINTSIZE
);
3912 D3DRS_TO_STRING_CASE(POINTSIZE_MIN
);
3913 D3DRS_TO_STRING_CASE(POINTSPRITEENABLE
);
3914 D3DRS_TO_STRING_CASE(POINTSCALEENABLE
);
3915 D3DRS_TO_STRING_CASE(POINTSCALE_A
);
3916 D3DRS_TO_STRING_CASE(POINTSCALE_B
);
3917 D3DRS_TO_STRING_CASE(POINTSCALE_C
);
3918 D3DRS_TO_STRING_CASE(MULTISAMPLEANTIALIAS
);
3919 D3DRS_TO_STRING_CASE(MULTISAMPLEMASK
);
3920 D3DRS_TO_STRING_CASE(PATCHEDGESTYLE
);
3921 D3DRS_TO_STRING_CASE(DEBUGMONITORTOKEN
);
3922 D3DRS_TO_STRING_CASE(POINTSIZE_MAX
);
3923 D3DRS_TO_STRING_CASE(INDEXEDVERTEXBLENDENABLE
);
3924 D3DRS_TO_STRING_CASE(COLORWRITEENABLE
);
3925 D3DRS_TO_STRING_CASE(TWEENFACTOR
);
3926 D3DRS_TO_STRING_CASE(BLENDOP
);
3927 D3DRS_TO_STRING_CASE(POSITIONDEGREE
);
3928 D3DRS_TO_STRING_CASE(NORMALDEGREE
);
3929 D3DRS_TO_STRING_CASE(SCISSORTESTENABLE
);
3930 D3DRS_TO_STRING_CASE(SLOPESCALEDEPTHBIAS
);
3931 D3DRS_TO_STRING_CASE(ANTIALIASEDLINEENABLE
);
3932 D3DRS_TO_STRING_CASE(MINTESSELLATIONLEVEL
);
3933 D3DRS_TO_STRING_CASE(MAXTESSELLATIONLEVEL
);
3934 D3DRS_TO_STRING_CASE(ADAPTIVETESS_X
);
3935 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Y
);
3936 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Z
);
3937 D3DRS_TO_STRING_CASE(ADAPTIVETESS_W
);
3938 D3DRS_TO_STRING_CASE(ENABLEADAPTIVETESSELLATION
);
3939 D3DRS_TO_STRING_CASE(TWOSIDEDSTENCILMODE
);
3940 D3DRS_TO_STRING_CASE(CCW_STENCILFAIL
);
3941 D3DRS_TO_STRING_CASE(CCW_STENCILZFAIL
);
3942 D3DRS_TO_STRING_CASE(CCW_STENCILPASS
);
3943 D3DRS_TO_STRING_CASE(CCW_STENCILFUNC
);
3944 D3DRS_TO_STRING_CASE(COLORWRITEENABLE1
);
3945 D3DRS_TO_STRING_CASE(COLORWRITEENABLE2
);
3946 D3DRS_TO_STRING_CASE(COLORWRITEENABLE3
);
3947 D3DRS_TO_STRING_CASE(BLENDFACTOR
);
3948 D3DRS_TO_STRING_CASE(SRGBWRITEENABLE
);
3949 D3DRS_TO_STRING_CASE(DEPTHBIAS
);
3950 D3DRS_TO_STRING_CASE(WRAP8
);
3951 D3DRS_TO_STRING_CASE(WRAP9
);
3952 D3DRS_TO_STRING_CASE(WRAP10
);
3953 D3DRS_TO_STRING_CASE(WRAP11
);
3954 D3DRS_TO_STRING_CASE(WRAP12
);
3955 D3DRS_TO_STRING_CASE(WRAP13
);
3956 D3DRS_TO_STRING_CASE(WRAP14
);
3957 D3DRS_TO_STRING_CASE(WRAP15
);
3958 D3DRS_TO_STRING_CASE(SEPARATEALPHABLENDENABLE
);
3959 D3DRS_TO_STRING_CASE(SRCBLENDALPHA
);
3960 D3DRS_TO_STRING_CASE(DESTBLENDALPHA
);
3961 D3DRS_TO_STRING_CASE(BLENDOPALPHA
);