2 * Copyright 2011 Joakim Sindholt <opensource@zhasha.com>
3 * Copyright 2013 Christoph Bumiller
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
27 #include "swapchain9.h"
28 #include "basetexture9.h"
30 #include "indexbuffer9.h"
32 #include "vertexbuffer9.h"
33 #include "vertexdeclaration9.h"
34 #include "vertexshader9.h"
35 #include "pixelshader9.h"
36 #include "nine_pipe.h"
38 #include "nine_limits.h"
39 #include "pipe/p_context.h"
40 #include "pipe/p_state.h"
41 #include "cso_cache/cso_context.h"
42 #include "util/u_atomic.h"
43 #include "util/u_upload_mgr.h"
44 #include "util/u_math.h"
45 #include "util/u_box.h"
46 #include "util/u_simple_shaders.h"
47 #include "util/u_gen_mipmap.h"
50 #include "nine_queue.h"
51 #include "nine_csmt_helper.h"
52 #include "os/os_thread.h"
54 #define DBG_CHANNEL DBG_DEVICE
58 struct csmt_instruction
{
59 int (* func
)(struct NineDevice9
*This
, struct csmt_instruction
*instr
);
64 struct nine_queue_pool
* pool
;
66 cnd_t event_processed
;
67 mtx_t mutex_processed
;
68 struct NineDevice9
*device
;
76 /* Wait for instruction to be processed.
77 * Caller has to ensure that only one thread waits at time.
80 nine_csmt_wait_processed(struct csmt_context
*ctx
)
82 mtx_lock(&ctx
->mutex_processed
);
83 while (!p_atomic_read(&ctx
->processed
)) {
84 cnd_wait(&ctx
->event_processed
, &ctx
->mutex_processed
);
86 mtx_unlock(&ctx
->mutex_processed
);
89 /* CSMT worker thread */
92 nine_csmt_worker(void *arg
)
94 struct csmt_context
*ctx
= arg
;
95 struct csmt_instruction
*instr
;
96 DBG("CSMT worker spawned\n");
98 u_thread_setname("CSMT-Worker");
101 nine_queue_wait_flush(ctx
->pool
);
102 mtx_lock(&ctx
->thread_running
);
104 /* Get instruction. NULL on empty cmdbuf. */
105 while (!p_atomic_read(&ctx
->terminate
) &&
106 (instr
= (struct csmt_instruction
*)nine_queue_get(ctx
->pool
))) {
109 if (instr
->func(ctx
->device
, instr
)) {
110 mtx_lock(&ctx
->mutex_processed
);
111 p_atomic_set(&ctx
->processed
, TRUE
);
112 cnd_signal(&ctx
->event_processed
);
113 mtx_unlock(&ctx
->mutex_processed
);
115 if (p_atomic_read(&ctx
->toPause
)) {
116 mtx_unlock(&ctx
->thread_running
);
117 /* will wait here the thread can be resumed */
118 mtx_lock(&ctx
->thread_resume
);
119 mtx_lock(&ctx
->thread_running
);
120 mtx_unlock(&ctx
->thread_resume
);
124 mtx_unlock(&ctx
->thread_running
);
125 if (p_atomic_read(&ctx
->terminate
)) {
126 mtx_lock(&ctx
->mutex_processed
);
127 p_atomic_set(&ctx
->processed
, TRUE
);
128 cnd_signal(&ctx
->event_processed
);
129 mtx_unlock(&ctx
->mutex_processed
);
134 DBG("CSMT worker destroyed\n");
138 /* Create a CSMT context.
139 * Spawns a worker thread.
141 struct csmt_context
*
142 nine_csmt_create( struct NineDevice9
*This
)
144 struct csmt_context
*ctx
;
146 ctx
= CALLOC_STRUCT(csmt_context
);
150 ctx
->pool
= nine_queue_create();
155 cnd_init(&ctx
->event_processed
);
156 (void) mtx_init(&ctx
->mutex_processed
, mtx_plain
);
157 (void) mtx_init(&ctx
->thread_running
, mtx_plain
);
158 (void) mtx_init(&ctx
->thread_resume
, mtx_plain
);
160 #if defined(DEBUG) || !defined(NDEBUG)
161 u_thread_setname("Main thread");
166 ctx
->worker
= u_thread_create(nine_csmt_worker
, ctx
);
168 nine_queue_delete(ctx
->pool
);
173 DBG("Returning context %p\n", ctx
);
/* No-op instruction used purely as a synchronization point: it does
 * nothing but returns 1 so the worker signals event_processed. */
static int
nop_func( struct NineDevice9 *This, struct csmt_instruction *instr )
{
    (void) This;
    (void) instr;
    return 1;
}
187 /* Push nop instruction and flush the queue.
188 * Waits for the worker to complete. */
190 nine_csmt_process( struct NineDevice9
*device
)
192 struct csmt_instruction
* instr
;
193 struct csmt_context
*ctx
= device
->csmt_ctx
;
195 if (!device
->csmt_active
)
198 if (nine_queue_isempty(ctx
->pool
))
201 DBG("device=%p\n", device
);
204 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
206 instr
->func
= nop_func
;
208 p_atomic_set(&ctx
->processed
, FALSE
);
209 nine_queue_flush(ctx
->pool
);
211 nine_csmt_wait_processed(ctx
);
214 /* Destroys a CSMT context.
215 * Waits for the worker thread to terminate.
218 nine_csmt_destroy( struct NineDevice9
*device
, struct csmt_context
*ctx
)
220 struct csmt_instruction
* instr
;
221 thrd_t render_thread
= ctx
->worker
;
223 DBG("device=%p ctx=%p\n", device
, ctx
);
225 /* Push nop and flush the queue. */
226 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
228 instr
->func
= nop_func
;
230 p_atomic_set(&ctx
->processed
, FALSE
);
231 /* Signal worker to terminate. */
232 p_atomic_set(&ctx
->terminate
, TRUE
);
233 nine_queue_flush(ctx
->pool
);
235 nine_csmt_wait_processed(ctx
);
236 nine_queue_delete(ctx
->pool
);
238 mtx_destroy(&ctx
->thread_resume
);
239 mtx_destroy(&ctx
->thread_running
);
241 mtx_destroy(&ctx
->mutex_processed
);
242 cnd_destroy(&ctx
->event_processed
);
246 thrd_join(render_thread
, NULL
);
250 nine_csmt_pause( struct NineDevice9
*device
)
252 struct csmt_context
*ctx
= device
->csmt_ctx
;
254 if (!device
->csmt_active
)
257 /* No need to pause the thread */
258 if (nine_queue_no_flushed_work(ctx
->pool
))
261 mtx_lock(&ctx
->thread_resume
);
262 p_atomic_set(&ctx
->toPause
, TRUE
);
264 /* Wait the thread is paused */
265 mtx_lock(&ctx
->thread_running
);
266 ctx
->hasPaused
= TRUE
;
267 p_atomic_set(&ctx
->toPause
, FALSE
);
271 nine_csmt_resume( struct NineDevice9
*device
)
273 struct csmt_context
*ctx
= device
->csmt_ctx
;
275 if (!device
->csmt_active
)
281 ctx
->hasPaused
= FALSE
;
282 mtx_unlock(&ctx
->thread_running
);
283 mtx_unlock(&ctx
->thread_resume
);
286 struct pipe_context
*
287 nine_context_get_pipe( struct NineDevice9
*device
)
289 nine_csmt_process(device
);
290 return device
->context
.pipe
;
293 struct pipe_context
*
294 nine_context_get_pipe_multithread( struct NineDevice9
*device
)
296 struct csmt_context
*ctx
= device
->csmt_ctx
;
298 if (!device
->csmt_active
)
299 return device
->context
.pipe
;
301 if (!u_thread_is_self(ctx
->worker
))
302 nine_csmt_process(device
);
304 return device
->context
.pipe
;
307 struct pipe_context
*
308 nine_context_get_pipe_acquire( struct NineDevice9
*device
)
310 nine_csmt_pause(device
);
311 return device
->context
.pipe
;
/* Release a pipe acquired with nine_context_get_pipe_acquire(). */
void
nine_context_get_pipe_release( struct NineDevice9 *device )
{
    nine_csmt_resume(device);
}
320 /* Nine state functions */
322 /* Check if some states need to be set dirty */
325 check_multisample(struct NineDevice9
*device
)
327 DWORD
*rs
= device
->context
.rs
;
328 DWORD new_value
= (rs
[D3DRS_ZENABLE
] || rs
[D3DRS_STENCILENABLE
]) &&
329 device
->context
.rt
[0]->desc
.MultiSampleType
>= 1 &&
330 rs
[D3DRS_MULTISAMPLEANTIALIAS
];
331 if (rs
[NINED3DRS_MULTISAMPLE
] != new_value
) {
332 rs
[NINED3DRS_MULTISAMPLE
] = new_value
;
333 return NINE_STATE_RASTERIZER
;
338 /* State preparation only */
341 prepare_blend(struct NineDevice9
*device
)
343 nine_convert_blend_state(&device
->context
.pipe_data
.blend
, device
->context
.rs
);
344 device
->context
.commit
|= NINE_STATE_COMMIT_BLEND
;
348 prepare_dsa(struct NineDevice9
*device
)
350 nine_convert_dsa_state(&device
->context
.pipe_data
.dsa
, device
->context
.rs
);
351 device
->context
.commit
|= NINE_STATE_COMMIT_DSA
;
355 prepare_rasterizer(struct NineDevice9
*device
)
357 nine_convert_rasterizer_state(device
, &device
->context
.pipe_data
.rast
, device
->context
.rs
);
358 device
->context
.commit
|= NINE_STATE_COMMIT_RASTERIZER
;
362 prepare_vs_constants_userbuf_swvp(struct NineDevice9
*device
)
364 struct nine_context
*context
= &device
->context
;
366 if (context
->changed
.vs_const_f
|| context
->changed
.group
& NINE_STATE_SWVP
) {
367 struct pipe_constant_buffer cb
;
369 cb
.buffer_offset
= 0;
370 cb
.buffer_size
= 4096 * sizeof(float[4]);
371 cb
.user_buffer
= context
->vs_const_f_swvp
;
373 if (context
->vs
->lconstf
.ranges
) {
374 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
375 const struct nine_range
*r
= lconstf
->ranges
;
377 float *dst
= context
->vs_lconstf_temp
;
378 float *src
= (float *)cb
.user_buffer
;
379 memcpy(dst
, src
, cb
.buffer_size
);
382 unsigned c
= r
->end
- r
->bgn
;
383 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
387 cb
.user_buffer
= dst
;
390 context
->pipe_data
.cb0_swvp
.buffer_offset
= cb
.buffer_offset
;
391 context
->pipe_data
.cb0_swvp
.buffer_size
= cb
.buffer_size
;
392 context
->pipe_data
.cb0_swvp
.user_buffer
= cb
.user_buffer
;
394 cb
.user_buffer
= (char *)cb
.user_buffer
+ 4096 * sizeof(float[4]);
395 context
->pipe_data
.cb1_swvp
.buffer_offset
= cb
.buffer_offset
;
396 context
->pipe_data
.cb1_swvp
.buffer_size
= cb
.buffer_size
;
397 context
->pipe_data
.cb1_swvp
.user_buffer
= cb
.user_buffer
;
399 context
->changed
.vs_const_f
= 0;
402 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
403 struct pipe_constant_buffer cb
;
405 cb
.buffer_offset
= 0;
406 cb
.buffer_size
= 2048 * sizeof(float[4]);
407 cb
.user_buffer
= context
->vs_const_i
;
409 context
->pipe_data
.cb2_swvp
.buffer_offset
= cb
.buffer_offset
;
410 context
->pipe_data
.cb2_swvp
.buffer_size
= cb
.buffer_size
;
411 context
->pipe_data
.cb2_swvp
.user_buffer
= cb
.user_buffer
;
412 context
->changed
.vs_const_i
= 0;
415 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
416 struct pipe_constant_buffer cb
;
418 cb
.buffer_offset
= 0;
419 cb
.buffer_size
= 512 * sizeof(float[4]);
420 cb
.user_buffer
= context
->vs_const_b
;
422 context
->pipe_data
.cb3_swvp
.buffer_offset
= cb
.buffer_offset
;
423 context
->pipe_data
.cb3_swvp
.buffer_size
= cb
.buffer_size
;
424 context
->pipe_data
.cb3_swvp
.user_buffer
= cb
.user_buffer
;
425 context
->changed
.vs_const_b
= 0;
428 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
429 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
433 prepare_vs_constants_userbuf(struct NineDevice9
*device
)
435 struct nine_context
*context
= &device
->context
;
436 uint8_t *upload_ptr
= NULL
;
437 struct pipe_constant_buffer cb
;
439 cb
.buffer_offset
= 0;
440 cb
.buffer_size
= context
->cso_shader
.vs_const_used_size
;
441 cb
.user_buffer
= context
->vs_const_f
;
444 prepare_vs_constants_userbuf_swvp(device
);
448 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
449 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
450 memcpy(idst
, context
->vs_const_i
, NINE_MAX_CONST_I
* sizeof(int[4]));
451 context
->changed
.vs_const_i
= 0;
454 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
455 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
456 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
457 memcpy(bdst
, context
->vs_const_b
, NINE_MAX_CONST_B
* sizeof(BOOL
));
458 context
->changed
.vs_const_b
= 0;
464 if (context
->vs
->lconstf
.ranges
) {
465 /* TODO: Can we make it so that we don't have to copy everything ? */
466 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
467 const struct nine_range
*r
= lconstf
->ranges
;
469 float *dst
= context
->vs_lconstf_temp
;
470 float *src
= (float *)cb
.user_buffer
;
471 memcpy(dst
, src
, cb
.buffer_size
);
474 unsigned c
= r
->end
- r
->bgn
;
475 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
479 cb
.user_buffer
= dst
;
482 /* Note: We probably don't want to do separate memcpy to
483 * upload_ptr directly, if we have to copy some constants
484 * at random locations (context->vs->lconstf.ranges),
485 * to have efficient WC. Thus for this case we really want
486 * that intermediate buffer. */
488 u_upload_alloc(context
->pipe
->const_uploader
,
491 256, /* Be conservative about alignment */
494 (void**)&upload_ptr
);
496 assert(cb
.buffer
&& upload_ptr
);
498 if (!context
->cso_shader
.vs_const_ranges
) {
499 memcpy(upload_ptr
, cb
.user_buffer
, cb
.buffer_size
);
503 while (context
->cso_shader
.vs_const_ranges
[i
*2+1] != 0) {
504 memcpy(upload_ptr
+offset
,
505 &((float*)cb
.user_buffer
)[4*context
->cso_shader
.vs_const_ranges
[i
*2]],
506 context
->cso_shader
.vs_const_ranges
[i
*2+1] * sizeof(float[4]));
507 offset
+= context
->cso_shader
.vs_const_ranges
[i
*2+1] * sizeof(float[4]);
512 u_upload_unmap(context
->pipe
->const_uploader
);
513 cb
.user_buffer
= NULL
;
515 /* Free previous resource */
516 pipe_resource_reference(&context
->pipe_data
.cb_vs
.buffer
, NULL
);
518 context
->pipe_data
.cb_vs
= cb
;
519 context
->changed
.vs_const_f
= 0;
521 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
522 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
526 prepare_ps_constants_userbuf(struct NineDevice9
*device
)
528 struct nine_context
*context
= &device
->context
;
529 uint8_t *upload_ptr
= NULL
;
530 struct pipe_constant_buffer cb
;
532 cb
.buffer_offset
= 0;
533 cb
.buffer_size
= context
->cso_shader
.ps_const_used_size
;
534 cb
.user_buffer
= context
->ps_const_f
;
536 if (context
->changed
.ps_const_i
) {
537 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
538 memcpy(idst
, context
->ps_const_i
, sizeof(context
->ps_const_i
));
539 context
->changed
.ps_const_i
= 0;
541 if (context
->changed
.ps_const_b
) {
542 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
543 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
544 memcpy(bdst
, context
->ps_const_b
, sizeof(context
->ps_const_b
));
545 context
->changed
.ps_const_b
= 0;
548 /* Upload special constants needed to implement PS1.x instructions like TEXBEM,TEXBEML and BEM */
549 if (context
->ps
->bumpenvmat_needed
) {
550 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, 8 * sizeof(float[4]));
551 memcpy(&context
->ps_lconstf_temp
[4 * 8], &device
->context
.bumpmap_vars
, sizeof(device
->context
.bumpmap_vars
));
553 cb
.user_buffer
= context
->ps_lconstf_temp
;
556 if (context
->ps
->byte_code
.version
< 0x30 &&
557 context
->rs
[D3DRS_FOGENABLE
]) {
558 float *dst
= &context
->ps_lconstf_temp
[4 * 32];
559 if (cb
.user_buffer
!= context
->ps_lconstf_temp
) {
560 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, 32 * sizeof(float[4]));
561 cb
.user_buffer
= context
->ps_lconstf_temp
;
564 d3dcolor_to_rgba(dst
, context
->rs
[D3DRS_FOGCOLOR
]);
565 if (context
->rs
[D3DRS_FOGTABLEMODE
] == D3DFOG_LINEAR
) {
566 dst
[4] = asfloat(context
->rs
[D3DRS_FOGEND
]);
567 dst
[5] = 1.0f
/ (asfloat(context
->rs
[D3DRS_FOGEND
]) - asfloat(context
->rs
[D3DRS_FOGSTART
]));
568 } else if (context
->rs
[D3DRS_FOGTABLEMODE
] != D3DFOG_NONE
) {
569 dst
[4] = asfloat(context
->rs
[D3DRS_FOGDENSITY
]);
576 u_upload_alloc(context
->pipe
->const_uploader
,
579 256, /* Be conservative about alignment */
582 (void**)&upload_ptr
);
584 assert(cb
.buffer
&& upload_ptr
);
586 if (!context
->cso_shader
.ps_const_ranges
) {
587 memcpy(upload_ptr
, cb
.user_buffer
, cb
.buffer_size
);
591 while (context
->cso_shader
.ps_const_ranges
[i
*2+1] != 0) {
592 memcpy(upload_ptr
+offset
,
593 &((float*)cb
.user_buffer
)[4*context
->cso_shader
.ps_const_ranges
[i
*2]],
594 context
->cso_shader
.ps_const_ranges
[i
*2+1] * sizeof(float[4]));
595 offset
+= context
->cso_shader
.ps_const_ranges
[i
*2+1] * sizeof(float[4]);
600 u_upload_unmap(context
->pipe
->const_uploader
);
601 cb
.user_buffer
= NULL
;
603 /* Free previous resource */
604 pipe_resource_reference(&context
->pipe_data
.cb_ps
.buffer
, NULL
);
606 context
->pipe_data
.cb_ps
= cb
;
607 context
->changed
.ps_const_f
= 0;
609 context
->changed
.group
&= ~NINE_STATE_PS_CONST
;
610 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
613 static inline uint32_t
614 prepare_vs(struct NineDevice9
*device
, uint8_t shader_changed
)
616 struct nine_context
*context
= &device
->context
;
617 struct NineVertexShader9
*vs
= context
->vs
;
618 uint32_t changed_group
= 0;
619 int has_key_changed
= 0;
621 if (likely(context
->programmable_vs
))
622 has_key_changed
= NineVertexShader9_UpdateKey(vs
, device
);
624 if (!shader_changed
&& !has_key_changed
)
627 /* likely because we dislike FF */
628 if (likely(context
->programmable_vs
)) {
629 context
->cso_shader
.vs
= NineVertexShader9_GetVariant(vs
,
630 &context
->cso_shader
.vs_const_ranges
,
631 &context
->cso_shader
.vs_const_used_size
);
634 context
->cso_shader
.vs
= vs
->ff_cso
;
637 if (context
->rs
[NINED3DRS_VSPOINTSIZE
] != vs
->point_size
) {
638 context
->rs
[NINED3DRS_VSPOINTSIZE
] = vs
->point_size
;
639 changed_group
|= NINE_STATE_RASTERIZER
;
642 if ((context
->bound_samplers_mask_vs
& vs
->sampler_mask
) != vs
->sampler_mask
)
643 /* Bound dummy sampler. */
644 changed_group
|= NINE_STATE_SAMPLER
;
646 context
->commit
|= NINE_STATE_COMMIT_VS
;
647 return changed_group
;
650 static inline uint32_t
651 prepare_ps(struct NineDevice9
*device
, uint8_t shader_changed
)
653 struct nine_context
*context
= &device
->context
;
654 struct NinePixelShader9
*ps
= context
->ps
;
655 uint32_t changed_group
= 0;
656 int has_key_changed
= 0;
659 has_key_changed
= NinePixelShader9_UpdateKey(ps
, context
);
661 if (!shader_changed
&& !has_key_changed
)
665 context
->cso_shader
.ps
= NinePixelShader9_GetVariant(ps
,
666 &context
->cso_shader
.ps_const_ranges
,
667 &context
->cso_shader
.ps_const_used_size
);
670 context
->cso_shader
.ps
= ps
->ff_cso
;
673 if ((context
->bound_samplers_mask_ps
& ps
->sampler_mask
) != ps
->sampler_mask
)
674 /* Bound dummy sampler. */
675 changed_group
|= NINE_STATE_SAMPLER
;
677 context
->commit
|= NINE_STATE_COMMIT_PS
;
678 return changed_group
;
681 /* State preparation incremental */
683 /* State preparation + State commit */
686 update_framebuffer(struct NineDevice9
*device
, bool is_clear
)
688 struct nine_context
*context
= &device
->context
;
689 struct pipe_context
*pipe
= context
->pipe
;
690 struct pipe_framebuffer_state
*fb
= &context
->pipe_data
.fb
;
692 struct NineSurface9
*rt0
= context
->rt
[0];
693 unsigned w
= rt0
->desc
.Width
;
694 unsigned h
= rt0
->desc
.Height
;
695 unsigned nr_samples
= rt0
->base
.info
.nr_samples
;
696 unsigned ps_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
697 unsigned mask
= is_clear
? 0xf : ps_mask
;
698 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
702 context
->rt_mask
= 0x0;
705 /* all render targets must have the same size and the depth buffer must be
706 * bigger. Multisample has to match, according to spec. But some apps do
707 * things wrong there, and no error is returned. The behaviour they get
708 * apparently is that depth buffer is disabled if it doesn't match.
709 * Surely the same for render targets. */
711 /* Special case: D3DFMT_NULL is used to bound no real render target,
712 * but render to depth buffer. We have to not take into account the render
713 * target info. TODO: know what should happen when there are several render targers
714 * and the first one is D3DFMT_NULL */
715 if (rt0
->desc
.Format
== D3DFMT_NULL
&& context
->ds
) {
716 w
= context
->ds
->desc
.Width
;
717 h
= context
->ds
->desc
.Height
;
718 nr_samples
= context
->ds
->base
.info
.nr_samples
;
721 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
722 struct NineSurface9
*rt
= context
->rt
[i
];
724 if (rt
&& rt
->desc
.Format
!= D3DFMT_NULL
&& (mask
& (1 << i
)) &&
725 rt
->desc
.Width
== w
&& rt
->desc
.Height
== h
&&
726 rt
->base
.info
.nr_samples
== nr_samples
) {
727 fb
->cbufs
[i
] = NineSurface9_GetSurface(rt
, sRGB
);
728 context
->rt_mask
|= 1 << i
;
729 fb
->nr_cbufs
= i
+ 1;
731 /* Color outputs must match RT slot,
732 * drivers will have to handle NULL entries for GL, too.
738 if (context
->ds
&& context
->ds
->desc
.Width
>= w
&&
739 context
->ds
->desc
.Height
>= h
&&
740 context
->ds
->base
.info
.nr_samples
== nr_samples
) {
741 fb
->zsbuf
= NineSurface9_GetSurface(context
->ds
, 0);
749 pipe
->set_framebuffer_state(pipe
, fb
); /* XXX: cso ? */
751 if (is_clear
&& context
->rt_mask
== ps_mask
)
752 context
->changed
.group
&= ~NINE_STATE_FB
;
756 update_viewport(struct NineDevice9
*device
)
758 struct nine_context
*context
= &device
->context
;
759 const D3DVIEWPORT9
*vport
= &context
->viewport
;
760 struct pipe_viewport_state pvport
;
762 /* D3D coordinates are:
763 * -1 .. +1 for X,Y and
764 * 0 .. +1 for Z (we use pipe_rasterizer_state.clip_halfz)
766 pvport
.scale
[0] = (float)vport
->Width
* 0.5f
;
767 pvport
.scale
[1] = (float)vport
->Height
* -0.5f
;
768 pvport
.scale
[2] = vport
->MaxZ
- vport
->MinZ
;
769 pvport
.translate
[0] = (float)vport
->Width
* 0.5f
+ (float)vport
->X
;
770 pvport
.translate
[1] = (float)vport
->Height
* 0.5f
+ (float)vport
->Y
;
771 pvport
.translate
[2] = vport
->MinZ
;
773 /* We found R600 and SI cards have some imprecision
774 * on the barycentric coordinates used for interpolation.
775 * Some shaders rely on having something precise.
776 * We found that the proprietary driver has the imprecision issue,
777 * except when the render target width and height are powers of two.
778 * It is using some sort of workaround for these cases
779 * which covers likely all the cases the applications rely
780 * on something precise.
781 * We haven't found the workaround, but it seems like it's better
782 * for applications if the imprecision is biased towards infinity
783 * instead of -infinity (which is what measured). So shift slightly
784 * the viewport: not enough to change rasterization result (in particular
785 * for multisampling), but enough to make the imprecision biased
786 * towards infinity. We do this shift only if render target width and
787 * height are powers of two.
788 * Solves 'red shadows' bug on UE3 games.
790 if (device
->driver_bugs
.buggy_barycentrics
&&
791 ((vport
->Width
& (vport
->Width
-1)) == 0) &&
792 ((vport
->Height
& (vport
->Height
-1)) == 0)) {
793 pvport
.translate
[0] -= 1.0f
/ 128.0f
;
794 pvport
.translate
[1] -= 1.0f
/ 128.0f
;
797 cso_set_viewport(context
->cso
, &pvport
);
800 /* Loop through VS inputs and pick the vertex elements with the declared
801 * usage from the vertex declaration, then insert the instance divisor from
802 * the stream source frequency setting.
805 update_vertex_elements(struct NineDevice9
*device
)
807 struct nine_context
*context
= &device
->context
;
808 const struct NineVertexDeclaration9
*vdecl
= device
->context
.vdecl
;
809 const struct NineVertexShader9
*vs
;
812 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
813 char used_streams
[device
->caps
.MaxStreams
];
814 int dummy_vbo_stream
= -1;
815 BOOL need_dummy_vbo
= FALSE
;
816 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
818 context
->stream_usage_mask
= 0;
819 memset(vdecl_index_map
, -1, 16);
820 memset(used_streams
, 0, device
->caps
.MaxStreams
);
821 vs
= context
->programmable_vs
? context
->vs
: device
->ff
.vs
;
824 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
825 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
826 n
, vs
->input_map
[n
].ndecl
, vdecl
);
828 for (i
= 0; i
< vdecl
->nelems
; i
++) {
829 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
830 vdecl_index_map
[n
] = i
;
831 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
835 if (vdecl_index_map
[n
] < 0)
836 need_dummy_vbo
= TRUE
;
839 /* No vertex declaration. Likely will never happen in practice,
840 * but we need not crash on this */
841 need_dummy_vbo
= TRUE
;
844 if (need_dummy_vbo
) {
845 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
846 if (!used_streams
[i
]) {
847 dummy_vbo_stream
= i
;
852 /* there are less vertex shader inputs than stream slots,
853 * so if we need a slot for the dummy vbo, we should have found one */
854 assert (!need_dummy_vbo
|| dummy_vbo_stream
!= -1);
856 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
857 index
= vdecl_index_map
[n
];
859 ve
[n
] = vdecl
->elems
[index
];
860 b
= ve
[n
].vertex_buffer_index
;
861 context
->stream_usage_mask
|= 1 << b
;
862 /* XXX wine just uses 1 here: */
863 if (context
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
864 ve
[n
].instance_divisor
= context
->stream_freq
[b
] & 0x7FFFFF;
866 /* if the vertex declaration is incomplete compared to what the
867 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
868 * This is not precised by the spec, but is the behaviour
870 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
871 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
872 ve
[n
].src_offset
= 0;
873 ve
[n
].instance_divisor
= 0;
877 if (context
->dummy_vbo_bound_at
!= dummy_vbo_stream
) {
878 if (context
->dummy_vbo_bound_at
>= 0)
879 context
->changed
.vtxbuf
|= 1 << context
->dummy_vbo_bound_at
;
880 if (dummy_vbo_stream
>= 0) {
881 context
->changed
.vtxbuf
|= 1 << dummy_vbo_stream
;
882 context
->vbo_bound_done
= FALSE
;
884 context
->dummy_vbo_bound_at
= dummy_vbo_stream
;
887 cso_set_vertex_elements(context
->cso
, vs
->num_inputs
, ve
);
891 update_vertex_buffers(struct NineDevice9
*device
)
893 struct nine_context
*context
= &device
->context
;
894 struct pipe_context
*pipe
= context
->pipe
;
895 struct pipe_vertex_buffer dummy_vtxbuf
;
896 uint32_t mask
= context
->changed
.vtxbuf
;
899 DBG("mask=%x\n", mask
);
901 if (context
->dummy_vbo_bound_at
>= 0) {
902 if (!context
->vbo_bound_done
) {
903 dummy_vtxbuf
.buffer
.resource
= device
->dummy_vbo
;
904 dummy_vtxbuf
.stride
= 0;
905 dummy_vtxbuf
.is_user_buffer
= false;
906 dummy_vtxbuf
.buffer_offset
= 0;
907 pipe
->set_vertex_buffers(pipe
, context
->dummy_vbo_bound_at
,
909 context
->vbo_bound_done
= TRUE
;
911 mask
&= ~(1 << context
->dummy_vbo_bound_at
);
914 for (i
= 0; mask
; mask
>>= 1, ++i
) {
916 if (context
->vtxbuf
[i
].buffer
.resource
)
917 pipe
->set_vertex_buffers(pipe
, i
, 1, &context
->vtxbuf
[i
]);
919 pipe
->set_vertex_buffers(pipe
, i
, 1, NULL
);
923 context
->changed
.vtxbuf
= 0;
926 static inline boolean
927 update_sampler_derived(struct nine_context
*context
, unsigned s
)
929 boolean changed
= FALSE
;
931 if (context
->samp
[s
][NINED3DSAMP_SHADOW
] != context
->texture
[s
].shadow
) {
933 context
->samp
[s
][NINED3DSAMP_SHADOW
] = context
->texture
[s
].shadow
;
936 if (context
->samp
[s
][NINED3DSAMP_CUBETEX
] !=
937 (context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
)) {
939 context
->samp
[s
][NINED3DSAMP_CUBETEX
] =
940 context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
;
943 if (context
->samp
[s
][D3DSAMP_MIPFILTER
] != D3DTEXF_NONE
) {
944 int lod
= context
->samp
[s
][D3DSAMP_MAXMIPLEVEL
] - context
->texture
[s
].lod
;
947 if (context
->samp
[s
][NINED3DSAMP_MINLOD
] != lod
) {
949 context
->samp
[s
][NINED3DSAMP_MINLOD
] = lod
;
952 context
->changed
.sampler
[s
] &= ~0x300; /* lod changes irrelevant */
958 /* TODO: add sRGB override to pipe_sampler_state ? */
960 update_textures_and_samplers(struct NineDevice9
*device
)
962 struct nine_context
*context
= &device
->context
;
963 struct pipe_sampler_view
*view
[NINE_MAX_SAMPLERS
];
964 unsigned num_textures
;
966 boolean commit_samplers
;
967 uint16_t sampler_mask
= context
->ps
? context
->ps
->sampler_mask
:
968 device
->ff
.ps
->sampler_mask
;
970 /* TODO: Can we reduce iterations here ? */
972 commit_samplers
= FALSE
;
973 context
->bound_samplers_mask_ps
= 0;
974 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_PS
; ++i
) {
975 const unsigned s
= NINE_SAMPLER_PS(i
);
978 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
983 if (context
->texture
[s
].enabled
) {
984 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
986 view
[i
] = context
->texture
[s
].view
[sRGB
];
987 num_textures
= i
+ 1;
989 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
990 context
->changed
.sampler
[s
] = 0;
991 commit_samplers
= TRUE
;
992 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
995 /* Bind dummy sampler. We do not bind dummy sampler when
996 * it is not needed because it could add overhead. The
997 * dummy sampler should have r=g=b=0 and a=1. We do not
998 * unbind dummy sampler directly when they are not needed
999 * anymore, but they're going to be removed as long as texture
1000 * or sampler states are changed. */
1001 view
[i
] = device
->dummy_sampler_view
;
1002 num_textures
= i
+ 1;
1004 cso_single_sampler(context
->cso
, PIPE_SHADER_FRAGMENT
,
1005 s
- NINE_SAMPLER_PS(0), &device
->dummy_sampler_state
);
1007 commit_samplers
= TRUE
;
1008 context
->changed
.sampler
[s
] = ~0;
1011 context
->bound_samplers_mask_ps
|= (1 << s
);
1014 cso_set_sampler_views(context
->cso
, PIPE_SHADER_FRAGMENT
, num_textures
, view
);
1016 if (commit_samplers
)
1017 cso_single_sampler_done(context
->cso
, PIPE_SHADER_FRAGMENT
);
1019 commit_samplers
= FALSE
;
1020 sampler_mask
= context
->programmable_vs
? context
->vs
->sampler_mask
: 0;
1021 context
->bound_samplers_mask_vs
= 0;
1022 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_VS
; ++i
) {
1023 const unsigned s
= NINE_SAMPLER_VS(i
);
1026 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
1031 if (context
->texture
[s
].enabled
) {
1032 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
1034 view
[i
] = context
->texture
[s
].view
[sRGB
];
1035 num_textures
= i
+ 1;
1037 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
1038 context
->changed
.sampler
[s
] = 0;
1039 commit_samplers
= TRUE
;
1040 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
1043 /* Bind dummy sampler. We do not bind dummy sampler when
1044 * it is not needed because it could add overhead. The
1045 * dummy sampler should have r=g=b=0 and a=1. We do not
1046 * unbind dummy sampler directly when they are not needed
1047 * anymore, but they're going to be removed as long as texture
1048 * or sampler states are changed. */
1049 view
[i
] = device
->dummy_sampler_view
;
1050 num_textures
= i
+ 1;
1052 cso_single_sampler(context
->cso
, PIPE_SHADER_VERTEX
,
1053 s
- NINE_SAMPLER_VS(0), &device
->dummy_sampler_state
);
1055 commit_samplers
= TRUE
;
1056 context
->changed
.sampler
[s
] = ~0;
1059 context
->bound_samplers_mask_vs
|= (1 << i
);
1062 cso_set_sampler_views(context
->cso
, PIPE_SHADER_VERTEX
, num_textures
, view
);
1064 if (commit_samplers
)
1065 cso_single_sampler_done(context
->cso
, PIPE_SHADER_VERTEX
);
1068 /* State commit only */
1071 commit_blend(struct NineDevice9
*device
)
1073 struct nine_context
*context
= &device
->context
;
1075 cso_set_blend(context
->cso
, &context
->pipe_data
.blend
);
1079 commit_dsa(struct NineDevice9
*device
)
1081 struct nine_context
*context
= &device
->context
;
1083 cso_set_depth_stencil_alpha(context
->cso
, &context
->pipe_data
.dsa
);
1087 commit_scissor(struct NineDevice9
*device
)
1089 struct nine_context
*context
= &device
->context
;
1090 struct pipe_context
*pipe
= context
->pipe
;
1092 pipe
->set_scissor_states(pipe
, 0, 1, &context
->scissor
);
1096 commit_rasterizer(struct NineDevice9
*device
)
1098 struct nine_context
*context
= &device
->context
;
1100 cso_set_rasterizer(context
->cso
, &context
->pipe_data
.rast
);
1104 commit_vs_constants(struct NineDevice9
*device
)
1106 struct nine_context
*context
= &device
->context
;
1107 struct pipe_context
*pipe
= context
->pipe
;
1109 if (unlikely(!context
->programmable_vs
))
1110 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs_ff
);
1112 if (context
->swvp
) {
1113 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb0_swvp
);
1114 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 1, &context
->pipe_data
.cb1_swvp
);
1115 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 2, &context
->pipe_data
.cb2_swvp
);
1116 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 3, &context
->pipe_data
.cb3_swvp
);
1118 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs
);
1124 commit_ps_constants(struct NineDevice9
*device
)
1126 struct nine_context
*context
= &device
->context
;
1127 struct pipe_context
*pipe
= context
->pipe
;
1129 if (unlikely(!context
->ps
))
1130 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps_ff
);
1132 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps
);
1136 commit_vs(struct NineDevice9
*device
)
1138 struct nine_context
*context
= &device
->context
;
1140 context
->pipe
->bind_vs_state(context
->pipe
, context
->cso_shader
.vs
);
1145 commit_ps(struct NineDevice9
*device
)
1147 struct nine_context
*context
= &device
->context
;
1149 context
->pipe
->bind_fs_state(context
->pipe
, context
->cso_shader
.ps
);
/* Dirty-group masks used by nine_update_state to decide which
 * derived-state paths to run. NOTE(review): the mangled source dropped
 * some member lines; the members below were reconstructed — confirm
 * against the original file. */
#define NINE_STATE_SHADER_CHANGE_VS \
   (NINE_STATE_VS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_VS_PARAMS_MISC | \
    NINE_STATE_SWVP)

#define NINE_STATE_SHADER_CHANGE_PS \
   (NINE_STATE_PS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_PS_PARAMS_MISC)

#define NINE_STATE_FREQUENT \
   (NINE_STATE_RASTERIZER | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_SAMPLER | \
    NINE_STATE_VS_CONST | \
    NINE_STATE_PS_CONST | \
    NINE_STATE_MULTISAMPLE)

#define NINE_STATE_COMMON \
   (NINE_STATE_FB | \
    NINE_STATE_BLEND | \
    NINE_STATE_DSA | \
    NINE_STATE_VIEWPORT | \
    NINE_STATE_VDECL | \
    NINE_STATE_IDXBUF | \
    NINE_STATE_STREAMFREQ)

#define NINE_STATE_RARE \
   (NINE_STATE_SCISSOR | \
    NINE_STATE_BLEND_COLOR | \
    NINE_STATE_STENCIL_REF | \
    NINE_STATE_SAMPLE_MASK)
1188 nine_update_state(struct NineDevice9
*device
)
1190 struct nine_context
*context
= &device
->context
;
1191 struct pipe_context
*pipe
= context
->pipe
;
1194 DBG("changed state groups: %x\n", context
->changed
.group
);
1196 /* NOTE: We may want to use the cso cache for everything, or let
1197 * NineDevice9.RestoreNonCSOState actually set the states, then we wouldn't
1198 * have to care about state being clobbered here and could merge this back
1199 * into update_textures. Except, we also need to re-validate textures that
1200 * may be dirty anyway, even if no texture bindings changed.
1203 /* ff_update may change VS/PS dirty bits */
1204 if (unlikely(!context
->programmable_vs
|| !context
->ps
))
1205 nine_ff_update(device
);
1206 group
= context
->changed
.group
;
1208 if (group
& (NINE_STATE_SHADER_CHANGE_VS
| NINE_STATE_SHADER_CHANGE_PS
)) {
1209 if (group
& NINE_STATE_SHADER_CHANGE_VS
)
1210 group
|= prepare_vs(device
, (group
& NINE_STATE_VS
) != 0); /* may set NINE_STATE_RASTERIZER and NINE_STATE_SAMPLER*/
1211 if (group
& NINE_STATE_SHADER_CHANGE_PS
)
1212 group
|= prepare_ps(device
, (group
& NINE_STATE_PS
) != 0);
1215 if (group
& (NINE_STATE_COMMON
| NINE_STATE_VS
)) {
1216 if (group
& NINE_STATE_FB
)
1217 update_framebuffer(device
, FALSE
);
1218 if (group
& NINE_STATE_BLEND
)
1219 prepare_blend(device
);
1220 if (group
& NINE_STATE_DSA
)
1221 prepare_dsa(device
);
1222 if (group
& NINE_STATE_VIEWPORT
)
1223 update_viewport(device
);
1224 if (group
& (NINE_STATE_VDECL
| NINE_STATE_VS
| NINE_STATE_STREAMFREQ
))
1225 update_vertex_elements(device
);
1228 if (likely(group
& (NINE_STATE_FREQUENT
| NINE_STATE_VS
| NINE_STATE_PS
| NINE_STATE_SWVP
))) {
1229 if (group
& NINE_STATE_MULTISAMPLE
)
1230 group
|= check_multisample(device
);
1231 if (group
& NINE_STATE_RASTERIZER
)
1232 prepare_rasterizer(device
);
1233 if (group
& (NINE_STATE_TEXTURE
| NINE_STATE_SAMPLER
))
1234 update_textures_and_samplers(device
);
1235 if ((group
& (NINE_STATE_VS_CONST
| NINE_STATE_VS
| NINE_STATE_SWVP
)) && context
->programmable_vs
)
1236 prepare_vs_constants_userbuf(device
);
1237 if ((group
& (NINE_STATE_PS_CONST
| NINE_STATE_PS
)) && context
->ps
)
1238 prepare_ps_constants_userbuf(device
);
1241 if (context
->changed
.vtxbuf
)
1242 update_vertex_buffers(device
);
1244 if (context
->commit
& NINE_STATE_COMMIT_BLEND
)
1245 commit_blend(device
);
1246 if (context
->commit
& NINE_STATE_COMMIT_DSA
)
1248 if (context
->commit
& NINE_STATE_COMMIT_RASTERIZER
)
1249 commit_rasterizer(device
);
1250 if (context
->commit
& NINE_STATE_COMMIT_CONST_VS
)
1251 commit_vs_constants(device
);
1252 if (context
->commit
& NINE_STATE_COMMIT_CONST_PS
)
1253 commit_ps_constants(device
);
1254 if (context
->commit
& NINE_STATE_COMMIT_VS
)
1256 if (context
->commit
& NINE_STATE_COMMIT_PS
)
1259 context
->commit
= 0;
1261 if (unlikely(context
->changed
.ucp
)) {
1262 pipe
->set_clip_state(pipe
, &context
->clip
);
1263 context
->changed
.ucp
= FALSE
;
1266 if (unlikely(group
& NINE_STATE_RARE
)) {
1267 if (group
& NINE_STATE_SCISSOR
)
1268 commit_scissor(device
);
1269 if (group
& NINE_STATE_BLEND_COLOR
) {
1270 struct pipe_blend_color color
;
1271 d3dcolor_to_rgba(&color
.color
[0], context
->rs
[D3DRS_BLENDFACTOR
]);
1272 pipe
->set_blend_color(pipe
, &color
);
1274 if (group
& NINE_STATE_SAMPLE_MASK
) {
1275 if (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) {
1276 pipe
->set_sample_mask(pipe
, ~0);
1278 pipe
->set_sample_mask(pipe
, context
->rs
[D3DRS_MULTISAMPLEMASK
]);
1281 if (group
& NINE_STATE_STENCIL_REF
) {
1282 struct pipe_stencil_ref ref
;
1283 ref
.ref_value
[0] = context
->rs
[D3DRS_STENCILREF
];
1284 ref
.ref_value
[1] = ref
.ref_value
[0];
1285 pipe
->set_stencil_ref(pipe
, &ref
);
1289 context
->changed
.group
&=
1290 (NINE_STATE_FF
| NINE_STATE_VS_CONST
| NINE_STATE_PS_CONST
);
1295 #define RESZ_CODE 0x7fa05000
1298 NineDevice9_ResolveZ( struct NineDevice9
*device
)
1300 struct nine_context
*context
= &device
->context
;
1301 const struct util_format_description
*desc
;
1302 struct NineSurface9
*source
= context
->ds
;
1303 struct pipe_resource
*src
, *dst
;
1304 struct pipe_blit_info blit
;
1306 DBG("RESZ resolve\n");
1308 if (!source
|| !context
->texture
[0].enabled
||
1309 context
->texture
[0].type
!= D3DRTYPE_TEXTURE
)
1312 src
= source
->base
.resource
;
1313 dst
= context
->texture
[0].resource
;
1318 /* check dst is depth format. we know already for src */
1319 desc
= util_format_description(dst
->format
);
1320 if (desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_ZS
)
1323 memset(&blit
, 0, sizeof(blit
));
1324 blit
.src
.resource
= src
;
1326 blit
.src
.format
= src
->format
;
1328 blit
.src
.box
.depth
= 1;
1331 blit
.src
.box
.width
= src
->width0
;
1332 blit
.src
.box
.height
= src
->height0
;
1334 blit
.dst
.resource
= dst
;
1336 blit
.dst
.format
= dst
->format
;
1338 blit
.dst
.box
.depth
= 1;
1341 blit
.dst
.box
.width
= dst
->width0
;
1342 blit
.dst
.box
.height
= dst
->height0
;
1344 blit
.mask
= PIPE_MASK_ZS
;
1345 blit
.filter
= PIPE_TEX_FILTER_NEAREST
;
1346 blit
.scissor_enable
= FALSE
;
1348 context
->pipe
->blit(context
->pipe
, &blit
);
1351 #define ALPHA_TO_COVERAGE_ENABLE MAKEFOURCC('A', '2', 'M', '1')
1352 #define ALPHA_TO_COVERAGE_DISABLE MAKEFOURCC('A', '2', 'M', '0')
1354 /* Nine_context functions.
1355 * Serialized through CSMT macros.
1359 nine_context_set_texture_apply(struct NineDevice9
*device
,
1364 D3DRESOURCETYPE type
,
1366 struct pipe_resource
*res
,
1367 struct pipe_sampler_view
*view0
,
1368 struct pipe_sampler_view
*view1
);
1370 nine_context_set_stream_source_apply(struct NineDevice9
*device
,
1372 struct pipe_resource
*res
,
1377 nine_context_set_indices_apply(struct NineDevice9
*device
,
1378 struct pipe_resource
*res
,
1380 UINT OffsetInBytes
);
1383 nine_context_set_pixel_shader_constant_i_transformed(struct NineDevice9
*device
,
1385 const int *pConstantData
,
1386 unsigned pConstantData_size
,
1387 UINT Vector4iCount
);
1389 CSMT_ITEM_NO_WAIT(nine_context_set_render_state
,
1390 ARG_VAL(D3DRENDERSTATETYPE
, State
),
1391 ARG_VAL(DWORD
, Value
))
1393 struct nine_context
*context
= &device
->context
;
1395 /* Amd hacks (equivalent to GL extensions) */
1396 if (unlikely(State
== D3DRS_POINTSIZE
)) {
1397 if (Value
== RESZ_CODE
) {
1398 NineDevice9_ResolveZ(device
);
1402 if (Value
== ALPHA_TO_COVERAGE_ENABLE
||
1403 Value
== ALPHA_TO_COVERAGE_DISABLE
) {
1404 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== ALPHA_TO_COVERAGE_ENABLE
);
1405 context
->changed
.group
|= NINE_STATE_BLEND
;
1411 if (unlikely(State
== D3DRS_ADAPTIVETESS_Y
)) {
1412 if (Value
== D3DFMT_ATOC
|| (Value
== D3DFMT_UNKNOWN
&& context
->rs
[NINED3DRS_ALPHACOVERAGE
])) {
1413 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== D3DFMT_ATOC
) ? 3 : 0;
1414 context
->rs
[NINED3DRS_ALPHACOVERAGE
] &= context
->rs
[D3DRS_ALPHATESTENABLE
] ? 3 : 2;
1415 context
->changed
.group
|= NINE_STATE_BLEND
;
1419 if (unlikely(State
== D3DRS_ALPHATESTENABLE
&& (context
->rs
[NINED3DRS_ALPHACOVERAGE
] & 2))) {
1420 DWORD alphacoverage_prev
= context
->rs
[NINED3DRS_ALPHACOVERAGE
];
1421 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
? 3 : 2);
1422 if (context
->rs
[NINED3DRS_ALPHACOVERAGE
] != alphacoverage_prev
)
1423 context
->changed
.group
|= NINE_STATE_BLEND
;
1426 context
->rs
[State
] = nine_fix_render_state_value(State
, Value
);
1427 context
->changed
.group
|= nine_render_state_group
[State
];
1430 CSMT_ITEM_NO_WAIT(nine_context_set_texture_apply
,
1431 ARG_VAL(DWORD
, stage
),
1432 ARG_VAL(BOOL
, enabled
),
1433 ARG_VAL(BOOL
, shadow
),
1434 ARG_VAL(DWORD
, lod
),
1435 ARG_VAL(D3DRESOURCETYPE
, type
),
1436 ARG_VAL(uint8_t, pstype
),
1437 ARG_BIND_RES(struct pipe_resource
, res
),
1438 ARG_BIND_VIEW(struct pipe_sampler_view
, view0
),
1439 ARG_BIND_VIEW(struct pipe_sampler_view
, view1
))
1441 struct nine_context
*context
= &device
->context
;
1443 context
->texture
[stage
].enabled
= enabled
;
1444 context
->samplers_shadow
&= ~(1 << stage
);
1445 context
->samplers_shadow
|= shadow
<< stage
;
1446 context
->texture
[stage
].shadow
= shadow
;
1447 context
->texture
[stage
].lod
= lod
;
1448 context
->texture
[stage
].type
= type
;
1449 context
->texture
[stage
].pstype
= pstype
;
1450 pipe_resource_reference(&context
->texture
[stage
].resource
, res
);
1451 pipe_sampler_view_reference(&context
->texture
[stage
].view
[0], view0
);
1452 pipe_sampler_view_reference(&context
->texture
[stage
].view
[1], view1
);
1454 context
->changed
.group
|= NINE_STATE_TEXTURE
;
1458 nine_context_set_texture(struct NineDevice9
*device
,
1460 struct NineBaseTexture9
*tex
)
1462 BOOL enabled
= FALSE
;
1463 BOOL shadow
= FALSE
;
1465 D3DRESOURCETYPE type
= D3DRTYPE_TEXTURE
;
1467 struct pipe_resource
*res
= NULL
;
1468 struct pipe_sampler_view
*view0
= NULL
, *view1
= NULL
;
1470 /* For managed pool, the data can be initially incomplete.
1471 * In that case, the texture is rebound later
1472 * (in NineBaseTexture9_Validate/NineBaseTexture9_UploadSelf). */
1473 if (tex
&& tex
->base
.resource
) {
1475 shadow
= tex
->shadow
;
1476 lod
= tex
->managed
.lod
;
1477 type
= tex
->base
.type
;
1478 pstype
= tex
->pstype
;
1479 res
= tex
->base
.resource
;
1480 view0
= NineBaseTexture9_GetSamplerView(tex
, 0);
1481 view1
= NineBaseTexture9_GetSamplerView(tex
, 1);
1484 nine_context_set_texture_apply(device
, Stage
, enabled
,
1485 shadow
, lod
, type
, pstype
,
1489 CSMT_ITEM_NO_WAIT(nine_context_set_sampler_state
,
1490 ARG_VAL(DWORD
, Sampler
),
1491 ARG_VAL(D3DSAMPLERSTATETYPE
, Type
),
1492 ARG_VAL(DWORD
, Value
))
1494 struct nine_context
*context
= &device
->context
;
1496 if (unlikely(!nine_check_sampler_state_value(Type
, Value
)))
1499 context
->samp
[Sampler
][Type
] = Value
;
1500 context
->changed
.group
|= NINE_STATE_SAMPLER
;
1501 context
->changed
.sampler
[Sampler
] |= 1 << Type
;
1504 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_apply
,
1505 ARG_VAL(UINT
, StreamNumber
),
1506 ARG_BIND_RES(struct pipe_resource
, res
),
1507 ARG_VAL(UINT
, OffsetInBytes
),
1508 ARG_VAL(UINT
, Stride
))
1510 struct nine_context
*context
= &device
->context
;
1511 const unsigned i
= StreamNumber
;
1513 context
->vtxbuf
[i
].stride
= Stride
;
1514 context
->vtxbuf
[i
].buffer_offset
= OffsetInBytes
;
1515 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
.resource
, res
);
1517 context
->changed
.vtxbuf
|= 1 << StreamNumber
;
1521 nine_context_set_stream_source(struct NineDevice9
*device
,
1523 struct NineVertexBuffer9
*pVBuf9
,
1527 struct pipe_resource
*res
= NULL
;
1528 unsigned offset
= 0;
1531 res
= NineVertexBuffer9_GetResource(pVBuf9
, &offset
);
1532 /* in the future when there is internal offset, add it
1533 * to OffsetInBytes */
1535 nine_context_set_stream_source_apply(device
, StreamNumber
,
1536 res
, offset
+ OffsetInBytes
,
1540 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_freq
,
1541 ARG_VAL(UINT
, StreamNumber
),
1542 ARG_VAL(UINT
, Setting
))
1544 struct nine_context
*context
= &device
->context
;
1546 context
->stream_freq
[StreamNumber
] = Setting
;
1548 if (Setting
& D3DSTREAMSOURCE_INSTANCEDATA
)
1549 context
->stream_instancedata_mask
|= 1 << StreamNumber
;
1551 context
->stream_instancedata_mask
&= ~(1 << StreamNumber
);
1553 if (StreamNumber
!= 0)
1554 context
->changed
.group
|= NINE_STATE_STREAMFREQ
;
1557 CSMT_ITEM_NO_WAIT(nine_context_set_indices_apply
,
1558 ARG_BIND_RES(struct pipe_resource
, res
),
1559 ARG_VAL(UINT
, IndexSize
),
1560 ARG_VAL(UINT
, OffsetInBytes
))
1562 struct nine_context
*context
= &device
->context
;
1564 context
->index_size
= IndexSize
;
1565 context
->index_offset
= OffsetInBytes
;
1566 pipe_resource_reference(&context
->idxbuf
, res
);
1568 context
->changed
.group
|= NINE_STATE_IDXBUF
;
1572 nine_context_set_indices(struct NineDevice9
*device
,
1573 struct NineIndexBuffer9
*idxbuf
)
1575 struct pipe_resource
*res
= NULL
;
1577 unsigned OffsetInBytes
= 0;
1580 res
= NineIndexBuffer9_GetBuffer(idxbuf
, &OffsetInBytes
);
1581 IndexSize
= idxbuf
->index_size
;
1584 nine_context_set_indices_apply(device
, res
, IndexSize
, OffsetInBytes
);
1587 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_declaration
,
1588 ARG_BIND_REF(struct NineVertexDeclaration9
, vdecl
))
1590 struct nine_context
*context
= &device
->context
;
1591 BOOL was_programmable_vs
= context
->programmable_vs
;
1593 nine_bind(&context
->vdecl
, vdecl
);
1595 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1596 if (was_programmable_vs
!= context
->programmable_vs
) {
1597 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1598 context
->changed
.group
|= NINE_STATE_VS
;
1601 context
->changed
.group
|= NINE_STATE_VDECL
;
1604 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader
,
1605 ARG_BIND_REF(struct NineVertexShader9
, pShader
))
1607 struct nine_context
*context
= &device
->context
;
1608 BOOL was_programmable_vs
= context
->programmable_vs
;
1610 nine_bind(&context
->vs
, pShader
);
1612 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1614 /* ff -> non-ff: commit back non-ff constants */
1615 if (!was_programmable_vs
&& context
->programmable_vs
)
1616 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1618 context
->changed
.group
|= NINE_STATE_VS
;
1621 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_f
,
1622 ARG_VAL(UINT
, StartRegister
),
1623 ARG_MEM(float, pConstantData
),
1624 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1625 ARG_VAL(UINT
, Vector4fCount
))
1627 struct nine_context
*context
= &device
->context
;
1628 float *vs_const_f
= device
->may_swvp
? context
->vs_const_f_swvp
: context
->vs_const_f
;
1630 memcpy(&vs_const_f
[StartRegister
* 4],
1632 pConstantData_size
);
1634 if (device
->may_swvp
) {
1635 Vector4fCount
= MIN2(StartRegister
+ Vector4fCount
, NINE_MAX_CONST_F
) - StartRegister
;
1636 if (StartRegister
< NINE_MAX_CONST_F
)
1637 memcpy(&context
->vs_const_f
[StartRegister
* 4],
1639 Vector4fCount
* 4 * sizeof(context
->vs_const_f
[0]));
1642 context
->changed
.vs_const_f
= TRUE
;
1643 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1646 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_i
,
1647 ARG_VAL(UINT
, StartRegister
),
1648 ARG_MEM(int, pConstantData
),
1649 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1650 ARG_VAL(UINT
, Vector4iCount
))
1652 struct nine_context
*context
= &device
->context
;
1655 if (device
->driver_caps
.vs_integer
) {
1656 memcpy(&context
->vs_const_i
[4 * StartRegister
],
1658 pConstantData_size
);
1660 for (i
= 0; i
< Vector4iCount
; i
++) {
1661 context
->vs_const_i
[4 * (StartRegister
+ i
)] = fui((float)(pConstantData
[4 * i
]));
1662 context
->vs_const_i
[4 * (StartRegister
+ i
) + 1] = fui((float)(pConstantData
[4 * i
+ 1]));
1663 context
->vs_const_i
[4 * (StartRegister
+ i
) + 2] = fui((float)(pConstantData
[4 * i
+ 2]));
1664 context
->vs_const_i
[4 * (StartRegister
+ i
) + 3] = fui((float)(pConstantData
[4 * i
+ 3]));
1668 context
->changed
.vs_const_i
= TRUE
;
1669 context
->changed
.group
|= NINE_STATE_VS_CONST
| NINE_STATE_VS_PARAMS_MISC
;
1672 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_b
,
1673 ARG_VAL(UINT
, StartRegister
),
1674 ARG_MEM(BOOL
, pConstantData
),
1675 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1676 ARG_VAL(UINT
, BoolCount
))
1678 struct nine_context
*context
= &device
->context
;
1680 uint32_t bool_true
= device
->driver_caps
.vs_integer
? 0xFFFFFFFF : fui(1.0f
);
1682 (void) pConstantData_size
;
1684 for (i
= 0; i
< BoolCount
; i
++)
1685 context
->vs_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1687 context
->changed
.vs_const_b
= TRUE
;
1688 context
->changed
.group
|= NINE_STATE_VS_CONST
| NINE_STATE_VS_PARAMS_MISC
;
1691 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader
,
1692 ARG_BIND_REF(struct NinePixelShader9
, ps
))
1694 struct nine_context
*context
= &device
->context
;
1695 unsigned old_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1698 /* ff -> non-ff: commit back non-ff constants */
1699 if (!context
->ps
&& ps
)
1700 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
1702 nine_bind(&context
->ps
, ps
);
1704 context
->changed
.group
|= NINE_STATE_PS
;
1706 mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1707 /* We need to update cbufs if the pixel shader would
1708 * write to different render targets */
1709 if (mask
!= old_mask
)
1710 context
->changed
.group
|= NINE_STATE_FB
;
1713 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_f
,
1714 ARG_VAL(UINT
, StartRegister
),
1715 ARG_MEM(float, pConstantData
),
1716 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1717 ARG_VAL(UINT
, Vector4fCount
))
1719 struct nine_context
*context
= &device
->context
;
1721 memcpy(&context
->ps_const_f
[StartRegister
* 4],
1723 pConstantData_size
);
1725 context
->changed
.ps_const_f
= TRUE
;
1726 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1729 /* For stateblocks */
1730 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i_transformed
,
1731 ARG_VAL(UINT
, StartRegister
),
1732 ARG_MEM(int, pConstantData
),
1733 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1734 ARG_VAL(UINT
, Vector4iCount
))
1736 struct nine_context
*context
= &device
->context
;
1738 memcpy(&context
->ps_const_i
[StartRegister
][0],
1740 Vector4iCount
* sizeof(context
->ps_const_i
[0]));
1742 context
->changed
.ps_const_i
= TRUE
;
1743 context
->changed
.group
|= NINE_STATE_PS_CONST
| NINE_STATE_PS_PARAMS_MISC
;
1746 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i
,
1747 ARG_VAL(UINT
, StartRegister
),
1748 ARG_MEM(int, pConstantData
),
1749 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1750 ARG_VAL(UINT
, Vector4iCount
))
1752 struct nine_context
*context
= &device
->context
;
1755 if (device
->driver_caps
.ps_integer
) {
1756 memcpy(&context
->ps_const_i
[StartRegister
][0],
1758 pConstantData_size
);
1760 for (i
= 0; i
< Vector4iCount
; i
++) {
1761 context
->ps_const_i
[StartRegister
+i
][0] = fui((float)(pConstantData
[4*i
]));
1762 context
->ps_const_i
[StartRegister
+i
][1] = fui((float)(pConstantData
[4*i
+1]));
1763 context
->ps_const_i
[StartRegister
+i
][2] = fui((float)(pConstantData
[4*i
+2]));
1764 context
->ps_const_i
[StartRegister
+i
][3] = fui((float)(pConstantData
[4*i
+3]));
1767 context
->changed
.ps_const_i
= TRUE
;
1768 context
->changed
.group
|= NINE_STATE_PS_CONST
| NINE_STATE_PS_PARAMS_MISC
;
1771 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_b
,
1772 ARG_VAL(UINT
, StartRegister
),
1773 ARG_MEM(BOOL
, pConstantData
),
1774 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1775 ARG_VAL(UINT
, BoolCount
))
1777 struct nine_context
*context
= &device
->context
;
1779 uint32_t bool_true
= device
->driver_caps
.ps_integer
? 0xFFFFFFFF : fui(1.0f
);
1781 (void) pConstantData_size
;
1783 for (i
= 0; i
< BoolCount
; i
++)
1784 context
->ps_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1786 context
->changed
.ps_const_b
= TRUE
;
1787 context
->changed
.group
|= NINE_STATE_PS_CONST
| NINE_STATE_PS_PARAMS_MISC
;
1790 /* XXX: use resource, as resource might change */
1791 CSMT_ITEM_NO_WAIT(nine_context_set_render_target
,
1792 ARG_VAL(DWORD
, RenderTargetIndex
),
1793 ARG_BIND_REF(struct NineSurface9
, rt
))
1795 struct nine_context
*context
= &device
->context
;
1796 const unsigned i
= RenderTargetIndex
;
1799 context
->viewport
.X
= 0;
1800 context
->viewport
.Y
= 0;
1801 context
->viewport
.Width
= rt
->desc
.Width
;
1802 context
->viewport
.Height
= rt
->desc
.Height
;
1803 context
->viewport
.MinZ
= 0.0f
;
1804 context
->viewport
.MaxZ
= 1.0f
;
1806 context
->scissor
.minx
= 0;
1807 context
->scissor
.miny
= 0;
1808 context
->scissor
.maxx
= rt
->desc
.Width
;
1809 context
->scissor
.maxy
= rt
->desc
.Height
;
1811 context
->changed
.group
|= NINE_STATE_VIEWPORT
| NINE_STATE_SCISSOR
| NINE_STATE_MULTISAMPLE
;
1813 if (context
->rt
[0] &&
1814 (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) !=
1815 (rt
->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
))
1816 context
->changed
.group
|= NINE_STATE_SAMPLE_MASK
;
1819 if (context
->rt
[i
] != rt
) {
1820 nine_bind(&context
->rt
[i
], rt
);
1821 context
->changed
.group
|= NINE_STATE_FB
;
1825 /* XXX: use resource instead of ds, as resource might change */
1826 CSMT_ITEM_NO_WAIT(nine_context_set_depth_stencil
,
1827 ARG_BIND_REF(struct NineSurface9
, ds
))
1829 struct nine_context
*context
= &device
->context
;
1831 nine_bind(&context
->ds
, ds
);
1832 context
->changed
.group
|= NINE_STATE_FB
;
1835 CSMT_ITEM_NO_WAIT(nine_context_set_viewport
,
1836 ARG_COPY_REF(D3DVIEWPORT9
, viewport
))
1838 struct nine_context
*context
= &device
->context
;
1840 context
->viewport
= *viewport
;
1841 context
->changed
.group
|= NINE_STATE_VIEWPORT
;
1844 CSMT_ITEM_NO_WAIT(nine_context_set_scissor
,
1845 ARG_COPY_REF(struct pipe_scissor_state
, scissor
))
1847 struct nine_context
*context
= &device
->context
;
1849 context
->scissor
= *scissor
;
1850 context
->changed
.group
|= NINE_STATE_SCISSOR
;
1853 CSMT_ITEM_NO_WAIT(nine_context_set_transform
,
1854 ARG_VAL(D3DTRANSFORMSTATETYPE
, State
),
1855 ARG_COPY_REF(D3DMATRIX
, pMatrix
))
1857 struct nine_context
*context
= &device
->context
;
1858 D3DMATRIX
*M
= nine_state_access_transform(&context
->ff
, State
, TRUE
);
1861 context
->ff
.changed
.transform
[State
/ 32] |= 1 << (State
% 32);
1862 context
->changed
.group
|= NINE_STATE_FF
;
1865 CSMT_ITEM_NO_WAIT(nine_context_set_material
,
1866 ARG_COPY_REF(D3DMATERIAL9
, pMaterial
))
1868 struct nine_context
*context
= &device
->context
;
1870 context
->ff
.material
= *pMaterial
;
1871 context
->changed
.group
|= NINE_STATE_FF_MATERIAL
;
1874 CSMT_ITEM_NO_WAIT(nine_context_set_light
,
1875 ARG_VAL(DWORD
, Index
),
1876 ARG_COPY_REF(D3DLIGHT9
, pLight
))
1878 struct nine_context
*context
= &device
->context
;
1880 (void)nine_state_set_light(&context
->ff
, Index
, pLight
);
1881 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1885 /* For stateblocks */
1887 nine_context_light_enable_stateblock(struct NineDevice9
*device
,
1888 const uint16_t active_light
[NINE_MAX_LIGHTS_ACTIVE
], /* TODO: use pointer that convey size for csmt */
1889 unsigned int num_lights_active
)
1891 struct nine_context
*context
= &device
->context
;
1893 /* TODO: Use CSMT_* to avoid calling nine_csmt_process */
1894 nine_csmt_process(device
);
1895 memcpy(context
->ff
.active_light
, active_light
, NINE_MAX_LIGHTS_ACTIVE
* sizeof(context
->ff
.active_light
[0]));
1896 context
->ff
.num_lights_active
= num_lights_active
;
1897 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1900 CSMT_ITEM_NO_WAIT(nine_context_light_enable
,
1901 ARG_VAL(DWORD
, Index
),
1902 ARG_VAL(BOOL
, Enable
))
1904 struct nine_context
*context
= &device
->context
;
1906 nine_state_light_enable(&context
->ff
, Index
, Enable
);
1907 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1910 CSMT_ITEM_NO_WAIT(nine_context_set_texture_stage_state
,
1911 ARG_VAL(DWORD
, Stage
),
1912 ARG_VAL(D3DTEXTURESTAGESTATETYPE
, Type
),
1913 ARG_VAL(DWORD
, Value
))
1915 struct nine_context
*context
= &device
->context
;
1916 int bumpmap_index
= -1;
1918 context
->ff
.tex_stage
[Stage
][Type
] = Value
;
1920 case D3DTSS_BUMPENVMAT00
:
1921 bumpmap_index
= 4 * Stage
;
1923 case D3DTSS_BUMPENVMAT01
:
1924 bumpmap_index
= 4 * Stage
+ 1;
1926 case D3DTSS_BUMPENVMAT10
:
1927 bumpmap_index
= 4 * Stage
+ 2;
1929 case D3DTSS_BUMPENVMAT11
:
1930 bumpmap_index
= 4 * Stage
+ 3;
1932 case D3DTSS_BUMPENVLSCALE
:
1933 bumpmap_index
= 4 * 8 + 2 * Stage
;
1935 case D3DTSS_BUMPENVLOFFSET
:
1936 bumpmap_index
= 4 * 8 + 2 * Stage
+ 1;
1938 case D3DTSS_TEXTURETRANSFORMFLAGS
:
1939 context
->changed
.group
|= NINE_STATE_PS_PARAMS_MISC
;
1945 if (bumpmap_index
>= 0) {
1946 context
->bumpmap_vars
[bumpmap_index
] = Value
;
1947 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1950 context
->changed
.group
|= NINE_STATE_FF_PS_CONSTS
;
1951 context
->ff
.changed
.tex_stage
[Stage
][Type
/ 32] |= 1 << (Type
% 32);
1954 CSMT_ITEM_NO_WAIT(nine_context_set_clip_plane
,
1955 ARG_VAL(DWORD
, Index
),
1956 ARG_COPY_REF(struct nine_clipplane
, pPlane
))
1958 struct nine_context
*context
= &device
->context
;
1960 memcpy(&context
->clip
.ucp
[Index
][0], pPlane
, sizeof(context
->clip
.ucp
[0]));
1961 context
->changed
.ucp
= TRUE
;
1964 CSMT_ITEM_NO_WAIT(nine_context_set_swvp
,
1965 ARG_VAL(boolean
, swvp
))
1967 struct nine_context
*context
= &device
->context
;
1969 context
->swvp
= swvp
;
1970 context
->changed
.group
|= NINE_STATE_SWVP
;
1973 /* Do not write to nine_context directly. Slower,
1974 * but works with csmt. TODO: write a special csmt version that
1975 * would record the list of commands as much as possible,
1976 * and use the version above else.
1979 nine_context_apply_stateblock(struct NineDevice9
*device
,
1980 const struct nine_state
*src
)
1984 /* No need to apply src->changed.group, since all calls do
1985 * set context->changed.group */
1987 for (i
= 0; i
< ARRAY_SIZE(src
->changed
.rs
); ++i
) {
1988 uint32_t m
= src
->changed
.rs
[i
];
1990 const int r
= ffs(m
) - 1;
1992 nine_context_set_render_state(device
, i
* 32 + r
, src
->rs_advertised
[i
* 32 + r
]);
1997 if (src
->changed
.texture
) {
1998 uint32_t m
= src
->changed
.texture
;
2001 for (s
= 0; m
; ++s
, m
>>= 1) {
2002 struct NineBaseTexture9
*tex
= src
->texture
[s
];
2005 nine_context_set_texture(device
, s
, tex
);
2010 if (src
->changed
.group
& NINE_STATE_SAMPLER
) {
2013 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2014 uint32_t m
= src
->changed
.sampler
[s
];
2016 const int i
= ffs(m
) - 1;
2018 nine_context_set_sampler_state(device
, s
, i
, src
->samp_advertised
[s
][i
]);
2023 /* Vertex buffers */
2024 if (src
->changed
.vtxbuf
| src
->changed
.stream_freq
) {
2025 uint32_t m
= src
->changed
.vtxbuf
| src
->changed
.stream_freq
;
2026 for (i
= 0; m
; ++i
, m
>>= 1) {
2027 if (src
->changed
.vtxbuf
& (1 << i
))
2028 nine_context_set_stream_source(device
, i
, src
->stream
[i
], src
->vtxbuf
[i
].buffer_offset
, src
->vtxbuf
[i
].stride
);
2029 if (src
->changed
.stream_freq
& (1 << i
))
2030 nine_context_set_stream_source_freq(device
, i
, src
->stream_freq
[i
]);
2035 if (src
->changed
.group
& NINE_STATE_IDXBUF
)
2036 nine_context_set_indices(device
, src
->idxbuf
);
2038 /* Vertex declaration */
2039 if ((src
->changed
.group
& NINE_STATE_VDECL
) && src
->vdecl
)
2040 nine_context_set_vertex_declaration(device
, src
->vdecl
);
2043 if (src
->changed
.group
& NINE_STATE_VS
)
2044 nine_context_set_vertex_shader(device
, src
->vs
);
2047 if (src
->changed
.group
& NINE_STATE_PS
)
2048 nine_context_set_pixel_shader(device
, src
->ps
);
2050 /* Vertex constants */
2051 if (src
->changed
.group
& NINE_STATE_VS_CONST
) {
2052 struct nine_range
*r
;
2053 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
)
2054 nine_context_set_vertex_shader_constant_f(device
, r
->bgn
,
2055 &src
->vs_const_f
[r
->bgn
* 4],
2056 sizeof(float[4]) * (r
->end
- r
->bgn
),
2058 for (r
= src
->changed
.vs_const_i
; r
; r
= r
->next
)
2059 nine_context_set_vertex_shader_constant_i(device
, r
->bgn
,
2060 &src
->vs_const_i
[r
->bgn
* 4],
2061 sizeof(int[4]) * (r
->end
- r
->bgn
),
2063 for (r
= src
->changed
.vs_const_b
; r
; r
= r
->next
)
2064 nine_context_set_vertex_shader_constant_b(device
, r
->bgn
,
2065 &src
->vs_const_b
[r
->bgn
* 4],
2066 sizeof(BOOL
) * (r
->end
- r
->bgn
),
2070 /* Pixel constants */
2071 if (src
->changed
.group
& NINE_STATE_PS_CONST
) {
2072 struct nine_range
*r
;
2073 for (r
= src
->changed
.ps_const_f
; r
; r
= r
->next
)
2074 nine_context_set_pixel_shader_constant_f(device
, r
->bgn
,
2075 &src
->ps_const_f
[r
->bgn
* 4],
2076 sizeof(float[4]) * (r
->end
- r
->bgn
),
2078 if (src
->changed
.ps_const_i
) {
2079 uint16_t m
= src
->changed
.ps_const_i
;
2080 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2082 nine_context_set_pixel_shader_constant_i_transformed(device
, i
,
2083 src
->ps_const_i
[i
], sizeof(int[4]), 1);
2085 if (src
->changed
.ps_const_b
) {
2086 uint16_t m
= src
->changed
.ps_const_b
;
2087 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2089 nine_context_set_pixel_shader_constant_b(device
, i
,
2090 &src
->ps_const_b
[i
], sizeof(BOOL
), 1);
2095 if (src
->changed
.group
& NINE_STATE_VIEWPORT
)
2096 nine_context_set_viewport(device
, &src
->viewport
);
2099 if (src
->changed
.group
& NINE_STATE_SCISSOR
)
2100 nine_context_set_scissor(device
, &src
->scissor
);
2102 /* User Clip Planes */
2103 if (src
->changed
.ucp
)
2104 for (i
= 0; i
< PIPE_MAX_CLIP_PLANES
; ++i
)
2105 if (src
->changed
.ucp
& (1 << i
))
2106 nine_context_set_clip_plane(device
, i
, (struct nine_clipplane
*)&src
->clip
.ucp
[i
][0]);
2108 if (!(src
->changed
.group
& NINE_STATE_FF
))
2111 /* Fixed function state. */
2113 if (src
->changed
.group
& NINE_STATE_FF_MATERIAL
)
2114 nine_context_set_material(device
, &src
->ff
.material
);
2116 if (src
->changed
.group
& NINE_STATE_FF_PS_CONSTS
) {
2118 for (s
= 0; s
< NINE_MAX_TEXTURE_STAGES
; ++s
) {
2119 for (i
= 0; i
< NINED3DTSS_COUNT
; ++i
)
2120 if (src
->ff
.changed
.tex_stage
[s
][i
/ 32] & (1 << (i
% 32)))
2121 nine_context_set_texture_stage_state(device
, s
, i
, src
->ff
.tex_stage
[s
][i
]);
2124 if (src
->changed
.group
& NINE_STATE_FF_LIGHTING
) {
2125 for (i
= 0; i
< src
->ff
.num_lights
; ++i
)
2126 if (src
->ff
.light
[i
].Type
!= NINED3DLIGHT_INVALID
)
2127 nine_context_set_light(device
, i
, &src
->ff
.light
[i
]);
2129 nine_context_light_enable_stateblock(device
, src
->ff
.active_light
, src
->ff
.num_lights_active
);
2131 if (src
->changed
.group
& NINE_STATE_FF_VSTRANSF
) {
2132 for (i
= 0; i
< ARRAY_SIZE(src
->ff
.changed
.transform
); ++i
) {
2134 if (!src
->ff
.changed
.transform
[i
])
2136 for (s
= i
* 32; s
< (i
* 32 + 32); ++s
) {
2137 if (!(src
->ff
.changed
.transform
[i
] & (1 << (s
% 32))))
2139 /* MaxVertexBlendMatrixIndex is 8, which means
2140 * we don't read past index D3DTS_WORLDMATRIX(8).
2141 * swvp is supposed to allow all 256, but we don't
2142 * implement it for now. */
2143 if (s
> D3DTS_WORLDMATRIX(8))
2145 nine_context_set_transform(device
, s
,
2146 nine_state_access_transform(
2147 (struct nine_ff_state
*)&src
->ff
,
2155 nine_update_state_framebuffer_clear(struct NineDevice9
*device
)
2157 struct nine_context
*context
= &device
->context
;
2159 if (context
->changed
.group
& NINE_STATE_FB
)
2160 update_framebuffer(device
, TRUE
);
2163 CSMT_ITEM_NO_WAIT(nine_context_clear_fb
,
2164 ARG_VAL(DWORD
, Count
),
2165 ARG_COPY_REF(D3DRECT
, pRects
),
2166 ARG_VAL(DWORD
, Flags
),
2167 ARG_VAL(D3DCOLOR
, Color
),
2169 ARG_VAL(DWORD
, Stencil
))
2171 struct nine_context
*context
= &device
->context
;
2172 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
2173 struct pipe_surface
*cbuf
, *zsbuf
;
2174 struct pipe_context
*pipe
= context
->pipe
;
2175 struct NineSurface9
*zsbuf_surf
= context
->ds
;
2176 struct NineSurface9
*rt
;
2179 union pipe_color_union rgba
;
2180 unsigned rt_mask
= 0;
2183 nine_update_state_framebuffer_clear(device
);
2185 if (Flags
& D3DCLEAR_TARGET
) bufs
|= PIPE_CLEAR_COLOR
;
2186 /* Ignore Z buffer if not bound */
2187 if (context
->pipe_data
.fb
.zsbuf
!= NULL
) {
2188 if (Flags
& D3DCLEAR_ZBUFFER
) bufs
|= PIPE_CLEAR_DEPTH
;
2189 if (Flags
& D3DCLEAR_STENCIL
) bufs
|= PIPE_CLEAR_STENCIL
;
2193 d3dcolor_to_pipe_color_union(&rgba
, Color
);
2195 rect
.x1
= context
->viewport
.X
;
2196 rect
.y1
= context
->viewport
.Y
;
2197 rect
.x2
= context
->viewport
.Width
+ rect
.x1
;
2198 rect
.y2
= context
->viewport
.Height
+ rect
.y1
;
2200 /* Both rectangles apply, which is weird, but that's D3D9. */
2201 if (context
->rs
[D3DRS_SCISSORTESTENABLE
]) {
2202 rect
.x1
= MAX2(rect
.x1
, context
->scissor
.minx
);
2203 rect
.y1
= MAX2(rect
.y1
, context
->scissor
.miny
);
2204 rect
.x2
= MIN2(rect
.x2
, context
->scissor
.maxx
);
2205 rect
.y2
= MIN2(rect
.y2
, context
->scissor
.maxy
);
2209 /* Maybe apps like to specify a large rect ? */
2210 if (pRects
[0].x1
<= rect
.x1
&& pRects
[0].x2
>= rect
.x2
&&
2211 pRects
[0].y1
<= rect
.y1
&& pRects
[0].y2
>= rect
.y2
) {
2212 DBG("First rect covers viewport.\n");
2218 if (rect
.x1
>= context
->pipe_data
.fb
.width
|| rect
.y1
>= context
->pipe_data
.fb
.height
)
2221 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2222 if (context
->rt
[i
] && context
->rt
[i
]->desc
.Format
!= D3DFMT_NULL
)
2226 /* fast path, clears everything at once */
2228 (!(bufs
& PIPE_CLEAR_COLOR
) || (rt_mask
== context
->rt_mask
)) &&
2229 rect
.x1
== 0 && rect
.y1
== 0 &&
2230 /* Case we clear only render target. Check clear region vs rt. */
2231 ((!(bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2232 rect
.x2
>= context
->pipe_data
.fb
.width
&&
2233 rect
.y2
>= context
->pipe_data
.fb
.height
) ||
2234 /* Case we clear depth buffer (and eventually rt too).
2235 * depth buffer size is always >= rt size. Compare to clear region */
2236 ((bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2237 rect
.x2
>= zsbuf_surf
->desc
.Width
&&
2238 rect
.y2
>= zsbuf_surf
->desc
.Height
))) {
2239 DBG("Clear fast path\n");
2240 pipe
->clear(pipe
, bufs
, &rgba
, Z
, Stencil
);
2249 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2250 rt
= context
->rt
[i
];
2251 if (!rt
|| rt
->desc
.Format
== D3DFMT_NULL
||
2252 !(bufs
& PIPE_CLEAR_COLOR
))
2253 continue; /* save space, compiler should hoist this */
2254 cbuf
= NineSurface9_GetSurface(rt
, sRGB
);
2255 for (r
= 0; r
< Count
; ++r
) {
2256 /* Don't trust users to pass these in the right order. */
2257 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2258 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2259 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2260 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2262 /* Drop negative rectangles (like wine expects). */
2263 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2264 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2267 x1
= MAX2(x1
, rect
.x1
);
2268 y1
= MAX2(y1
, rect
.y1
);
2269 x2
= MIN3(x2
, rect
.x2
, rt
->desc
.Width
);
2270 y2
= MIN3(y2
, rect
.y2
, rt
->desc
.Height
);
2272 DBG("Clearing (%u..%u)x(%u..%u)\n", x1
, x2
, y1
, y2
);
2273 pipe
->clear_render_target(pipe
, cbuf
, &rgba
,
2274 x1
, y1
, x2
- x1
, y2
- y1
, false);
2277 if (!(bufs
& PIPE_CLEAR_DEPTHSTENCIL
))
2280 bufs
&= PIPE_CLEAR_DEPTHSTENCIL
;
2282 for (r
= 0; r
< Count
; ++r
) {
2283 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2284 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2285 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2286 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2288 /* Drop negative rectangles. */
2289 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2290 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2293 x1
= MIN2(x1
, rect
.x1
);
2294 y1
= MIN2(y1
, rect
.y1
);
2295 x2
= MIN3(x2
, rect
.x2
, zsbuf_surf
->desc
.Width
);
2296 y2
= MIN3(y2
, rect
.y2
, zsbuf_surf
->desc
.Height
);
2298 zsbuf
= NineSurface9_GetSurface(zsbuf_surf
, 0);
2300 pipe
->clear_depth_stencil(pipe
, zsbuf
, bufs
, Z
, Stencil
,
2301 x1
, y1
, x2
- x1
, y2
- y1
, false);
2308 init_draw_info(struct pipe_draw_info
*info
,
2309 struct NineDevice9
*dev
, D3DPRIMITIVETYPE type
, UINT count
)
2311 info
->mode
= d3dprimitivetype_to_pipe_prim(type
);
2312 info
->count
= prim_count_to_vertex_count(type
, count
);
2313 info
->start_instance
= 0;
2314 info
->instance_count
= 1;
2315 if (dev
->context
.stream_instancedata_mask
& dev
->context
.stream_usage_mask
)
2316 info
->instance_count
= MAX2(dev
->context
.stream_freq
[0] & 0x7FFFFF, 1);
2317 info
->primitive_restart
= FALSE
;
2318 info
->has_user_indices
= FALSE
;
2319 info
->restart_index
= 0;
2320 info
->count_from_stream_output
= NULL
;
2321 info
->indirect
= NULL
;
2324 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive
,
2325 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2326 ARG_VAL(UINT
, StartVertex
),
2327 ARG_VAL(UINT
, PrimitiveCount
))
2329 struct nine_context
*context
= &device
->context
;
2330 struct pipe_draw_info info
;
2332 nine_update_state(device
);
2334 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2335 info
.index_size
= 0;
2336 info
.start
= StartVertex
;
2337 info
.index_bias
= 0;
2338 info
.min_index
= info
.start
;
2339 info
.max_index
= info
.count
- 1;
2340 info
.index
.resource
= NULL
;
2342 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2345 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive
,
2346 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2347 ARG_VAL(INT
, BaseVertexIndex
),
2348 ARG_VAL(UINT
, MinVertexIndex
),
2349 ARG_VAL(UINT
, NumVertices
),
2350 ARG_VAL(UINT
, StartIndex
),
2351 ARG_VAL(UINT
, PrimitiveCount
))
2353 struct nine_context
*context
= &device
->context
;
2354 struct pipe_draw_info info
;
2356 nine_update_state(device
);
2358 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2359 info
.index_size
= context
->index_size
;
2360 info
.start
= context
->index_offset
/ context
->index_size
+ StartIndex
;
2361 info
.index_bias
= BaseVertexIndex
;
2362 /* These don't include index bias: */
2363 info
.min_index
= MinVertexIndex
;
2364 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2365 info
.index
.resource
= context
->idxbuf
;
2367 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2370 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf
,
2371 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2372 ARG_VAL(UINT
, PrimitiveCount
),
2373 ARG_BIND_VBUF(struct pipe_vertex_buffer
, vtxbuf
))
2375 struct nine_context
*context
= &device
->context
;
2376 struct pipe_draw_info info
;
2378 nine_update_state(device
);
2380 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2381 info
.index_size
= 0;
2383 info
.index_bias
= 0;
2385 info
.max_index
= info
.count
- 1;
2386 info
.index
.resource
= NULL
;
2388 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vtxbuf
);
2390 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2393 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf
,
2394 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2395 ARG_VAL(UINT
, MinVertexIndex
),
2396 ARG_VAL(UINT
, NumVertices
),
2397 ARG_VAL(UINT
, PrimitiveCount
),
2398 ARG_BIND_VBUF(struct pipe_vertex_buffer
, vbuf
),
2399 ARG_BIND_RES(struct pipe_resource
, ibuf
),
2400 ARG_VAL(void *, user_ibuf
),
2401 ARG_VAL(UINT
, index_offset
),
2402 ARG_VAL(UINT
, index_size
))
2404 struct nine_context
*context
= &device
->context
;
2405 struct pipe_draw_info info
;
2407 nine_update_state(device
);
2409 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2410 info
.index_size
= index_size
;
2411 info
.start
= index_offset
/ info
.index_size
;
2412 info
.index_bias
= 0;
2413 info
.min_index
= MinVertexIndex
;
2414 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2415 info
.has_user_indices
= ibuf
== NULL
;
2417 info
.index
.resource
= ibuf
;
2419 info
.index
.user
= user_ibuf
;
2421 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vbuf
);
2423 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2426 CSMT_ITEM_NO_WAIT(nine_context_resource_copy_region
,
2427 ARG_BIND_REF(struct NineUnknown
, dst
),
2428 ARG_BIND_REF(struct NineUnknown
, src
),
2429 ARG_BIND_RES(struct pipe_resource
, dst_res
),
2430 ARG_VAL(unsigned, dst_level
),
2431 ARG_COPY_REF(struct pipe_box
, dst_box
),
2432 ARG_BIND_RES(struct pipe_resource
, src_res
),
2433 ARG_VAL(unsigned, src_level
),
2434 ARG_COPY_REF(struct pipe_box
, src_box
))
2436 struct nine_context
*context
= &device
->context
;
2441 context
->pipe
->resource_copy_region(context
->pipe
,
2443 dst_box
->x
, dst_box
->y
, dst_box
->z
,
2448 CSMT_ITEM_NO_WAIT(nine_context_blit
,
2449 ARG_BIND_REF(struct NineUnknown
, dst
),
2450 ARG_BIND_REF(struct NineUnknown
, src
),
2451 ARG_BIND_BLIT(struct pipe_blit_info
, blit
))
2453 struct nine_context
*context
= &device
->context
;
2458 context
->pipe
->blit(context
->pipe
, blit
);
2461 CSMT_ITEM_NO_WAIT(nine_context_clear_render_target
,
2462 ARG_BIND_REF(struct NineSurface9
, surface
),
2463 ARG_VAL(D3DCOLOR
, color
),
2466 ARG_VAL(UINT
, width
),
2467 ARG_VAL(UINT
, height
))
2469 struct nine_context
*context
= &device
->context
;
2470 struct pipe_surface
*surf
;
2471 union pipe_color_union rgba
;
2473 d3dcolor_to_pipe_color_union(&rgba
, color
);
2474 surf
= NineSurface9_GetSurface(surface
, 0);
2475 context
->pipe
->clear_render_target(context
->pipe
, surf
, &rgba
, x
, y
, width
, height
, false);
2478 CSMT_ITEM_NO_WAIT(nine_context_gen_mipmap
,
2479 ARG_BIND_REF(struct NineUnknown
, dst
),
2480 ARG_BIND_RES(struct pipe_resource
, res
),
2481 ARG_VAL(UINT
, base_level
),
2482 ARG_VAL(UINT
, last_level
),
2483 ARG_VAL(UINT
, first_layer
),
2484 ARG_VAL(UINT
, last_layer
),
2485 ARG_VAL(UINT
, filter
))
2487 struct nine_context
*context
= &device
->context
;
2489 /* We just bind dst for the bind count */
2492 util_gen_mipmap(context
->pipe
, res
, res
->format
, base_level
,
2493 last_level
, first_layer
, last_layer
, filter
);
2496 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_range_upload
,
2497 ARG_BIND_REF(struct NineUnknown
, src_ref
),
2498 ARG_BIND_RES(struct pipe_resource
, res
),
2499 ARG_VAL(unsigned, offset
),
2500 ARG_VAL(unsigned, size
),
2501 ARG_VAL(const void *, data
))
2503 struct nine_context
*context
= &device
->context
;
2505 /* Binding src_ref avoids release before upload */
2508 context
->pipe
->buffer_subdata(context
->pipe
, res
, 0, offset
, size
, data
);
2511 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_box_upload
,
2512 ARG_BIND_REF(struct NineUnknown
, src_ref
),
2513 ARG_BIND_RES(struct pipe_resource
, res
),
2514 ARG_VAL(unsigned, level
),
2515 ARG_COPY_REF(struct pipe_box
, dst_box
),
2516 ARG_VAL(enum pipe_format
, src_format
),
2517 ARG_VAL(const void *, src
),
2518 ARG_VAL(unsigned, src_stride
),
2519 ARG_VAL(unsigned, src_layer_stride
),
2520 ARG_COPY_REF(struct pipe_box
, src_box
))
2522 struct nine_context
*context
= &device
->context
;
2523 struct pipe_context
*pipe
= context
->pipe
;
2524 struct pipe_transfer
*transfer
= NULL
;
2527 /* Binding src_ref avoids release before upload */
2530 map
= pipe
->transfer_map(pipe
,
2533 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
,
2534 dst_box
, &transfer
);
2538 /* Note: if formats are the sames, it will revert
2539 * to normal memcpy */
2540 (void) util_format_translate_3d(res
->format
,
2541 map
, transfer
->stride
,
2542 transfer
->layer_stride
,
2547 src_box
->x
, src_box
->y
, src_box
->z
,
2548 dst_box
->width
, dst_box
->height
,
2551 pipe_transfer_unmap(pipe
, transfer
);
2555 nine_context_create_query(struct NineDevice9
*device
, unsigned query_type
)
2557 struct pipe_context
*pipe
;
2558 struct pipe_query
*res
;
2560 pipe
= nine_context_get_pipe_acquire(device
);
2561 res
= pipe
->create_query(pipe
, query_type
, 0);
2562 nine_context_get_pipe_release(device
);
2566 CSMT_ITEM_DO_WAIT(nine_context_destroy_query
,
2567 ARG_REF(struct pipe_query
, query
))
2569 struct nine_context
*context
= &device
->context
;
2571 context
->pipe
->destroy_query(context
->pipe
, query
);
2574 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_begin_query
,
2575 ARG_REF(struct pipe_query
, query
))
2577 struct nine_context
*context
= &device
->context
;
2579 (void) context
->pipe
->begin_query(context
->pipe
, query
);
2582 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_end_query
,
2583 ARG_REF(struct pipe_query
, query
))
2585 struct nine_context
*context
= &device
->context
;
2587 (void) context
->pipe
->end_query(context
->pipe
, query
);
2591 nine_context_get_query_result(struct NineDevice9
*device
, struct pipe_query
*query
,
2592 unsigned *counter
, boolean flush
, boolean wait
,
2593 union pipe_query_result
*result
)
2595 struct pipe_context
*pipe
;
2599 nine_csmt_process(device
);
2600 else if (p_atomic_read(counter
) > 0) {
2601 if (flush
&& device
->csmt_active
)
2602 nine_queue_flush(device
->csmt_ctx
->pool
);
2603 DBG("Pending begin/end. Returning\n");
2607 pipe
= nine_context_get_pipe_acquire(device
);
2608 ret
= pipe
->get_query_result(pipe
, query
, wait
, result
);
2609 nine_context_get_pipe_release(device
);
2611 DBG("Query result %s\n", ret
? "found" : "not yet available");
2615 /* State defaults */
2617 static const DWORD nine_render_state_defaults
[NINED3DRS_LAST
+ 1] =
2619 /* [D3DRS_ZENABLE] = D3DZB_TRUE; wine: auto_depth_stencil */
2620 [D3DRS_ZENABLE
] = D3DZB_FALSE
,
2621 [D3DRS_FILLMODE
] = D3DFILL_SOLID
,
2622 [D3DRS_SHADEMODE
] = D3DSHADE_GOURAUD
,
2623 /* [D3DRS_LINEPATTERN] = 0x00000000, */
2624 [D3DRS_ZWRITEENABLE
] = TRUE
,
2625 [D3DRS_ALPHATESTENABLE
] = FALSE
,
2626 [D3DRS_LASTPIXEL
] = TRUE
,
2627 [D3DRS_SRCBLEND
] = D3DBLEND_ONE
,
2628 [D3DRS_DESTBLEND
] = D3DBLEND_ZERO
,
2629 [D3DRS_CULLMODE
] = D3DCULL_CCW
,
2630 [D3DRS_ZFUNC
] = D3DCMP_LESSEQUAL
,
2631 [D3DRS_ALPHAFUNC
] = D3DCMP_ALWAYS
,
2632 [D3DRS_ALPHAREF
] = 0,
2633 [D3DRS_DITHERENABLE
] = FALSE
,
2634 [D3DRS_ALPHABLENDENABLE
] = FALSE
,
2635 [D3DRS_FOGENABLE
] = FALSE
,
2636 [D3DRS_SPECULARENABLE
] = FALSE
,
2637 /* [D3DRS_ZVISIBLE] = 0, */
2638 [D3DRS_FOGCOLOR
] = 0,
2639 [D3DRS_FOGTABLEMODE
] = D3DFOG_NONE
,
2640 [D3DRS_FOGSTART
] = 0x00000000,
2641 [D3DRS_FOGEND
] = 0x3F800000,
2642 [D3DRS_FOGDENSITY
] = 0x3F800000,
2643 /* [D3DRS_EDGEANTIALIAS] = FALSE, */
2644 [D3DRS_RANGEFOGENABLE
] = FALSE
,
2645 [D3DRS_STENCILENABLE
] = FALSE
,
2646 [D3DRS_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2647 [D3DRS_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2648 [D3DRS_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2649 [D3DRS_STENCILREF
] = 0,
2650 [D3DRS_STENCILMASK
] = 0xFFFFFFFF,
2651 [D3DRS_STENCILFUNC
] = D3DCMP_ALWAYS
,
2652 [D3DRS_STENCILWRITEMASK
] = 0xFFFFFFFF,
2653 [D3DRS_TEXTUREFACTOR
] = 0xFFFFFFFF,
2662 [D3DRS_CLIPPING
] = TRUE
,
2663 [D3DRS_LIGHTING
] = TRUE
,
2664 [D3DRS_AMBIENT
] = 0,
2665 [D3DRS_FOGVERTEXMODE
] = D3DFOG_NONE
,
2666 [D3DRS_COLORVERTEX
] = TRUE
,
2667 [D3DRS_LOCALVIEWER
] = TRUE
,
2668 [D3DRS_NORMALIZENORMALS
] = FALSE
,
2669 [D3DRS_DIFFUSEMATERIALSOURCE
] = D3DMCS_COLOR1
,
2670 [D3DRS_SPECULARMATERIALSOURCE
] = D3DMCS_COLOR2
,
2671 [D3DRS_AMBIENTMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2672 [D3DRS_EMISSIVEMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2673 [D3DRS_VERTEXBLEND
] = D3DVBF_DISABLE
,
2674 [D3DRS_CLIPPLANEENABLE
] = 0,
2675 /* [D3DRS_SOFTWAREVERTEXPROCESSING] = FALSE, */
2676 [D3DRS_POINTSIZE
] = 0x3F800000,
2677 [D3DRS_POINTSIZE_MIN
] = 0x3F800000,
2678 [D3DRS_POINTSPRITEENABLE
] = FALSE
,
2679 [D3DRS_POINTSCALEENABLE
] = FALSE
,
2680 [D3DRS_POINTSCALE_A
] = 0x3F800000,
2681 [D3DRS_POINTSCALE_B
] = 0x00000000,
2682 [D3DRS_POINTSCALE_C
] = 0x00000000,
2683 [D3DRS_MULTISAMPLEANTIALIAS
] = TRUE
,
2684 [D3DRS_MULTISAMPLEMASK
] = 0xFFFFFFFF,
2685 [D3DRS_PATCHEDGESTYLE
] = D3DPATCHEDGE_DISCRETE
,
2686 /* [D3DRS_PATCHSEGMENTS] = 0x3F800000, */
2687 [D3DRS_DEBUGMONITORTOKEN
] = 0xDEADCAFE,
2688 [D3DRS_POINTSIZE_MAX
] = 0x3F800000, /* depends on cap */
2689 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = FALSE
,
2690 [D3DRS_COLORWRITEENABLE
] = 0x0000000f,
2691 [D3DRS_TWEENFACTOR
] = 0x00000000,
2692 [D3DRS_BLENDOP
] = D3DBLENDOP_ADD
,
2693 [D3DRS_POSITIONDEGREE
] = D3DDEGREE_CUBIC
,
2694 [D3DRS_NORMALDEGREE
] = D3DDEGREE_LINEAR
,
2695 [D3DRS_SCISSORTESTENABLE
] = FALSE
,
2696 [D3DRS_SLOPESCALEDEPTHBIAS
] = 0,
2697 [D3DRS_MINTESSELLATIONLEVEL
] = 0x3F800000,
2698 [D3DRS_MAXTESSELLATIONLEVEL
] = 0x3F800000,
2699 [D3DRS_ANTIALIASEDLINEENABLE
] = FALSE
,
2700 [D3DRS_ADAPTIVETESS_X
] = 0x00000000,
2701 [D3DRS_ADAPTIVETESS_Y
] = 0x00000000,
2702 [D3DRS_ADAPTIVETESS_Z
] = 0x3F800000,
2703 [D3DRS_ADAPTIVETESS_W
] = 0x00000000,
2704 [D3DRS_ENABLEADAPTIVETESSELLATION
] = FALSE
,
2705 [D3DRS_TWOSIDEDSTENCILMODE
] = FALSE
,
2706 [D3DRS_CCW_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2707 [D3DRS_CCW_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2708 [D3DRS_CCW_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2709 [D3DRS_CCW_STENCILFUNC
] = D3DCMP_ALWAYS
,
2710 [D3DRS_COLORWRITEENABLE1
] = 0x0000000F,
2711 [D3DRS_COLORWRITEENABLE2
] = 0x0000000F,
2712 [D3DRS_COLORWRITEENABLE3
] = 0x0000000F,
2713 [D3DRS_BLENDFACTOR
] = 0xFFFFFFFF,
2714 [D3DRS_SRGBWRITEENABLE
] = 0,
2715 [D3DRS_DEPTHBIAS
] = 0,
2724 [D3DRS_SEPARATEALPHABLENDENABLE
] = FALSE
,
2725 [D3DRS_SRCBLENDALPHA
] = D3DBLEND_ONE
,
2726 [D3DRS_DESTBLENDALPHA
] = D3DBLEND_ZERO
,
2727 [D3DRS_BLENDOPALPHA
] = D3DBLENDOP_ADD
,
2728 [NINED3DRS_VSPOINTSIZE
] = FALSE
,
2729 [NINED3DRS_RTMASK
] = 0xf,
2730 [NINED3DRS_ALPHACOVERAGE
] = FALSE
,
2731 [NINED3DRS_MULTISAMPLE
] = FALSE
2733 static const DWORD nine_tex_stage_state_defaults
[NINED3DTSS_LAST
+ 1] =
2735 [D3DTSS_COLOROP
] = D3DTOP_DISABLE
,
2736 [D3DTSS_ALPHAOP
] = D3DTOP_DISABLE
,
2737 [D3DTSS_COLORARG1
] = D3DTA_TEXTURE
,
2738 [D3DTSS_COLORARG2
] = D3DTA_CURRENT
,
2739 [D3DTSS_COLORARG0
] = D3DTA_CURRENT
,
2740 [D3DTSS_ALPHAARG1
] = D3DTA_TEXTURE
,
2741 [D3DTSS_ALPHAARG2
] = D3DTA_CURRENT
,
2742 [D3DTSS_ALPHAARG0
] = D3DTA_CURRENT
,
2743 [D3DTSS_RESULTARG
] = D3DTA_CURRENT
,
2744 [D3DTSS_BUMPENVMAT00
] = 0,
2745 [D3DTSS_BUMPENVMAT01
] = 0,
2746 [D3DTSS_BUMPENVMAT10
] = 0,
2747 [D3DTSS_BUMPENVMAT11
] = 0,
2748 [D3DTSS_BUMPENVLSCALE
] = 0,
2749 [D3DTSS_BUMPENVLOFFSET
] = 0,
2750 [D3DTSS_TEXCOORDINDEX
] = 0,
2751 [D3DTSS_TEXTURETRANSFORMFLAGS
] = D3DTTFF_DISABLE
,
2753 static const DWORD nine_samp_state_defaults
[NINED3DSAMP_LAST
+ 1] =
2755 [D3DSAMP_ADDRESSU
] = D3DTADDRESS_WRAP
,
2756 [D3DSAMP_ADDRESSV
] = D3DTADDRESS_WRAP
,
2757 [D3DSAMP_ADDRESSW
] = D3DTADDRESS_WRAP
,
2758 [D3DSAMP_BORDERCOLOR
] = 0,
2759 [D3DSAMP_MAGFILTER
] = D3DTEXF_POINT
,
2760 [D3DSAMP_MINFILTER
] = D3DTEXF_POINT
,
2761 [D3DSAMP_MIPFILTER
] = D3DTEXF_NONE
,
2762 [D3DSAMP_MIPMAPLODBIAS
] = 0,
2763 [D3DSAMP_MAXMIPLEVEL
] = 0,
2764 [D3DSAMP_MAXANISOTROPY
] = 1,
2765 [D3DSAMP_SRGBTEXTURE
] = 0,
2766 [D3DSAMP_ELEMENTINDEX
] = 0,
2767 [D3DSAMP_DMAPOFFSET
] = 0,
2768 [NINED3DSAMP_MINLOD
] = 0,
2769 [NINED3DSAMP_SHADOW
] = 0,
2770 [NINED3DSAMP_CUBETEX
] = 0
2773 /* Note: The following 4 functions assume there is no
2774 * pending commands */
2776 void nine_state_restore_non_cso(struct NineDevice9
*device
)
2778 struct nine_context
*context
= &device
->context
;
2780 context
->changed
.group
= NINE_STATE_ALL
;
2781 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
2782 context
->changed
.ucp
= TRUE
;
2783 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
| NINE_STATE_COMMIT_CONST_PS
;
2787 nine_state_set_defaults(struct NineDevice9
*device
, const D3DCAPS9
*caps
,
2790 struct nine_state
*state
= &device
->state
;
2791 struct nine_context
*context
= &device
->context
;
2794 /* Initialize defaults.
2796 memcpy(context
->rs
, nine_render_state_defaults
, sizeof(context
->rs
));
2798 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
) {
2799 memcpy(&state
->ff
.tex_stage
[s
], nine_tex_stage_state_defaults
,
2800 sizeof(state
->ff
.tex_stage
[s
]));
2801 state
->ff
.tex_stage
[s
][D3DTSS_TEXCOORDINDEX
] = s
;
2803 state
->ff
.tex_stage
[0][D3DTSS_COLOROP
] = D3DTOP_MODULATE
;
2804 state
->ff
.tex_stage
[0][D3DTSS_ALPHAOP
] = D3DTOP_SELECTARG1
;
2806 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
)
2807 memcpy(&context
->ff
.tex_stage
[s
], state
->ff
.tex_stage
[s
],
2808 sizeof(state
->ff
.tex_stage
[s
]));
2810 memset(&context
->bumpmap_vars
, 0, sizeof(context
->bumpmap_vars
));
2812 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2813 memcpy(&context
->samp
[s
], nine_samp_state_defaults
,
2814 sizeof(context
->samp
[s
]));
2815 memcpy(&state
->samp_advertised
[s
], nine_samp_state_defaults
,
2816 sizeof(state
->samp_advertised
[s
]));
2819 memset(state
->vs_const_f
, 0, VS_CONST_F_SIZE(device
));
2820 memset(context
->vs_const_f
, 0, device
->vs_const_size
);
2821 if (context
->vs_const_f_swvp
)
2822 memset(context
->vs_const_f_swvp
, 0, NINE_MAX_CONST_F_SWVP
* sizeof(float[4]));
2823 memset(state
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
2824 memset(context
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
2825 memset(state
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
2826 memset(context
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
2827 memset(state
->ps_const_f
, 0, device
->ps_const_size
);
2828 memset(context
->ps_const_f
, 0, device
->ps_const_size
);
2829 memset(state
->ps_const_i
, 0, sizeof(state
->ps_const_i
));
2830 memset(context
->ps_const_i
, 0, sizeof(context
->ps_const_i
));
2831 memset(state
->ps_const_b
, 0, sizeof(state
->ps_const_b
));
2832 memset(context
->ps_const_b
, 0, sizeof(context
->ps_const_b
));
2834 /* Cap dependent initial state:
2836 context
->rs
[D3DRS_POINTSIZE_MAX
] = fui(caps
->MaxPointSize
);
2838 memcpy(state
->rs_advertised
, context
->rs
, sizeof(context
->rs
));
2840 /* Set changed flags to initialize driver.
2842 context
->changed
.group
= NINE_STATE_ALL
;
2843 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
2844 context
->changed
.ucp
= TRUE
;
2846 context
->ff
.changed
.transform
[0] = ~0;
2847 context
->ff
.changed
.transform
[D3DTS_WORLD
/ 32] |= 1 << (D3DTS_WORLD
% 32);
2850 state
->viewport
.MinZ
= context
->viewport
.MinZ
= 0.0f
;
2851 state
->viewport
.MaxZ
= context
->viewport
.MaxZ
= 1.0f
;
2854 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
)
2855 context
->changed
.sampler
[s
] = ~0;
2858 context
->dummy_vbo_bound_at
= -1;
2859 context
->vbo_bound_done
= FALSE
;
2864 nine_device_state_clear(struct NineDevice9
*device
)
2866 struct nine_state
*state
= &device
->state
;
2869 for (i
= 0; i
< ARRAY_SIZE(state
->rt
); ++i
)
2870 nine_bind(&state
->rt
[i
], NULL
);
2871 nine_bind(&state
->ds
, NULL
);
2872 nine_bind(&state
->vs
, NULL
);
2873 nine_bind(&state
->ps
, NULL
);
2874 nine_bind(&state
->vdecl
, NULL
);
2875 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
2876 NineBindBufferToDevice(device
,
2877 (struct NineBuffer9
**)&state
->stream
[i
],
2879 NineBindBufferToDevice(device
,
2880 (struct NineBuffer9
**)&state
->idxbuf
,
2883 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
)
2884 NineBindTextureToDevice(device
, &state
->texture
[i
], NULL
);
2888 nine_context_clear(struct NineDevice9
*device
)
2890 struct nine_context
*context
= &device
->context
;
2891 struct pipe_context
*pipe
= context
->pipe
;
2892 struct cso_context
*cso
= context
->cso
;
2895 /* Early device ctor failure. Nothing to do */
2899 pipe
->bind_vs_state(pipe
, NULL
);
2900 pipe
->bind_fs_state(pipe
, NULL
);
2902 /* Don't unbind constant buffers, they're device-private and
2903 * do not change on Reset.
2906 cso_set_samplers(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
2907 cso_set_samplers(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
2909 cso_set_sampler_views(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
2910 cso_set_sampler_views(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
2912 pipe
->set_vertex_buffers(pipe
, 0, device
->caps
.MaxStreams
, NULL
);
2914 for (i
= 0; i
< ARRAY_SIZE(context
->rt
); ++i
)
2915 nine_bind(&context
->rt
[i
], NULL
);
2916 nine_bind(&context
->ds
, NULL
);
2917 nine_bind(&context
->vs
, NULL
);
2918 nine_bind(&context
->ps
, NULL
);
2919 nine_bind(&context
->vdecl
, NULL
);
2920 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
2921 pipe_vertex_buffer_unreference(&context
->vtxbuf
[i
]);
2922 pipe_resource_reference(&context
->idxbuf
, NULL
);
2923 pipe_resource_reference(&context
->pipe_data
.cb_vs
.buffer
, NULL
);
2924 pipe_resource_reference(&context
->pipe_data
.cb_ps
.buffer
, NULL
);
2926 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
) {
2927 context
->texture
[i
].enabled
= FALSE
;
2928 pipe_resource_reference(&context
->texture
[i
].resource
,
2930 pipe_sampler_view_reference(&context
->texture
[i
].view
[0],
2932 pipe_sampler_view_reference(&context
->texture
[i
].view
[1],
2938 nine_state_init_sw(struct NineDevice9
*device
)
2940 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
2941 struct pipe_rasterizer_state rast
;
2942 struct pipe_blend_state blend
;
2943 struct pipe_depth_stencil_alpha_state dsa
;
2944 struct pipe_framebuffer_state fb
;
2946 /* Only used with Streamout */
2947 memset(&rast
, 0, sizeof(rast
));
2948 rast
.rasterizer_discard
= true;
2949 rast
.point_quad_rasterization
= 1; /* to make llvmpipe happy */
2950 cso_set_rasterizer(device
->cso_sw
, &rast
);
2952 /* dummy settings */
2953 memset(&blend
, 0, sizeof(blend
));
2954 memset(&dsa
, 0, sizeof(dsa
));
2955 memset(&fb
, 0, sizeof(fb
));
2956 cso_set_blend(device
->cso_sw
, &blend
);
2957 cso_set_depth_stencil_alpha(device
->cso_sw
, &dsa
);
2958 cso_set_framebuffer(device
->cso_sw
, &fb
);
2959 cso_set_viewport_dims(device
->cso_sw
, 1.0, 1.0, false);
2960 cso_set_fragment_shader_handle(device
->cso_sw
, util_make_empty_fragment_shader(pipe_sw
));
2963 /* There is duplication with update_vertex_elements.
2964 * TODO: Share the code */
2967 update_vertex_elements_sw(struct NineDevice9
*device
)
2969 struct nine_state
*state
= &device
->state
;
2970 const struct NineVertexDeclaration9
*vdecl
= device
->state
.vdecl
;
2971 const struct NineVertexShader9
*vs
;
2974 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
2975 char used_streams
[device
->caps
.MaxStreams
];
2976 int dummy_vbo_stream
= -1;
2977 BOOL need_dummy_vbo
= FALSE
;
2978 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
2979 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
2981 memset(vdecl_index_map
, -1, 16);
2982 memset(used_streams
, 0, device
->caps
.MaxStreams
);
2983 vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
2986 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
2987 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
2988 n
, vs
->input_map
[n
].ndecl
, vdecl
);
2990 for (i
= 0; i
< vdecl
->nelems
; i
++) {
2991 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
2992 vdecl_index_map
[n
] = i
;
2993 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
2997 if (vdecl_index_map
[n
] < 0)
2998 need_dummy_vbo
= TRUE
;
3001 /* No vertex declaration. Likely will never happen in practice,
3002 * but we need not crash on this */
3003 need_dummy_vbo
= TRUE
;
3006 if (need_dummy_vbo
) {
3007 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
3008 if (!used_streams
[i
]) {
3009 dummy_vbo_stream
= i
;
3014 /* TODO handle dummy_vbo */
3015 assert (!need_dummy_vbo
);
3017 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
3018 index
= vdecl_index_map
[n
];
3020 ve
[n
] = vdecl
->elems
[index
];
3021 b
= ve
[n
].vertex_buffer_index
;
3022 /* XXX wine just uses 1 here: */
3023 if (state
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
3024 ve
[n
].instance_divisor
= state
->stream_freq
[b
] & 0x7FFFFF;
3026 /* if the vertex declaration is incomplete compared to what the
3027 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
3028 * This is not precised by the spec, but is the behaviour
3030 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
3031 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
3032 ve
[n
].src_offset
= 0;
3033 ve
[n
].instance_divisor
= 0;
3037 cso_set_vertex_elements(device
->cso_sw
, vs
->num_inputs
, ve
);
3041 update_vertex_buffers_sw(struct NineDevice9
*device
, int start_vertice
, int num_vertices
)
3043 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3044 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3045 struct nine_state
*state
= &device
->state
;
3046 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3047 struct pipe_vertex_buffer vtxbuf
;
3048 uint32_t mask
= 0xf;
3051 DBG("mask=%x\n", mask
);
3053 /* TODO: handle dummy_vbo_bound_at */
3055 for (i
= 0; mask
; mask
>>= 1, ++i
) {
3057 if (state
->stream
[i
]) {
3059 struct pipe_resource
*buf
;
3060 struct pipe_box box
;
3063 vtxbuf
= state
->vtxbuf
[i
];
3064 buf
= NineVertexBuffer9_GetResource(state
->stream
[i
], &offset
);
3066 DBG("Locking %p (offset %d, length %d)\n", buf
,
3067 vtxbuf
.buffer_offset
, num_vertices
* vtxbuf
.stride
);
3069 u_box_1d(vtxbuf
.buffer_offset
+ offset
+ start_vertice
* vtxbuf
.stride
,
3070 num_vertices
* vtxbuf
.stride
, &box
);
3072 userbuf
= pipe
->transfer_map(pipe
, buf
, 0, PIPE_TRANSFER_READ
, &box
,
3073 &(sw_internal
->transfers_so
[i
]));
3074 vtxbuf
.is_user_buffer
= true;
3075 vtxbuf
.buffer
.user
= userbuf
;
3077 if (!device
->driver_caps
.user_sw_vbufs
) {
3078 vtxbuf
.buffer
.resource
= NULL
;
3079 vtxbuf
.is_user_buffer
= false;
3080 u_upload_data(device
->pipe_sw
->stream_uploader
,
3085 &(vtxbuf
.buffer_offset
),
3086 &(vtxbuf
.buffer
.resource
));
3087 u_upload_unmap(device
->pipe_sw
->stream_uploader
);
3089 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, &vtxbuf
);
3090 pipe_vertex_buffer_unreference(&vtxbuf
);
3092 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3095 nine_context_get_pipe_release(device
);
3099 update_vs_constants_sw(struct NineDevice9
*device
)
3101 struct nine_state
*state
= &device
->state
;
3102 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3107 struct pipe_constant_buffer cb
;
3111 cb
.buffer_offset
= 0;
3112 cb
.buffer_size
= 4096 * sizeof(float[4]);
3113 cb
.user_buffer
= state
->vs_const_f
;
3115 if (state
->vs
->lconstf
.ranges
) {
3116 const struct nine_lconstf
*lconstf
= &device
->state
.vs
->lconstf
;
3117 const struct nine_range
*r
= lconstf
->ranges
;
3119 float *dst
= device
->state
.vs_lconstf_temp
;
3120 float *src
= (float *)cb
.user_buffer
;
3121 memcpy(dst
, src
, 8192 * sizeof(float[4]));
3123 unsigned p
= r
->bgn
;
3124 unsigned c
= r
->end
- r
->bgn
;
3125 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
3129 cb
.user_buffer
= dst
;
3132 buf
= cb
.user_buffer
;
3134 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 0, &cb
);
3136 pipe_resource_reference(&cb
.buffer
, NULL
);
3138 cb
.user_buffer
= (char *)buf
+ 4096 * sizeof(float[4]);
3140 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 1, &cb
);
3142 pipe_resource_reference(&cb
.buffer
, NULL
);
3146 struct pipe_constant_buffer cb
;
3149 cb
.buffer_offset
= 0;
3150 cb
.buffer_size
= 2048 * sizeof(float[4]);
3151 cb
.user_buffer
= state
->vs_const_i
;
3153 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 2, &cb
);
3155 pipe_resource_reference(&cb
.buffer
, NULL
);
3159 struct pipe_constant_buffer cb
;
3162 cb
.buffer_offset
= 0;
3163 cb
.buffer_size
= 512 * sizeof(float[4]);
3164 cb
.user_buffer
= state
->vs_const_b
;
3166 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 3, &cb
);
3168 pipe_resource_reference(&cb
.buffer
, NULL
);
3172 struct pipe_constant_buffer cb
;
3173 const D3DVIEWPORT9
*vport
= &device
->state
.viewport
;
3174 float viewport_data
[8] = {(float)vport
->Width
* 0.5f
,
3175 (float)vport
->Height
* -0.5f
, vport
->MaxZ
- vport
->MinZ
, 0.f
,
3176 (float)vport
->Width
* 0.5f
+ (float)vport
->X
,
3177 (float)vport
->Height
* 0.5f
+ (float)vport
->Y
,
3181 cb
.buffer_offset
= 0;
3182 cb
.buffer_size
= 2 * sizeof(float[4]);
3183 cb
.user_buffer
= viewport_data
;
3186 u_upload_data(device
->pipe_sw
->const_uploader
,
3191 &(cb
.buffer_offset
),
3193 u_upload_unmap(device
->pipe_sw
->const_uploader
);
3194 cb
.user_buffer
= NULL
;
3197 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 4, &cb
);
3199 pipe_resource_reference(&cb
.buffer
, NULL
);
3205 nine_state_prepare_draw_sw(struct NineDevice9
*device
, struct NineVertexDeclaration9
*vdecl_out
,
3206 int start_vertice
, int num_vertices
, struct pipe_stream_output_info
*so
)
3208 struct nine_state
*state
= &device
->state
;
3209 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
3210 struct NineVertexShader9
*vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
3212 assert(programmable_vs
);
3214 DBG("Preparing draw\n");
3215 cso_set_vertex_shader_handle(device
->cso_sw
,
3216 NineVertexShader9_GetVariantProcessVertices(vs
, vdecl_out
, so
));
3217 update_vertex_elements_sw(device
);
3218 update_vertex_buffers_sw(device
, start_vertice
, num_vertices
);
3219 update_vs_constants_sw(device
);
3220 DBG("Preparation succeeded\n");
3224 nine_state_after_draw_sw(struct NineDevice9
*device
)
3226 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3227 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3228 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3231 for (i
= 0; i
< 4; i
++) {
3232 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3233 if (sw_internal
->transfers_so
[i
])
3234 pipe
->transfer_unmap(pipe
, sw_internal
->transfers_so
[i
]);
3235 sw_internal
->transfers_so
[i
] = NULL
;
3237 nine_context_get_pipe_release(device
);
/* Destroy the software-pipeline state. Intentionally empty: all objects
 * bound on the software context are owned and released by the cso context.
 * NOTE(review): return type reconstructed as void — confirm upstream. */
void
nine_state_destroy_sw(struct NineDevice9 *device)
{
    /* Everything destroyed with cso */
}
3248 static const DWORD nine_render_states_pixel[] =
3250 D3DRS_ALPHABLENDENABLE,
3253 D3DRS_ALPHATESTENABLE,
3254 D3DRS_ANTIALIASEDLINEENABLE,
3258 D3DRS_CCW_STENCILFAIL,
3259 D3DRS_CCW_STENCILPASS,
3260 D3DRS_CCW_STENCILZFAIL,
3261 D3DRS_COLORWRITEENABLE,
3262 D3DRS_COLORWRITEENABLE1,
3263 D3DRS_COLORWRITEENABLE2,
3264 D3DRS_COLORWRITEENABLE3,
3267 D3DRS_DESTBLENDALPHA,
3274 D3DRS_SCISSORTESTENABLE,
3275 D3DRS_SEPARATEALPHABLENDENABLE,
3277 D3DRS_SLOPESCALEDEPTHBIAS,
3279 D3DRS_SRCBLENDALPHA,
3280 D3DRS_SRGBWRITEENABLE,
3281 D3DRS_STENCILENABLE,
3287 D3DRS_STENCILWRITEMASK,
3289 D3DRS_TEXTUREFACTOR,
3290 D3DRS_TWOSIDEDSTENCILMODE,
3312 const uint32_t nine_render_states_pixel
[(NINED3DRS_LAST
+ 31) / 32] =
3314 0x0f99c380, 0x1ff00070, 0x00000000, 0x00000000,
3315 0x000000ff, 0xde01c900, 0x0003ffcf
3319 static const DWORD nine_render_states_vertex[] =
3321 D3DRS_ADAPTIVETESS_W,
3322 D3DRS_ADAPTIVETESS_X,
3323 D3DRS_ADAPTIVETESS_Y,
3324 D3DRS_ADAPTIVETESS_Z,
3326 D3DRS_AMBIENTMATERIALSOURCE,
3328 D3DRS_CLIPPLANEENABLE,
3331 D3DRS_DIFFUSEMATERIALSOURCE,
3332 D3DRS_EMISSIVEMATERIALSOURCE,
3333 D3DRS_ENABLEADAPTIVETESSELLATION,
3340 D3DRS_FOGVERTEXMODE,
3341 D3DRS_INDEXEDVERTEXBLENDENABLE,
3344 D3DRS_MAXTESSELLATIONLEVEL,
3345 D3DRS_MINTESSELLATIONLEVEL,
3346 D3DRS_MULTISAMPLEANTIALIAS,
3347 D3DRS_MULTISAMPLEMASK,
3349 D3DRS_NORMALIZENORMALS,
3350 D3DRS_PATCHEDGESTYLE,
3354 D3DRS_POINTSCALEENABLE,
3356 D3DRS_POINTSIZE_MAX,
3357 D3DRS_POINTSIZE_MIN,
3358 D3DRS_POINTSPRITEENABLE,
3359 D3DRS_POSITIONDEGREE,
3360 D3DRS_RANGEFOGENABLE,
3362 D3DRS_SPECULARENABLE,
3363 D3DRS_SPECULARMATERIALSOURCE,
3368 const uint32_t nine_render_states_vertex
[(NINED3DRS_LAST
+ 31) / 32] =
3370 0x30400200, 0x0001007c, 0x00000000, 0x00000000,
3371 0xfd9efb00, 0x01fc34cf, 0x00000000
3374 /* TODO: put in the right values */
3375 const uint32_t nine_render_state_group
[NINED3DRS_LAST
+ 1] =
3377 [D3DRS_ZENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3378 [D3DRS_FILLMODE
] = NINE_STATE_RASTERIZER
,
3379 [D3DRS_SHADEMODE
] = NINE_STATE_RASTERIZER
,
3380 [D3DRS_ZWRITEENABLE
] = NINE_STATE_DSA
,
3381 [D3DRS_ALPHATESTENABLE
] = NINE_STATE_DSA
,
3382 [D3DRS_LASTPIXEL
] = NINE_STATE_RASTERIZER
,
3383 [D3DRS_SRCBLEND
] = NINE_STATE_BLEND
,
3384 [D3DRS_DESTBLEND
] = NINE_STATE_BLEND
,
3385 [D3DRS_CULLMODE
] = NINE_STATE_RASTERIZER
,
3386 [D3DRS_ZFUNC
] = NINE_STATE_DSA
,
3387 [D3DRS_ALPHAREF
] = NINE_STATE_DSA
,
3388 [D3DRS_ALPHAFUNC
] = NINE_STATE_DSA
,
3389 [D3DRS_DITHERENABLE
] = NINE_STATE_BLEND
,
3390 [D3DRS_ALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3391 [D3DRS_FOGENABLE
] = NINE_STATE_FF_SHADER
| NINE_STATE_VS_PARAMS_MISC
| NINE_STATE_PS_PARAMS_MISC
| NINE_STATE_PS_CONST
,
3392 [D3DRS_SPECULARENABLE
] = NINE_STATE_FF_LIGHTING
,
3393 [D3DRS_FOGCOLOR
] = NINE_STATE_FF_PS_CONSTS
| NINE_STATE_PS_CONST
,
3394 [D3DRS_FOGTABLEMODE
] = NINE_STATE_FF_SHADER
| NINE_STATE_PS_PARAMS_MISC
| NINE_STATE_PS_CONST
,
3395 [D3DRS_FOGSTART
] = NINE_STATE_FF_VS_OTHER
| NINE_STATE_FF_PS_CONSTS
| NINE_STATE_PS_CONST
,
3396 [D3DRS_FOGEND
] = NINE_STATE_FF_VS_OTHER
| NINE_STATE_FF_PS_CONSTS
| NINE_STATE_PS_CONST
,
3397 [D3DRS_FOGDENSITY
] = NINE_STATE_FF_VS_OTHER
| NINE_STATE_FF_PS_CONSTS
| NINE_STATE_PS_CONST
,
3398 [D3DRS_RANGEFOGENABLE
] = NINE_STATE_FF_SHADER
,
3399 [D3DRS_STENCILENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3400 [D3DRS_STENCILFAIL
] = NINE_STATE_DSA
,
3401 [D3DRS_STENCILZFAIL
] = NINE_STATE_DSA
,
3402 [D3DRS_STENCILPASS
] = NINE_STATE_DSA
,
3403 [D3DRS_STENCILFUNC
] = NINE_STATE_DSA
,
3404 [D3DRS_STENCILREF
] = NINE_STATE_STENCIL_REF
,
3405 [D3DRS_STENCILMASK
] = NINE_STATE_DSA
,
3406 [D3DRS_STENCILWRITEMASK
] = NINE_STATE_DSA
,
3407 [D3DRS_TEXTUREFACTOR
] = NINE_STATE_FF_PS_CONSTS
,
3408 [D3DRS_WRAP0
] = NINE_STATE_UNHANDLED
, /* cylindrical wrap is crazy */
3409 [D3DRS_WRAP1
] = NINE_STATE_UNHANDLED
,
3410 [D3DRS_WRAP2
] = NINE_STATE_UNHANDLED
,
3411 [D3DRS_WRAP3
] = NINE_STATE_UNHANDLED
,
3412 [D3DRS_WRAP4
] = NINE_STATE_UNHANDLED
,
3413 [D3DRS_WRAP5
] = NINE_STATE_UNHANDLED
,
3414 [D3DRS_WRAP6
] = NINE_STATE_UNHANDLED
,
3415 [D3DRS_WRAP7
] = NINE_STATE_UNHANDLED
,
3416 [D3DRS_CLIPPING
] = 0, /* software vertex processing only */
3417 [D3DRS_LIGHTING
] = NINE_STATE_FF_LIGHTING
,
3418 [D3DRS_AMBIENT
] = NINE_STATE_FF_LIGHTING
| NINE_STATE_FF_MATERIAL
,
3419 [D3DRS_FOGVERTEXMODE
] = NINE_STATE_FF_SHADER
,
3420 [D3DRS_COLORVERTEX
] = NINE_STATE_FF_LIGHTING
,
3421 [D3DRS_LOCALVIEWER
] = NINE_STATE_FF_LIGHTING
,
3422 [D3DRS_NORMALIZENORMALS
] = NINE_STATE_FF_SHADER
,
3423 [D3DRS_DIFFUSEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3424 [D3DRS_SPECULARMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3425 [D3DRS_AMBIENTMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3426 [D3DRS_EMISSIVEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3427 [D3DRS_VERTEXBLEND
] = NINE_STATE_FF_SHADER
,
3428 [D3DRS_CLIPPLANEENABLE
] = NINE_STATE_RASTERIZER
,
3429 [D3DRS_POINTSIZE
] = NINE_STATE_RASTERIZER
| NINE_STATE_FF_VS_OTHER
,
3430 [D3DRS_POINTSIZE_MIN
] = NINE_STATE_RASTERIZER
| NINE_STATE_FF_VS_OTHER
| NINE_STATE_VS_PARAMS_MISC
,
3431 [D3DRS_POINTSPRITEENABLE
] = NINE_STATE_RASTERIZER
,
3432 [D3DRS_POINTSCALEENABLE
] = NINE_STATE_FF_SHADER
,
3433 [D3DRS_POINTSCALE_A
] = NINE_STATE_FF_VS_OTHER
,
3434 [D3DRS_POINTSCALE_B
] = NINE_STATE_FF_VS_OTHER
,
3435 [D3DRS_POINTSCALE_C
] = NINE_STATE_FF_VS_OTHER
,
3436 [D3DRS_MULTISAMPLEANTIALIAS
] = NINE_STATE_MULTISAMPLE
,
3437 [D3DRS_MULTISAMPLEMASK
] = NINE_STATE_SAMPLE_MASK
,
3438 [D3DRS_PATCHEDGESTYLE
] = NINE_STATE_UNHANDLED
,
3439 [D3DRS_DEBUGMONITORTOKEN
] = NINE_STATE_UNHANDLED
,
3440 [D3DRS_POINTSIZE_MAX
] = NINE_STATE_RASTERIZER
| NINE_STATE_FF_VS_OTHER
| NINE_STATE_VS_PARAMS_MISC
,
3441 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = NINE_STATE_FF_SHADER
,
3442 [D3DRS_COLORWRITEENABLE
] = NINE_STATE_BLEND
,
3443 [D3DRS_TWEENFACTOR
] = NINE_STATE_FF_VS_OTHER
,
3444 [D3DRS_BLENDOP
] = NINE_STATE_BLEND
,
3445 [D3DRS_POSITIONDEGREE
] = NINE_STATE_UNHANDLED
,
3446 [D3DRS_NORMALDEGREE
] = NINE_STATE_UNHANDLED
,
3447 [D3DRS_SCISSORTESTENABLE
] = NINE_STATE_RASTERIZER
,
3448 [D3DRS_SLOPESCALEDEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3449 [D3DRS_ANTIALIASEDLINEENABLE
] = NINE_STATE_RASTERIZER
,
3450 [D3DRS_MINTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3451 [D3DRS_MAXTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3452 [D3DRS_ADAPTIVETESS_X
] = NINE_STATE_UNHANDLED
,
3453 [D3DRS_ADAPTIVETESS_Y
] = NINE_STATE_UNHANDLED
,
3454 [D3DRS_ADAPTIVETESS_Z
] = NINE_STATE_UNHANDLED
,
3455 [D3DRS_ADAPTIVETESS_W
] = NINE_STATE_UNHANDLED
,
3456 [D3DRS_ENABLEADAPTIVETESSELLATION
] = NINE_STATE_UNHANDLED
,
3457 [D3DRS_TWOSIDEDSTENCILMODE
] = NINE_STATE_DSA
,
3458 [D3DRS_CCW_STENCILFAIL
] = NINE_STATE_DSA
,
3459 [D3DRS_CCW_STENCILZFAIL
] = NINE_STATE_DSA
,
3460 [D3DRS_CCW_STENCILPASS
] = NINE_STATE_DSA
,
3461 [D3DRS_CCW_STENCILFUNC
] = NINE_STATE_DSA
,
3462 [D3DRS_COLORWRITEENABLE1
] = NINE_STATE_BLEND
,
3463 [D3DRS_COLORWRITEENABLE2
] = NINE_STATE_BLEND
,
3464 [D3DRS_COLORWRITEENABLE3
] = NINE_STATE_BLEND
,
3465 [D3DRS_BLENDFACTOR
] = NINE_STATE_BLEND_COLOR
,
3466 [D3DRS_SRGBWRITEENABLE
] = NINE_STATE_FB
,
3467 [D3DRS_DEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3468 [D3DRS_WRAP8
] = NINE_STATE_UNHANDLED
, /* cylwrap has to be done via GP */
3469 [D3DRS_WRAP9
] = NINE_STATE_UNHANDLED
,
3470 [D3DRS_WRAP10
] = NINE_STATE_UNHANDLED
,
3471 [D3DRS_WRAP11
] = NINE_STATE_UNHANDLED
,
3472 [D3DRS_WRAP12
] = NINE_STATE_UNHANDLED
,
3473 [D3DRS_WRAP13
] = NINE_STATE_UNHANDLED
,
3474 [D3DRS_WRAP14
] = NINE_STATE_UNHANDLED
,
3475 [D3DRS_WRAP15
] = NINE_STATE_UNHANDLED
,
3476 [D3DRS_SEPARATEALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3477 [D3DRS_SRCBLENDALPHA
] = NINE_STATE_BLEND
,
3478 [D3DRS_DESTBLENDALPHA
] = NINE_STATE_BLEND
,
3479 [D3DRS_BLENDOPALPHA
] = NINE_STATE_BLEND
3484 static D3DMATRIX nine_state_identity
= { .m
[0] = { 1, 0, 0, 0 },
3485 .m
[1] = { 0, 1, 0, 0 },
3486 .m
[2] = { 0, 0, 1, 0 },
3487 .m
[3] = { 0, 0, 0, 1 } };
3490 nine_state_resize_transform(struct nine_ff_state
*ff_state
, unsigned N
)
3492 unsigned n
= ff_state
->num_transforms
;
3497 ff_state
->transform
= REALLOC(ff_state
->transform
,
3498 n
* sizeof(D3DMATRIX
),
3499 N
* sizeof(D3DMATRIX
));
3501 ff_state
->transform
[n
] = nine_state_identity
;
3502 ff_state
->num_transforms
= N
;
3506 nine_state_access_transform(struct nine_ff_state
*ff_state
, D3DTRANSFORMSTATETYPE t
,
3512 case D3DTS_VIEW
: index
= 0; break;
3513 case D3DTS_PROJECTION
: index
= 1; break;
3514 case D3DTS_TEXTURE0
: index
= 2; break;
3515 case D3DTS_TEXTURE1
: index
= 3; break;
3516 case D3DTS_TEXTURE2
: index
= 4; break;
3517 case D3DTS_TEXTURE3
: index
= 5; break;
3518 case D3DTS_TEXTURE4
: index
= 6; break;
3519 case D3DTS_TEXTURE5
: index
= 7; break;
3520 case D3DTS_TEXTURE6
: index
= 8; break;
3521 case D3DTS_TEXTURE7
: index
= 9; break;
3523 if (!(t
>= D3DTS_WORLDMATRIX(0) && t
<= D3DTS_WORLDMATRIX(255)))
3525 index
= 10 + (t
- D3DTS_WORLDMATRIX(0));
3529 if (index
>= ff_state
->num_transforms
) {
3531 return &nine_state_identity
;
3532 nine_state_resize_transform(ff_state
, index
+ 1);
3534 return &ff_state
->transform
[index
];
3538 nine_state_set_light(struct nine_ff_state
*ff_state
, DWORD Index
,
3539 const D3DLIGHT9
*pLight
)
3541 if (Index
>= ff_state
->num_lights
) {
3542 unsigned n
= ff_state
->num_lights
;
3543 unsigned N
= Index
+ 1;
3545 ff_state
->light
= REALLOC(ff_state
->light
, n
* sizeof(D3DLIGHT9
),
3546 N
* sizeof(D3DLIGHT9
));
3547 if (!ff_state
->light
)
3548 return E_OUTOFMEMORY
;
3549 ff_state
->num_lights
= N
;
3551 for (; n
< Index
; ++n
) {
3552 memset(&ff_state
->light
[n
], 0, sizeof(D3DLIGHT9
));
3553 ff_state
->light
[n
].Type
= (D3DLIGHTTYPE
)NINED3DLIGHT_INVALID
;
3556 ff_state
->light
[Index
] = *pLight
;
3558 if (pLight
->Type
== D3DLIGHT_SPOT
&& pLight
->Theta
>= pLight
->Phi
) {
3559 DBG("Warning: clamping D3DLIGHT9.Theta\n");
3560 ff_state
->light
[Index
].Theta
= ff_state
->light
[Index
].Phi
;
3566 nine_state_light_enable(struct nine_ff_state
*ff_state
,
3567 DWORD Index
, BOOL Enable
)
3571 user_assert(Index
< ff_state
->num_lights
, D3DERR_INVALIDCALL
);
3573 for (i
= 0; i
< ff_state
->num_lights_active
; ++i
) {
3574 if (ff_state
->active_light
[i
] == Index
)
3579 if (i
< ff_state
->num_lights_active
)
3581 /* XXX wine thinks this should still succeed:
3583 user_assert(i
< NINE_MAX_LIGHTS_ACTIVE
, D3DERR_INVALIDCALL
);
3585 ff_state
->active_light
[i
] = Index
;
3586 ff_state
->num_lights_active
++;
3588 if (i
== ff_state
->num_lights_active
)
3590 --ff_state
->num_lights_active
;
3591 for (; i
< ff_state
->num_lights_active
; ++i
)
3592 ff_state
->active_light
[i
] = ff_state
->active_light
[i
+ 1];
/* Expands to one switch case mapping the render-state enum D3DRS_<n> to its name string "D3DRS_<n>". */
3598 #define D3DRS_TO_STRING_CASE(n) case D3DRS_##n: return "D3DRS_"#n
3599 const char *nine_d3drs_to_string(DWORD State
)
3602 D3DRS_TO_STRING_CASE(ZENABLE
);
3603 D3DRS_TO_STRING_CASE(FILLMODE
);
3604 D3DRS_TO_STRING_CASE(SHADEMODE
);
3605 D3DRS_TO_STRING_CASE(ZWRITEENABLE
);
3606 D3DRS_TO_STRING_CASE(ALPHATESTENABLE
);
3607 D3DRS_TO_STRING_CASE(LASTPIXEL
);
3608 D3DRS_TO_STRING_CASE(SRCBLEND
);
3609 D3DRS_TO_STRING_CASE(DESTBLEND
);
3610 D3DRS_TO_STRING_CASE(CULLMODE
);
3611 D3DRS_TO_STRING_CASE(ZFUNC
);
3612 D3DRS_TO_STRING_CASE(ALPHAREF
);
3613 D3DRS_TO_STRING_CASE(ALPHAFUNC
);
3614 D3DRS_TO_STRING_CASE(DITHERENABLE
);
3615 D3DRS_TO_STRING_CASE(ALPHABLENDENABLE
);
3616 D3DRS_TO_STRING_CASE(FOGENABLE
);
3617 D3DRS_TO_STRING_CASE(SPECULARENABLE
);
3618 D3DRS_TO_STRING_CASE(FOGCOLOR
);
3619 D3DRS_TO_STRING_CASE(FOGTABLEMODE
);
3620 D3DRS_TO_STRING_CASE(FOGSTART
);
3621 D3DRS_TO_STRING_CASE(FOGEND
);
3622 D3DRS_TO_STRING_CASE(FOGDENSITY
);
3623 D3DRS_TO_STRING_CASE(RANGEFOGENABLE
);
3624 D3DRS_TO_STRING_CASE(STENCILENABLE
);
3625 D3DRS_TO_STRING_CASE(STENCILFAIL
);
3626 D3DRS_TO_STRING_CASE(STENCILZFAIL
);
3627 D3DRS_TO_STRING_CASE(STENCILPASS
);
3628 D3DRS_TO_STRING_CASE(STENCILFUNC
);
3629 D3DRS_TO_STRING_CASE(STENCILREF
);
3630 D3DRS_TO_STRING_CASE(STENCILMASK
);
3631 D3DRS_TO_STRING_CASE(STENCILWRITEMASK
);
3632 D3DRS_TO_STRING_CASE(TEXTUREFACTOR
);
3633 D3DRS_TO_STRING_CASE(WRAP0
);
3634 D3DRS_TO_STRING_CASE(WRAP1
);
3635 D3DRS_TO_STRING_CASE(WRAP2
);
3636 D3DRS_TO_STRING_CASE(WRAP3
);
3637 D3DRS_TO_STRING_CASE(WRAP4
);
3638 D3DRS_TO_STRING_CASE(WRAP5
);
3639 D3DRS_TO_STRING_CASE(WRAP6
);
3640 D3DRS_TO_STRING_CASE(WRAP7
);
3641 D3DRS_TO_STRING_CASE(CLIPPING
);
3642 D3DRS_TO_STRING_CASE(LIGHTING
);
3643 D3DRS_TO_STRING_CASE(AMBIENT
);
3644 D3DRS_TO_STRING_CASE(FOGVERTEXMODE
);
3645 D3DRS_TO_STRING_CASE(COLORVERTEX
);
3646 D3DRS_TO_STRING_CASE(LOCALVIEWER
);
3647 D3DRS_TO_STRING_CASE(NORMALIZENORMALS
);
3648 D3DRS_TO_STRING_CASE(DIFFUSEMATERIALSOURCE
);
3649 D3DRS_TO_STRING_CASE(SPECULARMATERIALSOURCE
);
3650 D3DRS_TO_STRING_CASE(AMBIENTMATERIALSOURCE
);
3651 D3DRS_TO_STRING_CASE(EMISSIVEMATERIALSOURCE
);
3652 D3DRS_TO_STRING_CASE(VERTEXBLEND
);
3653 D3DRS_TO_STRING_CASE(CLIPPLANEENABLE
);
3654 D3DRS_TO_STRING_CASE(POINTSIZE
);
3655 D3DRS_TO_STRING_CASE(POINTSIZE_MIN
);
3656 D3DRS_TO_STRING_CASE(POINTSPRITEENABLE
);
3657 D3DRS_TO_STRING_CASE(POINTSCALEENABLE
);
3658 D3DRS_TO_STRING_CASE(POINTSCALE_A
);
3659 D3DRS_TO_STRING_CASE(POINTSCALE_B
);
3660 D3DRS_TO_STRING_CASE(POINTSCALE_C
);
3661 D3DRS_TO_STRING_CASE(MULTISAMPLEANTIALIAS
);
3662 D3DRS_TO_STRING_CASE(MULTISAMPLEMASK
);
3663 D3DRS_TO_STRING_CASE(PATCHEDGESTYLE
);
3664 D3DRS_TO_STRING_CASE(DEBUGMONITORTOKEN
);
3665 D3DRS_TO_STRING_CASE(POINTSIZE_MAX
);
3666 D3DRS_TO_STRING_CASE(INDEXEDVERTEXBLENDENABLE
);
3667 D3DRS_TO_STRING_CASE(COLORWRITEENABLE
);
3668 D3DRS_TO_STRING_CASE(TWEENFACTOR
);
3669 D3DRS_TO_STRING_CASE(BLENDOP
);
3670 D3DRS_TO_STRING_CASE(POSITIONDEGREE
);
3671 D3DRS_TO_STRING_CASE(NORMALDEGREE
);
3672 D3DRS_TO_STRING_CASE(SCISSORTESTENABLE
);
3673 D3DRS_TO_STRING_CASE(SLOPESCALEDEPTHBIAS
);
3674 D3DRS_TO_STRING_CASE(ANTIALIASEDLINEENABLE
);
3675 D3DRS_TO_STRING_CASE(MINTESSELLATIONLEVEL
);
3676 D3DRS_TO_STRING_CASE(MAXTESSELLATIONLEVEL
);
3677 D3DRS_TO_STRING_CASE(ADAPTIVETESS_X
);
3678 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Y
);
3679 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Z
);
3680 D3DRS_TO_STRING_CASE(ADAPTIVETESS_W
);
3681 D3DRS_TO_STRING_CASE(ENABLEADAPTIVETESSELLATION
);
3682 D3DRS_TO_STRING_CASE(TWOSIDEDSTENCILMODE
);
3683 D3DRS_TO_STRING_CASE(CCW_STENCILFAIL
);
3684 D3DRS_TO_STRING_CASE(CCW_STENCILZFAIL
);
3685 D3DRS_TO_STRING_CASE(CCW_STENCILPASS
);
3686 D3DRS_TO_STRING_CASE(CCW_STENCILFUNC
);
3687 D3DRS_TO_STRING_CASE(COLORWRITEENABLE1
);
3688 D3DRS_TO_STRING_CASE(COLORWRITEENABLE2
);
3689 D3DRS_TO_STRING_CASE(COLORWRITEENABLE3
);
3690 D3DRS_TO_STRING_CASE(BLENDFACTOR
);
3691 D3DRS_TO_STRING_CASE(SRGBWRITEENABLE
);
3692 D3DRS_TO_STRING_CASE(DEPTHBIAS
);
3693 D3DRS_TO_STRING_CASE(WRAP8
);
3694 D3DRS_TO_STRING_CASE(WRAP9
);
3695 D3DRS_TO_STRING_CASE(WRAP10
);
3696 D3DRS_TO_STRING_CASE(WRAP11
);
3697 D3DRS_TO_STRING_CASE(WRAP12
);
3698 D3DRS_TO_STRING_CASE(WRAP13
);
3699 D3DRS_TO_STRING_CASE(WRAP14
);
3700 D3DRS_TO_STRING_CASE(WRAP15
);
3701 D3DRS_TO_STRING_CASE(SEPARATEALPHABLENDENABLE
);
3702 D3DRS_TO_STRING_CASE(SRCBLENDALPHA
);
3703 D3DRS_TO_STRING_CASE(DESTBLENDALPHA
);
3704 D3DRS_TO_STRING_CASE(BLENDOPALPHA
);