2 * Copyright 2011 Joakim Sindholt <opensource@zhasha.com>
3 * Copyright 2013 Christoph Bumiller
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
27 #include "swapchain9.h"
28 #include "basetexture9.h"
30 #include "indexbuffer9.h"
32 #include "vertexbuffer9.h"
33 #include "vertexdeclaration9.h"
34 #include "vertexshader9.h"
35 #include "pixelshader9.h"
36 #include "nine_pipe.h"
38 #include "nine_limits.h"
39 #include "pipe/p_context.h"
40 #include "pipe/p_state.h"
41 #include "cso_cache/cso_context.h"
42 #include "util/u_atomic.h"
43 #include "util/u_upload_mgr.h"
44 #include "util/u_math.h"
45 #include "util/u_box.h"
46 #include "util/u_simple_shaders.h"
47 #include "util/u_gen_mipmap.h"
50 #include "nine_queue.h"
51 #include "nine_csmt_helper.h"
52 #include "os/os_thread.h"
54 #define DBG_CHANNEL DBG_DEVICE
/* One serialized command for the CSMT (Command Stream MultiThreading)
 * worker thread.  Instances are allocated from the queue pool. */
58 struct csmt_instruction
{
/* Executes the instruction on the worker.  Per the worker loop below,
 * a non-zero return value means a producer is waiting and must be
 * signalled via event_processed. */
59 int (* func
)(struct NineDevice9
*This
, struct csmt_instruction
*instr
);
/* NOTE(review): the fields below appear to belong to struct
 * csmt_context; its opening line is missing from this chunk — confirm
 * against the full file. */
/* Queue of serialized instructions shared between producer and worker. */
64 struct nine_queue_pool
* pool
;
/* Condvar/mutex pair used to report instruction completion back to the
 * producer thread (see nine_csmt_wait_processed). */
66 pipe_condvar event_processed
;
67 pipe_mutex mutex_processed
;
/* Device this context serves; passed to every instruction callback. */
68 struct NineDevice9
*device
;
/* thread_running is held by the worker while it executes; thread_resume
 * is used to park/unpark the worker (see pause/resume below). */
72 pipe_mutex thread_running
;
73 pipe_mutex thread_resume
;
76 /* Wait for instruction to be processed.
77 * Caller has to ensure that only one thread waits at time.
80 nine_csmt_wait_processed(struct csmt_context
*ctx
)
/* Standard condition-variable wait loop: the 'processed' flag is
 * re-checked after every wakeup, which guards against spurious
 * wakeups.  The flag is written by the worker under mutex_processed. */
82 pipe_mutex_lock(ctx
->mutex_processed
);
83 while (!p_atomic_read(&ctx
->processed
)) {
84 pipe_condvar_wait(ctx
->event_processed
, ctx
->mutex_processed
);
86 pipe_mutex_unlock(ctx
->mutex_processed
);
89 /* CSMT worker thread */
/* Main loop of the worker: blocks until the producer flushes the queue,
 * then drains instructions one by one until asked to terminate. */
91 PIPE_THREAD_ROUTINE(nine_csmt_worker
, arg
)
93 struct csmt_context
*ctx
= arg
;
94 struct csmt_instruction
*instr
;
95 DBG("CSMT worker spawned\n");
97 pipe_thread_setname("CSMT-Worker");
/* Sleep until there is flushed work available. */
100 nine_queue_wait_flush(ctx
->pool
);
/* Hold thread_running for the whole time we actively process
 * instructions; nine_csmt_pause blocks on this mutex. */
101 pipe_mutex_lock(ctx
->thread_running
);
103 /* Get instruction. NULL on empty cmdbuf. */
104 while (!p_atomic_read(&ctx
->terminate
) &&
105 (instr
= (struct csmt_instruction
*)nine_queue_get(ctx
->pool
))) {
/* A non-zero return from the instruction means a producer is blocked
 * in nine_csmt_wait_processed: set the flag and signal it. */
108 if (instr
->func(ctx
->device
, instr
)) {
109 pipe_mutex_lock(ctx
->mutex_processed
);
110 p_atomic_set(&ctx
->processed
, TRUE
);
111 pipe_condvar_signal(ctx
->event_processed
);
112 pipe_mutex_unlock(ctx
->mutex_processed
);
/* Pause request: drop thread_running so the pauser can take it, then
 * block on thread_resume until nine_csmt_resume releases it. */
114 if (p_atomic_read(&ctx
->toPause
)) {
115 pipe_mutex_unlock(ctx
->thread_running
);
116 /* will wait here the thread can be resumed */
117 pipe_mutex_lock(ctx
->thread_resume
);
118 pipe_mutex_lock(ctx
->thread_running
);
119 pipe_mutex_unlock(ctx
->thread_resume
);
123 pipe_mutex_unlock(ctx
->thread_running
);
/* On termination, wake any producer still waiting so it does not block
 * forever on an instruction that will never run. */
124 if (p_atomic_read(&ctx
->terminate
)) {
125 pipe_mutex_lock(ctx
->mutex_processed
);
126 p_atomic_set(&ctx
->processed
, TRUE
);
127 pipe_condvar_signal(ctx
->event_processed
);
128 pipe_mutex_unlock(ctx
->mutex_processed
);
133 DBG("CSMT worker destroyed\n");
137 /* Create a CSMT context.
138 * Spawns a worker thread.
/* Returns the new context, with queue, synchronization primitives and
 * worker thread initialized.  NOTE(review): the NULL-checks and error
 * returns between these lines are missing from this chunk; the
 * nine_queue_delete below is presumably the thread-creation failure
 * path — confirm against the full file. */
140 struct csmt_context
*
141 nine_csmt_create( struct NineDevice9
*This
)
143 struct csmt_context
*ctx
;
/* Zero-initialized allocation: all flags (processed, terminate,
 * toPause, hasPaused) start FALSE. */
145 ctx
= CALLOC_STRUCT(csmt_context
);
149 ctx
->pool
= nine_queue_create();
154 pipe_condvar_init(ctx
->event_processed
);
155 pipe_mutex_init(ctx
->mutex_processed
);
156 pipe_mutex_init(ctx
->thread_running
);
157 pipe_mutex_init(ctx
->thread_resume
);
/* Name the calling (producer) thread for debuggers/profilers. */
160 pipe_thread_setname("Main thread");
165 ctx
->worker
= pipe_thread_create(nine_csmt_worker
, ctx
);
/* Cleanup path: release the queue if the worker could not be created. */
167 nine_queue_delete(ctx
->pool
);
172 DBG("Returning context %p\n", ctx
);
/* No-op instruction: queued purely as a synchronization point so the
 * producer can wait until the worker has drained everything before it.
 * NOTE(review): the body/return statement is missing from this chunk;
 * presumably it returns non-zero so the worker signals the waiter —
 * confirm against the full file. */
178 nop_func( struct NineDevice9
*This
, struct csmt_instruction
*instr
)
186 /* Push nop instruction and flush the queue.
187 * Waits for the worker to complete. */
189 nine_csmt_process( struct NineDevice9
*device
)
191 struct csmt_instruction
* instr
;
192 struct csmt_context
*ctx
= device
->csmt_ctx
;
/* Nothing to synchronize when CSMT is disabled ... */
194 if (!device
->csmt_active
)
/* ... or when no work has been queued at all. */
197 if (nine_queue_isempty(ctx
->pool
))
200 DBG("device=%p\n", device
);
/* Queue a nop whose only effect is to wake us once it is reached. */
203 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
205 instr
->func
= nop_func
;
/* Clear the flag before flushing so the worker's signal is not lost. */
207 p_atomic_set(&ctx
->processed
, FALSE
);
208 nine_queue_flush(ctx
->pool
);
/* Block until the worker has executed the nop (queue drained). */
210 nine_csmt_wait_processed(ctx
);
213 /* Destroys a CSMT context.
214 * Waits for the worker thread to terminate.
217 nine_csmt_destroy( struct NineDevice9
*device
, struct csmt_context
*ctx
)
219 struct csmt_instruction
* instr
;
/* Keep a local copy of the thread handle: ctx resources are torn down
 * before we join the thread. */
220 pipe_thread render_thread
= ctx
->worker
;
222 DBG("device=%p ctx=%p\n", device
, ctx
);
224 /* Push nop and flush the queue. */
225 instr
= nine_queue_alloc(ctx
->pool
, sizeof(struct csmt_instruction
));
227 instr
->func
= nop_func
;
229 p_atomic_set(&ctx
->processed
, FALSE
);
230 /* Signal worker to terminate. */
/* Set terminate BEFORE flushing: the worker checks it after waking, so
 * it will signal us and exit its loop. */
231 p_atomic_set(&ctx
->terminate
, TRUE
);
232 nine_queue_flush(ctx
->pool
);
234 nine_csmt_wait_processed(ctx
);
235 nine_queue_delete(ctx
->pool
);
236 pipe_mutex_destroy(ctx
->mutex_processed
);
/* Join the worker thread before returning. */
240 pipe_thread_wait(render_thread
);
/* Pause the CSMT worker so the caller may use the pipe_context directly
 * (see nine_context_get_pipe_acquire).  Must be paired with
 * nine_csmt_resume. */
244 nine_csmt_pause( struct NineDevice9
*device
)
246 struct csmt_context
*ctx
= device
->csmt_ctx
;
248 if (!device
->csmt_active
)
251 /* No need to pause the thread */
/* If nothing flushed is pending, the worker is already idle/blocked. */
252 if (nine_queue_no_flushed_work(ctx
->pool
))
/* Take thread_resume first: the worker will block on it when it honors
 * the toPause request, and stays parked until resume releases it. */
255 pipe_mutex_lock(ctx
->thread_resume
);
256 p_atomic_set(&ctx
->toPause
, TRUE
);
258 /* Wait the thread is paused */
/* Acquiring thread_running only succeeds once the worker has dropped
 * it, i.e. once it is parked on thread_resume. */
259 pipe_mutex_lock(ctx
->thread_running
);
260 ctx
->hasPaused
= TRUE
;
261 p_atomic_set(&ctx
->toPause
, FALSE
);
/* Resume a worker previously parked by nine_csmt_pause: release both
 * mutexes in the order the worker re-acquires them. */
265 nine_csmt_resume( struct NineDevice9
*device
)
267 struct csmt_context
*ctx
= device
->csmt_ctx
;
269 if (!device
->csmt_active
)
275 ctx
->hasPaused
= FALSE
;
/* Give thread_running back, then unblock the worker's wait on
 * thread_resume. */
276 pipe_mutex_unlock(ctx
->thread_running
);
277 pipe_mutex_unlock(ctx
->thread_resume
);
/* Return the device's pipe_context after fully synchronizing with the
 * CSMT worker (queue drained), so the caller can use it safely. */
280 struct pipe_context
*
281 nine_context_get_pipe( struct NineDevice9
*device
)
283 if (device
->csmt_active
)
284 nine_csmt_process(device
);
285 return device
->context
.pipe
;
/* Variant of nine_context_get_pipe callable from either thread: only
 * synchronizes when the caller is NOT the CSMT worker itself (the
 * worker waiting on its own queue would deadlock). */
288 struct pipe_context
*
289 nine_context_get_pipe_multithread( struct NineDevice9
*device
)
291 struct csmt_context
*ctx
= device
->csmt_ctx
;
293 if (!device
->csmt_active
)
294 return device
->context
.pipe
;
296 if (!pipe_thread_is_self(ctx
->worker
))
297 nine_csmt_process(device
);
299 return device
->context
.pipe
;
/* Acquire the pipe_context by pausing the worker (rather than draining
 * the queue).  Must be balanced by nine_context_get_pipe_release. */
302 struct pipe_context
*
303 nine_context_get_pipe_acquire( struct NineDevice9
*device
)
305 nine_csmt_pause(device
);
306 return device
->context
.pipe
;
/* Release the pipe_context acquired via nine_context_get_pipe_acquire:
 * simply unparks the worker. */
310 nine_context_get_pipe_release( struct NineDevice9
*device
)
312 nine_csmt_resume(device
);
315 /* Nine state functions */
317 /* Check if some states need to be set dirty */
/* Recompute the derived NINED3DRS_MULTISAMPLE pseudo-render-state:
 * multisampling is effectively on only when depth or stencil testing is
 * enabled, RT 0 is multisampled, and D3DRS_MULTISAMPLEANTIALIAS is set.
 * Returns NINE_STATE_RASTERIZER when the cached value changed (the
 * rasterizer state must then be re-prepared). */
320 check_multisample(struct NineDevice9
*device
)
322 DWORD
*rs
= device
->context
.rs
;
323 DWORD new_value
= (rs
[D3DRS_ZENABLE
] || rs
[D3DRS_STENCILENABLE
]) &&
324 device
->context
.rt
[0]->desc
.MultiSampleType
>= 1 &&
325 rs
[D3DRS_MULTISAMPLEANTIALIAS
];
326 if (rs
[NINED3DRS_MULTISAMPLE
] != new_value
) {
327 rs
[NINED3DRS_MULTISAMPLE
] = new_value
;
328 return NINE_STATE_RASTERIZER
;
333 /* State preparation only */
/* Translate the D3D9 render states into a gallium blend state and mark
 * it for commit (commit_blend applies it later). */
336 prepare_blend(struct NineDevice9
*device
)
338 nine_convert_blend_state(&device
->context
.pipe_data
.blend
, device
->context
.rs
);
339 device
->context
.commit
|= NINE_STATE_COMMIT_BLEND
;
/* Translate the D3D9 render states into a gallium depth-stencil-alpha
 * state and mark it for commit (commit_dsa applies it later). */
343 prepare_dsa(struct NineDevice9
*device
)
345 nine_convert_dsa_state(&device
->context
.pipe_data
.dsa
, device
->context
.rs
);
346 device
->context
.commit
|= NINE_STATE_COMMIT_DSA
;
/* Translate the D3D9 render states into a gallium rasterizer state and
 * mark it for commit (commit_rasterizer applies it later). */
350 prepare_rasterizer(struct NineDevice9
*device
)
352 nine_convert_rasterizer_state(device
, &device
->context
.pipe_data
.rast
, device
->context
.rs
);
353 device
->context
.commit
|= NINE_STATE_COMMIT_RASTERIZER
;
357 prepare_vs_constants_userbuf_swvp(struct NineDevice9
*device
)
359 struct nine_context
*context
= &device
->context
;
361 if (context
->changed
.vs_const_f
|| context
->changed
.group
& NINE_STATE_SWVP
) {
362 struct pipe_constant_buffer cb
;
364 cb
.buffer_offset
= 0;
365 cb
.buffer_size
= 4096 * sizeof(float[4]);
366 cb
.user_buffer
= context
->vs_const_f_swvp
;
368 if (context
->vs
->lconstf
.ranges
) {
369 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
370 const struct nine_range
*r
= lconstf
->ranges
;
372 float *dst
= context
->vs_lconstf_temp
;
373 float *src
= (float *)cb
.user_buffer
;
374 memcpy(dst
, src
, cb
.buffer_size
);
377 unsigned c
= r
->end
- r
->bgn
;
378 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
382 cb
.user_buffer
= dst
;
385 /* Do not erase the buffer field.
386 * It is either NULL (user_cbufs), or a resource.
387 * u_upload_data will do the proper refcount */
388 context
->pipe_data
.cb0_swvp
.buffer_offset
= cb
.buffer_offset
;
389 context
->pipe_data
.cb0_swvp
.buffer_size
= cb
.buffer_size
;
390 context
->pipe_data
.cb0_swvp
.user_buffer
= cb
.user_buffer
;
392 cb
.user_buffer
= (char *)cb
.user_buffer
+ 4096 * sizeof(float[4]);
393 context
->pipe_data
.cb1_swvp
.buffer_offset
= cb
.buffer_offset
;
394 context
->pipe_data
.cb1_swvp
.buffer_size
= cb
.buffer_size
;
395 context
->pipe_data
.cb1_swvp
.user_buffer
= cb
.user_buffer
;
397 context
->changed
.vs_const_f
= 0;
400 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
401 struct pipe_constant_buffer cb
;
403 cb
.buffer_offset
= 0;
404 cb
.buffer_size
= 2048 * sizeof(float[4]);
405 cb
.user_buffer
= context
->vs_const_i
;
407 context
->pipe_data
.cb2_swvp
.buffer_offset
= cb
.buffer_offset
;
408 context
->pipe_data
.cb2_swvp
.buffer_size
= cb
.buffer_size
;
409 context
->pipe_data
.cb2_swvp
.user_buffer
= cb
.user_buffer
;
410 context
->changed
.vs_const_i
= 0;
413 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
414 struct pipe_constant_buffer cb
;
416 cb
.buffer_offset
= 0;
417 cb
.buffer_size
= 512 * sizeof(float[4]);
418 cb
.user_buffer
= context
->vs_const_b
;
420 context
->pipe_data
.cb3_swvp
.buffer_offset
= cb
.buffer_offset
;
421 context
->pipe_data
.cb3_swvp
.buffer_size
= cb
.buffer_size
;
422 context
->pipe_data
.cb3_swvp
.user_buffer
= cb
.user_buffer
;
423 context
->changed
.vs_const_b
= 0;
426 if (!device
->driver_caps
.user_cbufs
) {
427 struct pipe_constant_buffer
*cb
= &(context
->pipe_data
.cb0_swvp
);
428 u_upload_data(device
->constbuf_uploader
,
431 device
->constbuf_alignment
,
433 &(cb
->buffer_offset
),
435 u_upload_unmap(device
->constbuf_uploader
);
436 cb
->user_buffer
= NULL
;
438 cb
= &(context
->pipe_data
.cb1_swvp
);
439 u_upload_data(device
->constbuf_uploader
,
442 device
->constbuf_alignment
,
444 &(cb
->buffer_offset
),
446 u_upload_unmap(device
->constbuf_uploader
);
447 cb
->user_buffer
= NULL
;
449 cb
= &(context
->pipe_data
.cb2_swvp
);
450 u_upload_data(device
->constbuf_uploader
,
453 device
->constbuf_alignment
,
455 &(cb
->buffer_offset
),
457 u_upload_unmap(device
->constbuf_uploader
);
458 cb
->user_buffer
= NULL
;
460 cb
= &(context
->pipe_data
.cb3_swvp
);
461 u_upload_data(device
->constbuf_uploader
,
464 device
->constbuf_alignment
,
466 &(cb
->buffer_offset
),
468 u_upload_unmap(device
->constbuf_uploader
);
469 cb
->user_buffer
= NULL
;
472 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
473 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
477 prepare_vs_constants_userbuf(struct NineDevice9
*device
)
479 struct nine_context
*context
= &device
->context
;
480 struct pipe_constant_buffer cb
;
482 cb
.buffer_offset
= 0;
483 cb
.buffer_size
= context
->vs
->const_used_size
;
484 cb
.user_buffer
= context
->vs_const_f
;
487 prepare_vs_constants_userbuf_swvp(device
);
491 if (context
->changed
.vs_const_i
|| context
->changed
.group
& NINE_STATE_SWVP
) {
492 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
493 memcpy(idst
, context
->vs_const_i
, NINE_MAX_CONST_I
* sizeof(int[4]));
494 context
->changed
.vs_const_i
= 0;
497 if (context
->changed
.vs_const_b
|| context
->changed
.group
& NINE_STATE_SWVP
) {
498 int *idst
= (int *)&context
->vs_const_f
[4 * device
->max_vs_const_f
];
499 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
500 memcpy(bdst
, context
->vs_const_b
, NINE_MAX_CONST_B
* sizeof(BOOL
));
501 context
->changed
.vs_const_b
= 0;
507 if (context
->vs
->lconstf
.ranges
) {
508 /* TODO: Can we make it so that we don't have to copy everything ? */
509 const struct nine_lconstf
*lconstf
= &(context
->vs
->lconstf
);
510 const struct nine_range
*r
= lconstf
->ranges
;
512 float *dst
= context
->vs_lconstf_temp
;
513 float *src
= (float *)cb
.user_buffer
;
514 memcpy(dst
, src
, cb
.buffer_size
);
517 unsigned c
= r
->end
- r
->bgn
;
518 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
522 cb
.user_buffer
= dst
;
525 if (!device
->driver_caps
.user_cbufs
) {
526 context
->pipe_data
.cb_vs
.buffer_size
= cb
.buffer_size
;
527 u_upload_data(device
->constbuf_uploader
,
530 device
->constbuf_alignment
,
532 &context
->pipe_data
.cb_vs
.buffer_offset
,
533 &context
->pipe_data
.cb_vs
.buffer
);
534 u_upload_unmap(device
->constbuf_uploader
);
535 context
->pipe_data
.cb_vs
.user_buffer
= NULL
;
537 context
->pipe_data
.cb_vs
= cb
;
539 context
->changed
.vs_const_f
= 0;
541 context
->changed
.group
&= ~NINE_STATE_VS_CONST
;
542 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
546 prepare_ps_constants_userbuf(struct NineDevice9
*device
)
548 struct nine_context
*context
= &device
->context
;
549 struct pipe_constant_buffer cb
;
551 cb
.buffer_offset
= 0;
552 cb
.buffer_size
= context
->ps
->const_used_size
;
553 cb
.user_buffer
= context
->ps_const_f
;
555 if (context
->changed
.ps_const_i
) {
556 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
557 memcpy(idst
, context
->ps_const_i
, sizeof(context
->ps_const_i
));
558 context
->changed
.ps_const_i
= 0;
560 if (context
->changed
.ps_const_b
) {
561 int *idst
= (int *)&context
->ps_const_f
[4 * device
->max_ps_const_f
];
562 uint32_t *bdst
= (uint32_t *)&idst
[4 * NINE_MAX_CONST_I
];
563 memcpy(bdst
, context
->ps_const_b
, sizeof(context
->ps_const_b
));
564 context
->changed
.ps_const_b
= 0;
567 /* Upload special constants needed to implement PS1.x instructions like TEXBEM,TEXBEML and BEM */
568 if (context
->ps
->bumpenvmat_needed
) {
569 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, cb
.buffer_size
);
570 memcpy(&context
->ps_lconstf_temp
[4 * 8], &device
->context
.bumpmap_vars
, sizeof(device
->context
.bumpmap_vars
));
572 cb
.user_buffer
= context
->ps_lconstf_temp
;
575 if (context
->ps
->byte_code
.version
< 0x30 &&
576 context
->rs
[D3DRS_FOGENABLE
]) {
577 float *dst
= &context
->ps_lconstf_temp
[4 * 32];
578 if (cb
.user_buffer
!= context
->ps_lconstf_temp
) {
579 memcpy(context
->ps_lconstf_temp
, cb
.user_buffer
, cb
.buffer_size
);
580 cb
.user_buffer
= context
->ps_lconstf_temp
;
583 d3dcolor_to_rgba(dst
, context
->rs
[D3DRS_FOGCOLOR
]);
584 if (context
->rs
[D3DRS_FOGTABLEMODE
] == D3DFOG_LINEAR
) {
585 dst
[4] = asfloat(context
->rs
[D3DRS_FOGEND
]);
586 dst
[5] = 1.0f
/ (asfloat(context
->rs
[D3DRS_FOGEND
]) - asfloat(context
->rs
[D3DRS_FOGSTART
]));
587 } else if (context
->rs
[D3DRS_FOGTABLEMODE
] != D3DFOG_NONE
) {
588 dst
[4] = asfloat(context
->rs
[D3DRS_FOGDENSITY
]);
590 cb
.buffer_size
= 4 * 4 * 34;
596 if (!device
->driver_caps
.user_cbufs
) {
597 context
->pipe_data
.cb_ps
.buffer_size
= cb
.buffer_size
;
598 u_upload_data(device
->constbuf_uploader
,
601 device
->constbuf_alignment
,
603 &context
->pipe_data
.cb_ps
.buffer_offset
,
604 &context
->pipe_data
.cb_ps
.buffer
);
605 u_upload_unmap(device
->constbuf_uploader
);
606 context
->pipe_data
.cb_ps
.user_buffer
= NULL
;
608 context
->pipe_data
.cb_ps
= cb
;
610 context
->changed
.ps_const_f
= 0;
612 context
->changed
.group
&= ~NINE_STATE_PS_CONST
;
613 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
/* Select the vertex shader variant to bind and report which state
 * groups that invalidates.  shader_changed is non-zero when the bound
 * VS object itself changed; the key check catches variant changes of
 * the same shader.  Returns a NINE_STATE_* dirty mask. */
616 static inline uint32_t
617 prepare_vs(struct NineDevice9
*device
, uint8_t shader_changed
)
619 struct nine_context
*context
= &device
->context
;
620 struct NineVertexShader9
*vs
= context
->vs
;
621 uint32_t changed_group
= 0;
622 int has_key_changed
= 0;
624 if (likely(context
->programmable_vs
))
625 has_key_changed
= NineVertexShader9_UpdateKey(vs
, device
);
/* Nothing to do if neither the shader nor its key changed. */
627 if (!shader_changed
&& !has_key_changed
)
630 /* likely because we dislike FF */
631 if (likely(context
->programmable_vs
)) {
632 context
->cso_shader
.vs
= NineVertexShader9_GetVariant(vs
);
/* Fixed-function path: use the prebuilt FF cso. */
635 context
->cso_shader
.vs
= vs
->ff_cso
;
/* The VS point size feeds the rasterizer state; re-prepare it when it
 * differs from the cached pseudo-render-state. */
638 if (context
->rs
[NINED3DRS_VSPOINTSIZE
] != vs
->point_size
) {
639 context
->rs
[NINED3DRS_VSPOINTSIZE
] = vs
->point_size
;
640 changed_group
|= NINE_STATE_RASTERIZER
;
/* If the shader samples from units that have no sampler bound, dirty
 * the sampler group so dummy samplers get bound. */
643 if ((context
->bound_samplers_mask_vs
& vs
->sampler_mask
) != vs
->sampler_mask
)
644 /* Bound dummy sampler. */
645 changed_group
|= NINE_STATE_SAMPLER
;
647 context
->commit
|= NINE_STATE_COMMIT_VS
;
648 return changed_group
;
/* Pixel-shader counterpart of prepare_vs: select the PS variant to bind
 * and return the NINE_STATE_* groups invalidated by the change. */
651 static inline uint32_t
652 prepare_ps(struct NineDevice9
*device
, uint8_t shader_changed
)
654 struct nine_context
*context
= &device
->context
;
655 struct NinePixelShader9
*ps
= context
->ps
;
656 uint32_t changed_group
= 0;
657 int has_key_changed
= 0;
660 has_key_changed
= NinePixelShader9_UpdateKey(ps
, context
);
/* Nothing to do if neither the shader nor its key changed. */
662 if (!shader_changed
&& !has_key_changed
)
666 context
->cso_shader
.ps
= NinePixelShader9_GetVariant(ps
);
/* Fixed-function path: use the prebuilt FF cso. */
669 context
->cso_shader
.ps
= ps
->ff_cso
;
/* If the shader samples from units that have no sampler bound, dirty
 * the sampler group so dummy samplers get bound. */
672 if ((context
->bound_samplers_mask_ps
& ps
->sampler_mask
) != ps
->sampler_mask
)
673 /* Bound dummy sampler. */
674 changed_group
|= NINE_STATE_SAMPLER
;
676 context
->commit
|= NINE_STATE_COMMIT_PS
;
677 return changed_group
;
680 /* State preparation incremental */
682 /* State preparation + State commit */
685 update_framebuffer(struct NineDevice9
*device
, bool is_clear
)
687 struct nine_context
*context
= &device
->context
;
688 struct pipe_context
*pipe
= context
->pipe
;
689 struct pipe_framebuffer_state
*fb
= &context
->pipe_data
.fb
;
691 struct NineSurface9
*rt0
= context
->rt
[0];
692 unsigned w
= rt0
->desc
.Width
;
693 unsigned h
= rt0
->desc
.Height
;
694 unsigned nr_samples
= rt0
->base
.info
.nr_samples
;
695 unsigned ps_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
696 unsigned mask
= is_clear
? 0xf : ps_mask
;
697 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
701 context
->rt_mask
= 0x0;
704 /* all render targets must have the same size and the depth buffer must be
705 * bigger. Multisample has to match, according to spec. But some apps do
706 * things wrong there, and no error is returned. The behaviour they get
707 * apparently is that depth buffer is disabled if it doesn't match.
708 * Surely the same for render targets. */
710 /* Special case: D3DFMT_NULL is used to bound no real render target,
711 * but render to depth buffer. We have to not take into account the render
712 * target info. TODO: know what should happen when there are several render targers
713 * and the first one is D3DFMT_NULL */
714 if (rt0
->desc
.Format
== D3DFMT_NULL
&& context
->ds
) {
715 w
= context
->ds
->desc
.Width
;
716 h
= context
->ds
->desc
.Height
;
717 nr_samples
= context
->ds
->base
.info
.nr_samples
;
720 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
721 struct NineSurface9
*rt
= context
->rt
[i
];
723 if (rt
&& rt
->desc
.Format
!= D3DFMT_NULL
&& (mask
& (1 << i
)) &&
724 rt
->desc
.Width
== w
&& rt
->desc
.Height
== h
&&
725 rt
->base
.info
.nr_samples
== nr_samples
) {
726 fb
->cbufs
[i
] = NineSurface9_GetSurface(rt
, sRGB
);
727 context
->rt_mask
|= 1 << i
;
728 fb
->nr_cbufs
= i
+ 1;
730 /* Color outputs must match RT slot,
731 * drivers will have to handle NULL entries for GL, too.
737 if (context
->ds
&& context
->ds
->desc
.Width
>= w
&&
738 context
->ds
->desc
.Height
>= h
&&
739 context
->ds
->base
.info
.nr_samples
== nr_samples
) {
740 fb
->zsbuf
= NineSurface9_GetSurface(context
->ds
, 0);
748 pipe
->set_framebuffer_state(pipe
, fb
); /* XXX: cso ? */
750 if (is_clear
&& context
->rt_mask
== ps_mask
)
751 context
->changed
.group
&= ~NINE_STATE_FB
;
755 update_viewport(struct NineDevice9
*device
)
757 struct nine_context
*context
= &device
->context
;
758 const D3DVIEWPORT9
*vport
= &context
->viewport
;
759 struct pipe_viewport_state pvport
;
761 /* D3D coordinates are:
762 * -1 .. +1 for X,Y and
763 * 0 .. +1 for Z (we use pipe_rasterizer_state.clip_halfz)
765 pvport
.scale
[0] = (float)vport
->Width
* 0.5f
;
766 pvport
.scale
[1] = (float)vport
->Height
* -0.5f
;
767 pvport
.scale
[2] = vport
->MaxZ
- vport
->MinZ
;
768 pvport
.translate
[0] = (float)vport
->Width
* 0.5f
+ (float)vport
->X
;
769 pvport
.translate
[1] = (float)vport
->Height
* 0.5f
+ (float)vport
->Y
;
770 pvport
.translate
[2] = vport
->MinZ
;
772 /* We found R600 and SI cards have some imprecision
773 * on the barycentric coordinates used for interpolation.
774 * Some shaders rely on having something precise.
775 * We found that the proprietary driver has the imprecision issue,
776 * except when the render target width and height are powers of two.
777 * It is using some sort of workaround for these cases
778 * which covers likely all the cases the applications rely
779 * on something precise.
780 * We haven't found the workaround, but it seems like it's better
781 * for applications if the imprecision is biased towards infinity
782 * instead of -infinity (which is what measured). So shift slightly
783 * the viewport: not enough to change rasterization result (in particular
784 * for multisampling), but enough to make the imprecision biased
785 * towards infinity. We do this shift only if render target width and
786 * height are powers of two.
787 * Solves 'red shadows' bug on UE3 games.
789 if (device
->driver_bugs
.buggy_barycentrics
&&
790 ((vport
->Width
& (vport
->Width
-1)) == 0) &&
791 ((vport
->Height
& (vport
->Height
-1)) == 0)) {
792 pvport
.translate
[0] -= 1.0f
/ 128.0f
;
793 pvport
.translate
[1] -= 1.0f
/ 128.0f
;
796 cso_set_viewport(context
->cso
, &pvport
);
799 /* Loop through VS inputs and pick the vertex elements with the declared
800 * usage from the vertex declaration, then insert the instance divisor from
801 * the stream source frequency setting.
804 update_vertex_elements(struct NineDevice9
*device
)
806 struct nine_context
*context
= &device
->context
;
807 const struct NineVertexDeclaration9
*vdecl
= device
->context
.vdecl
;
808 const struct NineVertexShader9
*vs
;
811 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
812 char used_streams
[device
->caps
.MaxStreams
];
813 int dummy_vbo_stream
= -1;
814 BOOL need_dummy_vbo
= FALSE
;
815 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
817 context
->stream_usage_mask
= 0;
818 memset(vdecl_index_map
, -1, 16);
819 memset(used_streams
, 0, device
->caps
.MaxStreams
);
820 vs
= context
->programmable_vs
? context
->vs
: device
->ff
.vs
;
823 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
824 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
825 n
, vs
->input_map
[n
].ndecl
, vdecl
);
827 for (i
= 0; i
< vdecl
->nelems
; i
++) {
828 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
829 vdecl_index_map
[n
] = i
;
830 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
834 if (vdecl_index_map
[n
] < 0)
835 need_dummy_vbo
= TRUE
;
838 /* No vertex declaration. Likely will never happen in practice,
839 * but we need not crash on this */
840 need_dummy_vbo
= TRUE
;
843 if (need_dummy_vbo
) {
844 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
845 if (!used_streams
[i
]) {
846 dummy_vbo_stream
= i
;
851 /* there are less vertex shader inputs than stream slots,
852 * so if we need a slot for the dummy vbo, we should have found one */
853 assert (!need_dummy_vbo
|| dummy_vbo_stream
!= -1);
855 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
856 index
= vdecl_index_map
[n
];
858 ve
[n
] = vdecl
->elems
[index
];
859 b
= ve
[n
].vertex_buffer_index
;
860 context
->stream_usage_mask
|= 1 << b
;
861 /* XXX wine just uses 1 here: */
862 if (context
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
863 ve
[n
].instance_divisor
= context
->stream_freq
[b
] & 0x7FFFFF;
865 /* if the vertex declaration is incomplete compared to what the
866 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
867 * This is not precised by the spec, but is the behaviour
869 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
870 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
871 ve
[n
].src_offset
= 0;
872 ve
[n
].instance_divisor
= 0;
876 if (context
->dummy_vbo_bound_at
!= dummy_vbo_stream
) {
877 if (context
->dummy_vbo_bound_at
>= 0)
878 context
->changed
.vtxbuf
|= 1 << context
->dummy_vbo_bound_at
;
879 if (dummy_vbo_stream
>= 0) {
880 context
->changed
.vtxbuf
|= 1 << dummy_vbo_stream
;
881 context
->vbo_bound_done
= FALSE
;
883 context
->dummy_vbo_bound_at
= dummy_vbo_stream
;
886 cso_set_vertex_elements(context
->cso
, vs
->num_inputs
, ve
);
890 update_vertex_buffers(struct NineDevice9
*device
)
892 struct nine_context
*context
= &device
->context
;
893 struct pipe_context
*pipe
= context
->pipe
;
894 struct pipe_vertex_buffer dummy_vtxbuf
;
895 uint32_t mask
= context
->changed
.vtxbuf
;
898 DBG("mask=%x\n", mask
);
900 if (context
->dummy_vbo_bound_at
>= 0) {
901 if (!context
->vbo_bound_done
) {
902 dummy_vtxbuf
.buffer
= device
->dummy_vbo
;
903 dummy_vtxbuf
.stride
= 0;
904 dummy_vtxbuf
.user_buffer
= NULL
;
905 dummy_vtxbuf
.buffer_offset
= 0;
906 pipe
->set_vertex_buffers(pipe
, context
->dummy_vbo_bound_at
,
908 context
->vbo_bound_done
= TRUE
;
910 mask
&= ~(1 << context
->dummy_vbo_bound_at
);
913 for (i
= 0; mask
; mask
>>= 1, ++i
) {
915 if (context
->vtxbuf
[i
].buffer
)
916 pipe
->set_vertex_buffers(pipe
, i
, 1, &context
->vtxbuf
[i
]);
918 pipe
->set_vertex_buffers(pipe
, i
, 1, NULL
);
922 context
->changed
.vtxbuf
= 0;
925 static inline boolean
926 update_sampler_derived(struct nine_context
*context
, unsigned s
)
928 boolean changed
= FALSE
;
930 if (context
->samp
[s
][NINED3DSAMP_SHADOW
] != context
->texture
[s
].shadow
) {
932 context
->samp
[s
][NINED3DSAMP_SHADOW
] = context
->texture
[s
].shadow
;
935 if (context
->samp
[s
][NINED3DSAMP_CUBETEX
] !=
936 (context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
)) {
938 context
->samp
[s
][NINED3DSAMP_CUBETEX
] =
939 context
->texture
[s
].type
== D3DRTYPE_CUBETEXTURE
;
942 if (context
->samp
[s
][D3DSAMP_MIPFILTER
] != D3DTEXF_NONE
) {
943 int lod
= context
->samp
[s
][D3DSAMP_MAXMIPLEVEL
] - context
->texture
[s
].lod
;
946 if (context
->samp
[s
][NINED3DSAMP_MINLOD
] != lod
) {
948 context
->samp
[s
][NINED3DSAMP_MINLOD
] = lod
;
951 context
->changed
.sampler
[s
] &= ~0x300; /* lod changes irrelevant */
957 /* TODO: add sRGB override to pipe_sampler_state ? */
959 update_textures_and_samplers(struct NineDevice9
*device
)
961 struct nine_context
*context
= &device
->context
;
962 struct pipe_sampler_view
*view
[NINE_MAX_SAMPLERS
];
963 unsigned num_textures
;
965 boolean commit_samplers
;
966 uint16_t sampler_mask
= context
->ps
? context
->ps
->sampler_mask
:
967 device
->ff
.ps
->sampler_mask
;
969 /* TODO: Can we reduce iterations here ? */
971 commit_samplers
= FALSE
;
972 context
->bound_samplers_mask_ps
= 0;
973 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_PS
; ++i
) {
974 const unsigned s
= NINE_SAMPLER_PS(i
);
977 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
982 if (context
->texture
[s
].enabled
) {
983 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
985 view
[i
] = context
->texture
[s
].view
[sRGB
];
986 num_textures
= i
+ 1;
988 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
989 context
->changed
.sampler
[s
] = 0;
990 commit_samplers
= TRUE
;
991 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
994 /* Bind dummy sampler. We do not bind dummy sampler when
995 * it is not needed because it could add overhead. The
996 * dummy sampler should have r=g=b=0 and a=1. We do not
997 * unbind dummy sampler directly when they are not needed
998 * anymore, but they're going to be removed as long as texture
999 * or sampler states are changed. */
1000 view
[i
] = device
->dummy_sampler_view
;
1001 num_textures
= i
+ 1;
1003 cso_single_sampler(context
->cso
, PIPE_SHADER_FRAGMENT
,
1004 s
- NINE_SAMPLER_PS(0), &device
->dummy_sampler_state
);
1006 commit_samplers
= TRUE
;
1007 context
->changed
.sampler
[s
] = ~0;
1010 context
->bound_samplers_mask_ps
|= (1 << s
);
1013 cso_set_sampler_views(context
->cso
, PIPE_SHADER_FRAGMENT
, num_textures
, view
);
1015 if (commit_samplers
)
1016 cso_single_sampler_done(context
->cso
, PIPE_SHADER_FRAGMENT
);
1018 commit_samplers
= FALSE
;
1019 sampler_mask
= context
->programmable_vs
? context
->vs
->sampler_mask
: 0;
1020 context
->bound_samplers_mask_vs
= 0;
1021 for (num_textures
= 0, i
= 0; i
< NINE_MAX_SAMPLERS_VS
; ++i
) {
1022 const unsigned s
= NINE_SAMPLER_VS(i
);
1025 if (!context
->texture
[s
].enabled
&& !(sampler_mask
& (1 << i
))) {
1030 if (context
->texture
[s
].enabled
) {
1031 sRGB
= context
->samp
[s
][D3DSAMP_SRGBTEXTURE
] ? 1 : 0;
1033 view
[i
] = context
->texture
[s
].view
[sRGB
];
1034 num_textures
= i
+ 1;
1036 if (update_sampler_derived(context
, s
) || (context
->changed
.sampler
[s
] & 0x05fe)) {
1037 context
->changed
.sampler
[s
] = 0;
1038 commit_samplers
= TRUE
;
1039 nine_convert_sampler_state(context
->cso
, s
, context
->samp
[s
]);
1042 /* Bind dummy sampler. We do not bind dummy sampler when
1043 * it is not needed because it could add overhead. The
1044 * dummy sampler should have r=g=b=0 and a=1. We do not
1045 * unbind dummy sampler directly when they are not needed
1046 * anymore, but they're going to be removed as long as texture
1047 * or sampler states are changed. */
1048 view
[i
] = device
->dummy_sampler_view
;
1049 num_textures
= i
+ 1;
1051 cso_single_sampler(context
->cso
, PIPE_SHADER_VERTEX
,
1052 s
- NINE_SAMPLER_VS(0), &device
->dummy_sampler_state
);
1054 commit_samplers
= TRUE
;
1055 context
->changed
.sampler
[s
] = ~0;
1058 context
->bound_samplers_mask_vs
|= (1 << s
);
1061 cso_set_sampler_views(context
->cso
, PIPE_SHADER_VERTEX
, num_textures
, view
);
1063 if (commit_samplers
)
1064 cso_single_sampler_done(context
->cso
, PIPE_SHADER_VERTEX
);
1067 /* State commit only */
/* Bind the blend state prepared by prepare_blend via the cso cache. */
1070 commit_blend(struct NineDevice9
*device
)
1072 struct nine_context
*context
= &device
->context
;
1074 cso_set_blend(context
->cso
, &context
->pipe_data
.blend
);
/* Bind the depth-stencil-alpha state prepared by prepare_dsa via the
 * cso cache. */
1078 commit_dsa(struct NineDevice9
*device
)
1080 struct nine_context
*context
= &device
->context
;
1082 cso_set_depth_stencil_alpha(context
->cso
, &context
->pipe_data
.dsa
);
/* Apply the single cached scissor rectangle directly on the pipe
 * (slot 0, one state). */
1086 commit_scissor(struct NineDevice9
*device
)
1088 struct nine_context
*context
= &device
->context
;
1089 struct pipe_context
*pipe
= context
->pipe
;
1091 pipe
->set_scissor_states(pipe
, 0, 1, &context
->scissor
);
/* Bind the rasterizer state prepared by prepare_rasterizer via the cso
 * cache. */
1095 commit_rasterizer(struct NineDevice9
*device
)
1097 struct nine_context
*context
= &device
->context
;
1099 cso_set_rasterizer(context
->cso
, &context
->pipe_data
.rast
);
/* Bind the cached index buffer, or unbind (NULL) when no index buffer
 * resource is set. */
1103 commit_index_buffer(struct NineDevice9
*device
)
1105 struct nine_context
*context
= &device
->context
;
1106 struct pipe_context
*pipe
= context
->pipe
;
1107 if (context
->idxbuf
.buffer
)
1108 pipe
->set_index_buffer(pipe
, &context
->idxbuf
);
1110 pipe
->set_index_buffer(pipe
, NULL
);
1114 commit_vs_constants(struct NineDevice9
*device
)
1116 struct nine_context
*context
= &device
->context
;
1117 struct pipe_context
*pipe
= context
->pipe
;
1119 if (unlikely(!context
->programmable_vs
))
1120 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs_ff
);
1122 if (context
->swvp
) {
1123 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb0_swvp
);
1124 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 1, &context
->pipe_data
.cb1_swvp
);
1125 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 2, &context
->pipe_data
.cb2_swvp
);
1126 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 3, &context
->pipe_data
.cb3_swvp
);
1128 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_VERTEX
, 0, &context
->pipe_data
.cb_vs
);
1134 commit_ps_constants(struct NineDevice9
*device
)
1136 struct nine_context
*context
= &device
->context
;
1137 struct pipe_context
*pipe
= context
->pipe
;
1139 if (unlikely(!context
->ps
))
1140 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps_ff
);
1142 pipe
->set_constant_buffer(pipe
, PIPE_SHADER_FRAGMENT
, 0, &context
->pipe_data
.cb_ps
);
1146 commit_vs(struct NineDevice9
*device
)
1148 struct nine_context
*context
= &device
->context
;
1150 context
->pipe
->bind_vs_state(context
->pipe
, context
->cso_shader
.vs
);
1155 commit_ps(struct NineDevice9
*device
)
1157 struct nine_context
*context
= &device
->context
;
1159 context
->pipe
->bind_fs_state(context
->pipe
, context
->cso_shader
.ps
);
/* State groups whose change may require re-preparing the VS/PS variant.
 * NOTE(review): first group members were lost in extraction and
 * reconstructed from upstream Mesa nine_state.c — confirm against git. */
#define NINE_STATE_SHADER_CHANGE_VS \
   (NINE_STATE_VS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_FOG_SHADER | \
    NINE_STATE_POINTSIZE_SHADER | \
    NINE_STATE_SWVP)

#define NINE_STATE_SHADER_CHANGE_PS \
   (NINE_STATE_PS | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_FOG_SHADER | \
    NINE_STATE_PS1X_SHADER)

/* Groups expected to change frequently between draw calls. */
#define NINE_STATE_FREQUENT \
   (NINE_STATE_RASTERIZER | \
    NINE_STATE_TEXTURE | \
    NINE_STATE_SAMPLER | \
    NINE_STATE_VS_CONST | \
    NINE_STATE_PS_CONST | \
    NINE_STATE_MULTISAMPLE)

/* Groups that change at a moderate rate. */
#define NINE_STATE_COMMON \
   (NINE_STATE_FB | \
    NINE_STATE_BLEND | \
    NINE_STATE_DSA | \
    NINE_STATE_VIEWPORT | \
    NINE_STATE_VDECL | \
    NINE_STATE_IDXBUF | \
    NINE_STATE_STREAMFREQ)

/* Rarely-changed groups, checked last in nine_update_state. */
#define NINE_STATE_RARE \
   (NINE_STATE_SCISSOR | \
    NINE_STATE_BLEND_COLOR | \
    NINE_STATE_STENCIL_REF | \
    NINE_STATE_SAMPLE_MASK)
1200 nine_update_state(struct NineDevice9
*device
)
1202 struct nine_context
*context
= &device
->context
;
1203 struct pipe_context
*pipe
= context
->pipe
;
1206 DBG("changed state groups: %x\n", context
->changed
.group
);
1208 /* NOTE: We may want to use the cso cache for everything, or let
1209 * NineDevice9.RestoreNonCSOState actually set the states, then we wouldn't
1210 * have to care about state being clobbered here and could merge this back
1211 * into update_textures. Except, we also need to re-validate textures that
1212 * may be dirty anyway, even if no texture bindings changed.
1215 /* ff_update may change VS/PS dirty bits */
1216 if (unlikely(!context
->programmable_vs
|| !context
->ps
))
1217 nine_ff_update(device
);
1218 group
= context
->changed
.group
;
1220 if (group
& (NINE_STATE_SHADER_CHANGE_VS
| NINE_STATE_SHADER_CHANGE_PS
)) {
1221 if (group
& NINE_STATE_SHADER_CHANGE_VS
)
1222 group
|= prepare_vs(device
, (group
& NINE_STATE_VS
) != 0); /* may set NINE_STATE_RASTERIZER and NINE_STATE_SAMPLER*/
1223 if (group
& NINE_STATE_SHADER_CHANGE_PS
)
1224 group
|= prepare_ps(device
, (group
& NINE_STATE_PS
) != 0);
1227 if (group
& (NINE_STATE_COMMON
| NINE_STATE_VS
)) {
1228 if (group
& NINE_STATE_FB
)
1229 update_framebuffer(device
, FALSE
);
1230 if (group
& NINE_STATE_BLEND
)
1231 prepare_blend(device
);
1232 if (group
& NINE_STATE_DSA
)
1233 prepare_dsa(device
);
1234 if (group
& NINE_STATE_VIEWPORT
)
1235 update_viewport(device
);
1236 if (group
& (NINE_STATE_VDECL
| NINE_STATE_VS
| NINE_STATE_STREAMFREQ
))
1237 update_vertex_elements(device
);
1238 if (group
& NINE_STATE_IDXBUF
)
1239 commit_index_buffer(device
);
1242 if (likely(group
& (NINE_STATE_FREQUENT
| NINE_STATE_VS
| NINE_STATE_PS
| NINE_STATE_SWVP
))) {
1243 if (group
& NINE_STATE_MULTISAMPLE
)
1244 group
|= check_multisample(device
);
1245 if (group
& NINE_STATE_RASTERIZER
)
1246 prepare_rasterizer(device
);
1247 if (group
& (NINE_STATE_TEXTURE
| NINE_STATE_SAMPLER
))
1248 update_textures_and_samplers(device
);
1249 if ((group
& (NINE_STATE_VS_CONST
| NINE_STATE_VS
| NINE_STATE_SWVP
)) && context
->programmable_vs
)
1250 prepare_vs_constants_userbuf(device
);
1251 if ((group
& (NINE_STATE_PS_CONST
| NINE_STATE_PS
)) && context
->ps
)
1252 prepare_ps_constants_userbuf(device
);
1255 if (context
->changed
.vtxbuf
)
1256 update_vertex_buffers(device
);
1258 if (context
->commit
& NINE_STATE_COMMIT_BLEND
)
1259 commit_blend(device
);
1260 if (context
->commit
& NINE_STATE_COMMIT_DSA
)
1262 if (context
->commit
& NINE_STATE_COMMIT_RASTERIZER
)
1263 commit_rasterizer(device
);
1264 if (context
->commit
& NINE_STATE_COMMIT_CONST_VS
)
1265 commit_vs_constants(device
);
1266 if (context
->commit
& NINE_STATE_COMMIT_CONST_PS
)
1267 commit_ps_constants(device
);
1268 if (context
->commit
& NINE_STATE_COMMIT_VS
)
1270 if (context
->commit
& NINE_STATE_COMMIT_PS
)
1273 context
->commit
= 0;
1275 if (unlikely(context
->changed
.ucp
)) {
1276 pipe
->set_clip_state(pipe
, &context
->clip
);
1277 context
->changed
.ucp
= FALSE
;
1280 if (unlikely(group
& NINE_STATE_RARE
)) {
1281 if (group
& NINE_STATE_SCISSOR
)
1282 commit_scissor(device
);
1283 if (group
& NINE_STATE_BLEND_COLOR
) {
1284 struct pipe_blend_color color
;
1285 d3dcolor_to_rgba(&color
.color
[0], context
->rs
[D3DRS_BLENDFACTOR
]);
1286 pipe
->set_blend_color(pipe
, &color
);
1288 if (group
& NINE_STATE_SAMPLE_MASK
) {
1289 if (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) {
1290 pipe
->set_sample_mask(pipe
, ~0);
1292 pipe
->set_sample_mask(pipe
, context
->rs
[D3DRS_MULTISAMPLEMASK
]);
1295 if (group
& NINE_STATE_STENCIL_REF
) {
1296 struct pipe_stencil_ref ref
;
1297 ref
.ref_value
[0] = context
->rs
[D3DRS_STENCILREF
];
1298 ref
.ref_value
[1] = ref
.ref_value
[0];
1299 pipe
->set_stencil_ref(pipe
, &ref
);
1303 context
->changed
.group
&=
1304 (NINE_STATE_FF
| NINE_STATE_VS_CONST
| NINE_STATE_PS_CONST
);
1309 #define RESZ_CODE 0x7fa05000
1312 NineDevice9_ResolveZ( struct NineDevice9
*device
)
1314 struct nine_context
*context
= &device
->context
;
1315 const struct util_format_description
*desc
;
1316 struct NineSurface9
*source
= context
->ds
;
1317 struct pipe_resource
*src
, *dst
;
1318 struct pipe_blit_info blit
;
1320 DBG("RESZ resolve\n");
1322 if (!source
|| !context
->texture
[0].enabled
||
1323 context
->texture
[0].type
!= D3DRTYPE_TEXTURE
)
1326 src
= source
->base
.resource
;
1327 dst
= context
->texture
[0].resource
;
1332 /* check dst is depth format. we know already for src */
1333 desc
= util_format_description(dst
->format
);
1334 if (desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_ZS
)
1337 memset(&blit
, 0, sizeof(blit
));
1338 blit
.src
.resource
= src
;
1340 blit
.src
.format
= src
->format
;
1342 blit
.src
.box
.depth
= 1;
1345 blit
.src
.box
.width
= src
->width0
;
1346 blit
.src
.box
.height
= src
->height0
;
1348 blit
.dst
.resource
= dst
;
1350 blit
.dst
.format
= dst
->format
;
1352 blit
.dst
.box
.depth
= 1;
1355 blit
.dst
.box
.width
= dst
->width0
;
1356 blit
.dst
.box
.height
= dst
->height0
;
1358 blit
.mask
= PIPE_MASK_ZS
;
1359 blit
.filter
= PIPE_TEX_FILTER_NEAREST
;
1360 blit
.scissor_enable
= FALSE
;
1362 context
->pipe
->blit(context
->pipe
, &blit
);
1365 #define ALPHA_TO_COVERAGE_ENABLE MAKEFOURCC('A', '2', 'M', '1')
1366 #define ALPHA_TO_COVERAGE_DISABLE MAKEFOURCC('A', '2', 'M', '0')
1368 /* Nine_context functions.
1369 * Serialized through CSMT macros.
1373 nine_context_set_texture_apply(struct NineDevice9
*device
,
1378 D3DRESOURCETYPE type
,
1380 struct pipe_resource
*res
,
1381 struct pipe_sampler_view
*view0
,
1382 struct pipe_sampler_view
*view1
);
1384 nine_context_set_stream_source_apply(struct NineDevice9
*device
,
1386 struct pipe_resource
*res
,
1391 nine_context_set_indices_apply(struct NineDevice9
*device
,
1392 struct pipe_resource
*res
,
1394 UINT OffsetInBytes
);
1397 nine_context_set_pixel_shader_constant_i_transformed(struct NineDevice9
*device
,
1399 const int *pConstantData
,
1400 unsigned pConstantData_size
,
1401 UINT Vector4iCount
);
1403 CSMT_ITEM_NO_WAIT(nine_context_set_render_state
,
1404 ARG_VAL(D3DRENDERSTATETYPE
, State
),
1405 ARG_VAL(DWORD
, Value
))
1407 struct nine_context
*context
= &device
->context
;
1409 /* Amd hacks (equivalent to GL extensions) */
1410 if (unlikely(State
== D3DRS_POINTSIZE
)) {
1411 if (Value
== RESZ_CODE
) {
1412 NineDevice9_ResolveZ(device
);
1416 if (Value
== ALPHA_TO_COVERAGE_ENABLE
||
1417 Value
== ALPHA_TO_COVERAGE_DISABLE
) {
1418 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== ALPHA_TO_COVERAGE_ENABLE
);
1419 context
->changed
.group
|= NINE_STATE_BLEND
;
1425 if (unlikely(State
== D3DRS_ADAPTIVETESS_Y
)) {
1426 if (Value
== D3DFMT_ATOC
|| (Value
== D3DFMT_UNKNOWN
&& context
->rs
[NINED3DRS_ALPHACOVERAGE
])) {
1427 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
== D3DFMT_ATOC
) ? 3 : 0;
1428 context
->rs
[NINED3DRS_ALPHACOVERAGE
] &= context
->rs
[D3DRS_ALPHATESTENABLE
] ? 3 : 2;
1429 context
->changed
.group
|= NINE_STATE_BLEND
;
1433 if (unlikely(State
== D3DRS_ALPHATESTENABLE
&& (context
->rs
[NINED3DRS_ALPHACOVERAGE
] & 2))) {
1434 DWORD alphacoverage_prev
= context
->rs
[NINED3DRS_ALPHACOVERAGE
];
1435 context
->rs
[NINED3DRS_ALPHACOVERAGE
] = (Value
? 3 : 2);
1436 if (context
->rs
[NINED3DRS_ALPHACOVERAGE
] != alphacoverage_prev
)
1437 context
->changed
.group
|= NINE_STATE_BLEND
;
1440 context
->rs
[State
] = nine_fix_render_state_value(State
, Value
);
1441 context
->changed
.group
|= nine_render_state_group
[State
];
1444 CSMT_ITEM_NO_WAIT(nine_context_set_texture_apply
,
1445 ARG_VAL(DWORD
, stage
),
1446 ARG_VAL(BOOL
, enabled
),
1447 ARG_VAL(BOOL
, shadow
),
1448 ARG_VAL(DWORD
, lod
),
1449 ARG_VAL(D3DRESOURCETYPE
, type
),
1450 ARG_VAL(uint8_t, pstype
),
1451 ARG_BIND_RES(struct pipe_resource
, res
),
1452 ARG_BIND_VIEW(struct pipe_sampler_view
, view0
),
1453 ARG_BIND_VIEW(struct pipe_sampler_view
, view1
))
1455 struct nine_context
*context
= &device
->context
;
1457 context
->texture
[stage
].enabled
= enabled
;
1458 context
->samplers_shadow
&= ~(1 << stage
);
1459 context
->samplers_shadow
|= shadow
<< stage
;
1460 context
->texture
[stage
].shadow
= shadow
;
1461 context
->texture
[stage
].lod
= lod
;
1462 context
->texture
[stage
].type
= type
;
1463 context
->texture
[stage
].pstype
= pstype
;
1464 pipe_resource_reference(&context
->texture
[stage
].resource
, res
);
1465 pipe_sampler_view_reference(&context
->texture
[stage
].view
[0], view0
);
1466 pipe_sampler_view_reference(&context
->texture
[stage
].view
[1], view1
);
1468 context
->changed
.group
|= NINE_STATE_TEXTURE
;
1472 nine_context_set_texture(struct NineDevice9
*device
,
1474 struct NineBaseTexture9
*tex
)
1476 BOOL enabled
= FALSE
;
1477 BOOL shadow
= FALSE
;
1479 D3DRESOURCETYPE type
= D3DRTYPE_TEXTURE
;
1481 struct pipe_resource
*res
= NULL
;
1482 struct pipe_sampler_view
*view0
= NULL
, *view1
= NULL
;
1484 /* For managed pool, the data can be initially incomplete.
1485 * In that case, the texture is rebound later
1486 * (in NineBaseTexture9_Validate/NineBaseTexture9_UploadSelf). */
1487 if (tex
&& tex
->base
.resource
) {
1489 shadow
= tex
->shadow
;
1490 lod
= tex
->managed
.lod
;
1491 type
= tex
->base
.type
;
1492 pstype
= tex
->pstype
;
1493 res
= tex
->base
.resource
;
1494 view0
= NineBaseTexture9_GetSamplerView(tex
, 0);
1495 view1
= NineBaseTexture9_GetSamplerView(tex
, 1);
1498 nine_context_set_texture_apply(device
, Stage
, enabled
,
1499 shadow
, lod
, type
, pstype
,
1503 CSMT_ITEM_NO_WAIT(nine_context_set_sampler_state
,
1504 ARG_VAL(DWORD
, Sampler
),
1505 ARG_VAL(D3DSAMPLERSTATETYPE
, Type
),
1506 ARG_VAL(DWORD
, Value
))
1508 struct nine_context
*context
= &device
->context
;
1510 if (unlikely(!nine_check_sampler_state_value(Type
, Value
)))
1513 context
->samp
[Sampler
][Type
] = Value
;
1514 context
->changed
.group
|= NINE_STATE_SAMPLER
;
1515 context
->changed
.sampler
[Sampler
] |= 1 << Type
;
1518 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_apply
,
1519 ARG_VAL(UINT
, StreamNumber
),
1520 ARG_BIND_RES(struct pipe_resource
, res
),
1521 ARG_VAL(UINT
, OffsetInBytes
),
1522 ARG_VAL(UINT
, Stride
))
1524 struct nine_context
*context
= &device
->context
;
1525 const unsigned i
= StreamNumber
;
1527 context
->vtxbuf
[i
].stride
= Stride
;
1528 context
->vtxbuf
[i
].buffer_offset
= OffsetInBytes
;
1529 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
, res
);
1531 context
->changed
.vtxbuf
|= 1 << StreamNumber
;
1535 nine_context_set_stream_source(struct NineDevice9
*device
,
1537 struct NineVertexBuffer9
*pVBuf9
,
1541 struct pipe_resource
*res
= NULL
;
1544 res
= NineVertexBuffer9_GetResource(pVBuf9
);
1545 /* in the future when there is internal offset, add it
1546 * to OffsetInBytes */
1548 nine_context_set_stream_source_apply(device
, StreamNumber
,
1553 CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_freq
,
1554 ARG_VAL(UINT
, StreamNumber
),
1555 ARG_VAL(UINT
, Setting
))
1557 struct nine_context
*context
= &device
->context
;
1559 context
->stream_freq
[StreamNumber
] = Setting
;
1561 if (Setting
& D3DSTREAMSOURCE_INSTANCEDATA
)
1562 context
->stream_instancedata_mask
|= 1 << StreamNumber
;
1564 context
->stream_instancedata_mask
&= ~(1 << StreamNumber
);
1566 if (StreamNumber
!= 0)
1567 context
->changed
.group
|= NINE_STATE_STREAMFREQ
;
1570 CSMT_ITEM_NO_WAIT(nine_context_set_indices_apply
,
1571 ARG_BIND_RES(struct pipe_resource
, res
),
1572 ARG_VAL(UINT
, IndexSize
),
1573 ARG_VAL(UINT
, OffsetInBytes
))
1575 struct nine_context
*context
= &device
->context
;
1577 context
->idxbuf
.index_size
= IndexSize
;
1578 context
->idxbuf
.offset
= OffsetInBytes
;
1579 pipe_resource_reference(&context
->idxbuf
.buffer
, res
);
1580 context
->idxbuf
.user_buffer
= NULL
;
1582 context
->changed
.group
|= NINE_STATE_IDXBUF
;
1586 nine_context_set_indices(struct NineDevice9
*device
,
1587 struct NineIndexBuffer9
*idxbuf
)
1589 const struct pipe_index_buffer
*pipe_idxbuf
;
1590 struct pipe_resource
*res
= NULL
;
1592 UINT OffsetInBytes
= 0;
1595 pipe_idxbuf
= NineIndexBuffer9_GetBuffer(idxbuf
);
1596 IndexSize
= pipe_idxbuf
->index_size
;
1597 res
= pipe_idxbuf
->buffer
;
1598 OffsetInBytes
= pipe_idxbuf
->offset
;
1601 nine_context_set_indices_apply(device
, res
, IndexSize
, OffsetInBytes
);
1604 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_declaration
,
1605 ARG_BIND_REF(struct NineVertexDeclaration9
, vdecl
))
1607 struct nine_context
*context
= &device
->context
;
1608 BOOL was_programmable_vs
= context
->programmable_vs
;
1610 nine_bind(&context
->vdecl
, vdecl
);
1612 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1613 if (was_programmable_vs
!= context
->programmable_vs
) {
1614 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1615 context
->changed
.group
|= NINE_STATE_VS
;
1618 context
->changed
.group
|= NINE_STATE_VDECL
;
1621 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader
,
1622 ARG_BIND_REF(struct NineVertexShader9
, pShader
))
1624 struct nine_context
*context
= &device
->context
;
1625 BOOL was_programmable_vs
= context
->programmable_vs
;
1627 nine_bind(&context
->vs
, pShader
);
1629 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
1631 /* ff -> non-ff: commit back non-ff constants */
1632 if (!was_programmable_vs
&& context
->programmable_vs
)
1633 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
;
1635 context
->changed
.group
|= NINE_STATE_VS
;
1638 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_f
,
1639 ARG_VAL(UINT
, StartRegister
),
1640 ARG_MEM(float, pConstantData
),
1641 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1642 ARG_VAL(UINT
, Vector4fCount
))
1644 struct nine_context
*context
= &device
->context
;
1645 float *vs_const_f
= device
->may_swvp
? context
->vs_const_f_swvp
: context
->vs_const_f
;
1647 memcpy(&vs_const_f
[StartRegister
* 4],
1649 pConstantData_size
);
1651 if (device
->may_swvp
) {
1652 Vector4fCount
= MIN2(StartRegister
+ Vector4fCount
, NINE_MAX_CONST_F
) - StartRegister
;
1653 if (StartRegister
< NINE_MAX_CONST_F
)
1654 memcpy(&context
->vs_const_f
[StartRegister
* 4],
1656 Vector4fCount
* 4 * sizeof(context
->vs_const_f
[0]));
1659 context
->changed
.vs_const_f
= TRUE
;
1660 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1663 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_i
,
1664 ARG_VAL(UINT
, StartRegister
),
1665 ARG_MEM(int, pConstantData
),
1666 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1667 ARG_VAL(UINT
, Vector4iCount
))
1669 struct nine_context
*context
= &device
->context
;
1672 if (device
->driver_caps
.vs_integer
) {
1673 memcpy(&context
->vs_const_i
[4 * StartRegister
],
1675 pConstantData_size
);
1677 for (i
= 0; i
< Vector4iCount
; i
++) {
1678 context
->vs_const_i
[4 * (StartRegister
+ i
)] = fui((float)(pConstantData
[4 * i
]));
1679 context
->vs_const_i
[4 * (StartRegister
+ i
) + 1] = fui((float)(pConstantData
[4 * i
+ 1]));
1680 context
->vs_const_i
[4 * (StartRegister
+ i
) + 2] = fui((float)(pConstantData
[4 * i
+ 2]));
1681 context
->vs_const_i
[4 * (StartRegister
+ i
) + 3] = fui((float)(pConstantData
[4 * i
+ 3]));
1685 context
->changed
.vs_const_i
= TRUE
;
1686 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1689 CSMT_ITEM_NO_WAIT(nine_context_set_vertex_shader_constant_b
,
1690 ARG_VAL(UINT
, StartRegister
),
1691 ARG_MEM(BOOL
, pConstantData
),
1692 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1693 ARG_VAL(UINT
, BoolCount
))
1695 struct nine_context
*context
= &device
->context
;
1697 uint32_t bool_true
= device
->driver_caps
.vs_integer
? 0xFFFFFFFF : fui(1.0f
);
1699 (void) pConstantData_size
;
1701 for (i
= 0; i
< BoolCount
; i
++)
1702 context
->vs_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1704 context
->changed
.vs_const_b
= TRUE
;
1705 context
->changed
.group
|= NINE_STATE_VS_CONST
;
1708 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader
,
1709 ARG_BIND_REF(struct NinePixelShader9
, ps
))
1711 struct nine_context
*context
= &device
->context
;
1712 unsigned old_mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1715 /* ff -> non-ff: commit back non-ff constants */
1716 if (!context
->ps
&& ps
)
1717 context
->commit
|= NINE_STATE_COMMIT_CONST_PS
;
1719 nine_bind(&context
->ps
, ps
);
1721 context
->changed
.group
|= NINE_STATE_PS
;
1723 mask
= context
->ps
? context
->ps
->rt_mask
: 1;
1724 /* We need to update cbufs if the pixel shader would
1725 * write to different render targets */
1726 if (mask
!= old_mask
)
1727 context
->changed
.group
|= NINE_STATE_FB
;
1730 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_f
,
1731 ARG_VAL(UINT
, StartRegister
),
1732 ARG_MEM(float, pConstantData
),
1733 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1734 ARG_VAL(UINT
, Vector4fCount
))
1736 struct nine_context
*context
= &device
->context
;
1738 memcpy(&context
->ps_const_f
[StartRegister
* 4],
1740 pConstantData_size
);
1742 context
->changed
.ps_const_f
= TRUE
;
1743 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1746 /* For stateblocks */
1747 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i_transformed
,
1748 ARG_VAL(UINT
, StartRegister
),
1749 ARG_MEM(int, pConstantData
),
1750 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1751 ARG_VAL(UINT
, Vector4iCount
))
1753 struct nine_context
*context
= &device
->context
;
1755 memcpy(&context
->ps_const_i
[StartRegister
][0],
1757 Vector4iCount
* sizeof(context
->ps_const_i
[0]));
1759 context
->changed
.ps_const_i
= TRUE
;
1760 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1763 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_i
,
1764 ARG_VAL(UINT
, StartRegister
),
1765 ARG_MEM(int, pConstantData
),
1766 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1767 ARG_VAL(UINT
, Vector4iCount
))
1769 struct nine_context
*context
= &device
->context
;
1772 if (device
->driver_caps
.ps_integer
) {
1773 memcpy(&context
->ps_const_i
[StartRegister
][0],
1775 pConstantData_size
);
1777 for (i
= 0; i
< Vector4iCount
; i
++) {
1778 context
->ps_const_i
[StartRegister
+i
][0] = fui((float)(pConstantData
[4*i
]));
1779 context
->ps_const_i
[StartRegister
+i
][1] = fui((float)(pConstantData
[4*i
+1]));
1780 context
->ps_const_i
[StartRegister
+i
][2] = fui((float)(pConstantData
[4*i
+2]));
1781 context
->ps_const_i
[StartRegister
+i
][3] = fui((float)(pConstantData
[4*i
+3]));
1784 context
->changed
.ps_const_i
= TRUE
;
1785 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1788 CSMT_ITEM_NO_WAIT(nine_context_set_pixel_shader_constant_b
,
1789 ARG_VAL(UINT
, StartRegister
),
1790 ARG_MEM(BOOL
, pConstantData
),
1791 ARG_MEM_SIZE(unsigned, pConstantData_size
),
1792 ARG_VAL(UINT
, BoolCount
))
1794 struct nine_context
*context
= &device
->context
;
1796 uint32_t bool_true
= device
->driver_caps
.ps_integer
? 0xFFFFFFFF : fui(1.0f
);
1798 (void) pConstantData_size
;
1800 for (i
= 0; i
< BoolCount
; i
++)
1801 context
->ps_const_b
[StartRegister
+ i
] = pConstantData
[i
] ? bool_true
: 0;
1803 context
->changed
.ps_const_b
= TRUE
;
1804 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1807 /* XXX: use resource, as resource might change */
1808 CSMT_ITEM_NO_WAIT(nine_context_set_render_target
,
1809 ARG_VAL(DWORD
, RenderTargetIndex
),
1810 ARG_BIND_REF(struct NineSurface9
, rt
))
1812 struct nine_context
*context
= &device
->context
;
1813 const unsigned i
= RenderTargetIndex
;
1816 context
->viewport
.X
= 0;
1817 context
->viewport
.Y
= 0;
1818 context
->viewport
.Width
= rt
->desc
.Width
;
1819 context
->viewport
.Height
= rt
->desc
.Height
;
1820 context
->viewport
.MinZ
= 0.0f
;
1821 context
->viewport
.MaxZ
= 1.0f
;
1823 context
->scissor
.minx
= 0;
1824 context
->scissor
.miny
= 0;
1825 context
->scissor
.maxx
= rt
->desc
.Width
;
1826 context
->scissor
.maxy
= rt
->desc
.Height
;
1828 context
->changed
.group
|= NINE_STATE_VIEWPORT
| NINE_STATE_SCISSOR
| NINE_STATE_MULTISAMPLE
;
1830 if (context
->rt
[0] &&
1831 (context
->rt
[0]->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
) !=
1832 (rt
->desc
.MultiSampleType
<= D3DMULTISAMPLE_NONMASKABLE
))
1833 context
->changed
.group
|= NINE_STATE_SAMPLE_MASK
;
1836 if (context
->rt
[i
] != rt
) {
1837 nine_bind(&context
->rt
[i
], rt
);
1838 context
->changed
.group
|= NINE_STATE_FB
;
1842 /* XXX: use resource instead of ds, as resource might change */
1843 CSMT_ITEM_NO_WAIT(nine_context_set_depth_stencil
,
1844 ARG_BIND_REF(struct NineSurface9
, ds
))
1846 struct nine_context
*context
= &device
->context
;
1848 nine_bind(&context
->ds
, ds
);
1849 context
->changed
.group
|= NINE_STATE_FB
;
1852 CSMT_ITEM_NO_WAIT(nine_context_set_viewport
,
1853 ARG_COPY_REF(D3DVIEWPORT9
, viewport
))
1855 struct nine_context
*context
= &device
->context
;
1857 context
->viewport
= *viewport
;
1858 context
->changed
.group
|= NINE_STATE_VIEWPORT
;
1861 CSMT_ITEM_NO_WAIT(nine_context_set_scissor
,
1862 ARG_COPY_REF(struct pipe_scissor_state
, scissor
))
1864 struct nine_context
*context
= &device
->context
;
1866 context
->scissor
= *scissor
;
1867 context
->changed
.group
|= NINE_STATE_SCISSOR
;
1870 CSMT_ITEM_NO_WAIT(nine_context_set_transform
,
1871 ARG_VAL(D3DTRANSFORMSTATETYPE
, State
),
1872 ARG_COPY_REF(D3DMATRIX
, pMatrix
))
1874 struct nine_context
*context
= &device
->context
;
1875 D3DMATRIX
*M
= nine_state_access_transform(&context
->ff
, State
, TRUE
);
1878 context
->ff
.changed
.transform
[State
/ 32] |= 1 << (State
% 32);
1879 context
->changed
.group
|= NINE_STATE_FF
;
1882 CSMT_ITEM_NO_WAIT(nine_context_set_material
,
1883 ARG_COPY_REF(D3DMATERIAL9
, pMaterial
))
1885 struct nine_context
*context
= &device
->context
;
1887 context
->ff
.material
= *pMaterial
;
1888 context
->changed
.group
|= NINE_STATE_FF_MATERIAL
;
1891 CSMT_ITEM_NO_WAIT(nine_context_set_light
,
1892 ARG_VAL(DWORD
, Index
),
1893 ARG_COPY_REF(D3DLIGHT9
, pLight
))
1895 struct nine_context
*context
= &device
->context
;
1897 (void)nine_state_set_light(&context
->ff
, Index
, pLight
);
1898 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1902 /* For stateblocks */
1904 nine_context_light_enable_stateblock(struct NineDevice9
*device
,
1905 const uint16_t active_light
[NINE_MAX_LIGHTS_ACTIVE
], /* TODO: use pointer that convey size for csmt */
1906 unsigned int num_lights_active
)
1908 struct nine_context
*context
= &device
->context
;
1910 if (device
->csmt_active
) /* TODO: fix */
1911 nine_csmt_process(device
);
1912 memcpy(context
->ff
.active_light
, active_light
, NINE_MAX_LIGHTS_ACTIVE
* sizeof(context
->ff
.active_light
[0]));
1913 context
->ff
.num_lights_active
= num_lights_active
;
1914 context
->changed
.group
|= NINE_STATE_FF_LIGHTING
;
1917 CSMT_ITEM_NO_WAIT(nine_context_light_enable
,
1918 ARG_VAL(DWORD
, Index
),
1919 ARG_VAL(BOOL
, Enable
))
1921 struct nine_context
*context
= &device
->context
;
1923 nine_state_light_enable(&context
->ff
, &context
->changed
.group
, Index
, Enable
);
1926 CSMT_ITEM_NO_WAIT(nine_context_set_texture_stage_state
,
1927 ARG_VAL(DWORD
, Stage
),
1928 ARG_VAL(D3DTEXTURESTAGESTATETYPE
, Type
),
1929 ARG_VAL(DWORD
, Value
))
1931 struct nine_context
*context
= &device
->context
;
1932 int bumpmap_index
= -1;
1934 context
->ff
.tex_stage
[Stage
][Type
] = Value
;
1936 case D3DTSS_BUMPENVMAT00
:
1937 bumpmap_index
= 4 * Stage
;
1939 case D3DTSS_BUMPENVMAT01
:
1940 bumpmap_index
= 4 * Stage
+ 1;
1942 case D3DTSS_BUMPENVMAT10
:
1943 bumpmap_index
= 4 * Stage
+ 2;
1945 case D3DTSS_BUMPENVMAT11
:
1946 bumpmap_index
= 4 * Stage
+ 3;
1948 case D3DTSS_BUMPENVLSCALE
:
1949 bumpmap_index
= 4 * 8 + 2 * Stage
;
1951 case D3DTSS_BUMPENVLOFFSET
:
1952 bumpmap_index
= 4 * 8 + 2 * Stage
+ 1;
1954 case D3DTSS_TEXTURETRANSFORMFLAGS
:
1955 context
->changed
.group
|= NINE_STATE_PS1X_SHADER
;
1961 if (bumpmap_index
>= 0) {
1962 context
->bumpmap_vars
[bumpmap_index
] = Value
;
1963 context
->changed
.group
|= NINE_STATE_PS_CONST
;
1966 context
->changed
.group
|= NINE_STATE_FF_PSSTAGES
;
1967 context
->ff
.changed
.tex_stage
[Stage
][Type
/ 32] |= 1 << (Type
% 32);
1970 CSMT_ITEM_NO_WAIT(nine_context_set_clip_plane
,
1971 ARG_VAL(DWORD
, Index
),
1972 ARG_COPY_REF(struct nine_clipplane
, pPlane
))
1974 struct nine_context
*context
= &device
->context
;
1976 memcpy(&context
->clip
.ucp
[Index
][0], pPlane
, sizeof(context
->clip
.ucp
[0]));
1977 context
->changed
.ucp
= TRUE
;
1980 CSMT_ITEM_NO_WAIT(nine_context_set_swvp
,
1981 ARG_VAL(boolean
, swvp
))
1983 struct nine_context
*context
= &device
->context
;
1985 context
->swvp
= swvp
;
1986 context
->changed
.group
|= NINE_STATE_SWVP
;
1992 nine_context_apply_stateblock(struct NineDevice9
*device
,
1993 const struct nine_state
*src
)
1995 struct nine_context
*context
= &device
->context
;
1998 context
->changed
.group
|= src
->changed
.group
;
2000 for (i
= 0; i
< ARRAY_SIZE(src
->changed
.rs
); ++i
) {
2001 uint32_t m
= src
->changed
.rs
[i
];
2003 const int r
= ffs(m
) - 1;
2005 context
->rs
[i
* 32 + r
] = nine_fix_render_state_value(i
* 32 + r
, src
->rs_advertised
[i
* 32 + r
]);
2010 if (src
->changed
.texture
) {
2011 uint32_t m
= src
->changed
.texture
;
2014 for (s
= 0; m
; ++s
, m
>>= 1) {
2015 struct NineBaseTexture9
*tex
= src
->texture
[s
];
2018 nine_context_set_texture(device
, s
, tex
);
2023 if (src
->changed
.group
& NINE_STATE_SAMPLER
) {
2026 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2027 uint32_t m
= src
->changed
.sampler
[s
];
2029 const int i
= ffs(m
) - 1;
2031 if (nine_check_sampler_state_value(i
, src
->samp_advertised
[s
][i
]))
2032 context
->samp
[s
][i
] = src
->samp_advertised
[s
][i
];
2034 context
->changed
.sampler
[s
] |= src
->changed
.sampler
[s
];
2038 /* Vertex buffers */
2039 if (src
->changed
.vtxbuf
| src
->changed
.stream_freq
) {
2040 uint32_t m
= src
->changed
.vtxbuf
| src
->changed
.stream_freq
;
2041 for (i
= 0; m
; ++i
, m
>>= 1) {
2042 if (src
->changed
.vtxbuf
& (1 << i
)) {
2043 if (src
->stream
[i
]) {
2044 context
->vtxbuf
[i
].buffer_offset
= src
->vtxbuf
[i
].buffer_offset
;
2045 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
,
2046 src
->stream
[i
] ? NineVertexBuffer9_GetResource(src
->stream
[i
]) : NULL
);
2047 context
->vtxbuf
[i
].stride
= src
->vtxbuf
[i
].stride
;
2050 if (src
->changed
.stream_freq
& (1 << i
)) {
2051 context
->stream_freq
[i
] = src
->stream_freq
[i
];
2052 if (src
->stream_freq
[i
] & D3DSTREAMSOURCE_INSTANCEDATA
)
2053 context
->stream_instancedata_mask
|= 1 << i
;
2055 context
->stream_instancedata_mask
&= ~(1 << i
);
2058 context
->changed
.vtxbuf
|= src
->changed
.vtxbuf
;
2062 if (src
->changed
.group
& NINE_STATE_IDXBUF
)
2063 nine_context_set_indices(device
, src
->idxbuf
);
2065 /* Vertex declaration */
2066 if ((src
->changed
.group
& NINE_STATE_VDECL
) && src
->vdecl
)
2067 nine_context_set_vertex_declaration(device
, src
->vdecl
);
2070 if (src
->changed
.group
& NINE_STATE_VS
)
2071 nine_bind(&context
->vs
, src
->vs
);
2073 context
->programmable_vs
= context
->vs
&& !(context
->vdecl
&& context
->vdecl
->position_t
);
2076 if (src
->changed
.group
& NINE_STATE_PS
)
2077 nine_bind(&context
->ps
, src
->ps
);
2079 /* Vertex constants */
2080 if (src
->changed
.group
& NINE_STATE_VS_CONST
) {
2081 struct nine_range
*r
;
2082 if (device
->may_swvp
) {
2083 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
) {
2086 memcpy(&context
->vs_const_f_swvp
[bgn
* 4],
2087 &src
->vs_const_f
[bgn
* 4],
2088 (end
- bgn
) * 4 * sizeof(float));
2089 if (bgn
< device
->max_vs_const_f
) {
2090 end
= MIN2(end
, device
->max_vs_const_f
);
2091 memcpy(&context
->vs_const_f
[bgn
* 4],
2092 &src
->vs_const_f
[bgn
* 4],
2093 (end
- bgn
) * 4 * sizeof(float));
2097 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
) {
2098 memcpy(&context
->vs_const_f
[r
->bgn
* 4],
2099 &src
->vs_const_f
[r
->bgn
* 4],
2100 (r
->end
- r
->bgn
) * 4 * sizeof(float));
2103 for (r
= src
->changed
.vs_const_i
; r
; r
= r
->next
) {
2104 memcpy(&context
->vs_const_i
[r
->bgn
* 4],
2105 &src
->vs_const_i
[r
->bgn
* 4],
2106 (r
->end
- r
->bgn
) * 4 * sizeof(int));
2108 for (r
= src
->changed
.vs_const_b
; r
; r
= r
->next
) {
2109 memcpy(&context
->vs_const_b
[r
->bgn
],
2110 &src
->vs_const_b
[r
->bgn
],
2111 (r
->end
- r
->bgn
) * sizeof(int));
2113 context
->changed
.vs_const_f
= !!src
->changed
.vs_const_f
;
2114 context
->changed
.vs_const_i
= !!src
->changed
.vs_const_i
;
2115 context
->changed
.vs_const_b
= !!src
->changed
.vs_const_b
;
2118 /* Pixel constants */
2119 if (src
->changed
.group
& NINE_STATE_PS_CONST
) {
2120 struct nine_range
*r
;
2121 for (r
= src
->changed
.ps_const_f
; r
; r
= r
->next
) {
2122 memcpy(&context
->ps_const_f
[r
->bgn
* 4],
2123 &src
->ps_const_f
[r
->bgn
* 4],
2124 (r
->end
- r
->bgn
) * 4 * sizeof(float));
2126 if (src
->changed
.ps_const_i
) {
2127 uint16_t m
= src
->changed
.ps_const_i
;
2128 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2130 memcpy(context
->ps_const_i
[i
], src
->ps_const_i
[i
], 4 * sizeof(int));
2132 if (src
->changed
.ps_const_b
) {
2133 uint16_t m
= src
->changed
.ps_const_b
;
2134 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2136 context
->ps_const_b
[i
] = src
->ps_const_b
[i
];
2138 context
->changed
.ps_const_f
= !!src
->changed
.ps_const_f
;
2139 context
->changed
.ps_const_i
= !!src
->changed
.ps_const_i
;
2140 context
->changed
.ps_const_b
= !!src
->changed
.ps_const_b
;
2144 if (src
->changed
.group
& NINE_STATE_VIEWPORT
)
2145 context
->viewport
= src
->viewport
;
2148 if (src
->changed
.group
& NINE_STATE_SCISSOR
)
2149 context
->scissor
= src
->scissor
;
2151 /* User Clip Planes */
2152 if (src
->changed
.ucp
) {
2153 for (i
= 0; i
< PIPE_MAX_CLIP_PLANES
; ++i
)
2154 if (src
->changed
.ucp
& (1 << i
))
2155 memcpy(context
->clip
.ucp
[i
],
2156 src
->clip
.ucp
[i
], sizeof(src
->clip
.ucp
[0]));
2157 context
->changed
.ucp
= TRUE
;
2160 if (!(src
->changed
.group
& NINE_STATE_FF
))
2163 /* Fixed function state. */
2165 if (src
->changed
.group
& NINE_STATE_FF_MATERIAL
)
2166 context
->ff
.material
= src
->ff
.material
;
2168 if (src
->changed
.group
& NINE_STATE_FF_PSSTAGES
) {
2170 for (s
= 0; s
< NINE_MAX_TEXTURE_STAGES
; ++s
) {
2171 for (i
= 0; i
< NINED3DTSS_COUNT
; ++i
)
2172 if (src
->ff
.changed
.tex_stage
[s
][i
/ 32] & (1 << (i
% 32)))
2173 context
->ff
.tex_stage
[s
][i
] = src
->ff
.tex_stage
[s
][i
];
2176 if (src
->changed
.group
& NINE_STATE_FF_LIGHTING
) {
2177 unsigned num_lights
= MAX2(context
->ff
.num_lights
, src
->ff
.num_lights
);
2178 /* Can happen if the stateblock had recorded the creation of
2180 if (context
->ff
.num_lights
< num_lights
) {
2181 context
->ff
.light
= REALLOC(context
->ff
.light
,
2182 context
->ff
.num_lights
* sizeof(D3DLIGHT9
),
2183 num_lights
* sizeof(D3DLIGHT9
));
2184 memset(&context
->ff
.light
[context
->ff
.num_lights
], 0, (num_lights
- context
->ff
.num_lights
) * sizeof(D3DLIGHT9
));
2185 for (i
= context
->ff
.num_lights
; i
< num_lights
; ++i
)
2186 context
->ff
.light
[i
].Type
= (D3DLIGHTTYPE
)NINED3DLIGHT_INVALID
;
2187 context
->ff
.num_lights
= num_lights
;
2189 /* src->ff.num_lights < num_lights has been handled before */
2190 assert (src
->ff
.num_lights
== num_lights
);
2192 for (i
= 0; i
< num_lights
; ++i
)
2193 if (src
->ff
.light
[i
].Type
!= NINED3DLIGHT_INVALID
)
2194 context
->ff
.light
[i
] = src
->ff
.light
[i
];
2196 memcpy(context
->ff
.active_light
, src
->ff
.active_light
, sizeof(src
->ff
.active_light
) );
2197 context
->ff
.num_lights_active
= src
->ff
.num_lights_active
;
2199 if (src
->changed
.group
& NINE_STATE_FF_VSTRANSF
) {
2200 for (i
= 0; i
< ARRAY_SIZE(src
->ff
.changed
.transform
); ++i
) {
2202 if (!src
->ff
.changed
.transform
[i
])
2204 for (s
= i
* 32; s
< (i
* 32 + 32); ++s
) {
2205 if (!(src
->ff
.changed
.transform
[i
] & (1 << (s
% 32))))
2207 *nine_state_access_transform(&context
->ff
, s
, TRUE
) =
2208 *nine_state_access_transform( /* const because !alloc */
2209 (struct nine_ff_state
*)&src
->ff
, s
, FALSE
);
2211 context
->ff
.changed
.transform
[i
] |= src
->ff
.changed
.transform
[i
];
2218 /* Do not write to nine_context directly. Slower,
2219 * but works with csmt. TODO: write a special csmt version that
2220 * would record the list of commands as much as possible,
2221 * and use the version above else.
2224 nine_context_apply_stateblock(struct NineDevice9
*device
,
2225 const struct nine_state
*src
)
2229 /* No need to apply src->changed.group, since all calls do
2230 * set context->changed.group */
2232 for (i
= 0; i
< ARRAY_SIZE(src
->changed
.rs
); ++i
) {
2233 uint32_t m
= src
->changed
.rs
[i
];
2235 const int r
= ffs(m
) - 1;
2237 nine_context_set_render_state(device
, i
* 32 + r
, src
->rs_advertised
[i
* 32 + r
]);
2242 if (src
->changed
.texture
) {
2243 uint32_t m
= src
->changed
.texture
;
2246 for (s
= 0; m
; ++s
, m
>>= 1) {
2247 struct NineBaseTexture9
*tex
= src
->texture
[s
];
2250 nine_context_set_texture(device
, s
, tex
);
2255 if (src
->changed
.group
& NINE_STATE_SAMPLER
) {
2258 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2259 uint32_t m
= src
->changed
.sampler
[s
];
2261 const int i
= ffs(m
) - 1;
2263 nine_context_set_sampler_state(device
, s
, i
, src
->samp_advertised
[s
][i
]);
2268 /* Vertex buffers */
2269 if (src
->changed
.vtxbuf
| src
->changed
.stream_freq
) {
2270 uint32_t m
= src
->changed
.vtxbuf
| src
->changed
.stream_freq
;
2271 for (i
= 0; m
; ++i
, m
>>= 1) {
2272 if (src
->changed
.vtxbuf
& (1 << i
))
2273 nine_context_set_stream_source(device
, i
, src
->stream
[i
], src
->vtxbuf
[i
].buffer_offset
, src
->vtxbuf
[i
].stride
);
2274 if (src
->changed
.stream_freq
& (1 << i
))
2275 nine_context_set_stream_source_freq(device
, i
, src
->stream_freq
[i
]);
2280 if (src
->changed
.group
& NINE_STATE_IDXBUF
)
2281 nine_context_set_indices(device
, src
->idxbuf
);
2283 /* Vertex declaration */
2284 if ((src
->changed
.group
& NINE_STATE_VDECL
) && src
->vdecl
)
2285 nine_context_set_vertex_declaration(device
, src
->vdecl
);
2288 if (src
->changed
.group
& NINE_STATE_VS
)
2289 nine_context_set_vertex_shader(device
, src
->vs
);
2292 if (src
->changed
.group
& NINE_STATE_PS
)
2293 nine_context_set_pixel_shader(device
, src
->ps
);
2295 /* Vertex constants */
2296 if (src
->changed
.group
& NINE_STATE_VS_CONST
) {
2297 struct nine_range
*r
;
2298 for (r
= src
->changed
.vs_const_f
; r
; r
= r
->next
)
2299 nine_context_set_vertex_shader_constant_f(device
, r
->bgn
,
2300 &src
->vs_const_f
[r
->bgn
* 4],
2301 sizeof(float[4]) * (r
->end
- r
->bgn
),
2303 for (r
= src
->changed
.vs_const_i
; r
; r
= r
->next
)
2304 nine_context_set_vertex_shader_constant_i(device
, r
->bgn
,
2305 &src
->vs_const_i
[r
->bgn
* 4],
2306 sizeof(int[4]) * (r
->end
- r
->bgn
),
2308 for (r
= src
->changed
.vs_const_b
; r
; r
= r
->next
)
2309 nine_context_set_vertex_shader_constant_b(device
, r
->bgn
,
2310 &src
->vs_const_b
[r
->bgn
* 4],
2311 sizeof(BOOL
) * (r
->end
- r
->bgn
),
2315 /* Pixel constants */
2316 if (src
->changed
.group
& NINE_STATE_PS_CONST
) {
2317 struct nine_range
*r
;
2318 for (r
= src
->changed
.ps_const_f
; r
; r
= r
->next
)
2319 nine_context_set_pixel_shader_constant_f(device
, r
->bgn
,
2320 &src
->ps_const_f
[r
->bgn
* 4],
2321 sizeof(float[4]) * (r
->end
- r
->bgn
),
2323 if (src
->changed
.ps_const_i
) {
2324 uint16_t m
= src
->changed
.ps_const_i
;
2325 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2327 nine_context_set_pixel_shader_constant_i_transformed(device
, i
,
2328 src
->ps_const_i
[i
], sizeof(int[4]), 1);
2330 if (src
->changed
.ps_const_b
) {
2331 uint16_t m
= src
->changed
.ps_const_b
;
2332 for (i
= ffs(m
) - 1, m
>>= i
; m
; ++i
, m
>>= 1)
2334 nine_context_set_pixel_shader_constant_b(device
, i
,
2335 &src
->ps_const_b
[i
], sizeof(BOOL
), 1);
2340 if (src
->changed
.group
& NINE_STATE_VIEWPORT
)
2341 nine_context_set_viewport(device
, &src
->viewport
);
2344 if (src
->changed
.group
& NINE_STATE_SCISSOR
)
2345 nine_context_set_scissor(device
, &src
->scissor
);
2347 /* User Clip Planes */
2348 if (src
->changed
.ucp
)
2349 for (i
= 0; i
< PIPE_MAX_CLIP_PLANES
; ++i
)
2350 if (src
->changed
.ucp
& (1 << i
))
2351 nine_context_set_clip_plane(device
, i
, (struct nine_clipplane
*)&src
->clip
.ucp
[i
][0]);
2353 if (!(src
->changed
.group
& NINE_STATE_FF
))
2356 /* Fixed function state. */
2358 if (src
->changed
.group
& NINE_STATE_FF_MATERIAL
)
2359 nine_context_set_material(device
, &src
->ff
.material
);
2361 if (src
->changed
.group
& NINE_STATE_FF_PSSTAGES
) {
2363 for (s
= 0; s
< NINE_MAX_TEXTURE_STAGES
; ++s
) {
2364 for (i
= 0; i
< NINED3DTSS_COUNT
; ++i
)
2365 if (src
->ff
.changed
.tex_stage
[s
][i
/ 32] & (1 << (i
% 32)))
2366 nine_context_set_texture_stage_state(device
, s
, i
, src
->ff
.tex_stage
[s
][i
]);
2369 if (src
->changed
.group
& NINE_STATE_FF_LIGHTING
) {
2370 for (i
= 0; i
< src
->ff
.num_lights
; ++i
)
2371 if (src
->ff
.light
[i
].Type
!= NINED3DLIGHT_INVALID
)
2372 nine_context_set_light(device
, i
, &src
->ff
.light
[i
]);
2374 nine_context_light_enable_stateblock(device
, src
->ff
.active_light
, src
->ff
.num_lights_active
);
2376 if (src
->changed
.group
& NINE_STATE_FF_VSTRANSF
) {
2377 for (i
= 0; i
< ARRAY_SIZE(src
->ff
.changed
.transform
); ++i
) {
2379 if (!src
->ff
.changed
.transform
[i
])
2381 for (s
= i
* 32; s
< (i
* 32 + 32); ++s
) {
2382 if (!(src
->ff
.changed
.transform
[i
] & (1 << (s
% 32))))
2384 nine_context_set_transform(device
, s
,
2385 nine_state_access_transform(
2386 (struct nine_ff_state
*)&src
->ff
,
2394 nine_update_state_framebuffer_clear(struct NineDevice9
*device
)
2396 struct nine_context
*context
= &device
->context
;
2398 if (context
->changed
.group
& NINE_STATE_FB
)
2399 update_framebuffer(device
, TRUE
);
2402 CSMT_ITEM_NO_WAIT(nine_context_clear_fb
,
2403 ARG_VAL(DWORD
, Count
),
2404 ARG_COPY_REF(D3DRECT
, pRects
),
2405 ARG_VAL(DWORD
, Flags
),
2406 ARG_VAL(D3DCOLOR
, Color
),
2408 ARG_VAL(DWORD
, Stencil
))
2410 struct nine_context
*context
= &device
->context
;
2411 const int sRGB
= context
->rs
[D3DRS_SRGBWRITEENABLE
] ? 1 : 0;
2412 struct pipe_surface
*cbuf
, *zsbuf
;
2413 struct pipe_context
*pipe
= context
->pipe
;
2414 struct NineSurface9
*zsbuf_surf
= context
->ds
;
2415 struct NineSurface9
*rt
;
2418 union pipe_color_union rgba
;
2419 unsigned rt_mask
= 0;
2422 nine_update_state_framebuffer_clear(device
);
2424 if (Flags
& D3DCLEAR_TARGET
) bufs
|= PIPE_CLEAR_COLOR
;
2425 /* Ignore Z buffer if not bound */
2426 if (context
->pipe_data
.fb
.zsbuf
!= NULL
) {
2427 if (Flags
& D3DCLEAR_ZBUFFER
) bufs
|= PIPE_CLEAR_DEPTH
;
2428 if (Flags
& D3DCLEAR_STENCIL
) bufs
|= PIPE_CLEAR_STENCIL
;
2432 d3dcolor_to_pipe_color_union(&rgba
, Color
);
2434 rect
.x1
= context
->viewport
.X
;
2435 rect
.y1
= context
->viewport
.Y
;
2436 rect
.x2
= context
->viewport
.Width
+ rect
.x1
;
2437 rect
.y2
= context
->viewport
.Height
+ rect
.y1
;
2439 /* Both rectangles apply, which is weird, but that's D3D9. */
2440 if (context
->rs
[D3DRS_SCISSORTESTENABLE
]) {
2441 rect
.x1
= MAX2(rect
.x1
, context
->scissor
.minx
);
2442 rect
.y1
= MAX2(rect
.y1
, context
->scissor
.miny
);
2443 rect
.x2
= MIN2(rect
.x2
, context
->scissor
.maxx
);
2444 rect
.y2
= MIN2(rect
.y2
, context
->scissor
.maxy
);
2448 /* Maybe apps like to specify a large rect ? */
2449 if (pRects
[0].x1
<= rect
.x1
&& pRects
[0].x2
>= rect
.x2
&&
2450 pRects
[0].y1
<= rect
.y1
&& pRects
[0].y2
>= rect
.y2
) {
2451 DBG("First rect covers viewport.\n");
2457 if (rect
.x1
>= context
->pipe_data
.fb
.width
|| rect
.y1
>= context
->pipe_data
.fb
.height
)
2460 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2461 if (context
->rt
[i
] && context
->rt
[i
]->desc
.Format
!= D3DFMT_NULL
)
2465 /* fast path, clears everything at once */
2467 (!(bufs
& PIPE_CLEAR_COLOR
) || (rt_mask
== context
->rt_mask
)) &&
2468 rect
.x1
== 0 && rect
.y1
== 0 &&
2469 /* Case we clear only render target. Check clear region vs rt. */
2470 ((!(bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2471 rect
.x2
>= context
->pipe_data
.fb
.width
&&
2472 rect
.y2
>= context
->pipe_data
.fb
.height
) ||
2473 /* Case we clear depth buffer (and eventually rt too).
2474 * depth buffer size is always >= rt size. Compare to clear region */
2475 ((bufs
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
2476 rect
.x2
>= zsbuf_surf
->desc
.Width
&&
2477 rect
.y2
>= zsbuf_surf
->desc
.Height
))) {
2478 DBG("Clear fast path\n");
2479 pipe
->clear(pipe
, bufs
, &rgba
, Z
, Stencil
);
2488 for (i
= 0; i
< device
->caps
.NumSimultaneousRTs
; ++i
) {
2489 rt
= context
->rt
[i
];
2490 if (!rt
|| rt
->desc
.Format
== D3DFMT_NULL
||
2491 !(bufs
& PIPE_CLEAR_COLOR
))
2492 continue; /* save space, compiler should hoist this */
2493 cbuf
= NineSurface9_GetSurface(rt
, sRGB
);
2494 for (r
= 0; r
< Count
; ++r
) {
2495 /* Don't trust users to pass these in the right order. */
2496 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2497 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2498 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2499 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2501 /* Drop negative rectangles (like wine expects). */
2502 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2503 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2506 x1
= MAX2(x1
, rect
.x1
);
2507 y1
= MAX2(y1
, rect
.y1
);
2508 x2
= MIN3(x2
, rect
.x2
, rt
->desc
.Width
);
2509 y2
= MIN3(y2
, rect
.y2
, rt
->desc
.Height
);
2511 DBG("Clearing (%u..%u)x(%u..%u)\n", x1
, x2
, y1
, y2
);
2512 pipe
->clear_render_target(pipe
, cbuf
, &rgba
,
2513 x1
, y1
, x2
- x1
, y2
- y1
, false);
2516 if (!(bufs
& PIPE_CLEAR_DEPTHSTENCIL
))
2519 bufs
&= PIPE_CLEAR_DEPTHSTENCIL
;
2521 for (r
= 0; r
< Count
; ++r
) {
2522 unsigned x1
= MIN2(pRects
[r
].x1
, pRects
[r
].x2
);
2523 unsigned y1
= MIN2(pRects
[r
].y1
, pRects
[r
].y2
);
2524 unsigned x2
= MAX2(pRects
[r
].x1
, pRects
[r
].x2
);
2525 unsigned y2
= MAX2(pRects
[r
].y1
, pRects
[r
].y2
);
2527 /* Drop negative rectangles. */
2528 if (pRects
[r
].x1
> pRects
[r
].x2
) continue;
2529 if (pRects
[r
].y1
> pRects
[r
].y2
) continue;
2532 x1
= MIN2(x1
, rect
.x1
);
2533 y1
= MIN2(y1
, rect
.y1
);
2534 x2
= MIN3(x2
, rect
.x2
, zsbuf_surf
->desc
.Width
);
2535 y2
= MIN3(y2
, rect
.y2
, zsbuf_surf
->desc
.Height
);
2537 zsbuf
= NineSurface9_GetSurface(zsbuf_surf
, 0);
2539 pipe
->clear_depth_stencil(pipe
, zsbuf
, bufs
, Z
, Stencil
,
2540 x1
, y1
, x2
- x1
, y2
- y1
, false);
2547 init_draw_info(struct pipe_draw_info
*info
,
2548 struct NineDevice9
*dev
, D3DPRIMITIVETYPE type
, UINT count
)
2550 info
->mode
= d3dprimitivetype_to_pipe_prim(type
);
2551 info
->count
= prim_count_to_vertex_count(type
, count
);
2552 info
->start_instance
= 0;
2553 info
->instance_count
= 1;
2554 if (dev
->context
.stream_instancedata_mask
& dev
->context
.stream_usage_mask
)
2555 info
->instance_count
= MAX2(dev
->context
.stream_freq
[0] & 0x7FFFFF, 1);
2556 info
->primitive_restart
= FALSE
;
2557 info
->restart_index
= 0;
2558 info
->count_from_stream_output
= NULL
;
2559 info
->indirect
= NULL
;
2560 info
->indirect_params
= NULL
;
2563 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive
,
2564 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2565 ARG_VAL(UINT
, StartVertex
),
2566 ARG_VAL(UINT
, PrimitiveCount
))
2568 struct nine_context
*context
= &device
->context
;
2569 struct pipe_draw_info info
;
2571 nine_update_state(device
);
2573 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2574 info
.indexed
= FALSE
;
2575 info
.start
= StartVertex
;
2576 info
.index_bias
= 0;
2577 info
.min_index
= info
.start
;
2578 info
.max_index
= info
.count
- 1;
2580 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2583 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive
,
2584 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2585 ARG_VAL(INT
, BaseVertexIndex
),
2586 ARG_VAL(UINT
, MinVertexIndex
),
2587 ARG_VAL(UINT
, NumVertices
),
2588 ARG_VAL(UINT
, StartIndex
),
2589 ARG_VAL(UINT
, PrimitiveCount
))
2591 struct nine_context
*context
= &device
->context
;
2592 struct pipe_draw_info info
;
2594 nine_update_state(device
);
2596 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2597 info
.indexed
= TRUE
;
2598 info
.start
= StartIndex
;
2599 info
.index_bias
= BaseVertexIndex
;
2600 /* These don't include index bias: */
2601 info
.min_index
= MinVertexIndex
;
2602 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2604 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2607 CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf
,
2608 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2609 ARG_VAL(UINT
, PrimitiveCount
),
2610 ARG_BIND_BUF(struct pipe_vertex_buffer
, vtxbuf
))
2612 struct nine_context
*context
= &device
->context
;
2613 struct pipe_draw_info info
;
2615 nine_update_state(device
);
2617 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2618 info
.indexed
= FALSE
;
2620 info
.index_bias
= 0;
2622 info
.max_index
= info
.count
- 1;
2624 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vtxbuf
);
2626 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2629 CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf
,
2630 ARG_VAL(D3DPRIMITIVETYPE
, PrimitiveType
),
2631 ARG_VAL(UINT
, MinVertexIndex
),
2632 ARG_VAL(UINT
, NumVertices
),
2633 ARG_VAL(UINT
, PrimitiveCount
),
2634 ARG_BIND_BUF(struct pipe_vertex_buffer
, vbuf
),
2635 ARG_BIND_BUF(struct pipe_index_buffer
, ibuf
))
2637 struct nine_context
*context
= &device
->context
;
2638 struct pipe_draw_info info
;
2640 nine_update_state(device
);
2642 init_draw_info(&info
, device
, PrimitiveType
, PrimitiveCount
);
2643 info
.indexed
= TRUE
;
2645 info
.index_bias
= 0;
2646 info
.min_index
= MinVertexIndex
;
2647 info
.max_index
= MinVertexIndex
+ NumVertices
- 1;
2648 context
->pipe
->set_vertex_buffers(context
->pipe
, 0, 1, vbuf
);
2649 context
->pipe
->set_index_buffer(context
->pipe
, ibuf
);
2651 context
->pipe
->draw_vbo(context
->pipe
, &info
);
2654 CSMT_ITEM_NO_WAIT(nine_context_resource_copy_region
,
2655 ARG_BIND_RES(struct pipe_resource
, dst_res
),
2656 ARG_VAL(unsigned, dst_level
),
2657 ARG_COPY_REF(struct pipe_box
, dst_box
),
2658 ARG_BIND_RES(struct pipe_resource
, src_res
),
2659 ARG_VAL(unsigned, src_level
),
2660 ARG_COPY_REF(struct pipe_box
, src_box
))
2662 struct nine_context
*context
= &device
->context
;
2664 context
->pipe
->resource_copy_region(context
->pipe
,
2666 dst_box
->x
, dst_box
->y
, dst_box
->z
,
2671 CSMT_ITEM_NO_WAIT(nine_context_blit
,
2672 ARG_BIND_BLIT(struct pipe_blit_info
, blit
))
2674 struct nine_context
*context
= &device
->context
;
2676 context
->pipe
->blit(context
->pipe
, blit
);
2679 CSMT_ITEM_NO_WAIT(nine_context_clear_render_target
,
2680 ARG_BIND_REF(struct NineSurface9
, surface
),
2681 ARG_VAL(D3DCOLOR
, color
),
2684 ARG_VAL(UINT
, width
),
2685 ARG_VAL(UINT
, height
))
2687 struct nine_context
*context
= &device
->context
;
2688 struct pipe_surface
*surf
;
2689 union pipe_color_union rgba
;
2691 d3dcolor_to_pipe_color_union(&rgba
, color
);
2692 surf
= NineSurface9_GetSurface(surface
, 0);
2693 context
->pipe
->clear_render_target(context
->pipe
, surf
, &rgba
, x
, y
, width
, height
, false);
2696 CSMT_ITEM_NO_WAIT(nine_context_gen_mipmap
,
2697 ARG_BIND_RES(struct pipe_resource
, res
),
2698 ARG_VAL(UINT
, base_level
),
2699 ARG_VAL(UINT
, last_level
),
2700 ARG_VAL(UINT
, first_layer
),
2701 ARG_VAL(UINT
, last_layer
),
2702 ARG_VAL(UINT
, filter
))
2704 struct nine_context
*context
= &device
->context
;
2706 util_gen_mipmap(context
->pipe
, res
, res
->format
, base_level
,
2707 last_level
, first_layer
, last_layer
, filter
);
2710 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_range_upload
,
2711 ARG_BIND_RES(struct pipe_resource
, res
),
2712 ARG_VAL(unsigned, offset
),
2713 ARG_VAL(unsigned, size
),
2714 ARG_VAL(const void *, data
))
2716 struct nine_context
*context
= &device
->context
;
2718 context
->pipe
->buffer_subdata(context
->pipe
, res
, 0, offset
, size
, data
);
2722 nine_context_create_query(struct NineDevice9
*device
, unsigned query_type
)
2724 struct pipe_context
*pipe
;
2725 struct pipe_query
*res
;
2727 pipe
= nine_context_get_pipe_acquire(device
);
2728 res
= pipe
->create_query(pipe
, query_type
, 0);
2729 nine_context_get_pipe_release(device
);
2733 CSMT_ITEM_DO_WAIT(nine_context_destroy_query
,
2734 ARG_REF(struct pipe_query
, query
))
2736 struct nine_context
*context
= &device
->context
;
2738 context
->pipe
->destroy_query(context
->pipe
, query
);
2741 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_begin_query
,
2742 ARG_REF(struct pipe_query
, query
))
2744 struct nine_context
*context
= &device
->context
;
2746 (void) context
->pipe
->begin_query(context
->pipe
, query
);
2749 CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_end_query
,
2750 ARG_REF(struct pipe_query
, query
))
2752 struct nine_context
*context
= &device
->context
;
2754 (void) context
->pipe
->end_query(context
->pipe
, query
);
2758 nine_context_get_query_result(struct NineDevice9
*device
, struct pipe_query
*query
,
2759 unsigned *counter
, boolean flush
, boolean wait
,
2760 union pipe_query_result
*result
)
2762 struct pipe_context
*pipe
;
2766 if (device
->csmt_active
)
2767 nine_csmt_process(device
);
2768 } else if (p_atomic_read(counter
) > 0) {
2769 if (flush
&& device
->csmt_active
)
2770 nine_queue_flush(device
->csmt_ctx
->pool
);
2771 DBG("Pending begin/end. Returning\n");
2775 pipe
= nine_context_get_pipe_acquire(device
);
2776 ret
= pipe
->get_query_result(pipe
, query
, wait
, result
);
2777 nine_context_get_pipe_release(device
);
2779 DBG("Query result %s\n", ret
? "found" : "not yet available");
2783 /* State defaults */
2785 static const DWORD nine_render_state_defaults
[NINED3DRS_LAST
+ 1] =
2787 /* [D3DRS_ZENABLE] = D3DZB_TRUE; wine: auto_depth_stencil */
2788 [D3DRS_ZENABLE
] = D3DZB_FALSE
,
2789 [D3DRS_FILLMODE
] = D3DFILL_SOLID
,
2790 [D3DRS_SHADEMODE
] = D3DSHADE_GOURAUD
,
2791 /* [D3DRS_LINEPATTERN] = 0x00000000, */
2792 [D3DRS_ZWRITEENABLE
] = TRUE
,
2793 [D3DRS_ALPHATESTENABLE
] = FALSE
,
2794 [D3DRS_LASTPIXEL
] = TRUE
,
2795 [D3DRS_SRCBLEND
] = D3DBLEND_ONE
,
2796 [D3DRS_DESTBLEND
] = D3DBLEND_ZERO
,
2797 [D3DRS_CULLMODE
] = D3DCULL_CCW
,
2798 [D3DRS_ZFUNC
] = D3DCMP_LESSEQUAL
,
2799 [D3DRS_ALPHAFUNC
] = D3DCMP_ALWAYS
,
2800 [D3DRS_ALPHAREF
] = 0,
2801 [D3DRS_DITHERENABLE
] = FALSE
,
2802 [D3DRS_ALPHABLENDENABLE
] = FALSE
,
2803 [D3DRS_FOGENABLE
] = FALSE
,
2804 [D3DRS_SPECULARENABLE
] = FALSE
,
2805 /* [D3DRS_ZVISIBLE] = 0, */
2806 [D3DRS_FOGCOLOR
] = 0,
2807 [D3DRS_FOGTABLEMODE
] = D3DFOG_NONE
,
2808 [D3DRS_FOGSTART
] = 0x00000000,
2809 [D3DRS_FOGEND
] = 0x3F800000,
2810 [D3DRS_FOGDENSITY
] = 0x3F800000,
2811 /* [D3DRS_EDGEANTIALIAS] = FALSE, */
2812 [D3DRS_RANGEFOGENABLE
] = FALSE
,
2813 [D3DRS_STENCILENABLE
] = FALSE
,
2814 [D3DRS_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2815 [D3DRS_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2816 [D3DRS_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2817 [D3DRS_STENCILREF
] = 0,
2818 [D3DRS_STENCILMASK
] = 0xFFFFFFFF,
2819 [D3DRS_STENCILFUNC
] = D3DCMP_ALWAYS
,
2820 [D3DRS_STENCILWRITEMASK
] = 0xFFFFFFFF,
2821 [D3DRS_TEXTUREFACTOR
] = 0xFFFFFFFF,
2830 [D3DRS_CLIPPING
] = TRUE
,
2831 [D3DRS_LIGHTING
] = TRUE
,
2832 [D3DRS_AMBIENT
] = 0,
2833 [D3DRS_FOGVERTEXMODE
] = D3DFOG_NONE
,
2834 [D3DRS_COLORVERTEX
] = TRUE
,
2835 [D3DRS_LOCALVIEWER
] = TRUE
,
2836 [D3DRS_NORMALIZENORMALS
] = FALSE
,
2837 [D3DRS_DIFFUSEMATERIALSOURCE
] = D3DMCS_COLOR1
,
2838 [D3DRS_SPECULARMATERIALSOURCE
] = D3DMCS_COLOR2
,
2839 [D3DRS_AMBIENTMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2840 [D3DRS_EMISSIVEMATERIALSOURCE
] = D3DMCS_MATERIAL
,
2841 [D3DRS_VERTEXBLEND
] = D3DVBF_DISABLE
,
2842 [D3DRS_CLIPPLANEENABLE
] = 0,
2843 /* [D3DRS_SOFTWAREVERTEXPROCESSING] = FALSE, */
2844 [D3DRS_POINTSIZE
] = 0x3F800000,
2845 [D3DRS_POINTSIZE_MIN
] = 0x3F800000,
2846 [D3DRS_POINTSPRITEENABLE
] = FALSE
,
2847 [D3DRS_POINTSCALEENABLE
] = FALSE
,
2848 [D3DRS_POINTSCALE_A
] = 0x3F800000,
2849 [D3DRS_POINTSCALE_B
] = 0x00000000,
2850 [D3DRS_POINTSCALE_C
] = 0x00000000,
2851 [D3DRS_MULTISAMPLEANTIALIAS
] = TRUE
,
2852 [D3DRS_MULTISAMPLEMASK
] = 0xFFFFFFFF,
2853 [D3DRS_PATCHEDGESTYLE
] = D3DPATCHEDGE_DISCRETE
,
2854 /* [D3DRS_PATCHSEGMENTS] = 0x3F800000, */
2855 [D3DRS_DEBUGMONITORTOKEN
] = 0xDEADCAFE,
2856 [D3DRS_POINTSIZE_MAX
] = 0x3F800000, /* depends on cap */
2857 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = FALSE
,
2858 [D3DRS_COLORWRITEENABLE
] = 0x0000000f,
2859 [D3DRS_TWEENFACTOR
] = 0x00000000,
2860 [D3DRS_BLENDOP
] = D3DBLENDOP_ADD
,
2861 [D3DRS_POSITIONDEGREE
] = D3DDEGREE_CUBIC
,
2862 [D3DRS_NORMALDEGREE
] = D3DDEGREE_LINEAR
,
2863 [D3DRS_SCISSORTESTENABLE
] = FALSE
,
2864 [D3DRS_SLOPESCALEDEPTHBIAS
] = 0,
2865 [D3DRS_MINTESSELLATIONLEVEL
] = 0x3F800000,
2866 [D3DRS_MAXTESSELLATIONLEVEL
] = 0x3F800000,
2867 [D3DRS_ANTIALIASEDLINEENABLE
] = FALSE
,
2868 [D3DRS_ADAPTIVETESS_X
] = 0x00000000,
2869 [D3DRS_ADAPTIVETESS_Y
] = 0x00000000,
2870 [D3DRS_ADAPTIVETESS_Z
] = 0x3F800000,
2871 [D3DRS_ADAPTIVETESS_W
] = 0x00000000,
2872 [D3DRS_ENABLEADAPTIVETESSELLATION
] = FALSE
,
2873 [D3DRS_TWOSIDEDSTENCILMODE
] = FALSE
,
2874 [D3DRS_CCW_STENCILFAIL
] = D3DSTENCILOP_KEEP
,
2875 [D3DRS_CCW_STENCILZFAIL
] = D3DSTENCILOP_KEEP
,
2876 [D3DRS_CCW_STENCILPASS
] = D3DSTENCILOP_KEEP
,
2877 [D3DRS_CCW_STENCILFUNC
] = D3DCMP_ALWAYS
,
2878 [D3DRS_COLORWRITEENABLE1
] = 0x0000000F,
2879 [D3DRS_COLORWRITEENABLE2
] = 0x0000000F,
2880 [D3DRS_COLORWRITEENABLE3
] = 0x0000000F,
2881 [D3DRS_BLENDFACTOR
] = 0xFFFFFFFF,
2882 [D3DRS_SRGBWRITEENABLE
] = 0,
2883 [D3DRS_DEPTHBIAS
] = 0,
2892 [D3DRS_SEPARATEALPHABLENDENABLE
] = FALSE
,
2893 [D3DRS_SRCBLENDALPHA
] = D3DBLEND_ONE
,
2894 [D3DRS_DESTBLENDALPHA
] = D3DBLEND_ZERO
,
2895 [D3DRS_BLENDOPALPHA
] = D3DBLENDOP_ADD
,
2896 [NINED3DRS_VSPOINTSIZE
] = FALSE
,
2897 [NINED3DRS_RTMASK
] = 0xf,
2898 [NINED3DRS_ALPHACOVERAGE
] = FALSE
,
2899 [NINED3DRS_MULTISAMPLE
] = FALSE
2901 static const DWORD nine_tex_stage_state_defaults
[NINED3DTSS_LAST
+ 1] =
2903 [D3DTSS_COLOROP
] = D3DTOP_DISABLE
,
2904 [D3DTSS_ALPHAOP
] = D3DTOP_DISABLE
,
2905 [D3DTSS_COLORARG1
] = D3DTA_TEXTURE
,
2906 [D3DTSS_COLORARG2
] = D3DTA_CURRENT
,
2907 [D3DTSS_COLORARG0
] = D3DTA_CURRENT
,
2908 [D3DTSS_ALPHAARG1
] = D3DTA_TEXTURE
,
2909 [D3DTSS_ALPHAARG2
] = D3DTA_CURRENT
,
2910 [D3DTSS_ALPHAARG0
] = D3DTA_CURRENT
,
2911 [D3DTSS_RESULTARG
] = D3DTA_CURRENT
,
2912 [D3DTSS_BUMPENVMAT00
] = 0,
2913 [D3DTSS_BUMPENVMAT01
] = 0,
2914 [D3DTSS_BUMPENVMAT10
] = 0,
2915 [D3DTSS_BUMPENVMAT11
] = 0,
2916 [D3DTSS_BUMPENVLSCALE
] = 0,
2917 [D3DTSS_BUMPENVLOFFSET
] = 0,
2918 [D3DTSS_TEXCOORDINDEX
] = 0,
2919 [D3DTSS_TEXTURETRANSFORMFLAGS
] = D3DTTFF_DISABLE
,
2921 static const DWORD nine_samp_state_defaults
[NINED3DSAMP_LAST
+ 1] =
2923 [D3DSAMP_ADDRESSU
] = D3DTADDRESS_WRAP
,
2924 [D3DSAMP_ADDRESSV
] = D3DTADDRESS_WRAP
,
2925 [D3DSAMP_ADDRESSW
] = D3DTADDRESS_WRAP
,
2926 [D3DSAMP_BORDERCOLOR
] = 0,
2927 [D3DSAMP_MAGFILTER
] = D3DTEXF_POINT
,
2928 [D3DSAMP_MINFILTER
] = D3DTEXF_POINT
,
2929 [D3DSAMP_MIPFILTER
] = D3DTEXF_NONE
,
2930 [D3DSAMP_MIPMAPLODBIAS
] = 0,
2931 [D3DSAMP_MAXMIPLEVEL
] = 0,
2932 [D3DSAMP_MAXANISOTROPY
] = 1,
2933 [D3DSAMP_SRGBTEXTURE
] = 0,
2934 [D3DSAMP_ELEMENTINDEX
] = 0,
2935 [D3DSAMP_DMAPOFFSET
] = 0,
2936 [NINED3DSAMP_MINLOD
] = 0,
2937 [NINED3DSAMP_SHADOW
] = 0,
2938 [NINED3DSAMP_CUBETEX
] = 0
2941 void nine_state_restore_non_cso(struct NineDevice9
*device
)
2943 struct nine_context
*context
= &device
->context
;
2945 context
->changed
.group
= NINE_STATE_ALL
;
2946 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
2947 context
->changed
.ucp
= TRUE
;
2948 context
->commit
|= NINE_STATE_COMMIT_CONST_VS
| NINE_STATE_COMMIT_CONST_PS
;
2952 nine_state_set_defaults(struct NineDevice9
*device
, const D3DCAPS9
*caps
,
2955 struct nine_state
*state
= &device
->state
;
2956 struct nine_context
*context
= &device
->context
;
2959 /* Initialize defaults.
2961 memcpy(context
->rs
, nine_render_state_defaults
, sizeof(context
->rs
));
2963 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
) {
2964 memcpy(&state
->ff
.tex_stage
[s
], nine_tex_stage_state_defaults
,
2965 sizeof(state
->ff
.tex_stage
[s
]));
2966 state
->ff
.tex_stage
[s
][D3DTSS_TEXCOORDINDEX
] = s
;
2968 state
->ff
.tex_stage
[0][D3DTSS_COLOROP
] = D3DTOP_MODULATE
;
2969 state
->ff
.tex_stage
[0][D3DTSS_ALPHAOP
] = D3DTOP_SELECTARG1
;
2971 for (s
= 0; s
< ARRAY_SIZE(state
->ff
.tex_stage
); ++s
)
2972 memcpy(&context
->ff
.tex_stage
[s
], state
->ff
.tex_stage
[s
],
2973 sizeof(state
->ff
.tex_stage
[s
]));
2975 memset(&context
->bumpmap_vars
, 0, sizeof(context
->bumpmap_vars
));
2977 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
) {
2978 memcpy(&context
->samp
[s
], nine_samp_state_defaults
,
2979 sizeof(context
->samp
[s
]));
2980 memcpy(&state
->samp_advertised
[s
], nine_samp_state_defaults
,
2981 sizeof(state
->samp_advertised
[s
]));
2984 memset(state
->vs_const_f
, 0, VS_CONST_F_SIZE(device
));
2985 memset(context
->vs_const_f
, 0, device
->vs_const_size
);
2986 if (context
->vs_const_f_swvp
)
2987 memset(context
->vs_const_f_swvp
, 0, NINE_MAX_CONST_F_SWVP
* sizeof(float[4]));
2988 memset(state
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
2989 memset(context
->vs_const_i
, 0, VS_CONST_I_SIZE(device
));
2990 memset(state
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
2991 memset(context
->vs_const_b
, 0, VS_CONST_B_SIZE(device
));
2992 memset(state
->ps_const_f
, 0, device
->ps_const_size
);
2993 memset(context
->ps_const_f
, 0, device
->ps_const_size
);
2994 memset(state
->ps_const_i
, 0, sizeof(state
->ps_const_i
));
2995 memset(context
->ps_const_i
, 0, sizeof(context
->ps_const_i
));
2996 memset(state
->ps_const_b
, 0, sizeof(state
->ps_const_b
));
2997 memset(context
->ps_const_b
, 0, sizeof(context
->ps_const_b
));
2999 /* Cap dependent initial state:
3001 context
->rs
[D3DRS_POINTSIZE_MAX
] = fui(caps
->MaxPointSize
);
3003 memcpy(state
->rs_advertised
, context
->rs
, sizeof(context
->rs
));
3005 /* Set changed flags to initialize driver.
3007 context
->changed
.group
= NINE_STATE_ALL
;
3008 context
->changed
.vtxbuf
= (1ULL << device
->caps
.MaxStreams
) - 1;
3009 context
->changed
.ucp
= TRUE
;
3011 context
->ff
.changed
.transform
[0] = ~0;
3012 context
->ff
.changed
.transform
[D3DTS_WORLD
/ 32] |= 1 << (D3DTS_WORLD
% 32);
3015 state
->viewport
.MinZ
= context
->viewport
.MinZ
= 0.0f
;
3016 state
->viewport
.MaxZ
= context
->viewport
.MaxZ
= 1.0f
;
3019 for (s
= 0; s
< NINE_MAX_SAMPLERS
; ++s
)
3020 context
->changed
.sampler
[s
] = ~0;
3023 context
->dummy_vbo_bound_at
= -1;
3024 context
->vbo_bound_done
= FALSE
;
3029 nine_state_clear(struct nine_state
*state
, const boolean device
)
3033 for (i
= 0; i
< ARRAY_SIZE(state
->rt
); ++i
)
3034 nine_bind(&state
->rt
[i
], NULL
);
3035 nine_bind(&state
->ds
, NULL
);
3036 nine_bind(&state
->vs
, NULL
);
3037 nine_bind(&state
->ps
, NULL
);
3038 nine_bind(&state
->vdecl
, NULL
);
3039 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
3040 nine_bind(&state
->stream
[i
], NULL
);
3042 nine_bind(&state
->idxbuf
, NULL
);
3043 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
) {
3045 state
->texture
[i
] &&
3046 --state
->texture
[i
]->bind_count
== 0)
3047 list_delinit(&state
->texture
[i
]->list
);
3048 nine_bind(&state
->texture
[i
], NULL
);
3053 nine_context_clear(struct NineDevice9
*device
)
3055 struct nine_context
*context
= &device
->context
;
3056 struct pipe_context
*pipe
= context
->pipe
;
3057 struct cso_context
*cso
= context
->cso
;
3060 /* Early device ctor failure. Nothing to do */
3064 pipe
->bind_vs_state(pipe
, NULL
);
3065 pipe
->bind_fs_state(pipe
, NULL
);
3067 /* Don't unbind constant buffers, they're device-private and
3068 * do not change on Reset.
3071 cso_set_samplers(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
3072 cso_set_samplers(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
3074 cso_set_sampler_views(cso
, PIPE_SHADER_VERTEX
, 0, NULL
);
3075 cso_set_sampler_views(cso
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
3077 pipe
->set_vertex_buffers(pipe
, 0, device
->caps
.MaxStreams
, NULL
);
3078 pipe
->set_index_buffer(pipe
, NULL
);
3080 for (i
= 0; i
< ARRAY_SIZE(context
->rt
); ++i
)
3081 nine_bind(&context
->rt
[i
], NULL
);
3082 nine_bind(&context
->ds
, NULL
);
3083 nine_bind(&context
->vs
, NULL
);
3084 nine_bind(&context
->ps
, NULL
);
3085 nine_bind(&context
->vdecl
, NULL
);
3086 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; ++i
)
3087 pipe_resource_reference(&context
->vtxbuf
[i
].buffer
, NULL
);
3088 pipe_resource_reference(&context
->idxbuf
.buffer
, NULL
);
3090 for (i
= 0; i
< NINE_MAX_SAMPLERS
; ++i
) {
3091 context
->texture
[i
].enabled
= FALSE
;
3092 pipe_resource_reference(&context
->texture
[i
].resource
,
3094 pipe_sampler_view_reference(&context
->texture
[i
].view
[0],
3096 pipe_sampler_view_reference(&context
->texture
[i
].view
[1],
3102 nine_state_init_sw(struct NineDevice9
*device
)
3104 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3105 struct pipe_rasterizer_state rast
;
3106 struct pipe_blend_state blend
;
3107 struct pipe_depth_stencil_alpha_state dsa
;
3108 struct pipe_framebuffer_state fb
;
3110 /* Only used with Streamout */
3111 memset(&rast
, 0, sizeof(rast
));
3112 rast
.rasterizer_discard
= true;
3113 rast
.point_quad_rasterization
= 1; /* to make llvmpipe happy */
3114 cso_set_rasterizer(device
->cso_sw
, &rast
);
3116 /* dummy settings */
3117 memset(&blend
, 0, sizeof(blend
));
3118 memset(&dsa
, 0, sizeof(dsa
));
3119 memset(&fb
, 0, sizeof(fb
));
3120 cso_set_blend(device
->cso_sw
, &blend
);
3121 cso_set_depth_stencil_alpha(device
->cso_sw
, &dsa
);
3122 cso_set_framebuffer(device
->cso_sw
, &fb
);
3123 cso_set_viewport_dims(device
->cso_sw
, 1.0, 1.0, false);
3124 cso_set_fragment_shader_handle(device
->cso_sw
, util_make_empty_fragment_shader(pipe_sw
));
3127 /* There is duplication with update_vertex_elements.
3128 * TODO: Share the code */
3131 update_vertex_elements_sw(struct NineDevice9
*device
)
3133 struct nine_state
*state
= &device
->state
;
3134 const struct NineVertexDeclaration9
*vdecl
= device
->state
.vdecl
;
3135 const struct NineVertexShader9
*vs
;
3138 char vdecl_index_map
[16]; /* vs->num_inputs <= 16 */
3139 char used_streams
[device
->caps
.MaxStreams
];
3140 int dummy_vbo_stream
= -1;
3141 BOOL need_dummy_vbo
= FALSE
;
3142 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
3143 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
3145 memset(vdecl_index_map
, -1, 16);
3146 memset(used_streams
, 0, device
->caps
.MaxStreams
);
3147 vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
3150 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
3151 DBG("looking up input %u (usage %u) from vdecl(%p)\n",
3152 n
, vs
->input_map
[n
].ndecl
, vdecl
);
3154 for (i
= 0; i
< vdecl
->nelems
; i
++) {
3155 if (vdecl
->usage_map
[i
] == vs
->input_map
[n
].ndecl
) {
3156 vdecl_index_map
[n
] = i
;
3157 used_streams
[vdecl
->elems
[i
].vertex_buffer_index
] = 1;
3161 if (vdecl_index_map
[n
] < 0)
3162 need_dummy_vbo
= TRUE
;
3165 /* No vertex declaration. Likely will never happen in practice,
3166 * but we need not crash on this */
3167 need_dummy_vbo
= TRUE
;
3170 if (need_dummy_vbo
) {
3171 for (i
= 0; i
< device
->caps
.MaxStreams
; i
++ ) {
3172 if (!used_streams
[i
]) {
3173 dummy_vbo_stream
= i
;
3178 /* TODO handle dummy_vbo */
3179 assert (!need_dummy_vbo
);
3181 for (n
= 0; n
< vs
->num_inputs
; ++n
) {
3182 index
= vdecl_index_map
[n
];
3184 ve
[n
] = vdecl
->elems
[index
];
3185 b
= ve
[n
].vertex_buffer_index
;
3186 /* XXX wine just uses 1 here: */
3187 if (state
->stream_freq
[b
] & D3DSTREAMSOURCE_INSTANCEDATA
)
3188 ve
[n
].instance_divisor
= state
->stream_freq
[b
] & 0x7FFFFF;
3190 /* if the vertex declaration is incomplete compared to what the
3191 * vertex shader needs, we bind a dummy vbo with 0 0 0 0.
3192 * This is not precised by the spec, but is the behaviour
3194 ve
[n
].vertex_buffer_index
= dummy_vbo_stream
;
3195 ve
[n
].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
3196 ve
[n
].src_offset
= 0;
3197 ve
[n
].instance_divisor
= 0;
3201 cso_set_vertex_elements(device
->cso_sw
, vs
->num_inputs
, ve
);
3205 update_vertex_buffers_sw(struct NineDevice9
*device
, int start_vertice
, int num_vertices
)
3207 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3208 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3209 struct nine_state
*state
= &device
->state
;
3210 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3211 struct pipe_vertex_buffer vtxbuf
;
3212 uint32_t mask
= 0xf;
3215 DBG("mask=%x\n", mask
);
3217 /* TODO: handle dummy_vbo_bound_at */
3219 for (i
= 0; mask
; mask
>>= 1, ++i
) {
3221 if (state
->stream
[i
]) {
3222 struct pipe_resource
*buf
;
3223 struct pipe_box box
;
3225 vtxbuf
= state
->vtxbuf
[i
];
3226 vtxbuf
.buffer
= NineVertexBuffer9_GetResource(state
->stream
[i
]);
3228 DBG("Locking %p (offset %d, length %d)\n", vtxbuf
.buffer
,
3229 vtxbuf
.buffer_offset
, num_vertices
* vtxbuf
.stride
);
3231 u_box_1d(vtxbuf
.buffer_offset
+ start_vertice
* vtxbuf
.stride
,
3232 num_vertices
* vtxbuf
.stride
, &box
);
3233 buf
= vtxbuf
.buffer
;
3234 vtxbuf
.user_buffer
= pipe
->transfer_map(pipe
, buf
, 0, PIPE_TRANSFER_READ
, &box
,
3235 &(sw_internal
->transfers_so
[i
]));
3236 vtxbuf
.buffer
= NULL
;
3237 if (!device
->driver_caps
.user_sw_vbufs
) {
3238 u_upload_data(device
->vertex_sw_uploader
,
3243 &(vtxbuf
.buffer_offset
),
3245 u_upload_unmap(device
->vertex_sw_uploader
);
3246 vtxbuf
.user_buffer
= NULL
;
3248 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, &vtxbuf
);
3250 pipe_resource_reference(&vtxbuf
.buffer
, NULL
);
3252 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3255 nine_context_get_pipe_release(device
);
3259 update_vs_constants_sw(struct NineDevice9
*device
)
3261 struct nine_state
*state
= &device
->state
;
3262 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3267 struct pipe_constant_buffer cb
;
3271 cb
.buffer_offset
= 0;
3272 cb
.buffer_size
= 4096 * sizeof(float[4]);
3273 cb
.user_buffer
= state
->vs_const_f
;
3275 if (state
->vs
->lconstf
.ranges
) {
3276 const struct nine_lconstf
*lconstf
= &device
->state
.vs
->lconstf
;
3277 const struct nine_range
*r
= lconstf
->ranges
;
3279 float *dst
= device
->state
.vs_lconstf_temp
;
3280 float *src
= (float *)cb
.user_buffer
;
3281 memcpy(dst
, src
, 8192 * sizeof(float[4]));
3283 unsigned p
= r
->bgn
;
3284 unsigned c
= r
->end
- r
->bgn
;
3285 memcpy(&dst
[p
* 4], &lconstf
->data
[n
* 4], c
* 4 * sizeof(float));
3289 cb
.user_buffer
= dst
;
3292 buf
= cb
.user_buffer
;
3293 if (!device
->driver_caps
.user_sw_cbufs
) {
3294 u_upload_data(device
->constbuf_sw_uploader
,
3299 &(cb
.buffer_offset
),
3301 u_upload_unmap(device
->constbuf_sw_uploader
);
3302 cb
.user_buffer
= NULL
;
3305 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 0, &cb
);
3307 pipe_resource_reference(&cb
.buffer
, NULL
);
3309 cb
.user_buffer
= (char *)buf
+ 4096 * sizeof(float[4]);
3310 if (!device
->driver_caps
.user_sw_cbufs
) {
3311 u_upload_data(device
->constbuf_sw_uploader
,
3316 &(cb
.buffer_offset
),
3318 u_upload_unmap(device
->constbuf_sw_uploader
);
3319 cb
.user_buffer
= NULL
;
3322 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 1, &cb
);
3324 pipe_resource_reference(&cb
.buffer
, NULL
);
3328 struct pipe_constant_buffer cb
;
3331 cb
.buffer_offset
= 0;
3332 cb
.buffer_size
= 2048 * sizeof(float[4]);
3333 cb
.user_buffer
= state
->vs_const_i
;
3335 if (!device
->driver_caps
.user_sw_cbufs
) {
3336 u_upload_data(device
->constbuf_sw_uploader
,
3341 &(cb
.buffer_offset
),
3343 u_upload_unmap(device
->constbuf_sw_uploader
);
3344 cb
.user_buffer
= NULL
;
3347 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 2, &cb
);
3349 pipe_resource_reference(&cb
.buffer
, NULL
);
3353 struct pipe_constant_buffer cb
;
3356 cb
.buffer_offset
= 0;
3357 cb
.buffer_size
= 512 * sizeof(float[4]);
3358 cb
.user_buffer
= state
->vs_const_b
;
3360 if (!device
->driver_caps
.user_sw_cbufs
) {
3361 u_upload_data(device
->constbuf_sw_uploader
,
3366 &(cb
.buffer_offset
),
3368 u_upload_unmap(device
->constbuf_sw_uploader
);
3369 cb
.user_buffer
= NULL
;
3372 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 3, &cb
);
3374 pipe_resource_reference(&cb
.buffer
, NULL
);
3378 struct pipe_constant_buffer cb
;
3379 const D3DVIEWPORT9
*vport
= &device
->state
.viewport
;
3380 float viewport_data
[8] = {(float)vport
->Width
* 0.5f
,
3381 (float)vport
->Height
* -0.5f
, vport
->MaxZ
- vport
->MinZ
, 0.f
,
3382 (float)vport
->Width
* 0.5f
+ (float)vport
->X
,
3383 (float)vport
->Height
* 0.5f
+ (float)vport
->Y
,
3387 cb
.buffer_offset
= 0;
3388 cb
.buffer_size
= 2 * sizeof(float[4]);
3389 cb
.user_buffer
= viewport_data
;
3392 u_upload_data(device
->constbuf_sw_uploader
,
3397 &(cb
.buffer_offset
),
3399 u_upload_unmap(device
->constbuf_sw_uploader
);
3400 cb
.user_buffer
= NULL
;
3403 pipe_sw
->set_constant_buffer(pipe_sw
, PIPE_SHADER_VERTEX
, 4, &cb
);
3405 pipe_resource_reference(&cb
.buffer
, NULL
);
3411 nine_state_prepare_draw_sw(struct NineDevice9
*device
, struct NineVertexDeclaration9
*vdecl_out
,
3412 int start_vertice
, int num_vertices
, struct pipe_stream_output_info
*so
)
3414 struct nine_state
*state
= &device
->state
;
3415 bool programmable_vs
= state
->vs
&& !(state
->vdecl
&& state
->vdecl
->position_t
);
3416 struct NineVertexShader9
*vs
= programmable_vs
? device
->state
.vs
: device
->ff
.vs
;
3418 assert(programmable_vs
);
3420 DBG("Preparing draw\n");
3421 cso_set_vertex_shader_handle(device
->cso_sw
,
3422 NineVertexShader9_GetVariantProcessVertices(vs
, vdecl_out
, so
));
3423 update_vertex_elements_sw(device
);
3424 update_vertex_buffers_sw(device
, start_vertice
, num_vertices
);
3425 update_vs_constants_sw(device
);
3426 DBG("Preparation succeeded\n");
3430 nine_state_after_draw_sw(struct NineDevice9
*device
)
3432 struct nine_state_sw_internal
*sw_internal
= &device
->state_sw_internal
;
3433 struct pipe_context
*pipe
= nine_context_get_pipe_acquire(device
);
3434 struct pipe_context
*pipe_sw
= device
->pipe_sw
;
3437 for (i
= 0; i
< 4; i
++) {
3438 pipe_sw
->set_vertex_buffers(pipe_sw
, i
, 1, NULL
);
3439 if (sw_internal
->transfers_so
[i
])
3440 pipe
->transfer_unmap(pipe
, sw_internal
->transfers_so
[i
]);
3441 sw_internal
->transfers_so
[i
] = NULL
;
3443 nine_context_get_pipe_release(device
);
/* Release sw-path state. Nothing to do explicitly: all objects are
 * owned by (and destroyed with) the cso context. */
void
nine_state_destroy_sw(struct NineDevice9 *device)
{
    (void) device;
    /* Everything destroyed with cso */
}
3454 static const DWORD nine_render_states_pixel[] =
3456 D3DRS_ALPHABLENDENABLE,
3459 D3DRS_ALPHATESTENABLE,
3460 D3DRS_ANTIALIASEDLINEENABLE,
3464 D3DRS_CCW_STENCILFAIL,
3465 D3DRS_CCW_STENCILPASS,
3466 D3DRS_CCW_STENCILZFAIL,
3467 D3DRS_COLORWRITEENABLE,
3468 D3DRS_COLORWRITEENABLE1,
3469 D3DRS_COLORWRITEENABLE2,
3470 D3DRS_COLORWRITEENABLE3,
3473 D3DRS_DESTBLENDALPHA,
3480 D3DRS_SCISSORTESTENABLE,
3481 D3DRS_SEPARATEALPHABLENDENABLE,
3483 D3DRS_SLOPESCALEDEPTHBIAS,
3485 D3DRS_SRCBLENDALPHA,
3486 D3DRS_SRGBWRITEENABLE,
3487 D3DRS_STENCILENABLE,
3493 D3DRS_STENCILWRITEMASK,
3495 D3DRS_TEXTUREFACTOR,
3496 D3DRS_TWOSIDEDSTENCILMODE,
3518 const uint32_t nine_render_states_pixel
[(NINED3DRS_LAST
+ 31) / 32] =
3520 0x0f99c380, 0x1ff00070, 0x00000000, 0x00000000,
3521 0x000000ff, 0xde01c900, 0x0003ffcf
3525 static const DWORD nine_render_states_vertex[] =
3527 D3DRS_ADAPTIVETESS_W,
3528 D3DRS_ADAPTIVETESS_X,
3529 D3DRS_ADAPTIVETESS_Y,
3530 D3DRS_ADAPTIVETESS_Z,
3532 D3DRS_AMBIENTMATERIALSOURCE,
3534 D3DRS_CLIPPLANEENABLE,
3537 D3DRS_DIFFUSEMATERIALSOURCE,
3538 D3DRS_EMISSIVEMATERIALSOURCE,
3539 D3DRS_ENABLEADAPTIVETESSELLATION,
3546 D3DRS_FOGVERTEXMODE,
3547 D3DRS_INDEXEDVERTEXBLENDENABLE,
3550 D3DRS_MAXTESSELLATIONLEVEL,
3551 D3DRS_MINTESSELLATIONLEVEL,
3552 D3DRS_MULTISAMPLEANTIALIAS,
3553 D3DRS_MULTISAMPLEMASK,
3555 D3DRS_NORMALIZENORMALS,
3556 D3DRS_PATCHEDGESTYLE,
3560 D3DRS_POINTSCALEENABLE,
3562 D3DRS_POINTSIZE_MAX,
3563 D3DRS_POINTSIZE_MIN,
3564 D3DRS_POINTSPRITEENABLE,
3565 D3DRS_POSITIONDEGREE,
3566 D3DRS_RANGEFOGENABLE,
3568 D3DRS_SPECULARENABLE,
3569 D3DRS_SPECULARMATERIALSOURCE,
3574 const uint32_t nine_render_states_vertex
[(NINED3DRS_LAST
+ 31) / 32] =
3576 0x30400200, 0x0001007c, 0x00000000, 0x00000000,
3577 0xfd9efb00, 0x01fc34cf, 0x00000000
3580 /* TODO: put in the right values */
3581 const uint32_t nine_render_state_group
[NINED3DRS_LAST
+ 1] =
3583 [D3DRS_ZENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3584 [D3DRS_FILLMODE
] = NINE_STATE_RASTERIZER
,
3585 [D3DRS_SHADEMODE
] = NINE_STATE_RASTERIZER
,
3586 [D3DRS_ZWRITEENABLE
] = NINE_STATE_DSA
,
3587 [D3DRS_ALPHATESTENABLE
] = NINE_STATE_DSA
,
3588 [D3DRS_LASTPIXEL
] = NINE_STATE_RASTERIZER
,
3589 [D3DRS_SRCBLEND
] = NINE_STATE_BLEND
,
3590 [D3DRS_DESTBLEND
] = NINE_STATE_BLEND
,
3591 [D3DRS_CULLMODE
] = NINE_STATE_RASTERIZER
,
3592 [D3DRS_ZFUNC
] = NINE_STATE_DSA
,
3593 [D3DRS_ALPHAREF
] = NINE_STATE_DSA
,
3594 [D3DRS_ALPHAFUNC
] = NINE_STATE_DSA
,
3595 [D3DRS_DITHERENABLE
] = NINE_STATE_BLEND
,
3596 [D3DRS_ALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3597 [D3DRS_FOGENABLE
] = NINE_STATE_FF_OTHER
| NINE_STATE_FOG_SHADER
| NINE_STATE_PS_CONST
,
3598 [D3DRS_SPECULARENABLE
] = NINE_STATE_FF_LIGHTING
,
3599 [D3DRS_FOGCOLOR
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3600 [D3DRS_FOGTABLEMODE
] = NINE_STATE_FF_OTHER
| NINE_STATE_FOG_SHADER
| NINE_STATE_PS_CONST
,
3601 [D3DRS_FOGSTART
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3602 [D3DRS_FOGEND
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3603 [D3DRS_FOGDENSITY
] = NINE_STATE_FF_OTHER
| NINE_STATE_PS_CONST
,
3604 [D3DRS_RANGEFOGENABLE
] = NINE_STATE_FF_OTHER
,
3605 [D3DRS_STENCILENABLE
] = NINE_STATE_DSA
| NINE_STATE_MULTISAMPLE
,
3606 [D3DRS_STENCILFAIL
] = NINE_STATE_DSA
,
3607 [D3DRS_STENCILZFAIL
] = NINE_STATE_DSA
,
3608 [D3DRS_STENCILPASS
] = NINE_STATE_DSA
,
3609 [D3DRS_STENCILFUNC
] = NINE_STATE_DSA
,
3610 [D3DRS_STENCILREF
] = NINE_STATE_STENCIL_REF
,
3611 [D3DRS_STENCILMASK
] = NINE_STATE_DSA
,
3612 [D3DRS_STENCILWRITEMASK
] = NINE_STATE_DSA
,
3613 [D3DRS_TEXTUREFACTOR
] = NINE_STATE_FF_PSSTAGES
,
3614 [D3DRS_WRAP0
] = NINE_STATE_UNHANDLED
, /* cylindrical wrap is crazy */
3615 [D3DRS_WRAP1
] = NINE_STATE_UNHANDLED
,
3616 [D3DRS_WRAP2
] = NINE_STATE_UNHANDLED
,
3617 [D3DRS_WRAP3
] = NINE_STATE_UNHANDLED
,
3618 [D3DRS_WRAP4
] = NINE_STATE_UNHANDLED
,
3619 [D3DRS_WRAP5
] = NINE_STATE_UNHANDLED
,
3620 [D3DRS_WRAP6
] = NINE_STATE_UNHANDLED
,
3621 [D3DRS_WRAP7
] = NINE_STATE_UNHANDLED
,
3622 [D3DRS_CLIPPING
] = 0, /* software vertex processing only */
3623 [D3DRS_LIGHTING
] = NINE_STATE_FF_LIGHTING
,
3624 [D3DRS_AMBIENT
] = NINE_STATE_FF_LIGHTING
| NINE_STATE_FF_MATERIAL
,
3625 [D3DRS_FOGVERTEXMODE
] = NINE_STATE_FF_OTHER
,
3626 [D3DRS_COLORVERTEX
] = NINE_STATE_FF_LIGHTING
,
3627 [D3DRS_LOCALVIEWER
] = NINE_STATE_FF_LIGHTING
,
3628 [D3DRS_NORMALIZENORMALS
] = NINE_STATE_FF_OTHER
,
3629 [D3DRS_DIFFUSEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3630 [D3DRS_SPECULARMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3631 [D3DRS_AMBIENTMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3632 [D3DRS_EMISSIVEMATERIALSOURCE
] = NINE_STATE_FF_LIGHTING
,
3633 [D3DRS_VERTEXBLEND
] = NINE_STATE_FF_OTHER
,
3634 [D3DRS_CLIPPLANEENABLE
] = NINE_STATE_RASTERIZER
,
3635 [D3DRS_POINTSIZE
] = NINE_STATE_RASTERIZER
,
3636 [D3DRS_POINTSIZE_MIN
] = NINE_STATE_RASTERIZER
| NINE_STATE_POINTSIZE_SHADER
,
3637 [D3DRS_POINTSPRITEENABLE
] = NINE_STATE_RASTERIZER
,
3638 [D3DRS_POINTSCALEENABLE
] = NINE_STATE_FF_OTHER
,
3639 [D3DRS_POINTSCALE_A
] = NINE_STATE_FF_OTHER
,
3640 [D3DRS_POINTSCALE_B
] = NINE_STATE_FF_OTHER
,
3641 [D3DRS_POINTSCALE_C
] = NINE_STATE_FF_OTHER
,
3642 [D3DRS_MULTISAMPLEANTIALIAS
] = NINE_STATE_MULTISAMPLE
,
3643 [D3DRS_MULTISAMPLEMASK
] = NINE_STATE_SAMPLE_MASK
,
3644 [D3DRS_PATCHEDGESTYLE
] = NINE_STATE_UNHANDLED
,
3645 [D3DRS_DEBUGMONITORTOKEN
] = NINE_STATE_UNHANDLED
,
3646 [D3DRS_POINTSIZE_MAX
] = NINE_STATE_RASTERIZER
| NINE_STATE_POINTSIZE_SHADER
,
3647 [D3DRS_INDEXEDVERTEXBLENDENABLE
] = NINE_STATE_FF_OTHER
,
3648 [D3DRS_COLORWRITEENABLE
] = NINE_STATE_BLEND
,
3649 [D3DRS_TWEENFACTOR
] = NINE_STATE_FF_OTHER
,
3650 [D3DRS_BLENDOP
] = NINE_STATE_BLEND
,
3651 [D3DRS_POSITIONDEGREE
] = NINE_STATE_UNHANDLED
,
3652 [D3DRS_NORMALDEGREE
] = NINE_STATE_UNHANDLED
,
3653 [D3DRS_SCISSORTESTENABLE
] = NINE_STATE_RASTERIZER
,
3654 [D3DRS_SLOPESCALEDEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3655 [D3DRS_ANTIALIASEDLINEENABLE
] = NINE_STATE_RASTERIZER
,
3656 [D3DRS_MINTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3657 [D3DRS_MAXTESSELLATIONLEVEL
] = NINE_STATE_UNHANDLED
,
3658 [D3DRS_ADAPTIVETESS_X
] = NINE_STATE_UNHANDLED
,
3659 [D3DRS_ADAPTIVETESS_Y
] = NINE_STATE_UNHANDLED
,
3660 [D3DRS_ADAPTIVETESS_Z
] = NINE_STATE_UNHANDLED
,
3661 [D3DRS_ADAPTIVETESS_W
] = NINE_STATE_UNHANDLED
,
3662 [D3DRS_ENABLEADAPTIVETESSELLATION
] = NINE_STATE_UNHANDLED
,
3663 [D3DRS_TWOSIDEDSTENCILMODE
] = NINE_STATE_DSA
,
3664 [D3DRS_CCW_STENCILFAIL
] = NINE_STATE_DSA
,
3665 [D3DRS_CCW_STENCILZFAIL
] = NINE_STATE_DSA
,
3666 [D3DRS_CCW_STENCILPASS
] = NINE_STATE_DSA
,
3667 [D3DRS_CCW_STENCILFUNC
] = NINE_STATE_DSA
,
3668 [D3DRS_COLORWRITEENABLE1
] = NINE_STATE_BLEND
,
3669 [D3DRS_COLORWRITEENABLE2
] = NINE_STATE_BLEND
,
3670 [D3DRS_COLORWRITEENABLE3
] = NINE_STATE_BLEND
,
3671 [D3DRS_BLENDFACTOR
] = NINE_STATE_BLEND_COLOR
,
3672 [D3DRS_SRGBWRITEENABLE
] = NINE_STATE_FB
,
3673 [D3DRS_DEPTHBIAS
] = NINE_STATE_RASTERIZER
,
3674 [D3DRS_WRAP8
] = NINE_STATE_UNHANDLED
, /* cylwrap has to be done via GP */
3675 [D3DRS_WRAP9
] = NINE_STATE_UNHANDLED
,
3676 [D3DRS_WRAP10
] = NINE_STATE_UNHANDLED
,
3677 [D3DRS_WRAP11
] = NINE_STATE_UNHANDLED
,
3678 [D3DRS_WRAP12
] = NINE_STATE_UNHANDLED
,
3679 [D3DRS_WRAP13
] = NINE_STATE_UNHANDLED
,
3680 [D3DRS_WRAP14
] = NINE_STATE_UNHANDLED
,
3681 [D3DRS_WRAP15
] = NINE_STATE_UNHANDLED
,
3682 [D3DRS_SEPARATEALPHABLENDENABLE
] = NINE_STATE_BLEND
,
3683 [D3DRS_SRCBLENDALPHA
] = NINE_STATE_BLEND
,
3684 [D3DRS_DESTBLENDALPHA
] = NINE_STATE_BLEND
,
3685 [D3DRS_BLENDOPALPHA
] = NINE_STATE_BLEND
3691 nine_state_access_transform(struct nine_ff_state
*ff_state
, D3DTRANSFORMSTATETYPE t
,
3694 static D3DMATRIX Identity
= { .m
[0] = { 1, 0, 0, 0 },
3695 .m
[1] = { 0, 1, 0, 0 },
3696 .m
[2] = { 0, 0, 1, 0 },
3697 .m
[3] = { 0, 0, 0, 1 } };
3701 case D3DTS_VIEW
: index
= 0; break;
3702 case D3DTS_PROJECTION
: index
= 1; break;
3703 case D3DTS_TEXTURE0
: index
= 2; break;
3704 case D3DTS_TEXTURE1
: index
= 3; break;
3705 case D3DTS_TEXTURE2
: index
= 4; break;
3706 case D3DTS_TEXTURE3
: index
= 5; break;
3707 case D3DTS_TEXTURE4
: index
= 6; break;
3708 case D3DTS_TEXTURE5
: index
= 7; break;
3709 case D3DTS_TEXTURE6
: index
= 8; break;
3710 case D3DTS_TEXTURE7
: index
= 9; break;
3712 if (!(t
>= D3DTS_WORLDMATRIX(0) && t
<= D3DTS_WORLDMATRIX(255)))
3714 index
= 10 + (t
- D3DTS_WORLDMATRIX(0));
3718 if (index
>= ff_state
->num_transforms
) {
3719 unsigned N
= index
+ 1;
3720 unsigned n
= ff_state
->num_transforms
;
3724 ff_state
->transform
= REALLOC(ff_state
->transform
,
3725 n
* sizeof(D3DMATRIX
),
3726 N
* sizeof(D3DMATRIX
));
3728 ff_state
->transform
[n
] = Identity
;
3729 ff_state
->num_transforms
= N
;
3731 return &ff_state
->transform
[index
];
3735 nine_state_set_light(struct nine_ff_state
*ff_state
, DWORD Index
,
3736 const D3DLIGHT9
*pLight
)
3738 if (Index
>= ff_state
->num_lights
) {
3739 unsigned n
= ff_state
->num_lights
;
3740 unsigned N
= Index
+ 1;
3742 ff_state
->light
= REALLOC(ff_state
->light
, n
* sizeof(D3DLIGHT9
),
3743 N
* sizeof(D3DLIGHT9
));
3744 if (!ff_state
->light
)
3745 return E_OUTOFMEMORY
;
3746 ff_state
->num_lights
= N
;
3748 for (; n
< Index
; ++n
) {
3749 memset(&ff_state
->light
[n
], 0, sizeof(D3DLIGHT9
));
3750 ff_state
->light
[n
].Type
= (D3DLIGHTTYPE
)NINED3DLIGHT_INVALID
;
3753 ff_state
->light
[Index
] = *pLight
;
3755 if (pLight
->Type
== D3DLIGHT_SPOT
&& pLight
->Theta
>= pLight
->Phi
) {
3756 DBG("Warning: clamping D3DLIGHT9.Theta\n");
3757 ff_state
->light
[Index
].Theta
= ff_state
->light
[Index
].Phi
;
3763 nine_state_light_enable(struct nine_ff_state
*ff_state
, uint32_t *change_group
,
3764 DWORD Index
, BOOL Enable
)
3768 user_assert(Index
< ff_state
->num_lights
, D3DERR_INVALIDCALL
);
3770 for (i
= 0; i
< ff_state
->num_lights_active
; ++i
) {
3771 if (ff_state
->active_light
[i
] == Index
)
3776 if (i
< ff_state
->num_lights_active
)
3778 /* XXX wine thinks this should still succeed:
3780 user_assert(i
< NINE_MAX_LIGHTS_ACTIVE
, D3DERR_INVALIDCALL
);
3782 ff_state
->active_light
[i
] = Index
;
3783 ff_state
->num_lights_active
++;
3785 if (i
== ff_state
->num_lights_active
)
3787 --ff_state
->num_lights_active
;
3788 for (; i
< ff_state
->num_lights_active
; ++i
)
3789 ff_state
->active_light
[i
] = ff_state
->active_light
[i
+ 1];
3792 *change_group
|= NINE_STATE_FF_LIGHTING
;
3797 #define D3DRS_TO_STRING_CASE(n) case D3DRS_##n: return "D3DRS_"#n
3798 const char *nine_d3drs_to_string(DWORD State
)
3801 D3DRS_TO_STRING_CASE(ZENABLE
);
3802 D3DRS_TO_STRING_CASE(FILLMODE
);
3803 D3DRS_TO_STRING_CASE(SHADEMODE
);
3804 D3DRS_TO_STRING_CASE(ZWRITEENABLE
);
3805 D3DRS_TO_STRING_CASE(ALPHATESTENABLE
);
3806 D3DRS_TO_STRING_CASE(LASTPIXEL
);
3807 D3DRS_TO_STRING_CASE(SRCBLEND
);
3808 D3DRS_TO_STRING_CASE(DESTBLEND
);
3809 D3DRS_TO_STRING_CASE(CULLMODE
);
3810 D3DRS_TO_STRING_CASE(ZFUNC
);
3811 D3DRS_TO_STRING_CASE(ALPHAREF
);
3812 D3DRS_TO_STRING_CASE(ALPHAFUNC
);
3813 D3DRS_TO_STRING_CASE(DITHERENABLE
);
3814 D3DRS_TO_STRING_CASE(ALPHABLENDENABLE
);
3815 D3DRS_TO_STRING_CASE(FOGENABLE
);
3816 D3DRS_TO_STRING_CASE(SPECULARENABLE
);
3817 D3DRS_TO_STRING_CASE(FOGCOLOR
);
3818 D3DRS_TO_STRING_CASE(FOGTABLEMODE
);
3819 D3DRS_TO_STRING_CASE(FOGSTART
);
3820 D3DRS_TO_STRING_CASE(FOGEND
);
3821 D3DRS_TO_STRING_CASE(FOGDENSITY
);
3822 D3DRS_TO_STRING_CASE(RANGEFOGENABLE
);
3823 D3DRS_TO_STRING_CASE(STENCILENABLE
);
3824 D3DRS_TO_STRING_CASE(STENCILFAIL
);
3825 D3DRS_TO_STRING_CASE(STENCILZFAIL
);
3826 D3DRS_TO_STRING_CASE(STENCILPASS
);
3827 D3DRS_TO_STRING_CASE(STENCILFUNC
);
3828 D3DRS_TO_STRING_CASE(STENCILREF
);
3829 D3DRS_TO_STRING_CASE(STENCILMASK
);
3830 D3DRS_TO_STRING_CASE(STENCILWRITEMASK
);
3831 D3DRS_TO_STRING_CASE(TEXTUREFACTOR
);
3832 D3DRS_TO_STRING_CASE(WRAP0
);
3833 D3DRS_TO_STRING_CASE(WRAP1
);
3834 D3DRS_TO_STRING_CASE(WRAP2
);
3835 D3DRS_TO_STRING_CASE(WRAP3
);
3836 D3DRS_TO_STRING_CASE(WRAP4
);
3837 D3DRS_TO_STRING_CASE(WRAP5
);
3838 D3DRS_TO_STRING_CASE(WRAP6
);
3839 D3DRS_TO_STRING_CASE(WRAP7
);
3840 D3DRS_TO_STRING_CASE(CLIPPING
);
3841 D3DRS_TO_STRING_CASE(LIGHTING
);
3842 D3DRS_TO_STRING_CASE(AMBIENT
);
3843 D3DRS_TO_STRING_CASE(FOGVERTEXMODE
);
3844 D3DRS_TO_STRING_CASE(COLORVERTEX
);
3845 D3DRS_TO_STRING_CASE(LOCALVIEWER
);
3846 D3DRS_TO_STRING_CASE(NORMALIZENORMALS
);
3847 D3DRS_TO_STRING_CASE(DIFFUSEMATERIALSOURCE
);
3848 D3DRS_TO_STRING_CASE(SPECULARMATERIALSOURCE
);
3849 D3DRS_TO_STRING_CASE(AMBIENTMATERIALSOURCE
);
3850 D3DRS_TO_STRING_CASE(EMISSIVEMATERIALSOURCE
);
3851 D3DRS_TO_STRING_CASE(VERTEXBLEND
);
3852 D3DRS_TO_STRING_CASE(CLIPPLANEENABLE
);
3853 D3DRS_TO_STRING_CASE(POINTSIZE
);
3854 D3DRS_TO_STRING_CASE(POINTSIZE_MIN
);
3855 D3DRS_TO_STRING_CASE(POINTSPRITEENABLE
);
3856 D3DRS_TO_STRING_CASE(POINTSCALEENABLE
);
3857 D3DRS_TO_STRING_CASE(POINTSCALE_A
);
3858 D3DRS_TO_STRING_CASE(POINTSCALE_B
);
3859 D3DRS_TO_STRING_CASE(POINTSCALE_C
);
3860 D3DRS_TO_STRING_CASE(MULTISAMPLEANTIALIAS
);
3861 D3DRS_TO_STRING_CASE(MULTISAMPLEMASK
);
3862 D3DRS_TO_STRING_CASE(PATCHEDGESTYLE
);
3863 D3DRS_TO_STRING_CASE(DEBUGMONITORTOKEN
);
3864 D3DRS_TO_STRING_CASE(POINTSIZE_MAX
);
3865 D3DRS_TO_STRING_CASE(INDEXEDVERTEXBLENDENABLE
);
3866 D3DRS_TO_STRING_CASE(COLORWRITEENABLE
);
3867 D3DRS_TO_STRING_CASE(TWEENFACTOR
);
3868 D3DRS_TO_STRING_CASE(BLENDOP
);
3869 D3DRS_TO_STRING_CASE(POSITIONDEGREE
);
3870 D3DRS_TO_STRING_CASE(NORMALDEGREE
);
3871 D3DRS_TO_STRING_CASE(SCISSORTESTENABLE
);
3872 D3DRS_TO_STRING_CASE(SLOPESCALEDEPTHBIAS
);
3873 D3DRS_TO_STRING_CASE(ANTIALIASEDLINEENABLE
);
3874 D3DRS_TO_STRING_CASE(MINTESSELLATIONLEVEL
);
3875 D3DRS_TO_STRING_CASE(MAXTESSELLATIONLEVEL
);
3876 D3DRS_TO_STRING_CASE(ADAPTIVETESS_X
);
3877 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Y
);
3878 D3DRS_TO_STRING_CASE(ADAPTIVETESS_Z
);
3879 D3DRS_TO_STRING_CASE(ADAPTIVETESS_W
);
3880 D3DRS_TO_STRING_CASE(ENABLEADAPTIVETESSELLATION
);
3881 D3DRS_TO_STRING_CASE(TWOSIDEDSTENCILMODE
);
3882 D3DRS_TO_STRING_CASE(CCW_STENCILFAIL
);
3883 D3DRS_TO_STRING_CASE(CCW_STENCILZFAIL
);
3884 D3DRS_TO_STRING_CASE(CCW_STENCILPASS
);
3885 D3DRS_TO_STRING_CASE(CCW_STENCILFUNC
);
3886 D3DRS_TO_STRING_CASE(COLORWRITEENABLE1
);
3887 D3DRS_TO_STRING_CASE(COLORWRITEENABLE2
);
3888 D3DRS_TO_STRING_CASE(COLORWRITEENABLE3
);
3889 D3DRS_TO_STRING_CASE(BLENDFACTOR
);
3890 D3DRS_TO_STRING_CASE(SRGBWRITEENABLE
);
3891 D3DRS_TO_STRING_CASE(DEPTHBIAS
);
3892 D3DRS_TO_STRING_CASE(WRAP8
);
3893 D3DRS_TO_STRING_CASE(WRAP9
);
3894 D3DRS_TO_STRING_CASE(WRAP10
);
3895 D3DRS_TO_STRING_CASE(WRAP11
);
3896 D3DRS_TO_STRING_CASE(WRAP12
);
3897 D3DRS_TO_STRING_CASE(WRAP13
);
3898 D3DRS_TO_STRING_CASE(WRAP14
);
3899 D3DRS_TO_STRING_CASE(WRAP15
);
3900 D3DRS_TO_STRING_CASE(SEPARATEALPHABLENDENABLE
);
3901 D3DRS_TO_STRING_CASE(SRCBLENDALPHA
);
3902 D3DRS_TO_STRING_CASE(DESTBLENDALPHA
);
3903 D3DRS_TO_STRING_CASE(BLENDOPALPHA
);