1 /**************************************************************************
3 * Copyright 2003 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "i915_context.h"
31 #include "i915_batch.h"
32 #include "i915_debug.h"
34 #include "i915_resource.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_defines.h"
38 #include "pipe/p_format.h"
40 #include "util/u_format.h"
41 #include "util/u_math.h"
42 #include "util/u_memory.h"
/* One tracked piece of hardware state: a validate callback that reports the
 * batch space the atom needs, an emit callback that writes it, and the dirty
 * bookkeeping for it.
 * NOTE(review): source was whitespace-mangled; member list reconstructed from
 * the visible fields only — confirm against the original file. */
struct i915_tracked_hw_state {
   void (*validate)(struct i915_context *, unsigned *batch_space);
   void (*emit)(struct i915_context *);
   unsigned dirty, batch_space;
};
53 validate_flush(struct i915_context
*i915
, unsigned *batch_space
)
55 *batch_space
= i915
->flush_dirty
? 1 : 0;
59 emit_flush(struct i915_context
*i915
)
61 /* Cache handling is very cheap atm. State handling can request to flushes:
62 * - I915_FLUSH_CACHE which is a flush everything request and
63 * - I915_PIPELINE_FLUSH which is specifically for the draw_offset flush.
64 * Because the cache handling is so dumb, no explicit "invalidate map cache".
65 * Also, the first is a strict superset of the latter, so the following logic
67 if (i915
->flush_dirty
& I915_FLUSH_CACHE
)
68 OUT_BATCH(MI_FLUSH
| FLUSH_MAP_CACHE
);
69 else if (i915
->flush_dirty
& I915_PIPELINE_FLUSH
)
70 OUT_BATCH(MI_FLUSH
| INHIBIT_FLUSH_RENDER_CACHE
);
73 uint32_t invariant_state
[] = {
74 _3DSTATE_AA_CMD
| AA_LINE_ECAAR_WIDTH_ENABLE
| AA_LINE_ECAAR_WIDTH_1_0
|
75 AA_LINE_REGION_WIDTH_ENABLE
| AA_LINE_REGION_WIDTH_1_0
,
77 _3DSTATE_DFLT_DIFFUSE_CMD
, 0,
79 _3DSTATE_DFLT_SPEC_CMD
, 0,
81 _3DSTATE_DFLT_Z_CMD
, 0,
83 _3DSTATE_COORD_SET_BINDINGS
|
93 _3DSTATE_RASTER_RULES_CMD
|
94 ENABLE_POINT_RASTER_RULE
|
95 OGL_POINT_RASTER_RULE
|
96 ENABLE_LINE_STRIP_PROVOKE_VRTX
|
97 ENABLE_TRI_FAN_PROVOKE_VRTX
|
98 LINE_STRIP_PROVOKE_VRTX(1) |
99 TRI_FAN_PROVOKE_VRTX(2) |
100 ENABLE_TEXKILL_3D_4D
|
103 _3DSTATE_DEPTH_SUBRECT_DISABLE
,
105 /* disable indirect state for now
107 _3DSTATE_LOAD_INDIRECT
| 0, 0};
110 emit_invariant(struct i915_context
*i915
)
112 i915_winsys_batchbuffer_write(i915
->batch
, invariant_state
,
113 ARRAY_SIZE(invariant_state
)*sizeof(uint32_t));
117 validate_immediate(struct i915_context
*i915
, unsigned *batch_space
)
119 unsigned dirty
= (1 << I915_IMMEDIATE_S0
| 1 << I915_IMMEDIATE_S1
|
120 1 << I915_IMMEDIATE_S2
| 1 << I915_IMMEDIATE_S3
|
121 1 << I915_IMMEDIATE_S3
| 1 << I915_IMMEDIATE_S4
|
122 1 << I915_IMMEDIATE_S5
| 1 << I915_IMMEDIATE_S6
) &
123 i915
->immediate_dirty
;
125 if (i915
->immediate_dirty
& (1 << I915_IMMEDIATE_S0
) && i915
->vbo
)
126 i915
->validation_buffers
[i915
->num_validation_buffers
++] = i915
->vbo
;
128 *batch_space
= 1 + util_bitcount(dirty
);
131 static uint
target_fixup(struct pipe_surface
*p
, int component
)
135 enum pipe_format format
;
138 { PIPE_FORMAT_R8G8B8A8_UNORM
, { S5_WRITEDISABLE_BLUE
, S5_WRITEDISABLE_GREEN
, S5_WRITEDISABLE_RED
, S5_WRITEDISABLE_ALPHA
}},
139 { PIPE_FORMAT_R8G8B8X8_UNORM
, { S5_WRITEDISABLE_BLUE
, S5_WRITEDISABLE_GREEN
, S5_WRITEDISABLE_RED
, S5_WRITEDISABLE_ALPHA
}},
140 { PIPE_FORMAT_L8_UNORM
, { S5_WRITEDISABLE_RED
| S5_WRITEDISABLE_GREEN
| S5_WRITEDISABLE_BLUE
, 0, 0, S5_WRITEDISABLE_ALPHA
}},
141 { PIPE_FORMAT_I8_UNORM
, { S5_WRITEDISABLE_RED
| S5_WRITEDISABLE_GREEN
| S5_WRITEDISABLE_BLUE
, 0, 0, S5_WRITEDISABLE_ALPHA
}},
142 { PIPE_FORMAT_A8_UNORM
, { 0, 0, 0, S5_WRITEDISABLE_RED
| S5_WRITEDISABLE_GREEN
| S5_WRITEDISABLE_BLUE
| S5_WRITEDISABLE_ALPHA
}},
143 { 0, { S5_WRITEDISABLE_RED
, S5_WRITEDISABLE_GREEN
, S5_WRITEDISABLE_BLUE
, S5_WRITEDISABLE_ALPHA
}}
145 int i
= sizeof(fixup_mask
) / sizeof(*fixup_mask
) - 1;
148 for(i
= 0; fixup_mask
[i
].format
!= 0; i
++)
149 if (p
->format
== fixup_mask
[i
].format
)
150 return fixup_mask
[i
].hw_mask
[component
];
152 /* Just return default masks */
153 return fixup_mask
[i
].hw_mask
[component
];
156 static void emit_immediate_s5(struct i915_context
*i915
, uint imm
)
158 /* Fixup write mask for non-BGRA render targets */
159 uint fixup_imm
= imm
& ~( S5_WRITEDISABLE_RED
| S5_WRITEDISABLE_GREEN
|
160 S5_WRITEDISABLE_BLUE
| S5_WRITEDISABLE_ALPHA
);
161 struct pipe_surface
*surf
= i915
->framebuffer
.cbufs
[0];
163 if (imm
& S5_WRITEDISABLE_RED
)
164 fixup_imm
|= target_fixup(surf
, 0);
165 if (imm
& S5_WRITEDISABLE_GREEN
)
166 fixup_imm
|= target_fixup(surf
, 1);
167 if (imm
& S5_WRITEDISABLE_BLUE
)
168 fixup_imm
|= target_fixup(surf
, 2);
169 if (imm
& S5_WRITEDISABLE_ALPHA
)
170 fixup_imm
|= target_fixup(surf
, 3);
172 OUT_BATCH(fixup_imm
);
175 static void emit_immediate_s6(struct i915_context
*i915
, uint imm
)
177 /* Fixup blend function for A8 dst buffers.
178 * When we blend to an A8 buffer, the GPU thinks it's a G8 buffer,
179 * and therefore we need to use the color factor for alphas. */
182 if (i915
->current
.target_fixup_format
== PIPE_FORMAT_A8_UNORM
) {
183 srcRGB
= (imm
>> S6_CBUF_SRC_BLEND_FACT_SHIFT
) & BLENDFACT_MASK
;
184 if (srcRGB
== BLENDFACT_DST_ALPHA
)
185 srcRGB
= BLENDFACT_DST_COLR
;
186 else if (srcRGB
== BLENDFACT_INV_DST_ALPHA
)
187 srcRGB
= BLENDFACT_INV_DST_COLR
;
188 imm
&= ~SRC_BLND_FACT(BLENDFACT_MASK
);
189 imm
|= SRC_BLND_FACT(srcRGB
);
196 emit_immediate(struct i915_context
*i915
)
198 /* remove unwanted bits and S7 */
199 unsigned dirty
= (1 << I915_IMMEDIATE_S0
| 1 << I915_IMMEDIATE_S1
|
200 1 << I915_IMMEDIATE_S2
| 1 << I915_IMMEDIATE_S3
|
201 1 << I915_IMMEDIATE_S3
| 1 << I915_IMMEDIATE_S4
|
202 1 << I915_IMMEDIATE_S5
| 1 << I915_IMMEDIATE_S6
) &
203 i915
->immediate_dirty
;
204 int i
, num
= util_bitcount(dirty
);
205 assert(num
&& num
<= I915_MAX_IMMEDIATE
);
207 OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1
|
208 dirty
<< 4 | (num
- 1));
210 if (i915
->immediate_dirty
& (1 << I915_IMMEDIATE_S0
)) {
212 OUT_RELOC(i915
->vbo
, I915_USAGE_VERTEX
,
213 i915
->current
.immediate
[I915_IMMEDIATE_S0
]);
218 for (i
= 1; i
< I915_MAX_IMMEDIATE
; i
++) {
219 if (dirty
& (1 << i
)) {
220 if (i
== I915_IMMEDIATE_S5
)
221 emit_immediate_s5(i915
, i915
->current
.immediate
[i
]);
222 else if (i
== I915_IMMEDIATE_S6
)
223 emit_immediate_s6(i915
, i915
->current
.immediate
[i
]);
225 OUT_BATCH(i915
->current
.immediate
[i
]);
231 validate_dynamic(struct i915_context
*i915
, unsigned *batch_space
)
233 *batch_space
= util_bitcount(i915
->dynamic_dirty
& ((1 << I915_MAX_DYNAMIC
) - 1));
237 emit_dynamic(struct i915_context
*i915
)
240 for (i
= 0; i
< I915_MAX_DYNAMIC
; i
++) {
241 if (i915
->dynamic_dirty
& (1 << i
))
242 OUT_BATCH(i915
->current
.dynamic
[i
]);
247 validate_static(struct i915_context
*i915
, unsigned *batch_space
)
251 if (i915
->current
.cbuf_bo
&& (i915
->static_dirty
& I915_DST_BUF_COLOR
)) {
252 i915
->validation_buffers
[i915
->num_validation_buffers
++]
253 = i915
->current
.cbuf_bo
;
257 if (i915
->current
.depth_bo
&& (i915
->static_dirty
& I915_DST_BUF_DEPTH
)) {
258 i915
->validation_buffers
[i915
->num_validation_buffers
++]
259 = i915
->current
.depth_bo
;
263 if (i915
->static_dirty
& I915_DST_VARS
)
266 if (i915
->static_dirty
& I915_DST_RECT
)
271 emit_static(struct i915_context
*i915
)
273 if (i915
->current
.cbuf_bo
&& (i915
->static_dirty
& I915_DST_BUF_COLOR
)) {
274 OUT_BATCH(_3DSTATE_BUF_INFO_CMD
);
275 OUT_BATCH(i915
->current
.cbuf_flags
);
276 OUT_RELOC(i915
->current
.cbuf_bo
,
281 /* What happens if no zbuf??
283 if (i915
->current
.depth_bo
&& (i915
->static_dirty
& I915_DST_BUF_DEPTH
)) {
284 OUT_BATCH(_3DSTATE_BUF_INFO_CMD
);
285 OUT_BATCH(i915
->current
.depth_flags
);
286 OUT_RELOC(i915
->current
.depth_bo
,
291 if (i915
->static_dirty
& I915_DST_VARS
) {
292 OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD
);
293 OUT_BATCH(i915
->current
.dst_buf_vars
);
298 validate_map(struct i915_context
*i915
, unsigned *batch_space
)
300 const uint enabled
= i915
->current
.sampler_enable_flags
;
302 struct i915_texture
*tex
;
304 *batch_space
= i915
->current
.sampler_enable_nr
?
305 2 + 3*i915
->current
.sampler_enable_nr
: 0;
307 for (unit
= 0; unit
< I915_TEX_UNITS
; unit
++) {
308 if (enabled
& (1 << unit
)) {
309 tex
= i915_texture(i915
->fragment_sampler_views
[unit
]->texture
);
310 i915
->validation_buffers
[i915
->num_validation_buffers
++] = tex
->buffer
;
316 emit_map(struct i915_context
*i915
)
318 const uint nr
= i915
->current
.sampler_enable_nr
;
320 const uint enabled
= i915
->current
.sampler_enable_flags
;
323 OUT_BATCH(_3DSTATE_MAP_STATE
| (3 * nr
));
325 for (unit
= 0; unit
< I915_TEX_UNITS
; unit
++) {
326 if (enabled
& (1 << unit
)) {
327 struct i915_texture
*texture
= i915_texture(i915
->fragment_sampler_views
[unit
]->texture
);
328 struct i915_winsys_buffer
*buf
= texture
->buffer
;
329 unsigned offset
= i915
->current
.texbuffer
[unit
][2];
335 OUT_RELOC(buf
, I915_USAGE_SAMPLER
, offset
);
336 OUT_BATCH(i915
->current
.texbuffer
[unit
][0]); /* MS3 */
337 OUT_BATCH(i915
->current
.texbuffer
[unit
][1]); /* MS4 */
345 validate_sampler(struct i915_context
*i915
, unsigned *batch_space
)
347 *batch_space
= i915
->current
.sampler_enable_nr
?
348 2 + 3*i915
->current
.sampler_enable_nr
: 0;
352 emit_sampler(struct i915_context
*i915
)
354 if (i915
->current
.sampler_enable_nr
) {
357 OUT_BATCH( _3DSTATE_SAMPLER_STATE
|
358 (3 * i915
->current
.sampler_enable_nr
) );
360 OUT_BATCH( i915
->current
.sampler_enable_flags
);
362 for (i
= 0; i
< I915_TEX_UNITS
; i
++) {
363 if (i915
->current
.sampler_enable_flags
& (1<<i
)) {
364 OUT_BATCH( i915
->current
.sampler
[i
][0] );
365 OUT_BATCH( i915
->current
.sampler
[i
][1] );
366 OUT_BATCH( i915
->current
.sampler
[i
][2] );
373 validate_constants(struct i915_context
*i915
, unsigned *batch_space
)
375 int nr
= i915
->fs
->num_constants
?
376 2 + 4*i915
->fs
->num_constants
: 0;
382 emit_constants(struct i915_context
*i915
)
384 /* Collate the user-defined constants with the fragment shader's
385 * immediates according to the constant_flags[] array.
387 const uint nr
= i915
->fs
->num_constants
;
389 assert(nr
< I915_MAX_CONSTANT
);
393 OUT_BATCH( _3DSTATE_PIXEL_SHADER_CONSTANTS
| (nr
* 4) );
394 OUT_BATCH((1 << nr
) - 1);
396 for (i
= 0; i
< nr
; i
++) {
398 if (i915
->fs
->constant_flags
[i
] == I915_CONSTFLAG_USER
) {
399 /* grab user-defined constant */
400 c
= (uint
*) i915_buffer(i915
->constants
[PIPE_SHADER_FRAGMENT
])->data
;
404 /* emit program constant */
405 c
= (uint
*) i915
->fs
->constants
[i
];
409 float *f
= (float *) c
;
410 printf("Const %2d: %f %f %f %f %s\n", i
, f
[0], f
[1], f
[2], f
[3],
411 (i915
->fs
->constant_flags
[i
] == I915_CONSTFLAG_USER
412 ? "user" : "immediate"));
424 validate_program(struct i915_context
*i915
, unsigned *batch_space
)
426 uint additional_size
= 0;
428 additional_size
+= i915
->current
.target_fixup_format
? 3 : 0;
430 /* we need more batch space if we want to emulate rgba framebuffers */
431 *batch_space
= i915
->fs
->decl_len
+ i915
->fs
->program_len
+ additional_size
;
435 emit_program(struct i915_context
*i915
)
437 uint additional_size
= 0;
440 /* count how much additional space we'll need */
441 validate_program(i915
, &additional_size
);
442 additional_size
-= i915
->fs
->decl_len
+ i915
->fs
->program_len
;
444 /* we should always have, at least, a pass-through program */
445 assert(i915
->fs
->program_len
> 0);
447 /* output the declarations */
449 /* first word has the size, we have to adjust that */
450 uint size
= (i915
->fs
->decl
[0]);
451 size
+= additional_size
;
455 for (i
= 1 ; i
< i915
->fs
->decl_len
; i
++)
456 OUT_BATCH(i915
->fs
->decl
[i
]);
458 /* output the program */
459 assert(i915
->fs
->program_len
% 3 == 0);
460 for (i
= 0 ; i
< i915
->fs
->program_len
; i
+=3) {
461 OUT_BATCH(i915
->fs
->program
[i
]);
462 OUT_BATCH(i915
->fs
->program
[i
+1]);
463 OUT_BATCH(i915
->fs
->program
[i
+2]);
466 /* we emit an additional mov with swizzle to fake RGBA framebuffers */
467 if (i915
->current
.target_fixup_format
) {
468 /* mov out_color, out_color.zyxw */
470 (REG_TYPE_OC
<< A0_DEST_TYPE_SHIFT
) |
471 A0_DEST_CHANNEL_ALL
|
472 (REG_TYPE_OC
<< A0_SRC0_TYPE_SHIFT
) |
473 (T_DIFFUSE
<< A0_SRC0_NR_SHIFT
));
474 OUT_BATCH(i915
->current
.fixup_swizzle
);
480 emit_draw_rect(struct i915_context
*i915
)
482 if (i915
->static_dirty
& I915_DST_RECT
) {
483 OUT_BATCH(_3DSTATE_DRAW_RECT_CMD
);
484 OUT_BATCH(DRAW_RECT_DIS_DEPTH_OFS
);
485 OUT_BATCH(i915
->current
.draw_offset
);
486 OUT_BATCH(i915
->current
.draw_size
);
487 OUT_BATCH(i915
->current
.draw_offset
);
492 i915_validate_state(struct i915_context
*i915
, unsigned *batch_space
)
496 i915
->num_validation_buffers
= 0;
497 if (i915
->hardware_dirty
& I915_HW_INVARIANT
)
498 *batch_space
= ARRAY_SIZE(invariant_state
);
503 static int counter_total
= 0;
504 #define VALIDATE_ATOM(atom, hw_dirty) \
505 if (i915->hardware_dirty & hw_dirty) { \
506 static int counter_##atom = 0;\
507 validate_##atom(i915, &tmp); \
508 *batch_space += tmp;\
509 counter_##atom += tmp;\
510 counter_total += tmp;\
511 printf("%s: \t%d/%d \t%2.2f\n",#atom, counter_##atom, counter_total, counter_##atom*100.f/counter_total);}
513 #define VALIDATE_ATOM(atom, hw_dirty) \
514 if (i915->hardware_dirty & hw_dirty) { \
515 validate_##atom(i915, &tmp); \
516 *batch_space += tmp; }
518 VALIDATE_ATOM(flush
, I915_HW_FLUSH
);
519 VALIDATE_ATOM(immediate
, I915_HW_IMMEDIATE
);
520 VALIDATE_ATOM(dynamic
, I915_HW_DYNAMIC
);
521 VALIDATE_ATOM(static, I915_HW_STATIC
);
522 VALIDATE_ATOM(map
, I915_HW_MAP
);
523 VALIDATE_ATOM(sampler
, I915_HW_SAMPLER
);
524 VALIDATE_ATOM(constants
, I915_HW_CONSTANTS
);
525 VALIDATE_ATOM(program
, I915_HW_PROGRAM
);
528 if (i915
->num_validation_buffers
== 0)
531 if (!i915_winsys_validate_buffers(i915
->batch
, i915
->validation_buffers
,
532 i915
->num_validation_buffers
))
538 /* Push the state into the sarea and/or texture memory.
541 i915_emit_hardware_state(struct i915_context
*i915
)
543 unsigned batch_space
;
546 assert(i915
->dirty
== 0);
548 if (I915_DBG_ON(DBG_ATOMS
))
549 i915_dump_hardware_dirty(i915
, __FUNCTION__
);
551 if (!i915_validate_state(i915
, &batch_space
)) {
552 FLUSH_BATCH(NULL
, I915_FLUSH_ASYNC
);
553 assert(i915_validate_state(i915
, &batch_space
));
556 if(!BEGIN_BATCH(batch_space
)) {
557 FLUSH_BATCH(NULL
, I915_FLUSH_ASYNC
);
558 assert(i915_validate_state(i915
, &batch_space
));
559 assert(BEGIN_BATCH(batch_space
));
562 save_ptr
= (uintptr_t)i915
->batch
->ptr
;
564 #define EMIT_ATOM(atom, hw_dirty) \
565 if (i915->hardware_dirty & hw_dirty) \
567 EMIT_ATOM(flush
, I915_HW_FLUSH
);
568 EMIT_ATOM(invariant
, I915_HW_INVARIANT
);
569 EMIT_ATOM(immediate
, I915_HW_IMMEDIATE
);
570 EMIT_ATOM(dynamic
, I915_HW_DYNAMIC
);
571 EMIT_ATOM(static, I915_HW_STATIC
);
572 EMIT_ATOM(map
, I915_HW_MAP
);
573 EMIT_ATOM(sampler
, I915_HW_SAMPLER
);
574 EMIT_ATOM(constants
, I915_HW_CONSTANTS
);
575 EMIT_ATOM(program
, I915_HW_PROGRAM
);
576 EMIT_ATOM(draw_rect
, I915_HW_STATIC
);
579 I915_DBG(DBG_EMIT
, "%s: used %d dwords, %d dwords reserved\n", __FUNCTION__
,
580 ((uintptr_t)i915
->batch
->ptr
- save_ptr
) / 4,
582 assert(((uintptr_t)i915
->batch
->ptr
- save_ptr
) / 4 == batch_space
);
584 i915
->hardware_dirty
= 0;
585 i915
->immediate_dirty
= 0;
586 i915
->dynamic_dirty
= 0;
587 i915
->static_dirty
= 0;
588 i915
->flush_dirty
= 0;