/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "program/program.h"
26 #include "brw_state.h"
27 #include "brw_defines.h"
28 #include "intel_batchbuffer.h"
31 upload_ps_extra(struct brw_context
*brw
)
33 struct gl_context
*ctx
= &brw
->ctx
;
34 /* BRW_NEW_FRAGMENT_PROGRAM */
35 const struct brw_fragment_program
*fp
=
36 brw_fragment_program_const(brw
->fragment_program
);
39 dw1
|= GEN8_PSX_PIXEL_SHADER_VALID
;
41 if (fp
->program
.UsesKill
)
42 dw1
|= GEN8_PSX_KILL_ENABLE
;
44 /* BRW_NEW_FRAGMENT_PROGRAM */
45 if (brw
->wm
.prog_data
->num_varying_inputs
!= 0)
46 dw1
|= GEN8_PSX_ATTRIBUTE_ENABLE
;
48 if (fp
->program
.Base
.OutputsWritten
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
)) {
49 switch (fp
->program
.FragDepthLayout
) {
50 case FRAG_DEPTH_LAYOUT_NONE
:
51 case FRAG_DEPTH_LAYOUT_ANY
:
52 dw1
|= GEN8_PSX_PSCDEPTH_ON
;
54 case FRAG_DEPTH_LAYOUT_GREATER
:
55 dw1
|= GEN8_PSX_PSCDEPTH_ON_GE
;
57 case FRAG_DEPTH_LAYOUT_LESS
:
58 dw1
|= GEN8_PSX_PSCDEPTH_ON_LE
;
60 case FRAG_DEPTH_LAYOUT_UNCHANGED
:
65 if (fp
->program
.Base
.InputsRead
& VARYING_BIT_POS
)
66 dw1
|= GEN8_PSX_USES_SOURCE_DEPTH
| GEN8_PSX_USES_SOURCE_W
;
68 /* BRW_NEW_NUM_SAMPLES | _NEW_MULTISAMPLE */
69 bool multisampled_fbo
= brw
->num_samples
> 1;
70 if (multisampled_fbo
&&
71 _mesa_get_min_invocations_per_fragment(ctx
, &fp
->program
, false) > 1)
72 dw1
|= GEN8_PSX_SHADER_IS_PER_SAMPLE
;
74 if (fp
->program
.Base
.SystemValuesRead
& SYSTEM_BIT_SAMPLE_MASK_IN
)
75 dw1
|= GEN8_PSX_SHADER_USES_INPUT_COVERAGE_MASK
;
77 if (brw
->wm
.prog_data
->uses_omask
)
78 dw1
|= GEN8_PSX_OMASK_TO_RENDER_TARGET
;
81 OUT_BATCH(_3DSTATE_PS_EXTRA
<< 16 | (2 - 2));
86 const struct brw_tracked_state gen8_ps_extra
= {
88 .mesa
= _NEW_MULTISAMPLE
,
89 .brw
= BRW_NEW_CONTEXT
| BRW_NEW_FRAGMENT_PROGRAM
| BRW_NEW_NUM_SAMPLES
,
92 .emit
= upload_ps_extra
,
96 upload_wm_state(struct brw_context
*brw
)
98 struct gl_context
*ctx
= &brw
->ctx
;
101 dw1
|= GEN7_WM_STATISTICS_ENABLE
;
102 dw1
|= GEN7_WM_LINE_AA_WIDTH_1_0
;
103 dw1
|= GEN7_WM_LINE_END_CAP_AA_WIDTH_0_5
;
104 dw1
|= GEN7_WM_POINT_RASTRULE_UPPER_RIGHT
;
107 if (ctx
->Line
.StippleFlag
)
108 dw1
|= GEN7_WM_LINE_STIPPLE_ENABLE
;
111 if (ctx
->Polygon
.StippleFlag
)
112 dw1
|= GEN7_WM_POLYGON_STIPPLE_ENABLE
;
114 /* CACHE_NEW_WM_PROG */
115 dw1
|= brw
->wm
.prog_data
->barycentric_interp_modes
<<
116 GEN7_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT
;
119 OUT_BATCH(_3DSTATE_WM
<< 16 | (2 - 2));
124 const struct brw_tracked_state gen8_wm_state
= {
126 .mesa
= _NEW_LINE
| _NEW_POLYGON
,
127 .brw
= BRW_NEW_CONTEXT
,
128 .cache
= CACHE_NEW_WM_PROG
,
130 .emit
= upload_wm_state
,
134 upload_ps_state(struct brw_context
*brw
)
136 struct gl_context
*ctx
= &brw
->ctx
;
137 uint32_t dw3
= 0, dw6
= 0, dw7
= 0;
139 /* CACHE_NEW_WM_PROG */
140 gen8_upload_constant_state(brw
, &brw
->wm
.base
, true, _3DSTATE_CONSTANT_PS
);
142 /* Initialize the execution mask with VMask. Otherwise, derivatives are
143 * incorrect for subspans where some of the pixels are unlit. We believe
144 * the bit just didn't take effect in previous generations.
146 dw3
|= GEN7_PS_VECTOR_MASK_ENABLE
;
149 (ALIGN(brw
->wm
.base
.sampler_count
, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT
;
151 /* CACHE_NEW_WM_PROG */
153 ((brw
->wm
.prog_data
->base
.binding_table
.size_bytes
/ 4) <<
154 GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT
);
156 /* Use ALT floating point mode for ARB fragment programs, because they
157 * require 0^0 == 1. Even though _CurrentFragmentProgram is used for
158 * rendering, CurrentFragmentProgram is used for this check to
159 * differentiate between the GLSL and non-GLSL cases.
161 if (ctx
->Shader
.CurrentProgram
[MESA_SHADER_FRAGMENT
] == NULL
)
162 dw3
|= GEN7_PS_FLOATING_POINT_MODE_ALT
;
164 /* 3DSTATE_PS expects the number of threads per PSD, which is always 64;
165 * it implicitly scales for different GT levels (which have some # of PSDs).
167 dw6
|= (64 - 2) << HSW_PS_MAX_THREADS_SHIFT
;
169 /* CACHE_NEW_WM_PROG */
170 if (brw
->wm
.prog_data
->base
.nr_params
> 0)
171 dw6
|= GEN7_PS_PUSH_CONSTANT_ENABLE
;
173 /* From the documentation for this packet:
174 * "If the PS kernel does not need the Position XY Offsets to
175 * compute a Position Value, then this field should be programmed
176 * to POSOFFSET_NONE."
178 * "SW Recommendation: If the PS kernel needs the Position Offsets
179 * to compute a Position XY value, this field should match Position
180 * ZW Interpolation Mode to ensure a consistent position.xyzw
183 * We only require XY sample offsets. So, this recommendation doesn't
184 * look useful at the moment. We might need this in future.
186 if (brw
->wm
.prog_data
->uses_pos_offset
)
187 dw6
|= GEN7_PS_POSOFFSET_SAMPLE
;
189 dw6
|= GEN7_PS_POSOFFSET_NONE
;
192 * In case of non 1x per sample shading, only one of SIMD8 and SIMD16
193 * should be enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader
194 * is successfully compiled. In majority of the cases that bring us
195 * better performance than 'SIMD8 only' dispatch.
197 int min_invocations_per_fragment
=
198 _mesa_get_min_invocations_per_fragment(ctx
, brw
->fragment_program
, false);
199 assert(min_invocations_per_fragment
>= 1);
201 if (brw
->wm
.prog_data
->prog_offset_16
) {
202 dw6
|= GEN7_PS_16_DISPATCH_ENABLE
;
203 if (min_invocations_per_fragment
== 1) {
204 dw6
|= GEN7_PS_8_DISPATCH_ENABLE
;
205 dw7
|= (brw
->wm
.prog_data
->first_curbe_grf
<<
206 GEN7_PS_DISPATCH_START_GRF_SHIFT_0
);
207 dw7
|= (brw
->wm
.prog_data
->first_curbe_grf_16
<<
208 GEN7_PS_DISPATCH_START_GRF_SHIFT_2
);
210 dw7
|= (brw
->wm
.prog_data
->first_curbe_grf_16
<<
211 GEN7_PS_DISPATCH_START_GRF_SHIFT_0
);
214 dw6
|= GEN7_PS_8_DISPATCH_ENABLE
;
215 dw7
|= (brw
->wm
.prog_data
->first_curbe_grf
<<
216 GEN7_PS_DISPATCH_START_GRF_SHIFT_0
);
220 OUT_BATCH(_3DSTATE_PS
<< 16 | (12 - 2));
221 if (brw
->wm
.prog_data
->prog_offset_16
&& min_invocations_per_fragment
> 1)
222 OUT_BATCH(brw
->wm
.base
.prog_offset
+ brw
->wm
.prog_data
->prog_offset_16
);
224 OUT_BATCH(brw
->wm
.base
.prog_offset
);
227 if (brw
->wm
.prog_data
->total_scratch
) {
228 OUT_RELOC64(brw
->wm
.base
.scratch_bo
,
229 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
230 ffs(brw
->wm
.prog_data
->total_scratch
) - 11);
237 OUT_BATCH(0); /* kernel 1 pointer */
239 OUT_BATCH(brw
->wm
.base
.prog_offset
+ brw
->wm
.prog_data
->prog_offset_16
);
244 const struct brw_tracked_state gen8_ps_state
= {
246 .mesa
= _NEW_PROGRAM_CONSTANTS
| _NEW_MULTISAMPLE
,
247 .brw
= BRW_NEW_FRAGMENT_PROGRAM
|
248 BRW_NEW_PS_BINDING_TABLE
|
250 BRW_NEW_PUSH_CONSTANT_ALLOCATION
,
251 .cache
= CACHE_NEW_WM_PROG
253 .emit
= upload_ps_state
,