i965/gen4: Move WM state to state streaming.
src/mesa/drivers/dri/i965/brw_wm_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/***********************************************************************
 * WM unit - fragment programs and rasterization
 */

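/* Returns true if any enabled color draw buffer will actually be written:
 * the fragment program has to write either FRAG_RESULT_COLOR or that
 * buffer's corresponding FRAG_RESULT_DATA output, and at least one channel
 * of the buffer's color mask has to be enabled.
 */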
bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   const struct gl_fragment_program *fp = brw->fragment_program;
   int i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

      /* _NEW_COLOR */
      if (rb &&
          (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
           fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
          (ctx->Color.ColorMask[i][0] ||
           ctx->Color.ColorMask[i][1] ||
           ctx->Color.ColorMask[i][2] ||
           ctx->Color.ColorMask[i][3])) {
         return true;
      }
   }

   return false;
}

/**
 * Set up the WM unit's hardware state.  See page 225 of Volume 2.
 */
static void
brw_prepare_wm_unit(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   const struct gl_fragment_program *fp = brw->fragment_program;
   struct brw_wm_unit_state *wm;

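   /* The WM unit state is streamed into the batch buffer by brw_state_batch()
    * instead of living in a separately cached BO; brw->wm.state_offset records
    * where it landed so the relocations below can patch the right DWords.
    */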
   wm = brw_state_batch(brw, sizeof(*wm), 32, &brw->wm.state_offset);
   memset(wm, 0, sizeof(*wm));

   if (brw->wm.prog_data->prog_offset_16) {
      /* These two fields should be the same pre-gen6, which is why we
       * only have one hardware field to program for both dispatch
       * widths.
       */
      assert(brw->wm.prog_data->first_curbe_grf ==
             brw->wm.prog_data->first_curbe_grf_16);
   }

   /* CACHE_NEW_WM_PROG */
   wm->thread0.grf_reg_count = ALIGN(brw->wm.prog_data->total_grf, 16) / 16 - 1;
   wm->wm9.grf_reg_count_2 = ALIGN(brw->wm.prog_data->total_grf_16, 16) / 16 - 1;
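   /* Both kernel start pointers are 64-byte-aligned offsets into the program
    * BO (hence the >> 6); the 16-wide kernel, when present, starts
    * prog_offset_16 bytes into the same BO.
    */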
   wm->thread0.kernel_start_pointer = brw->wm.prog_bo->offset >> 6; /* reloc */
   /* reloc */
   wm->wm9.kernel_start_pointer_2 = (brw->wm.prog_bo->offset +
                                     brw->wm.prog_data->prog_offset_16) >> 6;
   wm->thread1.depth_coef_urb_read_offset = 1;
   wm->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;

   if (intel->gen == 5)
      wm->thread1.binding_table_entry_count = 0; /* hardware requirement */
   else {
      /* BRW_NEW_NR_WM_SURFACES */
      wm->thread1.binding_table_entry_count = brw->wm.nr_surfaces;
   }

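   /* The scratch space base pointer is stored in 1KB units (hence the >> 10),
    * and per_thread_scratch_space appears to select a power-of-two per-thread
    * size starting at 1KB (0 = 1KB, 1 = 2KB, ...), which is what
    * ffs(total_scratch) - 11 computes for a power-of-two total_scratch.
    */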
   if (brw->wm.prog_data->total_scratch != 0) {
      wm->thread2.scratch_space_base_pointer =
         brw->wm.scratch_bo->offset >> 10; /* reloc */
      wm->thread2.per_thread_scratch_space =
         ffs(brw->wm.prog_data->total_scratch) - 11;
   } else {
      wm->thread2.scratch_space_base_pointer = 0;
      wm->thread2.per_thread_scratch_space = 0;
   }

   wm->thread3.dispatch_grf_start_reg = brw->wm.prog_data->first_curbe_grf;
   wm->thread3.urb_entry_read_length = brw->wm.prog_data->urb_read_length;
   wm->thread3.urb_entry_read_offset = 0;
   wm->thread3.const_urb_entry_read_length =
      brw->wm.prog_data->curb_read_length;
   /* BRW_NEW_CURBE_OFFSETS */
   wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;

   if (intel->gen == 5)
      wm->wm4.sampler_count = 0; /* hardware requirement */
   else {
      /* CACHE_NEW_SAMPLER */
      wm->wm4.sampler_count = (brw->wm.sampler_count + 1) / 4;
   }

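   /* The sampler state pointer field holds a 32-byte-aligned offset, hence
    * the >> 5.
    */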
   if (brw->wm.sampler_bo != NULL) {
      /* reloc */
      wm->wm4.sampler_state_pointer = brw->wm.sampler_bo->offset >> 5;
   } else {
      wm->wm4.sampler_state_pointer = 0;
   }

   /* BRW_NEW_FRAGMENT_PROGRAM */
   wm->wm5.program_uses_depth = (fp->Base.InputsRead &
                                 (1 << FRAG_ATTRIB_WPOS)) != 0;
   wm->wm5.program_computes_depth = (fp->Base.OutputsWritten &
                                     BITFIELD64_BIT(FRAG_RESULT_DEPTH)) != 0;
   /* BRW_NEW_DEPTH_BUFFER
    * Override for NULL depthbuffer case, required by the Pixel Shader Computed
    * Depth field.
    */
   if (brw->state.depth_region == NULL)
      wm->wm5.program_computes_depth = 0;

   /* _NEW_COLOR */
   wm->wm5.program_uses_killpixel = fp->UsesKill || ctx->Color.AlphaEnabled;

   /* BRW_NEW_FRAGMENT_PROGRAM
    *
    * If the GLSL fragment shader backend was used, the program is always
    * 8-wide, possibly with an additional 16-wide variant.  Otherwise it is
    * always 16-wide.
    */
   if (ctx->Shader.CurrentFragmentProgram) {
      struct brw_shader *shader = (struct brw_shader *)
         ctx->Shader.CurrentFragmentProgram->_LinkedShaders[MESA_SHADER_FRAGMENT];

      if (shader != NULL && shader->ir != NULL) {
         wm->wm5.enable_8_pix = 1;
         if (brw->wm.prog_data->prog_offset_16)
            wm->wm5.enable_16_pix = 1;
      }
   }
   if (!wm->wm5.enable_8_pix)
      wm->wm5.enable_16_pix = 1;

   wm->wm5.max_threads = brw->wm_max_threads - 1;

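   /* Only enable pixel shader thread dispatch when the threads can have some
    * visible effect: a color buffer write, a kill/alpha-test discard, or a
    * computed depth value.
    */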
   /* _NEW_BUFFERS | _NEW_COLOR */
   if (brw_color_buffer_write_enabled(brw) ||
       wm->wm5.program_uses_killpixel ||
       wm->wm5.program_computes_depth) {
      wm->wm5.thread_dispatch_enable = 1;
   }

   wm->wm5.legacy_line_rast = 0;
   wm->wm5.legacy_global_depth_bias = 0;
   wm->wm5.early_depth_test = 1; /* never need to disable */
   wm->wm5.line_aa_region_width = 0;
   wm->wm5.line_endcap_aa_region_width = 1;

   /* _NEW_POLYGONSTIPPLE */
   wm->wm5.polygon_stipple = ctx->Polygon.StippleFlag;

   /* _NEW_POLYGON */
   if (ctx->Polygon.OffsetFill) {
      wm->wm5.depth_offset = 1;
      /* Something weird is going on with legacy_global_depth_bias,
       * offset_constant, scaling and MRD.  This value passes glean
       * but gives some odd results elsewhere (e.g. the
       * quad-offset-units test).
       */
      wm->global_depth_offset_constant = ctx->Polygon.OffsetUnits * 2;

      /* This is the only value that passes glean:
       */
      wm->global_depth_offset_scale = ctx->Polygon.OffsetFactor;
   }

   /* _NEW_LINE */
   wm->wm5.line_stipple = ctx->Line.StippleFlag;

   /* _NEW_DEPTH */
   if (unlikely(INTEL_DEBUG & DEBUG_STATS) || intel->stats_wm)
      wm->wm4.stats_enable = 1;

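   /* At exec time, each relocation below makes the kernel rewrite the whole
    * relocated DWord with the target BO's offset plus the given delta, so the
    * delta has to fold in both any real offset into the BO (prog_offset_16
    * for the 16-wide kernel) and the low bits that share the DWord with the
    * pointer (grf_reg_count, per_thread_scratch_space, stats_enable and
    * sampler_count).
    */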
   /* Emit WM program relocation */
   drm_intel_bo_emit_reloc(intel->batch.bo,
                           brw->wm.state_offset +
                           offsetof(struct brw_wm_unit_state, thread0),
                           brw->wm.prog_bo, wm->thread0.grf_reg_count << 1,
                           I915_GEM_DOMAIN_INSTRUCTION, 0);

   if (brw->wm.prog_data->prog_offset_16) {
      drm_intel_bo_emit_reloc(intel->batch.bo,
                              brw->wm.state_offset +
                              offsetof(struct brw_wm_unit_state, wm9),
                              brw->wm.prog_bo,
                              ((wm->wm9.grf_reg_count_2 << 1) +
                               brw->wm.prog_data->prog_offset_16),
                              I915_GEM_DOMAIN_INSTRUCTION, 0);
   }

   /* Emit scratch space relocation */
   if (brw->wm.prog_data->total_scratch != 0) {
      drm_intel_bo_emit_reloc(intel->batch.bo,
                              brw->wm.state_offset +
                              offsetof(struct brw_wm_unit_state, thread2),
                              brw->wm.scratch_bo,
                              wm->thread2.per_thread_scratch_space,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   /* Emit sampler state relocation */
   if (brw->wm.sampler_count != 0) {
      drm_intel_bo_emit_reloc(intel->batch.bo,
                              brw->wm.state_offset +
                              offsetof(struct brw_wm_unit_state, wm4),
                              brw->wm.sampler_bo, (wm->wm4.stats_enable |
                                                   (wm->wm4.sampler_count << 2)),
                              I915_GEM_DOMAIN_INSTRUCTION, 0);
   }

   brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
}

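/* The WM unit state atom: brw_prepare_wm_unit() is re-run whenever any of the
 * Mesa, BRW, or cache bits below are flagged dirty.  BRW_NEW_BATCH is included
 * because the unit state now lives in the batch buffer and has to be
 * re-streamed into each new batch.
 */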
const struct brw_tracked_state brw_wm_unit = {
   .dirty = {
      .mesa = (_NEW_POLYGON |
               _NEW_POLYGONSTIPPLE |
               _NEW_LINE |
               _NEW_COLOR |
               _NEW_DEPTH |
               _NEW_BUFFERS),

      .brw = (BRW_NEW_BATCH |
              BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_CURBE_OFFSETS |
              BRW_NEW_DEPTH_BUFFER |
              BRW_NEW_NR_WM_SURFACES),

      .cache = (CACHE_NEW_WM_PROG |
                CACHE_NEW_SAMPLER)
   },
   .prepare = brw_prepare_wm_unit,
};