src/mesa/drivers/dri/i965/brw_wm_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/***********************************************************************
 * WM unit - fragment programs and rasterization
 */

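/**
 * Return true if the bound fragment program writes a color output that
 * lands in a currently enabled draw buffer whose color write mask has at
 * least one channel enabled.
 */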
bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_fragment_program *fp = brw->fragment_program;
   unsigned i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

      /* _NEW_COLOR */
      if (rb &&
          (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
           fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
          (ctx->Color.ColorMask[i][0] ||
           ctx->Color.ColorMask[i][1] ||
           ctx->Color.ColorMask[i][2] ||
           ctx->Color.ColorMask[i][3])) {
         return true;
      }
   }

   return false;
}

/**
 * Set up WM unit hardware state.  See page 225 of Volume 2.
 */
static void
brw_upload_wm_unit(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_fragment_program *fp = brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data = brw->wm.prog_data;
   struct brw_wm_unit_state *wm;

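   /* Allocate the WM unit state out of the batchbuffer's state space.  The
    * 32-byte alignment matches how the hardware addresses unit state.
    */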
   wm = brw_state_batch(brw, AUB_TRACE_WM_STATE,
                        sizeof(*wm), 32, &brw->wm.base.state_offset);
   memset(wm, 0, sizeof(*wm));

   if (prog_data->prog_offset_16) {
      /* These two fields should be the same pre-gen6, which is why we
       * only have one hardware field to program for both dispatch
       * widths.
       */
      assert(prog_data->base.dispatch_grf_start_reg ==
             prog_data->dispatch_grf_start_reg_16);
   }

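   /* thread0 packs the kernel start pointer (in 64-byte units, hence the
    * >> 6) into the same DWord as grf_reg_count, so the relocation deltas
    * below also fold in grf_reg_count << 1; that way the low bits survive
    * when the kernel rewrites the DWord with the final program address.
    * If only a SIMD16 program exists (no_8), thread0 points at it directly;
    * otherwise thread0 describes the SIMD8 kernel and wm9 holds the second
    * (SIMD16) kernel pointer.
    */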
   /* BRW_NEW_PROGRAM_CACHE | BRW_NEW_FS_PROG_DATA */
   if (prog_data->no_8) {
      wm->wm5.enable_16_pix = 1;
      wm->thread0.grf_reg_count = prog_data->reg_blocks_16;
      wm->thread0.kernel_start_pointer =
         brw_program_reloc(brw,
                           brw->wm.base.state_offset +
                           offsetof(struct brw_wm_unit_state, thread0),
                           brw->wm.base.prog_offset +
                           prog_data->prog_offset_16 +
                           (prog_data->reg_blocks_16 << 1)) >> 6;

   } else {
      wm->thread0.grf_reg_count = prog_data->reg_blocks;
      wm->wm9.grf_reg_count_2 = prog_data->reg_blocks_16;

      wm->wm5.enable_8_pix = 1;
      if (prog_data->prog_offset_16)
         wm->wm5.enable_16_pix = 1;

      wm->thread0.kernel_start_pointer =
         brw_program_reloc(brw,
                           brw->wm.base.state_offset +
                           offsetof(struct brw_wm_unit_state, thread0),
                           brw->wm.base.prog_offset +
                           (wm->thread0.grf_reg_count << 1)) >> 6;

      wm->wm9.kernel_start_pointer_2 =
         brw_program_reloc(brw,
                           brw->wm.base.state_offset +
                           offsetof(struct brw_wm_unit_state, wm9),
                           brw->wm.base.prog_offset +
                           prog_data->prog_offset_16 +
                           (wm->wm9.grf_reg_count_2 << 1)) >> 6;
   }

   wm->thread1.depth_coef_urb_read_offset = 1;
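   /* use_alt_mode selects the legacy "ALT" floating-point rules (chiefly
    * different INF/NaN handling) instead of strict IEEE 754 behaviour.
    */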
   if (prog_data->base.use_alt_mode)
      wm->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
   else
      wm->thread1.floating_point_mode = BRW_FLOATING_POINT_IEEE_754;

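   /* Each binding table entry is a 4-byte surface state pointer, so convert
    * the table size in bytes into an entry count.
    */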
   wm->thread1.binding_table_entry_count =
      prog_data->base.binding_table.size_bytes / 4;

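   /* total_scratch is a power of two, so ffs() - 11 yields log2(bytes) - 10:
    * a field value of 0 means 1kB of scratch per thread.  The base pointer
    * is stored in 1kB units, hence the >> 10.
    */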
   if (prog_data->base.total_scratch != 0) {
      wm->thread2.scratch_space_base_pointer =
         brw->wm.base.scratch_bo->offset64 >> 10; /* reloc */
      wm->thread2.per_thread_scratch_space =
         ffs(prog_data->base.total_scratch) - 11;
   } else {
      wm->thread2.scratch_space_base_pointer = 0;
      wm->thread2.per_thread_scratch_space = 0;
   }

   wm->thread3.dispatch_grf_start_reg =
      prog_data->base.dispatch_grf_start_reg;
   wm->thread3.urb_entry_read_length =
      prog_data->num_varying_inputs * 2;
   wm->thread3.urb_entry_read_offset = 0;
   wm->thread3.const_urb_entry_read_length =
      prog_data->base.curb_read_length;
   /* BRW_NEW_CURBE_OFFSETS */
   wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;

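   /* The sampler count field is only a prefetch hint, given in multiples of
    * four samplers; Ironlake (gen5) requires it to be zero.
    */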
   if (brw->gen == 5)
      wm->wm4.sampler_count = 0; /* hardware requirement */
   else {
      wm->wm4.sampler_count = (brw->wm.base.sampler_count + 1) / 4;
   }

   if (brw->wm.base.sampler_count) {
      /* BRW_NEW_SAMPLER_STATE_TABLE - reloc */
      wm->wm4.sampler_state_pointer = (brw->batch.bo->offset64 +
                                       brw->wm.base.sampler_offset) >> 5;
   } else {
      wm->wm4.sampler_state_pointer = 0;
   }

   /* BRW_NEW_FRAGMENT_PROGRAM */
   wm->wm5.program_uses_depth = (fp->Base.InputsRead &
                                 (1 << VARYING_SLOT_POS)) != 0;
   wm->wm5.program_computes_depth = (fp->Base.OutputsWritten &
                                     BITFIELD64_BIT(FRAG_RESULT_DEPTH)) != 0;
   /* _NEW_BUFFERS
    * Override for NULL depthbuffer case, required by the Pixel Shader Computed
    * Depth field.
    */
   if (!intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH))
      wm->wm5.program_computes_depth = 0;

   /* _NEW_COLOR */
   wm->wm5.program_uses_killpixel =
      prog_data->uses_kill || ctx->Color.AlphaEnabled;

   wm->wm5.max_threads = brw->max_wm_threads - 1;

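   /* The pixel shader thread only needs to be dispatched when it has an
    * observable effect: it writes an enabled color buffer, computes depth,
    * or may kill pixels.
    */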
   /* _NEW_BUFFERS | _NEW_COLOR */
   if (brw_color_buffer_write_enabled(brw) ||
       wm->wm5.program_uses_killpixel ||
       wm->wm5.program_computes_depth) {
      wm->wm5.thread_dispatch_enable = 1;
   }

   wm->wm5.legacy_line_rast = 0;
   wm->wm5.legacy_global_depth_bias = 0;
   wm->wm5.early_depth_test = 1; /* never need to disable */
   wm->wm5.line_aa_region_width = 0;
   wm->wm5.line_endcap_aa_region_width = 1;

   /* _NEW_POLYGONSTIPPLE */
   wm->wm5.polygon_stipple = ctx->Polygon.StippleFlag;

   /* _NEW_POLYGON */
   if (ctx->Polygon.OffsetFill) {
      wm->wm5.depth_offset = 1;
      /* Something weird is going on with legacy_global_depth_bias,
       * offset_constant, scaling and MRD.  This value passes glean
       * but gives some odd results elsewhere (e.g. the
       * quad-offset-units test).
       */
      wm->global_depth_offset_constant = ctx->Polygon.OffsetUnits * 2;

      /* This is the only value that passes glean: */
      wm->global_depth_offset_scale = ctx->Polygon.OffsetFactor;
   }

   /* _NEW_LINE */
   wm->wm5.line_stipple = ctx->Line.StippleFlag;

   /* BRW_NEW_STATS_WM */
   if (unlikely(INTEL_DEBUG & DEBUG_STATS) || brw->stats_wm)
      wm->wm4.stats_enable = 1;

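   /* Both relocations below fold the neighbouring bit-fields of the target
    * DWord into the relocation delta, so those bits are preserved when the
    * kernel rewrites the DWord with the final buffer address.
    */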
   /* Emit scratch space relocation */
   if (prog_data->base.total_scratch != 0) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              brw->wm.base.state_offset +
                              offsetof(struct brw_wm_unit_state, thread2),
                              brw->wm.base.scratch_bo,
                              wm->thread2.per_thread_scratch_space,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   /* Emit sampler state relocation */
   if (brw->wm.base.sampler_count != 0) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              brw->wm.base.state_offset +
                              offsetof(struct brw_wm_unit_state, wm4),
                              brw->batch.bo, (brw->wm.base.sampler_offset |
                                              wm->wm4.stats_enable |
                                              (wm->wm4.sampler_count << 2)),
                              I915_GEM_DOMAIN_INSTRUCTION, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;

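   /* The global depth offset clamp is programmed with its own packet rather
    * than through the unit state, so only re-emit it when the value changes.
    */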
   /* _NEW_POLYGON */
   if (brw->wm.offset_clamp != ctx->Polygon.OffsetClamp) {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(ctx->Polygon.OffsetClamp);
      ADVANCE_BATCH();

      brw->wm.offset_clamp = ctx->Polygon.OffsetClamp;
   }
}

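/* Re-upload the WM unit state whenever any of the Mesa or driver state
 * flagged below is dirtied.
 */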
const struct brw_tracked_state brw_wm_unit = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_LINE |
              _NEW_POLYGON |
              _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CURBE_OFFSETS |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_PROGRAM_CACHE |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_STATS_WM,
   },
   .emit = brw_upload_wm_unit,
};