[intel] Add more cliprect modes to cover other meanings for batch emits.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
41
42
43
44
45 /***********************************************************************
46 * Blend color
47 */
48
49 static void upload_blend_constant_color(struct brw_context *brw)
50 {
51 struct brw_blend_constant_color bcc;
52
53 memset(&bcc, 0, sizeof(bcc));
54 bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
55 bcc.header.length = sizeof(bcc)/4-2;
56 bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
57 bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
58 bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
59 bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];
60
61 BRW_CACHED_BATCH_STRUCT(brw, &bcc);
62 }
63
64
/* Re-upload the blend constant color whenever core Mesa color state
 * (_NEW_COLOR) changes.
 */
const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .update = upload_blend_constant_color
};
73
74 /**
75 * Upload the binding table pointers, which point each stage's array of surface
76 * state pointers.
77 *
78 * The binding table pointers are relative to the surface state base address,
79 * which is 0.
80 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   /* `intel` is referenced implicitly by the BEGIN_BATCH/OUT_* macros. */
   struct intel_context *intel = &brw->intel;

   /* Dword order is fixed by the CMD_BINDING_TABLE_PTRS packet layout:
    * one pointer per fixed-function stage.  Only the WM (fragment) stage
    * has a binding table here; the others are emitted as 0.
    */
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(0); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   ADVANCE_BATCH();
}
94
/* Re-emit binding table pointers on a new batchbuffer or when the WM
 * surface binding table (CACHE_NEW_SURF_BIND) is rebuilt.
 */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .update = upload_binding_table_pointers,
};
103
104
105 /**
106 * Upload pointers to the per-stage state.
107 *
108 * The state pointers in this packet are all relative to the general state
109 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
110 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   /* Used implicitly by the BEGIN_BATCH/OUT_* macros. */
   struct intel_context *intel = &brw->intel;

   /* One state pointer per pipeline stage, in packet-defined order:
    * VS, GS, CLIP, SF, WM, CC.  GS is only pointed at when a GS program
    * is active, and CLIP is skipped while metaops are running.
    * NOTE(review): the reloc delta of 1 on the GS/CLIP entries presumably
    * sets the "enable" bit in the pointer dword — confirm against the
    * 965 PRM packet definition.
    */
   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 1);
   else
      OUT_BATCH(0);
   if (!brw->metaops.active)
      OUT_RELOC(brw->clip.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->sf.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   OUT_RELOC(brw->wm.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   OUT_RELOC(brw->cc.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   ADVANCE_BATCH();

   /* Signal downstream state that the pipelined state pointers changed. */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
133
#if 0
/* Combined into brw_psp_urb_cbs */
const struct brw_tracked_state brw_pipelined_state_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_METAOPS | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .update = upload_pipelined_state_pointers
};
#endif

/* Emit pipelined state pointers, the URB fence and constant buffer state
 * together, in that order, as a single atom of state.
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}
158
159
/* Re-emit the PSP/URB/CB group when any pipeline unit's cached state,
 * the URB layout, metaops status, or the batchbuffer itself changes.
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_METAOPS | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .update = upload_psp_urb_cbs,
};
174
175 /**
176 * Upload the depthbuffer offset and format.
177 *
178 * We have to do this per state validation as we need to emit the relocation
179 * in the batch buffer.
180 */
static void upload_depthbuffer(struct brw_context *brw)
{
   /* Used implicitly by BEGIN_BATCH/OUT_* macros. */
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;

   if (region == NULL) {
      /* No depth region bound: emit a NULL depth surface so depth/stencil
       * accesses are discarded by the hardware.
       */
      BEGIN_BATCH(5, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
		(BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      unsigned int format;

      /* Derive the hardware depth format from bytes-per-pixel. */
      switch (region->cpp) {
      case 2:
	 format = BRW_DEPTHFORMAT_D16_UNORM;
	 break;
      case 4:
	 if (intel->depth_buffer_is_float)
	    format = BRW_DEPTHFORMAT_D32_FLOAT;
	 else
	    format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
	 break;
      default:
	 /* 1- or 3-byte depth formats are never allocated by this driver. */
	 assert(0);
	 return;
      }

      /* Dword 1 packs pitch-in-bytes minus one with format, tiling mode
       * and surface type; dword 3 packs width/height minus one.
       * NOTE(review): region->pitch appears to be in pixels here (it is
       * multiplied by cpp for the byte pitch, and used raw for the width
       * field) — confirm against intel_region's definition.
       */
      BEGIN_BATCH(5, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
		(format << 18) |
		(BRW_TILEWALK_YMAJOR << 26) |
		(region->tiled << 27) |
		(BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
		DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
		((region->pitch - 1) << 6) |
		((region->height - 1) << 19));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
229
/* Re-emit the depth buffer packet when the bound depth region changes or
 * a new batchbuffer is started (relocations must be re-emitted per batch).
 */
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .update = upload_depthbuffer,
};
238
239
240
241 /***********************************************************************
242 * Polygon stipple packet
243 */
244
245 static void upload_polygon_stipple(struct brw_context *brw)
246 {
247 struct brw_polygon_stipple bps;
248 GLuint i;
249
250 memset(&bps, 0, sizeof(bps));
251 bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
252 bps.header.length = sizeof(bps)/4-2;
253
254 for (i = 0; i < 32; i++)
255 bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */
256
257 BRW_CACHED_BATCH_STRUCT(brw, &bps);
258 }
259
/* Re-upload the stipple pattern when glPolygonStipple state changes. */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .update = upload_polygon_stipple
};
268
269
270 /***********************************************************************
271 * Polygon stipple offset packet
272 */
273
static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

   /* Anchor the 32x32 stipple to the drawable's window position rather
    * than the screen origin.  X uses the drawable's top-left (dPriv->x,
    * dPriv->y) while GL's pattern origin is the window's bottom-left, so
    * the Y offset is computed from the drawable's bottom edge (y + h).
    * Both offsets are reduced mod 32 to the pattern's period.
    */
   bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
   bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}
288
/* The stipple anchor depends only on window position (_NEW_WINDOW_POS). */
const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .update = upload_polygon_stipple_offset
};
297
298 /***********************************************************************
299 * Line stipple packet
300 */
301
302 static void upload_line_stipple(struct brw_context *brw)
303 {
304 struct brw_line_stipple bls;
305 GLfloat tmp;
306 GLint tmpi;
307
308 memset(&bls, 0, sizeof(bls));
309 bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
310 bls.header.length = sizeof(bls)/4 - 2;
311
312 bls.bits0.pattern = brw->attribs.Line->StipplePattern;
313 bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;
314
315 tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
316 tmpi = tmp * (1<<13);
317
318
319 bls.bits1.inverse_repeat_count = tmpi;
320
321 BRW_CACHED_BATCH_STRUCT(brw, &bls);
322 }
323
/* Re-upload line stipple state when glLineStipple (_NEW_LINE) changes. */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .update = upload_line_stipple
};
332
333
334
335 /***********************************************************************
336 * Misc constant state packets
337 */
338
/**
 * Emit a PIPE_CONTROL requesting an instruction state cache flush.
 *
 * Currently a deliberate no-op: the packet emission is disabled.  The
 * original code reached this via a bare `return;' at the top of the
 * function, which left the packet-building code unreachable and the
 * local variable unused — both of which draw compiler warnings and
 * obscure the intent.  The reference implementation is kept under
 * #if 0 instead so the disablement is explicit.
 */
static void upload_pipe_control(struct brw_context *brw)
{
   (void) brw;  /* silence unused-parameter warnings while disabled */

#if 0
   struct brw_pipe_control pc;

   memset(&pc, 0, sizeof(pc));

   pc.header.opcode = CMD_PIPE_CONTROL;
   pc.header.length = sizeof(pc)/4 - 2;
   pc.header.post_sync_operation = PIPE_CONTROL_NOWRITE;

   pc.header.instruction_state_cache_flush_enable = 1;

   pc.bits1.dest_addr_type = PIPE_CONTROL_GTTWRITE_GLOBAL;

   BRW_BATCH_STRUCT(brw, &pc);
#endif
}
357
/* Nominally emitted once per context; note that upload_pipe_control is
 * currently a no-op (it returns before emitting anything).
 */
const struct brw_tracked_state brw_pipe_control = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .update = upload_pipe_control
};
366
367
368 /***********************************************************************
369 * Misc invarient state packets
370 */
371
static void upload_invarient_state( struct brw_context *brw )
{
   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 (3D pipeline) */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000  State Instruction Pointer: point at offset 0 (unused). */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      /* Vertex fetch statistics gathering: only enabled when the
       * DEBUG_STATS debug flag is set.
       */
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = CMD_VF_STATISTICS;
      if (INTEL_DEBUG & DEBUG_STATS)
	 vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
423
/* Invariant state is emitted once per context creation. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .update = upload_invarient_state
};
432
433 /**
434 * Define the base addresses which some state is referenced from.
435 *
436 * This allows us to avoid having to emit relocations in many places for
437 * cached state, and instead emit pointers inside of large, mostly-static
438 * state pools. This comes at the expense of memory, and more expensive cache
439 * misses.
440 */
static void upload_state_base_address( struct brw_context *brw )
{
   /* Used implicitly by the BEGIN_BATCH/OUT_* macros. */
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    *
    * NOTE(review): each dword of 1 presumably encodes base address 0
    * with the low "modify enable" bit set — confirm against the
    * STATE_BASE_ADDRESS packet definition in the 965 PRM.
    */
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}
457
/* Base addresses are fixed for the life of the context, so emit once
 * per context creation.
 */
const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .update = upload_state_base_address
};