i965/vs: Add a function for how many MRFs get written as part of a SEND.
[mesa.git] / src/mesa/drivers/dri/i965/brw_vec4.cpp
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
extern "C" {
#include "main/macros.h"
#include "program/prog_parameter.h"
}

#define MAX_INSTRUCTION (1 << 30)

namespace brw {

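/**
 * Returns true if this instruction is one of the transcendental math
 * opcodes (RCP, RSQ, SQRT, EXP2, LOG2, SIN, COS, POW) handled by the
 * hardware math unit.
 */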
bool
vec4_instruction::is_math()
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_POW);
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case VS_OPCODE_SCRATCH_READ:
      return 2;
   case VS_OPCODE_SCRATCH_WRITE:
      return 3;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}

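/**
 * Returns true if the two source registers reference the same value,
 * including type, swizzle, and negate/abs modifiers.  Sources using
 * relative addressing never compare equal.
 */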
bool
src_reg::equals(src_reg *r)
{
   return (file == r->file &&
           reg == r->reg &&
           reg_offset == r->reg_offset &&
           type == r->type &&
           negate == r->negate &&
           abs == r->abs &&
           swizzle == r->swizzle &&
           !reladdr && !r->reladdr &&
           memcmp(&fixed_hw_reg, &r->fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0 &&
           imm.u == r->imm.u);
}

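/**
 * Computes a conservative live interval (first def, last use) for each
 * virtual GRF.  Inside loops, intervals are extended to span the whole
 * outermost loop, since a value written on one iteration may be read on
 * the next.
 */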
void
vec4_visitor::calculate_live_intervals()
{
   if (this->live_intervals_valid)
      return;

   int *def = ralloc_array(mem_ctx, int, virtual_grf_count);
   int *use = ralloc_array(mem_ctx, int, virtual_grf_count);
   int loop_depth = 0;
   int loop_start = 0;

   for (int i = 0; i < virtual_grf_count; i++) {
      def[i] = MAX_INSTRUCTION;
      use[i] = -1;
   }

   int ip = 0;
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->opcode == BRW_OPCODE_DO) {
         if (loop_depth++ == 0)
            loop_start = ip;
      } else if (inst->opcode == BRW_OPCODE_WHILE) {
         loop_depth--;

         if (loop_depth == 0) {
            /* Patches up the use of vars marked for being live across
             * the whole loop.
             */
            for (int i = 0; i < virtual_grf_count; i++) {
               if (use[i] == loop_start) {
                  use[i] = ip;
               }
            }
         }
      } else {
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF) {
               int reg = inst->src[i].reg;

               if (!loop_depth) {
                  use[reg] = ip;
               } else {
                  def[reg] = MIN2(loop_start, def[reg]);
                  use[reg] = loop_start;

                  /* Nobody else is going to go smash our start to
                   * later in the loop now, because def[reg] now
                   * points before the bb header.
                   */
               }
            }
         }
         if (inst->dst.file == GRF) {
            int reg = inst->dst.reg;

            if (!loop_depth) {
               def[reg] = MIN2(def[reg], ip);
            } else {
               def[reg] = MIN2(def[reg], loop_start);
            }
         }
      }

      ip++;
   }

   ralloc_free(this->virtual_grf_def);
   ralloc_free(this->virtual_grf_use);
   this->virtual_grf_def = def;
   this->virtual_grf_use = use;

   this->live_intervals_valid = true;
}

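/**
 * Returns true if the live intervals of virtual GRFs a and b overlap,
 * meaning the two cannot be assigned to the same hardware register.
 */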
bool
vec4_visitor::virtual_grf_interferes(int a, int b)
{
   int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
   int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);

   /* We can't handle dead register writes here, without iterating
    * over the whole instruction stream to find every single dead
    * write to that register to compare to the live interval of the
    * other register.  Just assert that dead_code_eliminate() has been
    * called.
    */
   assert((this->virtual_grf_use[a] != -1 ||
           this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
          (this->virtual_grf_use[b] != -1 ||
           this->virtual_grf_def[b] == MAX_INSTRUCTION));

   return start < end;
}

/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because a register that is deffed but never used won't be considered
 * to interfere with other regs.
 */
bool
vec4_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

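/**
 * Splits uniform aggregates so that each uniform .reg index covers
 * exactly one vec4, simplifying the packing done later.
 */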
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .reg index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].reg += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}

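/**
 * Packs the live uniform vectors together into as few push constant
 * slots as possible and rewrites instruction sources to match the new
 * locations.
 */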
void
vec4_visitor::pack_uniform_registers()
{
   bool uniform_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(uniform_used, 0, sizeof(uniform_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         uniform_used[inst->src[i].reg] = true;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      int size = this->uniform_vector_size[src];

      if (!uniform_used[src]) {
         this->uniform_vector_size[src] = 0;
         continue;
      }

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (this->uniform_vector_size[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = this->uniform_vector_size[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            c->prog_data.param[dst * 4 + new_chan[src] + j] =
               c->prog_data.param[src * 4 + j];
         }

         this->uniform_vector_size[dst] += size;
         this->uniform_vector_size[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         int src = inst->src[i].reg;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].reg = new_loc[src];

         int sx = BRW_GET_SWZ(inst->src[i].swizzle, 0) + new_chan[src];
         int sy = BRW_GET_SWZ(inst->src[i].swizzle, 1) + new_chan[src];
         int sz = BRW_GET_SWZ(inst->src[i].swizzle, 2) + new_chan[src];
         int sw = BRW_GET_SWZ(inst->src[i].swizzle, 3) + new_chan[src];
         inst->src[i].swizzle = BRW_SWIZZLE4(sx, sy, sz, sw);
      }
   }
}

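/**
 * Returns true if this source is an immediate with the value zero.
 */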
bool
src_reg::is_zero() const
{
   if (file != IMM)
      return false;

   if (type == BRW_REGISTER_TYPE_F) {
      return imm.f == 0.0;
   } else {
      return imm.i == 0;
   }
}

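/**
 * Returns true if this source is an immediate with the value one.
 */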
bool
src_reg::is_one() const
{
   if (file != IMM)
      return false;

   if (type == BRW_REGISTER_TYPE_F) {
      return imm.f == 1.0;
   } else {
      return imm.i == 1;
   }
}

/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = src_reg(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = src_reg(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = src_reg(0u);
               break;
            default:
               assert(!"not reached");
               inst->src[0] = src_reg(0.0f);
               break;
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      default:
         break;
      }
   }

   if (progress)
      this->live_intervals_valid = false;

   return progress;
}

/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const float **values = &prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].reg] == -1)
            continue;

         int uniform = inst->src[i].reg;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}

} /* namespace brw */