/* Now write to the render target and terminate the thread */
emit_render_target_write(
mrf_rt_write,
- base_mrf,
+ base_mrf,
mrf_offset /* msg_length. TODO: Should be smaller for non-RGBA formats. */,
use_header);
}
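/* Illustrative sketch only (not part of the driver): one way the TODO above
 * could be addressed is to derive the payload size from the number of
 * components the render-target format actually stores instead of always
 * sending a full RGBA payload.  The helper name and the per-component
 * register width below are assumptions made for this example.
 */
static inline unsigned
example_rt_write_msg_length(unsigned num_components,
                            unsigned regs_per_component,
                            bool use_header)
{
   /* Optional 2-register header plus one payload block per written component. */
   return (use_header ? 2 : 0) + num_components * regs_per_component;
}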
brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
else
brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));
-
+
brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
brw_MOV(p, c->reg.nr_verts, brw_imm_ud(0));
brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
}
brw_ENDIF(p);
-
+
}
brw_ELSE(p);
{
brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
- }
+ }
brw_ENDIF(p);
}
brw_ENDIF(p);
-
+
/* vtxPrev = vtx;
* inlist_ptr++;
*/
{
brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));
-
+
apply_one_offset(c, v0);
-
+
brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_G);
}
emit_unfilled_primitives(c);
brw_clip_kill_thread(c);
}
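/* Rough scalar sketch (illustrative only, not driver code) of the loop the EU
 * instructions above implement: fetch the next 16-bit vertex offset from the
 * list, advance the list pointer, apply the polygon offset to that vertex,
 * and keep looping while the decremented counter stays positive (the ADD of
 * -1 with BRW_CONDITIONAL_G).  The names below are assumptions, not the
 * driver's types, and the count is assumed to be positive on entry.
 */
static void
example_offset_loop(const unsigned short *vert_list, int count,
                    void (*apply_offset)(unsigned short vert))
{
   do {
      unsigned short vert = *vert_list++;   /* v0 = *v0ptr; v0ptr += sizeof(short) */
      apply_offset(vert);                   /* apply_one_offset(c, v0)             */
   } while (--count > 0);                   /* loopcount += -1, flag on "> 0"      */
}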
-
-
-
brw_MOV(p, tmp, deref_4f(vert_addr, hpos_offset));
brw_clip_project_position(c, tmp);
brw_MOV(p, deref_4f(vert_addr, ndc_offset), tmp);
-
+
release_tmp(c, tmp);
}
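/* Illustrative scalar equivalent of the projection above (not driver code):
 * the HPOS clip-space position is read into a temporary, the perspective
 * divide is applied, and the result is stored back at the NDC offset of the
 * vertex.  The struct and the choice of storing 1/w are assumptions for this
 * sketch only.
 */
struct example_vec4 { float x, y, z, w; };

static struct example_vec4
example_project_to_ndc(struct example_vec4 clip)
{
   struct example_vec4 ndc;
   float inv_w = 1.0f / clip.w;   /* assumes w != 0 for clipped vertices */
   ndc.x = clip.x * inv_w;
   ndc.y = clip.y * inv_w;
   ndc.z = clip.z * inv_w;
   ndc.w = inv_w;
   return ndc;
}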
#include "brw_context.h"
#include "brw_eu.h"
-#define MAX_GS_VERTS (4)
+#define MAX_GS_VERTS (4)
struct brw_ff_gs_prog_key {
GLbitfield64 attrs;
_mesa_UseProgram(*prog_id);
return *prog_id;
}
-
+
fs_source = ralloc_asprintf(NULL, fs_tmpl, sampler->sampler,
sampler->fetch);
_mesa_meta_compile_and_link_program(ctx, vs_source, fs_source,
}
/**
- * Samples in stencil buffer are interleaved, and unfortunately the data port
+ * Samples in stencil buffer are interleaved, and unfortunately the data port
 * does not support it as a render target. Therefore the surface is set up as
* single sampled and the program handles the interleaving.
* In case of single sampled stencil, the render buffer is adjusted with
copy_flatshaded_attributes(c, c->vert[0], c->vert[2]);
copy_flatshaded_attributes(c, c->vert[1], c->vert[2]);
}
-
+
static void do_flatshade_line( struct brw_sf_compile *c )
{
copy_flatshaded_attributes(c, c->vert[0], c->vert[1]);
}
-
/***********************************************************************
* Triangle setup.
brw_MUL(p, brw_null_reg(), c->a1_sub_a0, c->dy2);
brw_MAC(p, c->tmp, c->a2_sub_a0, negate(c->dy0));
brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);
-
+
/* calculate dA/dy
*/
brw_MUL(p, brw_null_reg(), c->a2_sub_a0, c->dx0);
/* Copy m0..m3 to URB. m0 is implicitly copied from r0 in
* the send instruction:
- */
+ */
brw_urb_WRITE(p,
brw_null_reg(),
0,
brw_MUL(p, c->tmp, c->a1_sub_a0, c->dx0);
brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);
-
+
brw_MUL(p, c->tmp, c->a1_sub_a0, c->dy0);
brw_MUL(p, c->m2Cy, c->tmp, c->inv_det);
}
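/* Worked form of the coefficient setup above (descriptive comment only).  For
 * the triangle path, the dA/dx coefficient written to m1Cx is
 *
 *    dA/dx = ((a1 - a0) * dy2 - (a2 - a0) * dy0) * inv_det
 *
 * which is what the MUL/MAC pair with negate(c->dy0) accumulates before the
 * final MUL by inv_det; dA/dy is the matching expression built from the dx
 * edge terms (the rest of that hunk is cut off above, so its exact operands
 * are assumed).  For the line path just above, the gradient reduces to the
 * attribute delta (a1 - a0) scaled by the edge deltas dx0/dy0 and inv_det.
 */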
bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
if (pc_persp)
- {
+ {
/* This seems odd as the values are all constant, but the
* fragment shader will be expecting it:
*/
brw->urb.sfsize = sfsize;
brw->urb.vsize = vsize;
- brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
- brw->urb.nr_gs_entries = limits[GS].preferred_nr_entries;
+ brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
+ brw->urb.nr_gs_entries = limits[GS].preferred_nr_entries;
brw->urb.nr_clip_entries = limits[CLP].preferred_nr_entries;
- brw->urb.nr_sf_entries = limits[SF].preferred_nr_entries;
- brw->urb.nr_cs_entries = limits[CS].preferred_nr_entries;
+ brw->urb.nr_sf_entries = limits[SF].preferred_nr_entries;
+ brw->urb.nr_cs_entries = limits[CS].preferred_nr_entries;
brw->urb.constrained = 0;
}
if (!check_urb_layout(brw)) {
- brw->urb.nr_vs_entries = limits[VS].min_nr_entries;
- brw->urb.nr_gs_entries = limits[GS].min_nr_entries;
+ brw->urb.nr_vs_entries = limits[VS].min_nr_entries;
+ brw->urb.nr_gs_entries = limits[GS].min_nr_entries;
brw->urb.nr_clip_entries = limits[CLP].min_nr_entries;
- brw->urb.nr_sf_entries = limits[SF].min_nr_entries;
- brw->urb.nr_cs_entries = limits[CS].min_nr_entries;
+ brw->urb.nr_sf_entries = limits[SF].min_nr_entries;
+ brw->urb.nr_cs_entries = limits[CS].min_nr_entries;
/* Mark us as operating with constrained nr_entries, so that next
* time we recalculate we'll resize the fences in the hope of
* escaping constrained mode and getting back to normal performance.
*/
brw->urb.constrained = 1;
-
+
if (!check_urb_layout(brw)) {
/* This is impossible, given the maximal sizes of urb
* entries and the values for minimum nr of entries
*/
fprintf(stderr, "couldn't calculate URB layout!\n");
exit(1);
}
-
+
if (unlikely(INTEL_DEBUG & (DEBUG_URB|DEBUG_PERF)))
fprintf(stderr, "URB CONSTRAINED\n");
}
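/* Summary of the sizing strategy spread across the hunks above (descriptive
 * comment only): each unit is first given its preferred number of URB entries
 * with urb.constrained cleared; if check_urb_layout() reports the layout does
 * not fit, every unit drops to its minimum entry count and urb.constrained is
 * set so the next recalculation will try to grow back to the preferred sizes;
 * if even the minimum layout fails, the driver reports the error and exits,
 * since the minimum counts are expected to always fit.
 */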
const uint32_t surf_index = render_target_start + i;
if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
- surf_offset[surf_index] =
+ surf_offset[surf_index] =
brw->vtbl.update_renderbuffer_surface(
brw, fb->_ColorDrawBuffers[i],
_mesa_geometric_layers(fb) > 0, i, surf_index);
const unsigned sampler_count =
DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);
- dw3 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT);
+ dw3 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT);
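/* The sampler-count field is programmed in groups of four samplers; e.g. a
 * stage using 5 samplers gives DIV_ROUND_UP(CLAMP(5, 0, 16), 4) == 2.
 */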
/* BRW_NEW_FS_PROG_DATA */
dw3 |=
return NULL;
}
- return image;
+ return image;
}
static __DRIimage *
if (image == NULL)
return NULL;
-
cpp = _mesa_get_format_bytes(image->format);
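/* cpp is the format's bytes per pixel; together with width/height and the
 * requested tiling it sizes the tiled BO allocated below, with the resulting
 * pitch returned through an out parameter of drm_intel_bo_alloc_tiled().
 */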
image->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr, "image",
width, height, cpp, &tiling,