X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Firis%2Firis_state.c;h=bfc29d71496f001b62d7465501b06105d1338f12;hb=a5e7c12cedb8a91236dd3caf99133f86349702a9;hp=13942df5008a15ea41350ff5c197b44063b05f41;hpb=31de802e7e6428b1b84b42ff555cdb8f87a59a6e;p=mesa.git diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c index 13942df5008..bfc29d71496 100644 --- a/src/gallium/drivers/iris/iris_state.c +++ b/src/gallium/drivers/iris/iris_state.c @@ -106,61 +106,8 @@ #include "iris_pipe.h" #include "iris_resource.h" -#define __gen_address_type struct iris_address -#define __gen_user_data struct iris_batch - -#define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x)) - -static uint64_t -__gen_combine_address(struct iris_batch *batch, void *location, - struct iris_address addr, uint32_t delta) -{ - uint64_t result = addr.offset + delta; - - if (addr.bo) { - iris_use_pinned_bo(batch, addr.bo, addr.write); - /* Assume this is a general address, not relative to a base. */ - result += addr.bo->gtt_offset; - } - - return result; -} - -#define __genxml_cmd_length(cmd) cmd ## _length -#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias -#define __genxml_cmd_header(cmd) cmd ## _header -#define __genxml_cmd_pack(cmd) cmd ## _pack - -#define _iris_pack_command(batch, cmd, dst, name) \ - for (struct cmd name = { __genxml_cmd_header(cmd) }, \ - *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \ - ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \ - _dst = NULL; \ - })) - -#define iris_pack_command(cmd, dst, name) \ - _iris_pack_command(NULL, cmd, dst, name) - -#define iris_pack_state(cmd, dst, name) \ - for (struct cmd name = {}, \ - *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \ - __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \ - _dst = NULL) - -#define iris_emit_cmd(batch, cmd, name) \ - _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name) - -#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \ - do { \ - uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \ - for (uint32_t i = 0; i < num_dwords; i++) \ - dw[i] = (dwords0)[i] | (dwords1)[i]; \ - VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \ - } while (0) - -#include "genxml/genX_pack.h" -#include "genxml/gen_macros.h" -#include "genxml/genX_bits.h" +#include "iris_genx_macros.h" +#include "intel/common/gen_guardband.h" #if GEN_GEN == 8 #define MOCS_PTE 0x18 @@ -367,24 +314,6 @@ translate_wrap(unsigned pipe_wrap) return map[pipe_wrap]; } -static struct iris_address -ro_bo(struct iris_bo *bo, uint64_t offset) -{ - /* CSOs must pass NULL for bo! Otherwise it will add the BO to the - * validation list at CSO creation time, instead of draw time. - */ - return (struct iris_address) { .bo = bo, .offset = offset }; -} - -static struct iris_address -rw_bo(struct iris_bo *bo, uint64_t offset) -{ - /* CSOs must pass NULL for bo! Otherwise it will add the BO to the - * validation list at CSO creation time, instead of draw time. - */ - return (struct iris_address) { .bo = bo, .offset = offset, .write = true }; -} - /** * Allocate space for some indirect state. 
* @@ -679,6 +608,85 @@ iris_enable_obj_preemption(struct iris_batch *batch, bool enable) } #endif +#if GEN_GEN == 11 +static void +iris_upload_slice_hashing_state(struct iris_batch *batch) +{ + const struct gen_device_info *devinfo = &batch->screen->devinfo; + int subslices_delta = + devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1]; + if (subslices_delta == 0) + return; + + struct iris_context *ice = NULL; + ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]); + assert(&ice->batches[IRIS_BATCH_RENDER] == batch); + + unsigned size = GENX(SLICE_HASH_TABLE_length) * 4; + uint32_t hash_address; + struct pipe_resource *tmp = NULL; + uint32_t *map = + stream_state(batch, ice->state.dynamic_uploader, &tmp, + size, 64, &hash_address); + pipe_resource_reference(&tmp, NULL); + + struct GENX(SLICE_HASH_TABLE) table0 = { + .Entry = { + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 } + } + }; + + struct GENX(SLICE_HASH_TABLE) table1 = { + .Entry = { + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 } + } + }; + + const struct GENX(SLICE_HASH_TABLE) *table = + subslices_delta < 0 ? &table0 : &table1; + GENX(SLICE_HASH_TABLE_pack)(NULL, map, table); + + iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) { + ptr.SliceHashStatePointerValid = true; + ptr.SliceHashTableStatePointer = hash_address; + } + + iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) { + mode.SliceHashingTableEnable = true; + } +} +#endif + /** * Upload the initial GPU state for a render context. * @@ -741,21 +749,24 @@ iris_init_render_context(struct iris_screen *screen, } iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val); - /* WA_2204188704: Pixel Shader Panic dispatch must be disabled. 
-    */
-   iris_pack_state(GENX(COMMON_SLICE_CHICKEN3), &reg_val, reg) {
-      reg.PSThreadPanicDispatch = 0x3;
-      reg.PSThreadPanicDispatchMask = 0x3;
-   }
-   iris_emit_lri(batch, COMMON_SLICE_CHICKEN3, reg_val);
-
    iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
       reg.StateCacheRedirectToCSSectionEnable = true;
       reg.StateCacheRedirectToCSSectionEnableMask = true;
    }
    iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
 
+   /* Hardware specification recommends disabling repacking for the
+    * compatibility with decompression mechanism in display controller.
+    */
+   if (devinfo->disable_ccs_repack) {
+      iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
+         reg.DisableRepackingforCompression = true;
+         reg.DisableRepackingforCompressionMask = true;
+      }
+      iris_emit_lri(batch, CACHE_MODE_0, reg_val);
+   }
 
-   // XXX: 3D_MODE?
+   iris_upload_slice_hashing_state(batch);
 #endif
 
    /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
@@ -852,6 +863,7 @@ struct iris_depth_buffer_state {
  */
 struct iris_genx_state {
    struct iris_vertex_buffer_state vertex_buffers[33];
+   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
 
    struct iris_depth_buffer_state depth_buffer;
 
@@ -1271,7 +1283,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
 
    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
       sf.StatisticsEnable = true;
-      sf.ViewportTransformEnable = true;
       sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
       sf.LineEndCapAntialiasingRegionWidth =
          state->line_smooth ? _10pixels : _05pixels;
@@ -1738,6 +1749,8 @@ fill_surface_state(struct isl_device *isl_dev,
       .address = res->bo->gtt_offset + res->offset,
    };
 
+   assert(!iris_resource_unfinished_aux_import(res));
+
    if (aux_usage != ISL_AUX_USAGE_NONE) {
       f.aux_surf = &res->aux.surf;
       f.aux_usage = aux_usage;
@@ -1828,6 +1841,9 @@ iris_create_sampler_view(struct pipe_context *ctx,
       isv->view.array_len =
          tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
 
+      if (iris_resource_unfinished_aux_import(isv->res))
+         iris_resource_finish_aux_import(&screen->base, isv->res);
+
       unsigned aux_modes = isv->res->aux.sampler_usages;
       while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
@@ -1873,23 +1889,6 @@ iris_create_surface(struct pipe_context *ctx,
    struct iris_context *ice = (struct iris_context *) ctx;
    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
    const struct gen_device_info *devinfo = &screen->devinfo;
-   struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
-   struct pipe_surface *psurf = &surf->base;
-   struct iris_resource *res = (struct iris_resource *) tex;
-
-   if (!surf)
-      return NULL;
-
-   pipe_reference_init(&psurf->reference, 1);
-   pipe_resource_reference(&psurf->texture, tex);
-   psurf->context = ctx;
-   psurf->format = tmpl->format;
-   psurf->width = tex->width0;
-   psurf->height = tex->height0;
-   psurf->texture = tex;
-   psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
-   psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
-   psurf->u.tex.level = tmpl->u.tex.level;
 
    isl_surf_usage_flags_t usage = 0;
    if (tmpl->writable)
@@ -1900,7 +1899,7 @@ iris_create_surface(struct pipe_context *ctx,
       usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
 
    const struct iris_format_info fmt =
-      iris_format_for_usage(devinfo, psurf->format, usage);
+      iris_format_for_usage(devinfo, tmpl->format, usage);
 
    if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
       !isl_format_supports_rendering(devinfo, fmt.fmt)) {
@@ -1908,10 +1907,27 @@ iris_create_surface(struct pipe_context *ctx,
       * hasn't had the opportunity yet.
In the meantime, we need to * avoid hitting ISL asserts about unsupported formats below. */ - free(surf); return NULL; } + struct iris_surface *surf = calloc(1, sizeof(struct iris_surface)); + struct pipe_surface *psurf = &surf->base; + struct iris_resource *res = (struct iris_resource *) tex; + + if (!surf) + return NULL; + + pipe_reference_init(&psurf->reference, 1); + pipe_resource_reference(&psurf->texture, tex); + psurf->context = ctx; + psurf->format = tmpl->format; + psurf->width = tex->width0; + psurf->height = tex->height0; + psurf->texture = tex; + psurf->u.tex.first_layer = tmpl->u.tex.first_layer; + psurf->u.tex.last_layer = tmpl->u.tex.last_layer; + psurf->u.tex.level = tmpl->u.tex.level; + struct isl_view *view = &surf->view; *view = (struct isl_view) { .format = fmt.fmt, @@ -1938,6 +1954,9 @@ iris_create_surface(struct pipe_context *ctx, return NULL; if (!isl_format_is_compressed(res->surf.format)) { + if (iris_resource_unfinished_aux_import(res)) + iris_resource_finish_aux_import(&screen->base, res); + /* This is a normal surface. Fill out a SURFACE_STATE for each possible * auxiliary surface mode and return the pipe_surface. */ @@ -2011,12 +2030,8 @@ iris_create_surface(struct pipe_context *ctx, const struct isl_format_layout *fmtl = isl_format_get_layout(res->surf.format); isl_surf.format = fmt.fmt; - isl_surf.logical_level0_px.width = - DIV_ROUND_UP(isl_surf.logical_level0_px.width, fmtl->bw); - isl_surf.logical_level0_px.height = - DIV_ROUND_UP(isl_surf.logical_level0_px.height, fmtl->bh); - isl_surf.phys_level0_sa.width /= fmtl->bw; - isl_surf.phys_level0_sa.height /= fmtl->bh; + isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf); + isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf); tile_x_sa /= fmtl->bw; tile_y_sa /= fmtl->bh; @@ -2094,16 +2109,13 @@ iris_set_shader_images(struct pipe_context *ctx, const struct pipe_image_view *img = &p_images[i]; struct iris_resource *res = (void *) img->resource; - // XXX: these are not retained forever, use a separate uploader? 
void *map = alloc_surface_states(ice->state.surface_uploader, &iv->surface_state, 1 << ISL_AUX_USAGE_NONE); if (!unlikely(map)) return; - iv->base = *img; - iv->base.resource = NULL; - pipe_resource_reference(&iv->base.resource, &res->base); + util_copy_image_view(&iv->base, img); shs->bound_image_views |= 1 << (start_slot + i); @@ -2184,7 +2196,7 @@ iris_set_shader_images(struct pipe_context *ctx, /* Broadwell also needs brw_image_params re-uploaded */ if (GEN_GEN < 9) { ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage; - shs->cbuf0_needs_upload = true; + shs->sysvals_need_upload = true; } } @@ -2236,7 +2248,7 @@ iris_set_tess_state(struct pipe_context *ctx, memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float)); ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS; - shs->cbuf0_needs_upload = true; + shs->sysvals_need_upload = true; } static void @@ -2254,11 +2266,16 @@ iris_set_clip_state(struct pipe_context *ctx, { struct iris_context *ice = (struct iris_context *) ctx; struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX]; + struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY]; + struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL]; memcpy(&ice->state.clip_planes, state, sizeof(*state)); - ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS; - shs->cbuf0_needs_upload = true; + ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS | + IRIS_DIRTY_CONSTANTS_TES; + shs->sysvals_need_upload = true; + gshs->sysvals_need_upload = true; + tshs->sysvals_need_upload = true; } /** @@ -2347,86 +2364,6 @@ viewport_extent(const struct pipe_viewport_state *state, int axis, float sign) return copysignf(state->scale[axis], sign) + state->translate[axis]; } -static void -calculate_guardband_size(uint32_t fb_width, uint32_t fb_height, - float m00, float m11, float m30, float m31, - float *xmin, float *xmax, - float *ymin, float *ymax) -{ - /* According to the "Vertex X,Y Clamping and Quantization" section of the - * Strips and Fans documentation: - * - * "The vertex X and Y screen-space coordinates are also /clamped/ to the - * fixed-point "guardband" range supported by the rasterization hardware" - * - * and - * - * "In almost all circumstances, if an object’s vertices are actually - * modified by this clamping (i.e., had X or Y coordinates outside of - * the guardband extent the rendered object will not match the intended - * result. Therefore software should take steps to ensure that this does - * not happen - e.g., by clipping objects such that they do not exceed - * these limits after the Drawing Rectangle is applied." - * - * I believe the fundamental restriction is that the rasterizer (in - * the SF/WM stages) have a limit on the number of pixels that can be - * rasterized. We need to ensure any coordinates beyond the rasterizer - * limit are handled by the clipper. So effectively that limit becomes - * the clipper's guardband size. - * - * It goes on to say: - * - * "In addition, in order to be correctly rendered, objects must have a - * screenspace bounding box not exceeding 8K in the X or Y direction. - * This additional restriction must also be comprehended by software, - * i.e., enforced by use of clipping." - * - * This makes no sense. Gen7+ hardware supports 16K render targets, - * and you definitely need to be able to draw polygons that fill the - * surface. 
Our assumption is that the rasterizer was limited to 8K - * on Sandybridge, which only supports 8K surfaces, and it was actually - * increased to 16K on Ivybridge and later. - * - * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge. - */ - const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f; - - if (m00 != 0 && m11 != 0) { - /* First, we compute the screen-space render area */ - const float ss_ra_xmin = MIN3( 0, m30 + m00, m30 - m00); - const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00); - const float ss_ra_ymin = MIN3( 0, m31 + m11, m31 - m11); - const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11); - - /* We want the guardband to be centered on that */ - const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size; - const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size; - const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size; - const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size; - - /* Now we need it in native device coordinates */ - const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00; - const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00; - const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11; - const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11; - - /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be - * flipped upside-down. X should be fine though. - */ - assert(ndc_gb_xmin <= ndc_gb_xmax); - *xmin = ndc_gb_xmin; - *xmax = ndc_gb_xmax; - *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax); - *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax); - } else { - /* The viewport scales to 0, so nothing will be rendered. */ - *xmin = 0.0f; - *xmax = 0.0f; - *ymin = 0.0f; - *ymax = 0.0f; - } -} - /** * The pipe->set_viewport_states() driver hook. * @@ -2473,6 +2410,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx, if (cso->samples != samples) { ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE; + + /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */ + if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16)) + ice->state.dirty |= IRIS_DIRTY_FS; } if (cso->nr_cbufs != state->nr_cbufs) { @@ -2487,6 +2428,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx, ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT; } + if (cso->zsbuf || state->zsbuf) { + ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER; + } + util_copy_framebuffer_state(cso, state); cso->samples = samples; cso->layers = layers; @@ -2524,7 +2469,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, if (iris_resource_level_has_hiz(zres, view.base_level)) { info.hiz_usage = ISL_AUX_USAGE_HIZ; info.hiz_surf = &zres->aux.surf; - info.hiz_address = zres->aux.bo->gtt_offset; + info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset; } } @@ -2552,8 +2497,6 @@ iris_set_framebuffer_state(struct pipe_context *ctx, ice->state.null_fb.offset += iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res)); - ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER; - /* Render target change */ ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS; @@ -2597,16 +2540,31 @@ iris_set_constant_buffer(struct pipe_context *ctx, struct iris_shader_state *shs = &ice->state.shaders[stage]; struct pipe_shader_buffer *cbuf = &shs->constbuf[index]; - if (input && input->buffer) { + if (input && input->buffer_size && (input->buffer || input->user_buffer)) { shs->bound_cbufs |= 1u << index; - assert(index > 0); + if (input->user_buffer) { + void *map = NULL; + pipe_resource_reference(&cbuf->buffer, NULL); + u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64, + 
&cbuf->buffer_offset, &cbuf->buffer, (void **) &map); + + if (!cbuf->buffer) { + /* Allocation was unsuccessful - just unbind */ + iris_set_constant_buffer(ctx, p_stage, index, NULL); + return; + } - pipe_resource_reference(&cbuf->buffer, input->buffer); - cbuf->buffer_offset = input->buffer_offset; - cbuf->buffer_size = - MIN2(input->buffer_size, - iris_resource_bo(input->buffer)->size - cbuf->buffer_offset); + assert(map); + memcpy(map, input->user_buffer, input->buffer_size); + } else if (input->buffer) { + pipe_resource_reference(&cbuf->buffer, input->buffer); + + cbuf->buffer_offset = input->buffer_offset; + cbuf->buffer_size = + MIN2(input->buffer_size, + iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset); + } struct iris_resource *res = (void *) cbuf->buffer; res->bind_history |= PIPE_BIND_CONSTANT_BUFFER; @@ -2620,15 +2578,6 @@ iris_set_constant_buffer(struct pipe_context *ctx, pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL); } - if (index == 0) { - if (input) - memcpy(&shs->cbuf0, input, sizeof(shs->cbuf0)); - else - memset(&shs->cbuf0, 0, sizeof(shs->cbuf0)); - - shs->cbuf0_needs_upload = true; - } - ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage; // XXX: maybe not necessary all the time...? // XXX: we need 3DS_BTP to commit these changes, and if we fell back to @@ -2637,21 +2586,24 @@ iris_set_constant_buffer(struct pipe_context *ctx, } static void -upload_uniforms(struct iris_context *ice, +upload_sysvals(struct iris_context *ice, gl_shader_stage stage) { UNUSED struct iris_genx_state *genx = ice->state.genx; struct iris_shader_state *shs = &ice->state.shaders[stage]; - struct pipe_shader_buffer *cbuf = &shs->constbuf[0]; - struct iris_compiled_shader *shader = ice->shaders.prog[stage]; - - unsigned upload_size = shader->num_system_values * sizeof(uint32_t) + - shs->cbuf0.buffer_size; - if (upload_size == 0) + struct iris_compiled_shader *shader = ice->shaders.prog[stage]; + if (!shader || shader->num_system_values == 0) return; + assert(shader->num_cbufs > 0); + + unsigned sysval_cbuf_index = shader->num_cbufs - 1; + struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index]; + unsigned upload_size = shader->num_system_values * sizeof(uint32_t); uint32_t *map = NULL; + + assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS); u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64, &cbuf->buffer_offset, &cbuf->buffer, (void **) &map); @@ -2702,13 +2654,11 @@ upload_uniforms(struct iris_context *ice, *map++ = value; } - if (shs->cbuf0.user_buffer) { - memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size); - } - cbuf->buffer_size = upload_size; iris_upload_ubo_ssbo_surf_state(ice, cbuf, - &shs->constbuf_surf_state[0], false); + &shs->constbuf_surf_state[sysval_cbuf_index], false); + + shs->sysvals_need_upload = false; } /** @@ -3477,12 +3427,14 @@ iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice) static void iris_populate_vs_key(const struct iris_context *ice, const struct shader_info *info, + gl_shader_stage last_stage, struct brw_vs_prog_key *key) { const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; if (info->clip_distance_array_size == 0 && - (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX))) + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_VERTEX) key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } @@ -3500,8 +3452,16 @@ iris_populate_tcs_key(const struct iris_context *ice, */ static void 
iris_populate_tes_key(const struct iris_context *ice, + const struct shader_info *info, + gl_shader_stage last_stage, struct brw_tes_prog_key *key) { + const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; + + if (info->clip_distance_array_size == 0 && + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_TESS_EVAL) + key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3509,8 +3469,16 @@ iris_populate_tes_key(const struct iris_context *ice, */ static void iris_populate_gs_key(const struct iris_context *ice, + const struct shader_info *info, + gl_shader_stage last_stage, struct brw_gs_prog_key *key) { + const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; + + if (info->clip_distance_array_size == 0 && + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_GEOMETRY) + key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3518,6 +3486,7 @@ iris_populate_gs_key(const struct iris_context *ice, */ static void iris_populate_fs_key(const struct iris_context *ice, + const struct shader_info *info, struct brw_wm_prog_key *key) { struct iris_screen *screen = (void *) ice->ctx.screen; @@ -3534,8 +3503,8 @@ iris_populate_fs_key(const struct iris_context *ice, key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled; - /* XXX: only bother if COL0/1 are read */ - key->flat_shade = rast->flatshade; + key->flat_shade = rast->flatshade && + (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1)); key->persample_interp = rast->force_persample_interp; key->multisample_fbo = rast->multisample && fb->samples > 1; @@ -3546,7 +3515,6 @@ iris_populate_fs_key(const struct iris_context *ice, screen->driconf.dual_color_blend_by_location && (blend->blend_enables & 1) && blend->dual_color_blending; - /* TODO: support key->force_dual_color_blend for Unigine */ /* TODO: Respect glHint for key->high_quality_derivatives */ } @@ -3763,25 +3731,6 @@ iris_store_fs_state(struct iris_context *ice, */ ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? 
POSOFFSET_SAMPLE : POSOFFSET_NONE; - ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; - ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; - ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; - - // XXX: Disable SIMD32 with 16x MSAA - - ps.DispatchGRFStartRegisterForConstantSetupData0 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); - ps.DispatchGRFStartRegisterForConstantSetupData1 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); - ps.DispatchGRFStartRegisterForConstantSetupData2 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); - - ps.KernelStartPointer0 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); - ps.KernelStartPointer1 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); - ps.KernelStartPointer2 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); if (prog_data->total_scratch) { struct iris_bo *bo = @@ -3806,10 +3755,7 @@ iris_store_fs_state(struct iris_context *ice, #if GEN_GEN >= 9 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary; psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil; -#else - psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; #endif - // XXX: UAV bit } } @@ -4137,7 +4083,6 @@ iris_populate_binding_table(struct iris_context *ice, struct iris_shader_state *shs = &ice->state.shaders[stage]; uint32_t binder_addr = binder->bo->gtt_offset; - //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; uint32_t *bt_map = binder->map + binder->bt_offset[stage]; int s = 0; @@ -4395,13 +4340,7 @@ iris_restore_render_saved_bos(struct iris_context *ice, pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa); } - if (draw->index_size == 0 && ice->state.last_res.index_buffer) { - /* This draw didn't emit a new index buffer, so we are inheriting the - * older index buffer. This draw didn't need it, but future ones may. - */ - struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer); - iris_use_pinned_bo(batch, bo, false); - } + iris_use_optional_res(batch, ice->state.last_res.index_buffer, false); if (clean & IRIS_DIRTY_VERTEX_BUFFERS) { uint64_t bound = ice->state.bound_vertex_buffers; @@ -4423,30 +4362,6 @@ iris_restore_compute_saved_bos(struct iris_context *ice, const int stage = MESA_SHADER_COMPUTE; struct iris_shader_state *shs = &ice->state.shaders[stage]; - if (clean & IRIS_DIRTY_CONSTANTS_CS) { - struct iris_compiled_shader *shader = ice->shaders.prog[stage]; - - if (shader) { - struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; - const struct brw_ubo_range *range = &prog_data->ubo_ranges[0]; - - if (range->length > 0) { - /* Range block is a binding table index, map back to UBO index. */ - unsigned block_index = iris_bti_to_group_index( - &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block); - assert(block_index != IRIS_SURFACE_NOT_USED); - - struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index]; - struct iris_resource *res = (void *) cbuf->buffer; - - if (res) - iris_use_pinned_bo(batch, res->bo, false); - else - iris_use_pinned_bo(batch, batch->screen->workaround_bo, false); - } - } - } - if (clean & IRIS_DIRTY_BINDINGS_CS) { /* Re-pin any buffers referred to by the binding table. 
*/ iris_populate_binding_table(ice, batch, stage, true); @@ -4456,6 +4371,13 @@ iris_restore_compute_saved_bos(struct iris_context *ice, if (sampler_res) iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false); + if ((clean & IRIS_DIRTY_SAMPLER_STATES_CS) && + (clean & IRIS_DIRTY_BINDINGS_CS) && + (clean & IRIS_DIRTY_CONSTANTS_CS) && + (clean & IRIS_DIRTY_CS)) { + iris_use_optional_res(batch, ice->state.last_res.cs_desc, false); + } + if (clean & IRIS_DIRTY_CS) { struct iris_compiled_shader *shader = ice->shaders.prog[stage]; @@ -4463,6 +4385,10 @@ iris_restore_compute_saved_bos(struct iris_context *ice, struct iris_bo *bo = iris_resource_bo(shader->assembly.res); iris_use_pinned_bo(batch, bo, false); + struct iris_bo *curbe_bo = + iris_resource_bo(ice->state.last_res.cs_thread_ids); + iris_use_pinned_bo(batch, curbe_bo, false); + struct brw_stage_prog_data *prog_data = shader->prog_data; if (prog_data->total_scratch > 0) { @@ -4495,6 +4421,18 @@ iris_update_surface_base_address(struct iris_batch *batch, batch->last_surface_base_address = binder->bo->gtt_offset; } +static inline void +iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz, + bool window_space_position, float *zmin, float *zmax) +{ + if (window_space_position) { + *zmin = 0.f; + *zmax = 1.f; + return; + } + util_viewport_zmin_zmax(vp, halfz, zmin, zmax); +} + static void iris_upload_dirty_render_state(struct iris_context *ice, struct iris_batch *batch, @@ -4522,8 +4460,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, GENX(CC_VIEWPORT_length), 32, &cc_vp_address); for (int i = 0; i < ice->state.num_viewports; i++) { float zmin, zmax; - util_viewport_zmin_zmax(&ice->state.viewports[i], - cso_rast->clip_halfz, &zmin, &zmax); + iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz, + ice->state.window_space_position, + &zmin, &zmax); if (cso_rast->depth_clip_near) zmin = 0.0; if (cso_rast->depth_clip_far) @@ -4560,10 +4499,10 @@ iris_upload_dirty_render_state(struct iris_context *ice, float vp_ymin = viewport_extent(state, 1, -1.0f); float vp_ymax = viewport_extent(state, 1, 1.0f); - calculate_guardband_size(cso_fb->width, cso_fb->height, - state->scale[0], state->scale[1], - state->translate[0], state->translate[1], - &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax); + gen_calculate_guardband_size(cso_fb->width, cso_fb->height, + state->scale[0], state->scale[1], + state->translate[0], state->translate[1], + &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax); iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) { vp.ViewportMatrixElementm00 = state->scale[0]; @@ -4682,8 +4621,8 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (!shader) continue; - if (shs->cbuf0_needs_upload) - upload_uniforms(ice, stage); + if (shs->sysvals_need_upload) + upload_sysvals(ice, stage); struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; @@ -4796,32 +4735,72 @@ iris_upload_dirty_render_state(struct iris_context *ice, iris_get_scratch_space(ice, prog_data->total_scratch, stage); iris_use_pinned_bo(batch, bo, true); } -#if GEN_GEN >= 9 - if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) { - uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; - uint32_t *shader_psx = ((uint32_t*)shader->derived_data) + - GENX(3DSTATE_PS_length); - struct iris_rasterizer_state *cso = ice->state.cso_rast; - iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) { + if (stage == MESA_SHADER_FRAGMENT) { + UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast; + struct 
pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + + uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) { + ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; + ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; + ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; + + /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say: + * + * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, + * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch + * mode." + * + * 16x MSAA only exists on Gen9+, so we can skip this on Gen8. + */ + if (GEN_GEN >= 9 && cso_fb->samples == 16 && + !wm_prog_data->persample_dispatch) { + assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable); + ps._32PixelDispatchEnable = false; + } + + ps.DispatchGRFStartRegisterForConstantSetupData0 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); + ps.DispatchGRFStartRegisterForConstantSetupData1 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); + ps.DispatchGRFStartRegisterForConstantSetupData2 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); + + ps.KernelStartPointer0 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); + ps.KernelStartPointer1 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); + ps.KernelStartPointer2 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); + } + + uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) { +#if GEN_GEN >= 9 if (wm_prog_data->post_depth_coverage) psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; - else if (wm_prog_data->inner_coverage && cso->conservative_rasterization) + else if (wm_prog_data->inner_coverage && + cso->conservative_rasterization) psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE; else psx.InputCoverageMaskState = ICMS_NORMAL; +#else + psx.PixelShaderUsesInputCoverageMask = + wm_prog_data->uses_sample_mask; +#endif } - iris_batch_emit(batch, shader->derived_data, - sizeof(uint32_t) * GENX(3DSTATE_PS_length)); - iris_emit_merge(batch, - shader_psx, - psx_state, + uint32_t *shader_ps = (uint32_t *) shader->derived_data; + uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length); + iris_emit_merge(batch, shader_ps, ps_state, + GENX(3DSTATE_PS_length)); + iris_emit_merge(batch, shader_psx, psx_state, GENX(3DSTATE_PS_EXTRA_length)); - } else -#endif + } else { iris_batch_emit(batch, shader->derived_data, iris_derived_program_state_size(stage)); + } } else { if (stage == MESA_SHADER_TESS_EVAL) { iris_emit_cmd(batch, GENX(3DSTATE_HS), hs); @@ -4893,8 +4872,14 @@ iris_upload_dirty_render_state(struct iris_context *ice, uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)]; iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) { cl.StatisticsEnable = ice->state.statistics_counters_enabled; - cl.ClipMode = cso_rast->rasterizer_discard ? 
CLIPMODE_REJECT_ALL - : CLIPMODE_NORMAL; + if (cso_rast->rasterizer_discard) + cl.ClipMode = CLIPMODE_REJECT_ALL; + else if (ice->state.window_space_position) + cl.ClipMode = CLIPMODE_ACCEPT_ALL; + else + cl.ClipMode = CLIPMODE_NORMAL; + + cl.PerspectiveDivideDisable = ice->state.window_space_position; cl.ViewportXYClipTestEnable = !points_or_lines; if (wm_prog_data->barycentric_interp_modes & @@ -4911,8 +4896,13 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_RASTER) { struct iris_rasterizer_state *cso = ice->state.cso_rast; iris_batch_emit(batch, cso->raster, sizeof(cso->raster)); - iris_batch_emit(batch, cso->sf, sizeof(cso->sf)); + uint32_t dynamic_sf[GENX(3DSTATE_SF_length)]; + iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) { + sf.ViewportTransformEnable = !ice->state.window_space_position; + } + iris_emit_merge(batch, cso->sf, dynamic_sf, + ARRAY_SIZE(dynamic_sf)); } if (dirty & IRIS_DIRTY_WM) { @@ -5289,6 +5279,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } + if (ice->state.current_hash_scale != 1) + genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1); + /* TODO: Gen8 PMA fix */ } @@ -5329,13 +5322,21 @@ iris_upload_render_state(struct iris_context *ice, offset = 0; } + struct iris_genx_state *genx = ice->state.genx; struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer); - iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) { + uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)]; + iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) { ib.IndexFormat = draw->index_size >> 1; ib.MOCS = mocs(bo); ib.BufferSize = bo->size - offset; - ib.BufferStartingAddress = ro_bo(bo, offset); + ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset); + } + + if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) { + memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet)); + iris_batch_emit(batch, ib_packet, sizeof(ib_packet)); + iris_use_pinned_bo(batch, bo, false); } /* The VF cache key only uses 32-bits, see vertex buffer comment above */ @@ -5370,37 +5371,19 @@ iris_upload_render_state(struct iris_context *ice, PIPE_CONTROL_FLUSH_ENABLE); if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) { - static const uint32_t math[] = { - MI_MATH | (9 - 2), - /* Compute (draw index < draw count). - * We do this by subtracting and storing the carry bit. - */ - MI_ALU2(LOAD, SRCA, R0), - MI_ALU2(LOAD, SRCB, R1), - MI_ALU0(SUB), - MI_ALU2(STORE, R3, CF), - /* Compute (subtracting result & MI_PREDICATE). */ - MI_ALU2(LOAD, SRCA, R3), - MI_ALU2(LOAD, SRCB, R2), - MI_ALU0(AND), - MI_ALU2(STORE, R3, ACCU), - }; - - /* Upload the current draw count from the draw parameters - * buffer to GPR1. - */ - ice->vtbl.load_register_mem32(batch, CS_GPR(1), draw_count_bo, - draw_count_offset); - /* Zero the top 32-bits of GPR1. */ - ice->vtbl.load_register_imm32(batch, CS_GPR(1) + 4, 0); - /* Upload the id of the current primitive to GPR0. */ - ice->vtbl.load_register_imm64(batch, CS_GPR(0), draw->drawid); - - iris_batch_emit(batch, math, sizeof(math)); - - /* Store result of MI_MATH computations to MI_PREDICATE_RESULT. 
*/ - ice->vtbl.load_register_reg64(batch, - MI_PREDICATE_RESULT, CS_GPR(3)); + struct gen_mi_builder b; + gen_mi_builder_init(&b, batch); + + /* comparison = draw id < draw count */ + struct gen_mi_value comparison = + gen_mi_ult(&b, gen_mi_imm(draw->drawid), + gen_mi_mem32(ro_bo(draw_count_bo, + draw_count_offset))); + + /* predicate = comparison & conditional rendering predicate */ + gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT), + gen_mi_iand(&b, comparison, + gen_mi_reg32(CS_GPR(15)))); } else { uint32_t mi_predicate; @@ -5477,15 +5460,16 @@ iris_upload_render_state(struct iris_context *ice, "draw count from stream output stall", PIPE_CONTROL_CS_STALL); - iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { - lrm.RegisterAddress = CS_GPR(0); - lrm.MemoryAddress = - ro_bo(iris_resource_bo(so->offset.res), so->offset.offset); - } - if (so->base.buffer_offset) - iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset); - iris_math_div32_gpr0(ice, batch, so->stride); - _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0)); + struct gen_mi_builder b; + gen_mi_builder_init(&b, batch); + + struct iris_address addr = + ro_bo(iris_resource_bo(so->offset.res), so->offset.offset); + struct gen_mi_value offset = + gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset); + + gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT), + gen_mi_udiv32_imm(&b, offset, so->stride)); _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0); _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0); @@ -5504,7 +5488,6 @@ iris_upload_render_state(struct iris_context *ice, prim.InstanceCount = draw->instance_count; prim.VertexCountPerInstance = draw->count; - // XXX: this is probably bonkers. prim.StartVertexLocation = draw->start; if (draw->index_size) { @@ -5512,8 +5495,6 @@ iris_upload_render_state(struct iris_context *ice, } else { prim.StartVertexLocation += draw->index_bias; } - - //prim.BaseVertexLocation = ...; } } } @@ -5540,8 +5521,8 @@ iris_upload_compute_state(struct iris_context *ice, */ iris_use_pinned_bo(batch, ice->state.binder.bo, false); - if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload) - upload_uniforms(ice, MESA_SHADER_COMPUTE); + if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->sysvals_need_upload) + upload_sysvals(ice, MESA_SHADER_COMPUTE); if (dirty & IRIS_DIRTY_BINDINGS_CS) iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false); @@ -5596,20 +5577,20 @@ iris_upload_compute_state(struct iris_context *ice, } /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */ - uint32_t curbe_data_offset = 0; - assert(cs_prog_data->push.cross_thread.dwords == 0 && - cs_prog_data->push.per_thread.dwords == 1 && - cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID); - struct pipe_resource *curbe_data_res = NULL; - uint32_t *curbe_data_map = - stream_state(batch, ice->state.dynamic_uploader, &curbe_data_res, - ALIGN(cs_prog_data->push.total.size, 64), 64, - &curbe_data_offset); - assert(curbe_data_map); - memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64)); - iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map); - - if (dirty & IRIS_DIRTY_CONSTANTS_CS) { + if (dirty & IRIS_DIRTY_CS) { + uint32_t curbe_data_offset = 0; + assert(cs_prog_data->push.cross_thread.dwords == 0 && + cs_prog_data->push.per_thread.dwords == 1 && + cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID); + uint32_t *curbe_data_map = + stream_state(batch, ice->state.dynamic_uploader, + &ice->state.last_res.cs_thread_ids, + 
ALIGN(cs_prog_data->push.total.size, 64), 64, + &curbe_data_offset); + assert(curbe_data_map); + memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64)); + iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map); + iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) { curbe.CURBETotalDataLength = ALIGN(cs_prog_data->push.total.size, 64); @@ -5621,7 +5602,6 @@ iris_upload_compute_state(struct iris_context *ice, IRIS_DIRTY_BINDINGS_CS | IRIS_DIRTY_CONSTANTS_CS | IRIS_DIRTY_CS)) { - struct pipe_resource *desc_res = NULL; uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)]; iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) { @@ -5637,10 +5617,8 @@ iris_upload_compute_state(struct iris_context *ice, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t); load.InterfaceDescriptorDataStartAddress = emit_state(batch, ice->state.dynamic_uploader, - &desc_res, desc, sizeof(desc), 32); + &ice->state.last_res.cs_desc, desc, sizeof(desc), 64); } - - pipe_resource_reference(&desc_res, NULL); } uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2]; @@ -5702,6 +5680,9 @@ iris_destroy_state(struct iris_context *ice) { struct iris_genx_state *genx = ice->state.genx; + pipe_resource_reference(&ice->draw.draw_params_res, NULL); + pipe_resource_reference(&ice->draw.derived_draw_params_res, NULL); + uint64_t bound_vbs = ice->state.bound_vertex_buffers; while (bound_vbs) { const int i = u_bit_scan64(&bound_vbs); @@ -5709,6 +5690,10 @@ iris_destroy_state(struct iris_context *ice) } free(ice->state.genx); + for (int i = 0; i < 4; i++) { + pipe_so_target_reference(&ice->state.so_target[i], NULL); + } + for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) { pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL); } @@ -5747,6 +5732,8 @@ iris_destroy_state(struct iris_context *ice) pipe_resource_reference(&ice->state.last_res.scissor, NULL); pipe_resource_reference(&ice->state.last_res.blend, NULL); pipe_resource_reference(&ice->state.last_res.index_buffer, NULL); + pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL); + pipe_resource_reference(&ice->state.last_res.cs_desc, NULL); } /* ------------------------------------------------------------------- */ @@ -5791,8 +5778,10 @@ iris_rebind_buffer(struct iris_context *ice, } } - /* No need to handle these: - * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw) + /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit + * the 3DSTATE_INDEX_BUFFER packet whenever the address changes. + * + * There is also no need to handle these: * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw) * - PIPE_BIND_QUERY_BUFFER (no persistent state references) */ @@ -6543,6 +6532,119 @@ gen9_toggle_preemption(struct iris_context *ice, } #endif +static void +iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch) +{ + struct iris_genx_state *genx = ice->state.genx; + + memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer)); +} + +static void +iris_emit_mi_report_perf_count(struct iris_batch *batch, + struct iris_bo *bo, + uint32_t offset_in_bytes, + uint32_t report_id) +{ + iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) { + mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes); + mi_rpc.ReportID = report_id; + } +} + +/** + * Update the pixel hashing modes that determine the balancing of PS threads + * across subslices and slices. + * + * \param width Width bound of the rendering area (already scaled down if \p + * scale is greater than 1). 
+ * \param height Height bound of the rendering area (already scaled down if \p
+ *               scale is greater than 1).
+ * \param scale The number of framebuffer samples that could potentially be
+ *              affected by an individual channel of the PS thread.  This is
+ *              typically one for single-sampled rendering, but for operations
+ *              like CCS resolves and fast clears a single PS invocation may
+ *              update a huge number of pixels, in which case a finer
+ *              balancing is desirable in order to maximally utilize the
+ *              bandwidth available.  UINT_MAX can be used as shorthand for
+ *              "finest hashing mode available".
+ */
+void
+genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
+                        unsigned width, unsigned height, unsigned scale)
+{
+#if GEN_GEN == 9
+   const struct gen_device_info *devinfo = &batch->screen->devinfo;
+   const unsigned slice_hashing[] = {
+      /* Because all Gen9 platforms with more than one slice require
+       * three-way subslice hashing, a single "normal" 16x16 slice hashing
+       * block is guaranteed to suffer from substantial imbalance, with one
+       * subslice receiving twice as much work as the other two in the
+       * slice.
+       *
+       * The performance impact of that would be particularly severe when
+       * three-way hashing is also in use for slice balancing (which is the
+       * case for all Gen9 GT4 platforms), because one of the slices
+       * receives one every three 16x16 blocks in either direction, which
+       * is roughly the periodicity of the underlying subslice imbalance
+       * pattern ("roughly" because in reality the hardware's
+       * implementation of three-way hashing doesn't do exact modulo 3
+       * arithmetic, which somewhat decreases the magnitude of this effect
+       * in practice).  This leads to a systematic subslice imbalance
+       * within that slice regardless of the size of the primitive.  The
+       * 32x32 hashing mode guarantees that the subslice imbalance within a
+       * single slice hashing block is minimal, largely eliminating this
+       * effect.
+       */
+      _32x32,
+      /* Finest slice hashing mode available. */
+      NORMAL
+   };
+   const unsigned subslice_hashing[] = {
+      /* 16x16 would provide a slight cache locality benefit especially
+       * visible in the sampler L1 cache efficiency of low-bandwidth
+       * non-LLC platforms, but it comes at the cost of greater subslice
+       * imbalance for primitives of dimensions approximately intermediate
+       * between 16x4 and 16x16.
+       */
+      _16x4,
+      /* Finest subslice hashing mode available. */
+      _8x4
+   };
+   /* Dimensions of the smallest hashing block of a given hashing mode.  If
+    * the rendering area is smaller than this there can't possibly be any
+    * benefit from switching to this mode, so we optimize out the
+    * transition.
+    */
+   const unsigned min_size[][2] = {
+      { 16, 4 },
+      {  8, 4 }
+   };
+   const unsigned idx = scale > 1;
+
+   if (width > min_size[idx][0] || height > min_size[idx][1]) {
+      uint32_t gt_mode;
+
+      iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
+         reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
+         reg.SliceHashingMask = (devinfo->num_slices > 1 ?
-1 : 0); + reg.SubsliceHashing = subslice_hashing[idx]; + reg.SubsliceHashingMask = -1; + }; + + iris_emit_raw_pipe_control(batch, + "workaround: CS stall before GT_MODE LRI", + PIPE_CONTROL_STALL_AT_SCOREBOARD | + PIPE_CONTROL_CS_STALL, + NULL, 0, 0); + + iris_emit_lri(batch, GT_MODE, gt_mode); + + ice->state.current_hash_scale = scale; + } +#endif +} + void genX(init_state)(struct iris_context *ice) { @@ -6595,6 +6697,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.update_surface_base_address = iris_update_surface_base_address; ice->vtbl.upload_compute_state = iris_upload_compute_state; ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control; + ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count; ice->vtbl.rebind_buffer = iris_rebind_buffer; ice->vtbl.load_register_reg32 = iris_load_register_reg32; ice->vtbl.load_register_reg64 = iris_load_register_reg64; @@ -6617,6 +6720,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.populate_fs_key = iris_populate_fs_key; ice->vtbl.populate_cs_key = iris_populate_cs_key; ice->vtbl.mocs = mocs; + ice->vtbl.lost_genx_state = iris_lost_genx_state; ice->state.dirty = ~0ull;