  extends:
    - .use-x86_build-base
  variables:
-    FDO_DISTRIBUTION_TAG: &x86_build "2020-06-30"
+    FDO_DISTRIBUTION_TAG: &x86_build "2020-06-02"
.use-x86_build:
  variables:
  extends:
    - .use-x86_build-base
  variables:
-    FDO_DISTRIBUTION_TAG: &i386_build "2020-06-30"
+    FDO_DISTRIBUTION_TAG: &i386_build "2020-06-02"
.use-i386_build:
  variables:
  extends:
    - .use-x86_build-base
  variables:
-    FDO_DISTRIBUTION_TAG: &ppc64el_build "2020-06-30"
+    FDO_DISTRIBUTION_TAG: &ppc64el_build "2020-06-02"
.use-ppc64el_build:
  variables:
    - .fdo.container-build@debian@arm64v8
    - .container
  variables:
-    FDO_DISTRIBUTION_TAG: &arm_build "2020-07-10"
+    FDO_DISTRIBUTION_TAG: &arm_build "2020-07-07"
.use-arm_build:
  variables:
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
-export LIBDRM_VERSION=libdrm-2.4.102
+export LIBDRM_VERSION=libdrm-2.4.100
-wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
-tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
+wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.bz2
+tar -xvf $LIBDRM_VERSION.tar.bz2 && rm $LIBDRM_VERSION.tar.bz2
cd $LIBDRM_VERSION; meson build -D vc4=true -D freedreno=true -D etnaviv=true; ninja -C build install; cd ..
rm -rf $LIBDRM_VERSION
# dependencies where we want a specific version
-export LIBDRM_VERSION=libdrm-2.4.102
+export LIBDRM_VERSION=libdrm-2.4.100
-wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
-tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
+wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.bz2
+tar -xvf $LIBDRM_VERSION.tar.bz2 && rm $LIBDRM_VERSION.tar.bz2
cd $LIBDRM_VERSION
meson --cross-file=/cross_file-${arch}.txt build -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)
ninja -C build install
export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
-export LIBDRM_VERSION=libdrm-2.4.102
+export LIBDRM_VERSION=libdrm-2.4.100
export XCBPROTO_VERSION=xcb-proto-1.13
export LIBXCB_VERSION=libxcb-1.13
export LIBWAYLAND_VERSION=wayland-1.15.0
cd $LIBXCB_VERSION; ./configure; make install; cd ..
rm -rf $LIBXCB_VERSION
-wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
-tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
+wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.bz2
+tar -xvf $LIBDRM_VERSION.tar.bz2 && rm $LIBDRM_VERSION.tar.bz2
cd $LIBDRM_VERSION
meson build -D vc4=true -D freedreno=true -D etnaviv=true -D libdir=lib/x86_64-linux-gnu; ninja -C build install
cd ..
existing user memory into the device address space for direct device access.
The create function is pipe_screen::resource_from_user_memory. The address
and size must be page-aligned.
-* ``PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY``: Same as
- ``PIPE_CAP_RESOURCE_FROM_USER_MEMORY`` but indicates it is only supported from
- the compute engines.
* ``PIPE_CAP_DEVICE_RESET_STATUS_QUERY``:
Whether pipe_context::get_device_reset_status is implemented.
* ``PIPE_CAP_MAX_SHADER_PATCH_VARYINGS``:
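
For illustration only (not part of this change): a minimal sketch of how a
gallium frontend might use ``PIPE_CAP_RESOURCE_FROM_USER_MEMORY`` as documented
above. Only ``pipe_screen::get_param`` and
``pipe_screen::resource_from_user_memory`` come from the interface itself; the
helper name and the ``PIPE_BIND_GLOBAL`` choice are assumptions.

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

/* Sketch only: wrap page-aligned client memory in a PIPE_BUFFER resource. */
static struct pipe_resource *
create_buffer_from_user_memory(struct pipe_screen *screen,
                               void *user_ptr, unsigned size)
{
   struct pipe_resource templ = {0};

   if (!screen->get_param(screen, PIPE_CAP_RESOURCE_FROM_USER_MEMORY))
      return NULL; /* driver cannot access client memory directly */

   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.width0 = size;           /* size in bytes; must be page-aligned */
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;
   templ.bind = PIPE_BIND_GLOBAL; /* assumption: compute-style usage */

   /* user_ptr must be page-aligned as well */
   return screen->resource_from_user_memory(screen, &templ, user_ptr);
}

The resulting resource aliases the application's memory rather than copying it,
which is the property the removed COMPUTE_ONLY variant restricted to the
compute engines.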
_drm_amdgpu_ver = '2.4.100'
_drm_radeon_ver = '2.4.71'
-_drm_nouveau_ver = '2.4.102'
+_drm_nouveau_ver = '2.4.66'
_drm_intel_ver = '2.4.75'
_drm_ver = '2.4.81'
case PIPE_CAP_POLYGON_OFFSET_CLAMP:
case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
- case PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY:
case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
- assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
-
if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
buf->bo = NULL;
{
assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);
- assert(!(dst->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
- assert(!(src->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
-
if (likely(dst->domain) && likely(src->domain)) {
nv->copy_data(nv,
dst->bo, dst->offset + dstx, dst->domain,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
-   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) ||
-       unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_PTR))
+   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;
   if (res->domain == NOUVEAU_BO_VRAM) {
      return (uint8_t *)res->bo->map + res->offset + offset;
}
+
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
nouveau_buffer_transfer_unmap, /* transfer_unmap */
};
-static void
-nouveau_user_ptr_destroy(struct pipe_screen *pscreen,
-                         struct pipe_resource *presource)
-{
-   struct nv04_resource *res = nv04_resource(presource);
-   FREE(res);
-}
-
-static void *
-nouveau_user_ptr_transfer_map(struct pipe_context *pipe,
-                              struct pipe_resource *resource,
-                              unsigned level, unsigned usage,
-                              const struct pipe_box *box,
-                              struct pipe_transfer **ptransfer)
-{
-   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
-   if (!tx)
-      return NULL;
-   nouveau_buffer_transfer_init(tx, resource, box, usage);
-   *ptransfer = &tx->base;
-   return nv04_resource(resource)->data;
-}
-
-static void
-nouveau_user_ptr_transfer_unmap(struct pipe_context *pipe,
-                                struct pipe_transfer *transfer)
-{
-   struct nouveau_transfer *tx = nouveau_transfer(transfer);
-   FREE(tx);
-}
-
-const struct u_resource_vtbl nouveau_user_ptr_buffer_vtbl =
-{
-   u_default_resource_get_handle,   /* get_handle */
-   nouveau_user_ptr_destroy,        /* resource_destroy */
-   nouveau_user_ptr_transfer_map,   /* transfer_map */
-   u_default_transfer_flush_region, /* transfer_flush_region */
-   nouveau_user_ptr_transfer_unmap, /* transfer_unmap */
-};
-
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
const struct pipe_resource *templ)
return NULL;
}
-struct pipe_resource *
-nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
-                                const struct pipe_resource *templ,
-                                void *user_ptr)
-{
-   struct nv04_resource *buffer;
-
-   buffer = CALLOC_STRUCT(nv04_resource);
-   if (!buffer)
-      return NULL;
-
-   buffer->base = *templ;
-   buffer->vtbl = &nouveau_user_ptr_buffer_vtbl;
-   /* set address and data to the same thing for higher compatibility with
-    * existing code. It's correct nonetheless as the same pointer is equally
-    * valid on the CPU and the GPU.
-    */
-   buffer->address = (uint64_t)user_ptr;
-   buffer->data = user_ptr;
-   buffer->status = NOUVEAU_BUFFER_STATUS_USER_PTR;
-   buffer->base.screen = pscreen;
-
-   pipe_reference_init(&buffer->base.reference, 1);
-
-   return &buffer->base;
-}
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
nouveau_buffer_migrate(struct nouveau_context *nv,
struct nv04_resource *buf, const unsigned new_domain)
{
- assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
-
struct nouveau_screen *screen = nv->screen;
struct nouveau_bo *bo;
const unsigned old_domain = buf->domain;
struct nv04_resource *buf,
unsigned base, unsigned size)
{
- assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
-
struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
int ret;
struct nv04_resource *buf = nv04_resource(resource);
int ref = buf->base.reference.count - 1;
- assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
-
/* Shared buffers shouldn't get reallocated */
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return;
*
* USER_MEMORY: resource->data is a pointer to client memory and may change
* between GL calls
- *
- * USER_PTR: bo is backed by user memory mapped into the GPUs VM
*/
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 2)
-#define NOUVEAU_BUFFER_STATUS_USER_PTR (1 << 6)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
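
As an aside (illustration only, not part of this change), these are plain bit
flags on nv04_resource::status; two hypothetical helpers, assuming the
declarations from this header, show how they are meant to be tested:

/* Illustration only; the helper names are made up. */
static inline bool
nouveau_buffer_is_user_memory(const struct nv04_resource *buf)
{
   /* resource->data points at client memory (see the comment above) */
   return buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY;
}

static inline bool
nouveau_buffer_busy_on_gpu(const struct nv04_resource *buf)
{
   return buf->status & (NOUVEAU_BUFFER_STATUS_GPU_READING |
                         NOUVEAU_BUFFER_STATUS_GPU_WRITING);
}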
nouveau_buffer_create(struct pipe_screen *pscreen,
const struct pipe_resource *templ);
-struct pipe_resource *
-nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
- const struct pipe_resource *templ,
- void *user_ptr);
-
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
unsigned bytes, unsigned usage);
#include "util/format/u_format_s3tc.h"
#include "util/u_string.h"
-#include "os/os_mman.h"
#include "util/os_time.h"
#include <stdio.h>
#include <stdlib.h>
#include <nouveau_drm.h>
-#include <xf86drm.h>
#include "nouveau_winsys.h"
#include "nouveau_screen.h"
/* XXX this should go away */
#include "frontend/drm_driver.h"
-/* Even though GPUs might allow addresses with more bits, some engines do not.
- * Stick with 40 for compatibility.
- */
-#define NV_GENERIC_VM_LIMIT_SHIFT 39
-
int nouveau_mesa_debug = 0;
static const char *
cache_id, driver_flags);
}
-static void*
-reserve_vma(uintptr_t start, uint64_t reserved_size)
-{
-   void *reserved = os_mmap((void*)start, reserved_size, PROT_NONE,
-                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-   if (reserved == MAP_FAILED)
-      return NULL;
-   return reserved;
-}
-
int
nouveau_screen_init(struct nouveau_screen *screen, struct nouveau_device *dev)
{
size = sizeof(nvc0_data);
}
-   screen->has_svm = false;
-   /* we only care about HMM with OpenCL enabled */
-   if (dev->chipset > 0x130 && screen->force_enable_cl) {
-      /* Before being able to enable SVM we need to carve out some memory for
-       * driver bo allocations. Let's just base the size on the available VRAM.
-       *
-       * 40 bit is the biggest we care about and for 32 bit systems we don't
-       * want to allocate all of the available memory either.
-       *
-       * Also we align the size we want to reserve to the next POT to make use
-       * of hugepages.
-       */
-      const int vram_shift = util_logbase2_ceil64(dev->vram_size);
-      const int limit_bit =
-         MIN2(sizeof(void*) * 8 - 1, NV_GENERIC_VM_LIMIT_SHIFT);
-      screen->svm_cutout_size =
-         BITFIELD64_BIT(MIN2(sizeof(void*) == 4 ? 26 : NV_GENERIC_VM_LIMIT_SHIFT, vram_shift));
-
-      size_t start = screen->svm_cutout_size;
-      do {
-         screen->svm_cutout = reserve_vma(start, screen->svm_cutout_size);
-         if (!screen->svm_cutout) {
-            start += screen->svm_cutout_size;
-            continue;
-         }
-
-         struct drm_nouveau_svm_init svm_args = {
-            .unmanaged_addr = (uint64_t)screen->svm_cutout,
-            .unmanaged_size = screen->svm_cutout_size,
-         };
-
-         ret = drmCommandWrite(screen->drm->fd, DRM_NOUVEAU_SVM_INIT,
-                               &svm_args, sizeof(svm_args));
-         screen->has_svm = !ret;
-         if (!screen->has_svm)
-            os_munmap(screen->svm_cutout, screen->svm_cutout_size);
-         break;
-      } while ((start + screen->svm_cutout_size) < BITFIELD64_MASK(limit_bit));
-   }
-
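
For context (illustration only, not part of this revert): the cutout sizing
removed above reserves the smaller of the next power of two covering VRAM and
2^39 bytes (2^26 bytes on 32-bit hosts). A standalone restatement in plain C,
without the Mesa util macros and with hypothetical names:

#include <stdint.h>

/* e.g. 8 GiB of VRAM -> 8 GiB cutout on a 64-bit host, 64 MiB on 32-bit */
static uint64_t
svm_cutout_size_for(uint64_t vram_size, unsigned host_ptr_bits)
{
   unsigned vram_shift = 0;
   while (vram_shift < 63 && ((uint64_t)1 << vram_shift) < vram_size)
      vram_shift++;                              /* ceil(log2(vram_size)) */

   /* 39 mirrors NV_GENERIC_VM_LIMIT_SHIFT; 26 is the 32-bit cap above */
   unsigned limit = (host_ptr_bits == 32) ? 26 : 39;
   return (uint64_t)1 << (vram_shift < limit ? vram_shift : limit);
}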
/*
* Set default VRAM domain if not overridden
*/
ret = nouveau_object_new(&dev->object, 0, NOUVEAU_FIFO_CHANNEL_CLASS,
data, size, &screen->channel);
if (ret)
- goto err;
+ return ret;
ret = nouveau_client_new(screen->device, &screen->client);
if (ret)
- goto err;
+ return ret;
ret = nouveau_pushbuf_new(screen->client, screen->channel,
4, 512 * 1024, 1,
&screen->pushbuf);
if (ret)
- goto err;
+ return ret;
/* getting CPU time first appears to be more accurate */
screen->cpu_gpu_time_delta = os_time_get();
&mm_config);
screen->mm_VRAM = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, &mm_config);
return 0;
-
-err:
- if (screen->svm_cutout)
- os_munmap(screen->svm_cutout, screen->svm_cutout_size);
- return ret;
}
void
if (screen->force_enable_cl)
glsl_type_singleton_decref();
- if (screen->has_svm)
- os_munmap(screen->svm_cutout, screen->svm_cutout_size);
nouveau_mm_destroy(screen->mm_GART);
nouveau_mm_destroy(screen->mm_VRAM);
bool prefer_nir;
bool force_enable_cl;
- bool has_svm;
- void *svm_cutout;
- size_t svm_cutout_size;
#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
union {
return nvc0_miptree_surface_new(pipe, pres, templ);
}
-static struct pipe_resource *
-nvc0_resource_from_user_memory(struct pipe_screen *pipe,
-                               const struct pipe_resource *templ,
-                               void *user_memory)
-{
-   struct nouveau_screen *screen = nouveau_screen(pipe);
-
-   assert(screen->has_svm);
-   assert(templ->target == PIPE_BUFFER);
-
-   return nouveau_buffer_create_from_user(pipe, templ, user_memory);
-}
-
void
nvc0_init_resource_functions(struct pipe_context *pcontext)
{
pscreen->resource_from_handle = nvc0_resource_from_handle;
pscreen->resource_get_handle = u_resource_get_handle_vtbl;
pscreen->resource_destroy = u_resource_destroy_vtbl;
- pscreen->resource_from_user_memory = nvc0_resource_from_user_memory;
}
return class_3d >= GM200_3D_CLASS;
case PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_TRIANGLES:
return class_3d >= GP100_3D_CLASS;
- case PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY:
- case PIPE_CAP_SYSTEM_SVM:
- return screen->has_svm ? 1 : 0;
/* caps has to be turned on with nir */
case PIPE_CAP_GL_SPIRV:
//
// Another unsolvable scenario is a cl_mem object passed by cl_mem reference
// and SVM pointer into the same kernel at the same time.
- if (allows_user_pointers() && pipe->get_param(pipe, PIPE_CAP_SYSTEM_SVM))
+ if (pipe->get_param(pipe, PIPE_CAP_RESOURCE_FROM_USER_MEMORY) &&
+ pipe->get_param(pipe, PIPE_CAP_SYSTEM_SVM))
// we can emulate all lower levels if we support fine grain system
return CL_DEVICE_SVM_FINE_GRAIN_SYSTEM |
CL_DEVICE_SVM_COARSE_GRAIN_BUFFER |
return 0;
}
-bool
-device::allows_user_pointers() const {
- return pipe->get_param(pipe, PIPE_CAP_RESOURCE_FROM_USER_MEMORY) ||
- pipe->get_param(pipe, PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY);
-}
-
std::vector<size_t>
device::max_block_size() const {
auto v = get_compute_param<uint64_t>(pipe, ir_format(),
bool has_unified_memory() const;
size_t mem_base_addr_align() const;
cl_device_svm_capabilities svm_support() const;
- bool allows_user_pointers() const;
std::vector<size_t> max_block_size() const;
cl_uint subgroup_size() const;
command_queue &q, const std::string &data) :
resource(dev, obj) {
pipe_resource info {};
+ const bool user_ptr_support = dev.pipe->get_param(dev.pipe,
+ PIPE_CAP_RESOURCE_FROM_USER_MEMORY);
if (image *img = dynamic_cast<image *>(&obj)) {
info.format = translate_format(img->format());
PIPE_BIND_COMPUTE_RESOURCE |
PIPE_BIND_GLOBAL);
- if (obj.flags() & CL_MEM_USE_HOST_PTR && dev.allows_user_pointers()) {
+ if (obj.flags() & CL_MEM_USE_HOST_PTR && user_ptr_support) {
// Page alignment is normally required for this; just try, hope for the
// best, and fall back if it fails.
pipe = dev.pipe->resource_from_user_memory(dev.pipe, &info, obj.host_ptr());
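
For illustration only (not part of this change): the fast path above can only
work if the application hands in page-aligned host storage whose size is also
a multiple of the page size. A minimal sketch of the application side, using
plain POSIX calls (the helper name is made up):

#include <stdlib.h>
#include <unistd.h>

/* Allocate CL_MEM_USE_HOST_PTR storage that resource_from_user_memory can
 * plausibly import; anything less aligned takes the copy fallback mentioned
 * in the comment above.
 */
static void *
alloc_page_aligned(size_t size)
{
   void *ptr = NULL;
   size_t page = (size_t)sysconf(_SC_PAGESIZE);

   size = (size + page - 1) & ~(page - 1);   /* round size up to whole pages */
   if (posix_memalign(&ptr, page, size))
      return NULL;
   return ptr;
}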
PIPE_CAP_POLYGON_OFFSET_CLAMP,
PIPE_CAP_MULTISAMPLE_Z_RESOLVE,
PIPE_CAP_RESOURCE_FROM_USER_MEMORY,
- PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY,
PIPE_CAP_DEVICE_RESET_STATUS_QUERY,
PIPE_CAP_MAX_SHADER_PATCH_VARYINGS,
PIPE_CAP_TEXTURE_FLOAT_LINEAR,