diff --git a/level_zero/core/source/context/context_imp.cpp b/level_zero/core/source/context/context_imp.cpp index 89cc58aed2af3..92be2db242307 100644 --- a/level_zero/core/source/context/context_imp.cpp +++ b/level_zero/core/source/context/context_imp.cpp @@ -1049,10 +1049,6 @@ ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, con if (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS) { auto deviceAllocCapabilities = memProp.deviceAllocCapabilities; - if (isSharedSystemAlloc) { - deviceAllocCapabilities = memProp.sharedSystemAllocCapabilities; - } - if (!(deviceAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC)) { return ZE_RESULT_ERROR_INVALID_ARGUMENT; } @@ -1060,9 +1056,6 @@ ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, con } if (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_HOST_ATOMICS) { auto hostAllocCapabilities = memProp.hostAllocCapabilities; - if (isSharedSystemAlloc) { - hostAllocCapabilities = memProp.sharedSystemAllocCapabilities; - } if (!(hostAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC)) { return ZE_RESULT_ERROR_INVALID_ARGUMENT; } @@ -1086,8 +1079,22 @@ ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, con mode = NEO::AtomicAccessMode::system; } - if ((attr == 0) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_HOST_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_DEVICE_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS)) { - + if (sharedSystemAllocEnabled) { + if (attr == 0) { + if (isSharedSystemAlloc) { + auto sharedSystemAllocCapabilities = memProp.sharedSystemAllocCapabilities; + if (!(sharedSystemAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC)) { + return ZE_RESULT_ERROR_INVALID_ARGUMENT; + } + } else { + auto deviceAllocCapabilities = memProp.deviceAllocCapabilities; + if (!(deviceAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC)) { + return ZE_RESULT_ERROR_INVALID_ARGUMENT; + } + } + } + mode = NEO::AtomicAccessMode::none; + } else if ((attr == 0) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_HOST_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_DEVICE_ATOMICS) || (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS)) { mode = NEO::AtomicAccessMode::none; } @@ -1095,15 +1102,17 @@ ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, con return ZE_RESULT_ERROR_INVALID_ARGUMENT; } - auto memoryManager = device->getDriverHandle()->getMemoryManager(); - if (isSharedSystemAlloc) { - - DeviceImp *deviceImp = static_cast((L0::Device::fromHandle(hDevice))); + if (sharedSystemAllocEnabled) { + // For BO this feature will be available in the future. Currently only supporting SVM madvise. 
+ if (allocData != nullptr) { + PRINT_DEBUG_STRING(NEO::debugManager.flags.PrintDebugMessages.get(), stderr, "BO madvise not supported"); + return ZE_RESULT_ERROR_INVALID_ARGUMENT; + } auto unifiedMemoryManager = driverHandle->getSvmAllocsManager(); - unifiedMemoryManager->sharedSystemAtomicAccess(*deviceImp->getNEODevice(), mode, ptr, size); } else { + auto memoryManager = device->getDriverHandle()->getMemoryManager(); auto alloc = allocData->gpuAllocations.getGraphicsAllocation(deviceImp->getRootDeviceIndex()); memoryManager->setAtomicAccess(alloc, size, mode, deviceImp->getRootDeviceIndex()); deviceImp->atomicAccessAllocations[allocData] = attr; @@ -1115,18 +1124,49 @@ ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, con ze_result_t ContextImp::getAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t *pAttr) { auto device = Device::fromHandle(hDevice); - auto allocData = device->getDriverHandle()->getSvmAllocsManager()->getSVMAlloc(ptr); - if (allocData == nullptr) { + const bool sharedSystemAllocEnabled = device->getNEODevice()->areSharedSystemAllocationsAllowed(); + + if (allocData == nullptr && !sharedSystemAllocEnabled) { return ZE_RESULT_ERROR_INVALID_ARGUMENT; } DeviceImp *deviceImp = static_cast((L0::Device::fromHandle(hDevice))); - if (deviceImp->atomicAccessAllocations.find(allocData) != deviceImp->atomicAccessAllocations.end()) { - *pAttr = deviceImp->atomicAccessAllocations[allocData]; - return ZE_RESULT_SUCCESS; + + if (sharedSystemAllocEnabled) { + // For BO this feature will be available in the future. Currently only supporting SVM madvise. + if (allocData != nullptr) { + PRINT_DEBUG_STRING(NEO::debugManager.flags.PrintDebugMessages.get(), stderr, "BO madvise not supported"); + return ZE_RESULT_ERROR_INVALID_ARGUMENT; + } + + auto unifiedMemoryManager = driverHandle->getSvmAllocsManager(); + auto mode = unifiedMemoryManager->getSharedSystemAtomicAccess(*deviceImp->getNEODevice(), ptr, size); + switch (mode) { + case NEO::AtomicAccessMode::device: + *pAttr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS; + break; + case NEO::AtomicAccessMode::host: + *pAttr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_HOST_ATOMICS; + break; + case NEO::AtomicAccessMode::system: + *pAttr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_SYSTEM_ATOMICS; + break; + case NEO::AtomicAccessMode::none: + *pAttr = 0; + break; + case NEO::AtomicAccessMode::invalid: + default: + return ZE_RESULT_ERROR_INVALID_ARGUMENT; + } + } else { + if (deviceImp->atomicAccessAllocations.find(allocData) != deviceImp->atomicAccessAllocations.end()) { + *pAttr = deviceImp->atomicAccessAllocations[allocData]; + return ZE_RESULT_SUCCESS; + } + return ZE_RESULT_ERROR_INVALID_ARGUMENT; } - return ZE_RESULT_ERROR_INVALID_ARGUMENT; + return ZE_RESULT_SUCCESS; } ze_result_t ContextImp::createModule(ze_device_handle_t hDevice, diff --git a/shared/source/memory_manager/memory_manager.h b/shared/source/memory_manager/memory_manager.h index f12185d3b5d27..3cf0917c239e7 100644 --- a/shared/source/memory_manager/memory_manager.h +++ b/shared/source/memory_manager/memory_manager.h @@ -283,7 +283,7 @@ class MemoryManager { virtual bool prefetchSharedSystemAlloc(const void *ptr, const size_t size, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) { return true; } virtual bool setAtomicAccess(GraphicsAllocation *gfxAllocation, size_t size, AtomicAccessMode mode, uint32_t rootDeviceIndex) { return true; } virtual bool setSharedSystemAtomicAccess(const void *ptr, const 
size_t size, AtomicAccessMode mode, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) { return true; } - + virtual AtomicAccessMode getSharedSystemAtomicAccess(const void *ptr, const size_t size, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) { return AtomicAccessMode::none; } bool isExternalAllocation(AllocationType allocationType); LocalMemoryUsageBankSelector *getLocalMemoryUsageBankSelector(AllocationType allocationType, uint32_t rootDeviceIndex); diff --git a/shared/source/memory_manager/unified_memory_manager.cpp b/shared/source/memory_manager/unified_memory_manager.cpp index 611f814cf3372..010991f776b35 100644 --- a/shared/source/memory_manager/unified_memory_manager.cpp +++ b/shared/source/memory_manager/unified_memory_manager.cpp @@ -1176,6 +1176,13 @@ void SVMAllocsManager::sharedSystemAtomicAccess(Device &device, AtomicAccessMode memoryManager->setSharedSystemAtomicAccess(ptr, size, mode, subDeviceIds, device.getRootDeviceIndex()); } +AtomicAccessMode SVMAllocsManager::getSharedSystemAtomicAccess(Device &device, const void *ptr, const size_t size) { + // All vm_ids on a single device for shared system USM allocation + auto subDeviceIds = NEO::SubDevice::getSubDeviceIdsFromDevice(device); + + return memoryManager->getSharedSystemAtomicAccess(ptr, size, subDeviceIds, device.getRootDeviceIndex()); +} + std::unique_lock SVMAllocsManager::obtainOwnership() { return std::unique_lock(mtxForIndirectAccess); } diff --git a/shared/source/memory_manager/unified_memory_manager.h b/shared/source/memory_manager/unified_memory_manager.h index 6372759d30be2..7a3ef9b5e738d 100644 --- a/shared/source/memory_manager/unified_memory_manager.h +++ b/shared/source/memory_manager/unified_memory_manager.h @@ -287,6 +287,7 @@ class SVMAllocsManager { MOCKABLE_VIRTUAL void prefetchMemory(Device &device, CommandStreamReceiver &commandStreamReceiver, const void *ptr, const size_t size); void prefetchSVMAllocs(Device &device, CommandStreamReceiver &commandStreamReceiver); void sharedSystemAtomicAccess(Device &device, AtomicAccessMode mode, const void *ptr, const size_t size); + AtomicAccessMode getSharedSystemAtomicAccess(Device &device, const void *ptr, const size_t size); std::unique_lock obtainOwnership(); std::map indirectAllocationsResidency; diff --git a/shared/source/os_interface/linux/drm_memory_manager.cpp b/shared/source/os_interface/linux/drm_memory_manager.cpp index 49926ee176877..aa491c525f11c 100644 --- a/shared/source/os_interface/linux/drm_memory_manager.cpp +++ b/shared/source/os_interface/linux/drm_memory_manager.cpp @@ -299,28 +299,26 @@ bool DrmMemoryManager::setSharedSystemMemAdvise(const void *ptr, const size_t si auto &drm = this->getDrm(rootDeviceIndex); auto ioctlHelper = drm.getIoctlHelper(); - uint32_t attribute = 0; + uint32_t attribute = ioctlHelper->getPreferredLocationAdvise(); uint64_t param = 0; + uint64_t preferredLocation = 0; + uint64_t policy = 0; + switch (memAdviseOp) { case MemAdvise::setPreferredLocation: - attribute = ioctlHelper->getPreferredLocationAdvise(); - param = (static_cast(-1) << 32) //-1 as currently not supported and ignored. This will be useful in multi device settings. - | static_cast(ioctlHelper->getDrmParamValue(DrmParam::memoryClassDevice)); - break; case MemAdvise::clearPreferredLocation: - // Assumes that the default location is VRAM, i.e. 
1 == DrmParam::memoryClassDevice - attribute = ioctlHelper->getPreferredLocationAdvise(); - param = (static_cast<uint64_t>(-1) << 32) | static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryClassDevice)); - break; - case MemAdvise::setSystemMemoryPreferredLocation: - attribute = ioctlHelper->getPreferredLocationAdvise(); - param = (static_cast<uint64_t>(-1) << 32) | static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryClassSystem)); - break; - case MemAdvise::clearSystemMemoryPreferredLocation: - attribute = ioctlHelper->getPreferredLocationAdvise(); - param = (static_cast<uint64_t>(-1) << 32) | static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryClassDevice)); - break; + case MemAdvise::clearSystemMemoryPreferredLocation: { + // Assumes that the default location is VRAM, i.e. 1 == DrmParam::memoryAdviseLocationDevice + preferredLocation = static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryAdviseLocationDevice)); + policy = static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryAdviseMigrationPolicyAllPages)); + param = (preferredLocation << 32) | policy; + } break; + case MemAdvise::setSystemMemoryPreferredLocation: { + preferredLocation = static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryAdviseLocationSystem)); + policy = static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::memoryAdviseMigrationPolicySystemPages)); + param = (preferredLocation << 32) | policy; + } break; default: return false; } @@ -341,29 +339,10 @@ bool DrmMemoryManager::setSharedSystemAtomicAccess(const void *ptr, const size_t auto &drm = this->getDrm(rootDeviceIndex); auto ioctlHelper = drm.getIoctlHelper(); - uint32_t attribute = 0; - uint64_t param = 0; + uint32_t attribute = ioctlHelper->getAtomicAdvise(false); + uint32_t atomicParam = ioctlHelper->getAtomicAccess(mode); - switch (mode) { - case AtomicAccessMode::device: - attribute = ioctlHelper->getAtomicAdvise(false); - param = (static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::atomicClassDevice)) << 32); - break; - case AtomicAccessMode::system: - attribute = ioctlHelper->getAtomicAdvise(false); - param = (static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::atomicClassGlobal)) << 32); - break; - case AtomicAccessMode::host: - attribute = ioctlHelper->getAtomicAdvise(false); - param = (static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::atomicClassSystem)) << 32); - break; - case AtomicAccessMode::none: - attribute = ioctlHelper->getAtomicAdvise(false); - param = (static_cast<uint64_t>(ioctlHelper->getDrmParamValue(DrmParam::atomicClassUndefined)) << 32); - break; - default: - return false; - } + uint64_t param = (static_cast<uint64_t>(atomicParam) << 32) | 0; // Apply the shared system USM IOCTL to all the VMs of the device std::vector<uint32_t> vmIds; vmIds.reserve(subDeviceIds.size()); for (auto subDeviceId : subDeviceIds) { vmIds.push_back(drm.getVirtualMemoryAddressSpace(subDeviceId)); } @@ -377,6 +356,23 @@ bool DrmMemoryManager::setSharedSystemAtomicAccess(const void *ptr, const size_t return result; } +AtomicAccessMode DrmMemoryManager::getSharedSystemAtomicAccess(const void *ptr, const size_t size, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) { + + auto &drm = this->getDrm(rootDeviceIndex); + auto ioctlHelper = drm.getIoctlHelper(); + + // Apply the shared system USM IOCTL to all the VMs of the device + std::vector<uint32_t> vmIds; + vmIds.reserve(subDeviceIds.size()); + for (auto subDeviceId : subDeviceIds) { + vmIds.push_back(drm.getVirtualMemoryAddressSpace(subDeviceId)); + } + + auto result = ioctlHelper->getVmSharedSystemAtomicAttribute(reinterpret_cast<uint64_t>(ptr), size, vmIds); + + return result; +} + bool DrmMemoryManager::setAtomicAccess(GraphicsAllocation *gfxAllocation, size_t size, AtomicAccessMode mode, 
uint32_t rootDeviceIndex) { auto drmAllocation = static_cast<DrmAllocation *>(gfxAllocation); diff --git a/shared/source/os_interface/linux/drm_memory_manager.h b/shared/source/os_interface/linux/drm_memory_manager.h index c7fb7060aea4d..71dd01bb210fa 100644 --- a/shared/source/os_interface/linux/drm_memory_manager.h +++ b/shared/source/os_interface/linux/drm_memory_manager.h @@ -92,6 +92,7 @@ class DrmMemoryManager : public MemoryManager { bool prefetchSharedSystemAlloc(const void *ptr, const size_t size, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) override; bool setAtomicAccess(GraphicsAllocation *gfxAllocation, size_t size, AtomicAccessMode mode, uint32_t rootDeviceIndex) override; bool setSharedSystemAtomicAccess(const void *ptr, const size_t size, AtomicAccessMode mode, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) override; + AtomicAccessMode getSharedSystemAtomicAccess(const void *ptr, const size_t size, SubDeviceIdsVec &subDeviceIds, uint32_t rootDeviceIndex) override; [[nodiscard]] std::unique_lock acquireAllocLock(); std::vector &getSysMemAllocs(); std::vector &getLocalMemAllocs(uint32_t rootDeviceIndex); diff --git a/shared/source/os_interface/linux/drm_wrappers.h b/shared/source/os_interface/linux/drm_wrappers.h index 996d2782b054f..8d058f9e3137c 100644 --- a/shared/source/os_interface/linux/drm_wrappers.h +++ b/shared/source/os_interface/linux/drm_wrappers.h @@ -324,6 +324,7 @@ enum class DrmIoctl { dg1GemCreateExt, gemCreateExt, gemVmAdvise, + gemVmGetMemRangeAttr, gemVmPrefetch, uuidRegister, uuidUnregister, @@ -371,6 +372,10 @@ enum class DrmParam { execRender, memoryClassDevice, memoryClassSystem, + memoryAdviseLocationDevice, + memoryAdviseLocationSystem, + memoryAdviseMigrationPolicyAllPages, + memoryAdviseMigrationPolicySystemPages, mmapOffsetWb, mmapOffsetWc, paramHasPooledEu, diff --git a/shared/source/os_interface/linux/ioctl_helper.h b/shared/source/os_interface/linux/ioctl_helper.h index 4f49ece523bd1..f704dfc6bc8ed 100644 --- a/shared/source/os_interface/linux/ioctl_helper.h +++ b/shared/source/os_interface/linux/ioctl_helper.h @@ -133,6 +133,7 @@ class IoctlHelper { virtual std::optional getPreferredLocationRegion(PreferredLocation memoryLocation, uint32_t memoryInstance) = 0; virtual bool setVmBoAdvise(int32_t handle, uint32_t attribute, void *region) = 0; virtual bool setVmSharedSystemMemAdvise(uint64_t handle, const size_t size, const uint32_t attribute, const uint64_t param, const std::vector<uint32_t> &vmIds) { return true; } + virtual AtomicAccessMode getVmSharedSystemAtomicAttribute(uint64_t handle, const size_t size, const std::vector<uint32_t> &vmIds) { return AtomicAccessMode::none; } virtual bool setVmBoAdviseForChunking(int32_t handle, uint64_t start, uint64_t length, uint32_t attribute, void *region) = 0; virtual bool setVmPrefetch(uint64_t start, uint64_t length, uint32_t region, uint32_t vmId) = 0; virtual bool setGemTiling(void *setTiling) = 0; diff --git a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp index 965ef06063106..6db3f1d12d013 100644 --- a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp +++ b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp @@ -76,6 +76,14 @@ const char *IoctlHelperXe::xeGetBindOperationName(int bindOperation) { } const char *IoctlHelperXe::xeGetAdviseOperationName(int adviseOperation) { + switch (adviseOperation) { + case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: + return "PREFERRED_LOC"; + case DRM_XE_MEM_RANGE_ATTR_ATOMIC: + return "ATOMIC"; + case 
DRM_XE_MEM_RANGE_ATTR_PAT: + return "PAT"; + } return "Unknown operation"; } @@ -754,17 +762,37 @@ int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address, uint32_t IoctlHelperXe::getAtomicAdvise(bool /* isNonAtomic */) { xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__); - return 0; + return DRM_XE_MEM_RANGE_ATTR_ATOMIC; } uint32_t IoctlHelperXe::getAtomicAccess(AtomicAccessMode mode) { xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__); - return 0; + + uint32_t retVal = 0; + switch (mode) { + case AtomicAccessMode::device: + retVal = static_cast<uint32_t>(this->getDrmParamValue(DrmParam::atomicClassDevice)); + break; + case AtomicAccessMode::system: + retVal = static_cast<uint32_t>(this->getDrmParamValue(DrmParam::atomicClassGlobal)); + break; + case AtomicAccessMode::host: + retVal = static_cast<uint32_t>(this->getDrmParamValue(DrmParam::atomicClassSystem)); + break; + case AtomicAccessMode::none: + retVal = static_cast<uint32_t>(this->getDrmParamValue(DrmParam::atomicClassUndefined)); + break; + default: + xeLog(" Invalid advise mode %s\n", __FUNCTION__); + break; + } + + return retVal; } uint32_t IoctlHelperXe::getPreferredLocationAdvise() { xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__); - return 0; + return DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC; } std::optional IoctlHelperXe::getPreferredLocationRegion(PreferredLocation memoryLocation, uint32_t memoryInstance) { @@ -791,10 +819,128 @@ bool IoctlHelperXe::setVmSharedSystemMemAdvise(uint64_t handle, const size_t siz } vmIdsStr += "]"; xeLog(" -> IoctlHelperXe::%s h=0x%x s=0x%lx vmids=%s\n", __FUNCTION__, handle, size, vmIdsStr.c_str()); - // There is no vmAdvise attribute in Xe, so return success + + drm_xe_madvise vmAdvise{}; + + vmAdvise.vm_id = 0; + vmAdvise.start = alignDown(handle, MemoryConstants::pageSize); + vmAdvise.range = alignSizeWholePage(reinterpret_cast<void *>(handle), size); + vmAdvise.type = attribute; + + if (attribute == this->getPreferredLocationAdvise()) { + uint32_t devmem_fd = static_cast<uint32_t>(param >> 32); + uint32_t migrationPolicy = static_cast<uint32_t>(param & 0xFFFFFFFF); + vmAdvise.preferred_mem_loc.devmem_fd = devmem_fd; + vmAdvise.preferred_mem_loc.migration_policy = migrationPolicy; + } else if (attribute == this->getAtomicAdvise(false)) { + uint32_t val = static_cast<uint32_t>(param >> 32); + vmAdvise.atomic.val = val; + } else { + return false; + } + + for (auto vmId : vmIds) { + + // Call madvise on all VM Ids. 
+ vmAdvise.vm_id = vmId; + auto ret = IoctlHelper::ioctl(DrmIoctl::gemVmAdvise, &vmAdvise); + + xeLog(" vm=%d start=0x%lx size=0x%lx param=0x%lx operation=%d(%s) ret=%d\n", + vmAdvise.vm_id, + vmAdvise.start, + vmAdvise.range, + param, + vmAdvise.type, + xeGetAdviseOperationName(vmAdvise.type), + ret); + + if (ret != 0) { + xeLog("error: %s ret=%d\n", xeGetAdviseOperationName(vmAdvise.type), ret); + return false; + } + } + return true; } +AtomicAccessMode IoctlHelperXe::getVmSharedSystemAtomicAttribute(uint64_t handle, const size_t size, const std::vector<uint32_t> &vmIds) { + std::string vmIdsStr = "["; + for (size_t i = 0; i < vmIds.size(); ++i) { + { + std::stringstream ss; + ss << std::hex << vmIds[i]; + vmIdsStr += "0x" + ss.str(); + } + if (i != vmIds.size() - 1) { + vmIdsStr += ", "; + } + } + vmIdsStr += "]"; + xeLog(" -> IoctlHelperXe::%s h=0x%x s=0x%lx vmids=%s\n", __FUNCTION__, handle, size, vmIdsStr.c_str()); + + drm_xe_vm_query_mem_range_attr query{}; + + query.vm_id = vmIds[0]; + query.start = handle; + query.range = size; + + // First ioctl call to get num of mem regions and sizeof each attribute + auto ret = IoctlHelper::ioctl(DrmIoctl::gemVmGetMemRangeAttr, &query); + + xeLog(" vm=%d start=0x%lx size=0x%lx num_mem_ranges=%d sizeof_mem_range_attr=%d ret=%d\n", + query.vm_id, query.start, query.range, query.num_mem_ranges, query.sizeof_mem_range_attr, ret); + + if (ret != 0) { + xeLog("error: %s ret=%d\n", "QUERY RANGE ATTR", ret); + return AtomicAccessMode::invalid; + } + + if (sizeof(drm_xe_mem_range_attr) != query.sizeof_mem_range_attr) { + xeLog("Error: sizeof(drm_xe_mem_range_attr) != query.sizeof_mem_range_attr\n"); + return AtomicAccessMode::invalid; + } + + if (query.num_mem_ranges > 1) { + xeLog("Error: More than one memory range found for vmId %d\n", query.vm_id); + return AtomicAccessMode::invalid; + } + + // Allocate buffer for the memory region attributes + void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr); + if (ptr == nullptr) { + xeLog("Error: malloc failed for memory region attributes\n"); + return AtomicAccessMode::invalid; + } + + query.vector_of_mem_attr = (uintptr_t)ptr; + + // Second ioctl call to actually fill the memory attributes + ret = IoctlHelper::ioctl(DrmIoctl::gemVmGetMemRangeAttr, &query); + + xeLog(" vm=%d start=0x%lx size=0x%lx num_mem_ranges=%d sizeof_mem_range_attr=%d ret=%d\n", + query.vm_id, query.start, query.range, query.num_mem_ranges, query.sizeof_mem_range_attr, ret); + + struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr; + uint32_t val = attr->atomic.val; + xeLog("Found atomic attribute: val=0x%x\n", val); + + free(ptr); + + int atomicValue = static_cast<int>(val); + if (atomicValue == this->getDrmParamValue(DrmParam::atomicClassDevice)) { + return AtomicAccessMode::device; + } else if (atomicValue == this->getDrmParamValue(DrmParam::atomicClassGlobal)) { + return AtomicAccessMode::system; + } else if (atomicValue == this->getDrmParamValue(DrmParam::atomicClassSystem)) { + return AtomicAccessMode::host; + } else if (atomicValue == this->getDrmParamValue(DrmParam::atomicClassUndefined)) { + return AtomicAccessMode::none; + } else { + xeLog("Unknown atomic access mode: 0x%x\n", val); + } + return AtomicAccessMode::invalid; +} + bool IoctlHelperXe::setVmBoAdviseForChunking(int32_t handle, uint64_t start, uint64_t length, uint32_t attribute, void *region) { xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__); // There is no vmAdvise attribute in Xe, so return success @@ -1070,17 +1216,25 @@ int 
IoctlHelperXe::getDrmParamValue(DrmParam drmParam) const { xeLog(" -> IoctlHelperXe::%s 0x%x %s\n", __FUNCTION__, drmParam, getDrmParamString(drmParam).c_str()); switch (drmParam) { case DrmParam::atomicClassUndefined: - return -1; + return DRM_XE_ATOMIC_UNDEFINED; case DrmParam::atomicClassDevice: - return -1; + return DRM_XE_ATOMIC_DEVICE; case DrmParam::atomicClassGlobal: - return -1; + return DRM_XE_ATOMIC_GLOBAL; case DrmParam::atomicClassSystem: - return -1; + return DRM_XE_ATOMIC_CPU; case DrmParam::memoryClassDevice: return DRM_XE_MEM_REGION_CLASS_VRAM; case DrmParam::memoryClassSystem: return DRM_XE_MEM_REGION_CLASS_SYSMEM; + case DrmParam::memoryAdviseLocationDevice: + return DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE; + case DrmParam::memoryAdviseLocationSystem: + return DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM; + case DrmParam::memoryAdviseMigrationPolicyAllPages: + return DRM_XE_MIGRATE_ALL_PAGES; + case DrmParam::memoryAdviseMigrationPolicySystemPages: + return DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES; case DrmParam::engineClassRender: return DRM_XE_ENGINE_CLASS_RENDER; case DrmParam::engineClassCopy: @@ -1748,6 +1902,10 @@ unsigned int IoctlHelperXe::getIoctlRequestValue(DrmIoctl ioctlRequest) const { RETURN_ME(DRM_IOCTL_XE_EXEC); case DrmIoctl::gemVmBind: RETURN_ME(DRM_IOCTL_XE_VM_BIND); + case DrmIoctl::gemVmAdvise: + RETURN_ME(DRM_IOCTL_XE_MADVISE); + case DrmIoctl::gemVmGetMemRangeAttr: + RETURN_ME(DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS); case DrmIoctl::query: RETURN_ME(DRM_IOCTL_XE_DEVICE_QUERY); case DrmIoctl::gemContextCreateExt: @@ -1803,6 +1961,8 @@ std::string IoctlHelperXe::getIoctlString(DrmIoctl ioctlRequest) const { STRINGIFY_ME(DRM_IOCTL_XE_GEM_MMAP_OFFSET); case DrmIoctl::gemCreate: STRINGIFY_ME(DRM_IOCTL_XE_GEM_CREATE); + case DrmIoctl::gemVmAdvise: + STRINGIFY_ME(DRM_IOCTL_XE_MADVISE); case DrmIoctl::gemExecbuffer2: STRINGIFY_ME(DRM_IOCTL_XE_EXEC); case DrmIoctl::gemVmBind: diff --git a/shared/source/os_interface/linux/xe/ioctl_helper_xe.h b/shared/source/os_interface/linux/xe/ioctl_helper_xe.h index d4655c2c99057..a711f99568a7e 100644 --- a/shared/source/os_interface/linux/xe/ioctl_helper_xe.h +++ b/shared/source/os_interface/linux/xe/ioctl_helper_xe.h @@ -61,6 +61,7 @@ class IoctlHelperXe : public IoctlHelper { std::optional getPreferredLocationRegion(PreferredLocation memoryLocation, uint32_t memoryInstance) override; bool setVmBoAdvise(int32_t handle, uint32_t attribute, void *region) override; bool setVmSharedSystemMemAdvise(uint64_t handle, const size_t size, const uint32_t attribute, const uint64_t param, const std::vector &vmIds) override; + AtomicAccessMode getVmSharedSystemAtomicAttribute(uint64_t handle, const size_t size, const std::vector &vmIds) override; bool setVmBoAdviseForChunking(int32_t handle, uint64_t start, uint64_t length, uint32_t attribute, void *region) override; bool setVmPrefetch(uint64_t start, uint64_t length, uint32_t region, uint32_t vmId) override; bool setGemTiling(void *setTiling) override; diff --git a/third_party/uapi/drm-next/.version b/third_party/uapi/drm-next/.version index d5fdfb6825960..4fa16a52bbb12 100644 --- a/third_party/uapi/drm-next/.version +++ b/third_party/uapi/drm-next/.version @@ -1,2 +1,3 @@ git_revision: b60301774a8fe6c30b14a95104ec099290a2e904 git_url: https://gitlab.freedesktop.org/drm/kernel.git +patch: https://lore.kernel.org/intel-xe/20250818215753.2762426-1-himal.prasad.ghimiray@intel.com/T/#m758572b8b93a525db7523255cd49e8c3c06bb6a8 \ No newline at end of file diff --git a/third_party/uapi/drm-next/xe/xe_drm.h 
b/third_party/uapi/drm-next/xe/xe_drm.h index 6308c0d69948f..6b03f319ab70f 100644 --- a/third_party/uapi/drm-next/xe/xe_drm.h +++ b/third_party/uapi/drm-next/xe/xe_drm.h @@ -3,8 +3,8 @@ * Copyright © 2023 Intel Corporation */ -#ifndef _XE_DRM_H_ -#define _XE_DRM_H_ +#ifndef _UAPI_XE_DRM_H_ +#define _UAPI_XE_DRM_H_ #include "drm.h" @@ -81,6 +81,8 @@ extern "C" { * - &DRM_IOCTL_XE_EXEC * - &DRM_IOCTL_XE_WAIT_USER_FENCE * - &DRM_IOCTL_XE_OBSERVATION + * - &DRM_IOCTL_XE_MADVISE + * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS */ /* @@ -102,6 +104,8 @@ extern "C" { #define DRM_XE_EXEC 0x09 #define DRM_XE_WAIT_USER_FENCE 0x0a #define DRM_XE_OBSERVATION 0x0b +#define DRM_XE_MADVISE 0x0c +#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d /* Must be kept compact -- no holes */ @@ -117,6 +121,8 @@ extern "C" { #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param) +#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise) +#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr) /** * DOC: Xe IOCTL Extensions @@ -134,7 +140,7 @@ extern "C" { * redefine the interface more easily than an ever growing struct of * increasing complexity, and for large parts of that interface to be * entirely optional. The downside is more pointer chasing; chasing across - * the boundary with pointers encapsulated inside u64. + * the __user boundary with pointers encapsulated inside u64. * * Example chaining: * @@ -760,7 +766,11 @@ struct drm_xe_device_query { * gem creation * * The @flags can be: - * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING + * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object + * allocation strategy by deferring physical memory allocation + * until the object is either bound to a virtual memory region via + * VM_BIND or accessed by the CPU. As a result, no backing memory is + * reserved at the time of GEM object creation. * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a * possible placement, ensure that the corresponding VRAM allocation @@ -917,13 +927,17 @@ struct drm_xe_gem_mmap_offset { * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE * * The @flags can be: - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address + * space of the VM to scratch page. A vm_bind would overwrite the scratch + * page mapping. This flag is mutually exclusive with the + * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and + * xe3 platform. * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts * exec submissions to its exec_queues that don't have an upper time * limit on the job execution time. But exec submissions to these - * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ, - * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF, - * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL. + * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ, + * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is, + * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL. 
* LR VMs can be created in recoverable page-fault mode using * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it. * If that flag is omitted, the UMD can not rely on the slightly @@ -999,6 +1013,10 @@ struct drm_xe_vm_destroy { * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO * handle MBZ, and the BO offset MBZ. + * + * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be: + * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in + * the memory region advised by madvise. */ struct drm_xe_vm_bind_op { /** @extensions: Pointer to the first extension struct, if any */ @@ -1104,6 +1122,7 @@ struct drm_xe_vm_bind_op { /** @flags: Bind flags */ __u32 flags; +#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1 /** * @prefetch_mem_region_instance: Memory region to prefetch VMA to. * It is a region instance, not a mask. @@ -1206,6 +1225,11 @@ struct drm_xe_vm_bind { * there is no need to explicitly set that. When a queue of type * %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session * (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running. + * The user is expected to query the PXP status via the query ioctl (see + * %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before + * attempting to create a queue with this property. When a queue is created + * before PXP is ready, the ioctl will return -EBUSY if init is still in + * progress or -EIO if init failed. * Given that going into a power-saving state kills PXP HWDRM sessions, * runtime PM will be blocked while queues of this type are alive. * All PXP queues will be killed if a PXP invalidation event occurs. @@ -1385,7 +1409,7 @@ struct drm_xe_sync { /** * @timeline_value: Input for the timeline sync object. Needs to be - * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ. + * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ. */ __u64 timeline_value; @@ -1608,6 +1632,9 @@ enum drm_xe_oa_unit_type { /** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */ DRM_XE_OA_UNIT_TYPE_OAM, + + /** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */ + DRM_XE_OA_UNIT_TYPE_OAM_SAG, }; /** @@ -1629,6 +1656,7 @@ struct drm_xe_oa_unit { #define DRM_XE_OA_CAPS_SYNCS (1 << 1) #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2) #define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3) +#define DRM_XE_OA_CAPS_OAM (1 << 4) /** @oa_timestamp_freq: OA timestamp freq */ __u64 oa_timestamp_freq; @@ -1961,8 +1989,271 @@ struct drm_xe_query_eu_stall { __u64 sampling_rates[]; }; +/** + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE + * + * This structure is used to set memory attributes for a virtual address range + * in a VM. The type of attribute is specified by @type, and the corresponding + * union member is used to provide additional parameters for @type. + * + * Supported attribute types: + * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location. + * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy. + * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index. + * + * Example: + * + * .. 
code-block:: C + * + * struct drm_xe_madvise madvise = { + *          .vm_id = vm_id, + *          .start = 0x100000, + *          .range = 0x2000, + *          .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC, + *         .atomic_val = DRM_XE_ATOMIC_DEVICE, + * }; + * + * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise); + * + */ +struct drm_xe_madvise { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @start: start of the virtual address range */ + __u64 start; + + /** @range: size of the virtual address range */ + __u64 range; + + /** @vm_id: vm_id of the virtual range */ + __u32 vm_id; + +#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0 +#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1 +#define DRM_XE_MEM_RANGE_ATTR_PAT 2 + /** @type: type of attribute */ + __u32 type; + + union { + /** + * @preferred_mem_loc: preferred memory location + * + * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC + * + * Supported values for @preferred_mem_loc.devmem_fd: + * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of faulting tile as preferred loc + * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc + * + * Supported values for @preferred_mem_loc.migration_policy: + * - DRM_XE_MIGRATE_ALL_PAGES + * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES + */ + struct { +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0 +#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1 + /** @preferred_mem_loc.devmem_fd: fd for preferred loc */ + __u32 devmem_fd; + +#define DRM_XE_MIGRATE_ALL_PAGES 0 +#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1 + /** @preferred_mem_loc.migration_policy: Page migration policy */ + __u16 migration_policy; + + /** @preferred_mem_loc.pad : MBZ */ + __u16 pad; + + /** @preferred_mem_loc.reserved : Reserved */ + __u64 reserved; + } preferred_mem_loc; + + /** + * @atomic: Atomic access policy + * + * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC. + * + * Supported values for @atomic.val: + * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour + * Support both GPU and CPU atomic operations for system allocator + * Support GPU atomic operations for normal(bo) allocator + * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations + * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations + * - DRM_XE_ATOMIC_CPU: Support CPU atomic + */ + struct { +#define DRM_XE_ATOMIC_UNDEFINED 0 +#define DRM_XE_ATOMIC_DEVICE 1 +#define DRM_XE_ATOMIC_GLOBAL 2 +#define DRM_XE_ATOMIC_CPU 3 + /** @atomic.val: value of atomic operation */ + __u32 val; + + /** @atomic.pad: MBZ */ + __u32 pad; + + /** @atomic.reserved: Reserved */ + __u64 reserved; + } atomic; + + /** + * @pat_index: Page attribute table index + * + * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT. + */ + struct { + /** @pat_index.val: PAT index value */ + __u32 val; + + /** @pat_index.pad: MBZ */ + __u32 pad; + + /** @pat_index.reserved: Reserved */ + __u64 reserved; + } pat_index; + }; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGES_ATTRS + * + * This structure is provided by userspace and filled by KMD in response to the + * DRM_IOCTL_XE_VM_QUERY_MEM_RANGES_ATTRS ioctl. It describes memory attributes of + * a memory ranges within a user specified address range in a VM. + * + * The structure includes information such as atomic access policy, + * page attribute table (PAT) index, and preferred memory location. 
+ * Userspace allocates an array of these structures and passes a pointer to the + * ioctl to retrieve attributes for each memory ranges + * + * @extensions: Pointer to the first extension struct, if any + * @start: Start address of the memory range + * @end: End address of the virtual memory range + * + */ +struct drm_xe_mem_range_attr { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @start: start of the memory range */ + __u64 start; + + /** @end: end of the memory range */ + __u64 end; + + /** @preferred_mem_loc: preferred memory location */ + struct { + /** @preferred_mem_loc.devmem_fd: fd for preferred loc */ + __u32 devmem_fd; + + /** @preferred_mem_loc.migration_policy: Page migration policy */ + __u32 migration_policy; + } preferred_mem_loc; + + /** * @atomic: Atomic access policy */ + struct { + /** @atomic.val: atomic attribute */ + __u32 val; + + /** @atomic.reserved: Reserved */ + __u32 reserved; + } atomic; + + /** @pat_index: Page attribute table index */ + struct { + /** @pat_index.val: PAT index */ + __u32 val; + + /** @pat_index.reserved: Reserved */ + __u32 reserved; + } pat_index; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_ATTRIBUTES + * + * This structure is used to query memory attributes of memory regions + * within a user specified address range in a VM. It provides detailed + * information about each memory range, including atomic access policy, + * page attribute table (PAT) index, and preferred memory location. + * + * Userspace first calls the ioctl with @num_mem_ranges = 0, + * @sizeof_mem_ranges_attr = 0 and @vector_of_vma_mem_attr = NULL to retrieve + * the number of memory regions and size of each memory range attribute. + * Then, it allocates a buffer of that size and calls the ioctl again to fill + * the buffer with memory range attributes. + * + * If second call fails with -ENOSPC, it means memory ranges changed between + * first call and now, retry IOCTL again with @num_mem_ranges = 0, + * @sizeof_mem_ranges_attr = 0 and @vector_of_vma_mem_attr = NULL followed by + * Second ioctl call. + * + * Example: + * + * .. 
code-block:: C + * struct drm_xe_vm_query_mem_range_attr query = { + * .vm_id = vm_id, + * .start = 0x100000, + * .range = 0x2000, + * }; + * + * // First ioctl call to get num of mem regions and sizeof each attribute + * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query); + * + * // Allocate buffer for the memory region attributes + * void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr); + * + * query.vector_of_mem_attr = (uintptr_t)ptr; + * + * // Second ioctl call to actually fill the memory attributes + * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query); + * + * // Iterate over the returned memory region attributes + * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) { + * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr; + * + * // Do something with attr + * + * // Move pointer by one entry + * ptr += query.sizeof_mem_range_attr; + * } + * + * free(ptr); + */ +struct drm_xe_vm_query_mem_range_attr { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @vm_id: vm_id of the virtual range */ + __u32 vm_id; + + /** @num_mem_ranges: number of mem_ranges in range */ + __u32 num_mem_ranges; + + /** @start: start of the virtual address range */ + __u64 start; + + /** @range: size of the virtual address range */ + __u64 range; + + /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */ + __u64 sizeof_mem_range_attr; + + /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */ + __u64 vector_of_mem_attr; + + /** @reserved: Reserved */ + __u64 reserved[2]; + +}; + #if defined(__cplusplus) } #endif -#endif /* _XE_DRM_H_ */ +#endif /* _UAPI_XE_DRM_H_ */
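
Usage note (not part of the diff): the shared-system atomic-attribute path added above in ContextImp::setAtomicAccessAttribute / getAtomicAccessAttribute backs the experimental Level Zero entry points zeMemSetAtomicAccessAttributeExp / zeMemGetAtomicAccessAttributeExp. The sketch below shows roughly how an application would drive it on a plain malloc range (the allocData == nullptr branch above); it assumes those entry points are exposed by the installed loader and that the device reports shared system allocations as supported, with setup and error handling reduced to a minimum.

```cpp
// Sketch: set and query the atomic access attribute on a shared system
// (plain malloc) range, i.e. the path added for shared system USM above.
#include <level_zero/ze_api.h>
#include <cassert>
#include <cstdlib>

int main() {
    assert(zeInit(0) == ZE_RESULT_SUCCESS);

    uint32_t count = 1;
    ze_driver_handle_t driver = nullptr;
    assert(zeDriverGet(&count, &driver) == ZE_RESULT_SUCCESS);

    count = 1;
    ze_device_handle_t device = nullptr;
    assert(zeDeviceGet(driver, &count, &device) == ZE_RESULT_SUCCESS);

    ze_context_desc_t ctxDesc = {ZE_STRUCTURE_TYPE_CONTEXT_DESC};
    ze_context_handle_t context = nullptr;
    assert(zeContextCreate(driver, &ctxDesc, &context) == ZE_RESULT_SUCCESS);

    // Shared system allocation: ordinary malloc memory, never registered
    // through zeMemAllocShared, so getSVMAlloc() returns nullptr for it.
    const size_t size = 4096;
    void *ptr = malloc(size);

    // Request device atomics on the range; on the Xe path this is expected to
    // end up in DRM_IOCTL_XE_MADVISE with DRM_XE_MEM_RANGE_ATTR_ATOMIC.
    ze_result_t res = zeMemSetAtomicAccessAttributeExp(
        context, device, ptr, size, ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS);

    // Read the attribute back; on the Xe path this is expected to use
    // DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS and map the atomic value to flags.
    ze_memory_atomic_attr_exp_flags_t attr = 0;
    if (res == ZE_RESULT_SUCCESS) {
        res = zeMemGetAtomicAccessAttributeExp(context, device, ptr, size, &attr);
    }

    free(ptr);
    zeContextDestroy(context);
    return res == ZE_RESULT_SUCCESS ? 0 : 1;
}
```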