Skip to content

Rename oneMKL interfaces to oneMath #2487

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Jun 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Test oneMKL interfaces
name: Test oneAPI Math Library (oneMath)

on:
push:
Expand All @@ -15,7 +15,7 @@ env:
build-with-oneapi-env: 'environments/build_with_oneapi.yml'
dpctl-pkg-env: 'environments/dpctl_pkg.yml'
oneapi-pkgs-env: 'environments/oneapi_pkgs.yml'
test-env-name: 'test_onemkl_interfaces'
test-env-name: 'test_onemath'
rerun-tests-on-failure: 'true'
rerun-tests-max-attempts: 2
rerun-tests-timeout: 20
Expand Down Expand Up @@ -58,7 +58,7 @@ jobs:
path: ${{ env.environment-file }}

test_by_tag:
name: Run tests with oneMKL tag
name: Run tests with oneMath tag

needs: build_env_file

Expand Down Expand Up @@ -116,7 +116,7 @@ jobs:

- name: Build and install DPNP package
run: |
python scripts/build_locally.py --onemkl-interfaces --verbose
python scripts/build_locally.py --onemath --verbose

- name: Smoke test
run: |
Expand Down Expand Up @@ -148,7 +148,7 @@ jobs:
SYCL_CACHE_PERSISTENT: 1

test_by_branch:
name: Run tests with oneMKL develop branch
name: Run tests with oneMath develop branch

needs: build_env_file

Expand Down Expand Up @@ -221,7 +221,7 @@ jobs:

- name: Build and install DPNP package
run: |
python scripts/build_locally.py --onemkl-interfaces --onemkl-interfaces-dir=${{ env.onemkl-source-dir }} --verbose
python scripts/build_locally.py --onemath --onemath-dir=${{ env.onemkl-source-dir }} --verbose

- name: Smoke test
run: |
Expand Down
4 changes: 3 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Adjusted the `pre-commit` configuration to run autoupdate weekly [#2479](https://github.com/IntelPython/dpnp/pull/2479)
* Improved validation of `--target-hip` build option to only accept a gfx-prefixed value [#2481](https://github.com/IntelPython/dpnp/pull/2481)
* Simplified backend implementation of `dpnp.kaiser` by getting rid of unnecessary template [#2472](https://github.com/IntelPython/dpnp/pull/2472)
* `--onemkl-interfaces` and `--onemkl-interfaces-dir` options for building script are deprecated, instead `--onemath` and `--onemath-dir` are introduced to be aligned with [oneMath specification](https://oneapi-spec.uxlfoundation.org/specifications/oneapi/latest/elements/onemath/source/) [#2487](https://github.com/IntelPython/dpnp/pull/2487)

### Deprecated

* `--onemkl-interfaces` and `--onemkl-interfaces-dir` options for building script are deprecated, instead `--onemath` and `--onemath-dir` are introduced to be aligned with [oneMath specification](https://oneapi-spec.uxlfoundation.org/specifications/oneapi/latest/elements/onemath/source/) [#2487](https://github.com/IntelPython/dpnp/pull/2487)

### Removed

* Cleaned up backend code to remove obsolete and unused parts of functionality [#2485](https://github.com/IntelPython/dpnp/pull/2485)
Expand All @@ -26,7 +29,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Security


## [0.18.1] - 2025-06-24

This release achieves `dpnp` compatibility with Python 3.13 and enables distributing `dpnp` packages with the latest Python version.
Expand Down
48 changes: 24 additions & 24 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,8 @@ find_package(Dpctl REQUIRED)
message(STATUS "Dpctl_INCLUDE_DIR=" ${Dpctl_INCLUDE_DIR})
message(STATUS "Dpctl_TENSOR_INCLUDE_DIR=" ${Dpctl_TENSOR_INCLUDE_DIR})

option(DPNP_USE_ONEMKL_INTERFACES
"Build DPNP with oneMKL Interfaces"
option(DPNP_USE_ONEMATH
"Build DPNP with oneMath"
OFF
)
set(DPNP_TARGET_CUDA
Expand All @@ -82,9 +82,9 @@ or to a specific architecture like sm_80."
set(HIP_TARGETS "" CACHE STRING "HIP architecture for target")

set(_dpnp_sycl_targets)
set(_use_onemkl_interfaces OFF)
set(_use_onemkl_interfaces_cuda OFF)
set(_use_onemkl_interfaces_hip OFF)
set(_use_onemath OFF)
set(_use_onemath_cuda OFF)
set(_use_onemath_hip OFF)

set(_dpnp_sycl_target_compile_options)
set(_dpnp_sycl_target_link_options)
Expand All @@ -103,7 +103,7 @@ if ("x${DPNP_SYCL_TARGETS}" STREQUAL "x")
)
endif()
set(_dpnp_sycl_targets "nvidia_gpu_${_dpnp_cuda_arch},spir64-unknown-unknown")
set(_use_onemkl_interfaces_cuda ON)
set(_use_onemath_cuda ON)
endif()

if (HIP_TARGETS)
Expand All @@ -113,7 +113,7 @@ if ("x${DPNP_SYCL_TARGETS}" STREQUAL "x")
else()
set(_dpnp_sycl_targets "amd_gpu_${HIP_TARGETS},${_dpnp_sycl_targets}")
endif()
set(_use_onemkl_interfaces_hip ON)
set(_use_onemath_hip ON)
else()
message(FATAL_ERROR
"Invalid value for HIP_TARGETS: \"${HIP_TARGETS}\". "
Expand All @@ -125,11 +125,11 @@ else()
set(_dpnp_sycl_targets ${DPNP_SYCL_TARGETS})

if("${DPNP_SYCL_TARGETS}" MATCHES "(nvidia_gpu_sm_|nvptx64-nvidia-cuda)")
set(_use_onemkl_interfaces_cuda ON)
set(_use_onemath_cuda ON)
endif()

if ("${DPNP_SYCL_TARGETS}" MATCHES "amd_gpu_")
set(_use_onemkl_interfaces_hip ON)
set(_use_onemath_hip ON)

if ("x${HIP_TARGETS}" STREQUAL "x")
message(FATAL_ERROR "HIP_TARGETS must be specified when using HIP backend")
Expand All @@ -150,35 +150,35 @@ if (_dpnp_sycl_targets)
list(APPEND _dpnp_sycl_target_link_options -fsycl-targets=${_dpnp_sycl_targets})
endif()

if(DPNP_USE_ONEMKL_INTERFACES)
set(_use_onemkl_interfaces ON)
if(DPNP_USE_ONEMATH)
set(_use_onemath ON)
else()
if(DEFINED ENV{DPNP_USE_ONEMKL_INTERFACES})
set(_use_onemkl_interfaces ON)
if(DEFINED ENV{DPNP_USE_ONEMATH})
set(_use_onemath ON)
endif()
endif()

if(_use_onemkl_interfaces)
if(_use_onemath)
set(BUILD_FUNCTIONAL_TESTS False)
set(BUILD_EXAMPLES False)
set(ENABLE_MKLGPU_BACKEND True)
set(ENABLE_MKLCPU_BACKEND True)

if(_use_onemkl_interfaces_cuda)
if(_use_onemath_cuda)
set(ENABLE_CUBLAS_BACKEND True)
set(ENABLE_CUSOLVER_BACKEND True)
set(ENABLE_CUFFT_BACKEND True)
# set(ENABLE_CURAND_BACKEND True)
endif()
if(_use_onemkl_interfaces_hip)
if(_use_onemath_hip)
set(ENABLE_ROCBLAS_BACKEND True)
set(ENABLE_ROCSOLVER_BACKEND True)
set(ENABLE_ROCFFT_BACKEND True)
# set(ENABLE_ROCRAND_BACKEND True)
endif()

if(DPNP_ONEMKL_INTERFACES_DIR)
FetchContent_Declare(onemath_library SOURCE_DIR "${DPNP_ONEMKL_INTERFACES_DIR}")
if(DPNP_ONEMATH_DIR)
FetchContent_Declare(onemath_library SOURCE_DIR "${DPNP_ONEMATH_DIR}")
else()
FetchContent_Declare(
onemath_library
Expand All @@ -189,19 +189,19 @@ if(_use_onemkl_interfaces)

FetchContent_MakeAvailable(onemath_library)
if(TARGET onemath)
set(MKL_INTERFACES_LIB "onemath" CACHE INTERNAL "OneMath lib target")
set(ONEMATH_LIB "onemath" CACHE INTERNAL "OneMath lib target")
elseif(TARGET onemkl)
set(MKL_INTERFACES_LIB "onemkl" CACHE INTERNAL "OneMKL lib target")
set(ONEMATH_LIB "onemkl" CACHE INTERNAL "OneMKL lib target")
else()
message(FATAL_ERROR "Neither 'oneMath' nor 'oneMKL' found!")
endif()
message(STATUS "MKL interfaces lib target used: ${MKL_INTERFACES_LIB}")
message(STATUS "OneMath lib target used: ${ONEMATH_LIB}")
set(CMAKE_INSTALL_RPATH "${CMAKE_BINARY_DIR}/lib")
else()
if(_use_onemkl_interfaces_cuda OR _use_onemkl_interfaces_hip)
if(_use_onemath_cuda OR _use_onemath_hip)
message(FATAL_ERROR
"CUDA or HIP targets are enabled, but oneMKL Interfaces are not. "
"Please set DPNP_USE_ONEMKL_INTERFACES=ON to enable them."
"CUDA or HIP targets are enabled, but oneMath is not. "
"Please set DPNP_USE_ONEMATH=ON to enable them."
)
endif()
endif()
Expand Down
10 changes: 5 additions & 5 deletions dpnp/backend/extensions/blas/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -83,11 +83,11 @@ if (DPNP_GENERATE_COVERAGE)
target_link_options(${python_module_name} PRIVATE -fprofile-instr-generate -fcoverage-mapping)
endif()

if(_use_onemkl_interfaces)
target_link_libraries(${python_module_name} PRIVATE ${MKL_INTERFACES_LIB})
target_compile_options(${python_module_name} PRIVATE -DUSE_ONEMKL_INTERFACES)
if(_use_onemkl_interfaces_cuda)
target_compile_options(${python_module_name} PRIVATE -DUSE_ONEMKL_CUBLAS)
if(_use_onemath)
target_link_libraries(${python_module_name} PRIVATE ${ONEMATH_LIB})
target_compile_options(${python_module_name} PRIVATE -DUSE_ONEMATH)
if(_use_onemath_cuda)
target_compile_options(${python_module_name} PRIVATE -DUSE_ONEMATH_CUBLAS)
endif()
else()
target_link_libraries(${python_module_name} PUBLIC MKL::MKL_SYCL::BLAS)
Expand Down
6 changes: 3 additions & 3 deletions dpnp/backend/extensions/blas/blas_py.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -143,14 +143,14 @@ PYBIND11_MODULE(_blas_impl, m)

{
m.def(
"_using_onemkl_interfaces",
"_using_onemath",
[]() {
#ifdef USE_ONEMKL_INTERFACES
#ifdef USE_ONEMATH
return true;
#else
return false;
#endif
},
"Check if the OneMKL interfaces are being used.");
"Check if OneMath is being used.");
}
}
20 changes: 10 additions & 10 deletions dpnp/backend/extensions/blas/gemm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,9 @@ typedef sycl::event (*gemm_impl_fn_ptr_t)(sycl::queue &,
const std::int64_t,
char *,
const std::int64_t,
#if !defined(USE_ONEMKL_CUBLAS)
#if !defined(USE_ONEMATH_CUBLAS)
const bool,
#endif // !USE_ONEMKL_CUBLAS
#endif // !USE_ONEMATH_CUBLAS
const std::vector<sycl::event> &);

static gemm_impl_fn_ptr_t gemm_dispatch_table[dpctl_td_ns::num_types]
Expand All @@ -76,9 +76,9 @@ static sycl::event gemm_impl(sycl::queue &exec_q,
const std::int64_t ldb,
char *resultC,
const std::int64_t ldc,
#if !defined(USE_ONEMKL_CUBLAS)
#if !defined(USE_ONEMATH_CUBLAS)
const bool is_row_major,
#endif // !USE_ONEMKL_CUBLAS
#endif // !USE_ONEMATH_CUBLAS
const std::vector<sycl::event> &depends)
{
type_utils::validate_type_for_device<Tab>(exec_q);
Expand All @@ -100,7 +100,7 @@ static sycl::event gemm_impl(sycl::queue &exec_q,
const Tab *a, const std::int64_t lda, const Tab *b,
const std::int64_t ldb, Tab beta, Tc *c, const std::int64_t ldc,
const std::vector<sycl::event> &deps) -> sycl::event {
#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
return mkl_blas::column_major::gemm(q, transA, transB, m, n, k,
alpha, a, lda, b, ldb, beta, c,
ldc, deps);
Expand All @@ -115,7 +115,7 @@ static sycl::event gemm_impl(sycl::queue &exec_q,
alpha, a, lda, b, ldb, beta,
c, ldc, deps);
}
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS
};
gemm_event = gemm_func(
exec_q,
Expand Down Expand Up @@ -241,7 +241,7 @@ std::tuple<sycl::event, sycl::event, bool>
std::int64_t ldb;

// cuBLAS supports only column-major storage
#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
const bool is_row_major = false;

transA = is_matrixA_c_contig ? oneapi::mkl::transpose::T
Expand Down Expand Up @@ -291,7 +291,7 @@ std::tuple<sycl::event, sycl::event, bool>
lda = m;
ldb = k;
}
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS

const std::int64_t ldc = is_row_major ? n : m;

Expand Down Expand Up @@ -320,15 +320,15 @@ std::tuple<sycl::event, sycl::event, bool>
const char *b_typeless_ptr = matrixB.get_data();
char *r_typeless_ptr = resultC.get_data();

#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
sycl::event gemm_ev =
gemm_fn(exec_q, transA, transB, m, n, k, a_typeless_ptr, lda,
b_typeless_ptr, ldb, r_typeless_ptr, ldc, depends);
#else
sycl::event gemm_ev = gemm_fn(exec_q, transA, transB, m, n, k,
a_typeless_ptr, lda, b_typeless_ptr, ldb,
r_typeless_ptr, ldc, is_row_major, depends);
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS

sycl::event args_ev = dpctl::utils::keep_args_alive(
exec_q, {matrixA, matrixB, resultC}, {gemm_ev});
Expand Down
20 changes: 10 additions & 10 deletions dpnp/backend/extensions/blas/gemm_batch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,9 @@ typedef sycl::event (*gemm_batch_impl_fn_ptr_t)(
const char *,
const char *,
char *,
#if !defined(USE_ONEMKL_CUBLAS)
#if !defined(USE_ONEMATH_CUBLAS)
const bool,
#endif // !USE_ONEMKL_CUBLAS
#endif // !USE_ONEMATH_CUBLAS
const std::vector<sycl::event> &);

static gemm_batch_impl_fn_ptr_t
Expand All @@ -85,9 +85,9 @@ static sycl::event gemm_batch_impl(sycl::queue &exec_q,
const char *matrixA,
const char *matrixB,
char *resultC,
#if !defined(USE_ONEMKL_CUBLAS)
#if !defined(USE_ONEMATH_CUBLAS)
const bool is_row_major,
#endif // !USE_ONEMKL_CUBLAS
#endif // !USE_ONEMATH_CUBLAS
const std::vector<sycl::event> &depends)
{
type_utils::validate_type_for_device<Tab>(exec_q);
Expand All @@ -112,7 +112,7 @@ static sycl::event gemm_batch_impl(sycl::queue &exec_q,
Tc *c, const std::int64_t ldc, const std::int64_t stridec,
const std::int64_t batch_size,
const std::vector<sycl::event> &deps) -> sycl::event {
#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
return mkl_blas::column_major::gemm_batch(
q, transA, transB, m, n, k, alpha, a, lda, stridea, b, ldb,
strideb, beta, c, ldc, stridec, batch_size, deps);
Expand All @@ -127,7 +127,7 @@ static sycl::event gemm_batch_impl(sycl::queue &exec_q,
q, transA, transB, m, n, k, alpha, a, lda, stridea, b, ldb,
strideb, beta, c, ldc, stridec, batch_size, deps);
}
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS
};
gemm_batch_event = gemm_batch_func(
exec_q,
Expand Down Expand Up @@ -316,7 +316,7 @@ std::tuple<sycl::event, sycl::event, bool>
std::int64_t ldb;

// cuBLAS supports only column-major storage
#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
const bool is_row_major = false;

transA = A_base_is_c_contig ? oneapi::mkl::transpose::T
Expand Down Expand Up @@ -367,7 +367,7 @@ std::tuple<sycl::event, sycl::event, bool>
lda = m;
ldb = k;
}
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS

const std::int64_t ldc = is_row_major ? n : m;

Expand Down Expand Up @@ -396,7 +396,7 @@ std::tuple<sycl::event, sycl::event, bool>
const char *b_typeless_ptr = matrixB.get_data();
char *r_typeless_ptr = resultC.get_data();

#if defined(USE_ONEMKL_CUBLAS)
#if defined(USE_ONEMATH_CUBLAS)
sycl::event gemm_batch_ev =
gemm_batch_fn(exec_q, m, n, k, batch_size, lda, ldb, ldc, stridea,
strideb, stridec, transA, transB, a_typeless_ptr,
Expand All @@ -406,7 +406,7 @@ std::tuple<sycl::event, sycl::event, bool>
gemm_batch_fn(exec_q, m, n, k, batch_size, lda, ldb, ldc, stridea,
strideb, stridec, transA, transB, a_typeless_ptr,
b_typeless_ptr, r_typeless_ptr, is_row_major, depends);
#endif // USE_ONEMKL_CUBLAS
#endif // USE_ONEMATH_CUBLAS

sycl::event args_ev = dpctl::utils::keep_args_alive(
exec_q, {matrixA, matrixB, resultC}, {gemm_batch_ev});
Expand Down
Loading
Loading