Compare commits

...

154 commits

Author SHA1 Message Date
197be84693 a retarded goobus I am 2025-07-13 18:06:09 +00:00
7e3ee40b33 Update src/xmrig.cpp 2025-07-13 18:05:00 +00:00
XMRig 6e4a5a6d94 v6.24.0 2025-06-23 07:44:53 +07:00
XMRig 273133aa63 Merge branch 'dev' 2025-06-23 07:44:05 +07:00
xmrig c69e30c9a0 Update CHANGELOG.md 2025-06-23 05:39:26 +07:00
XMRig 6a690ba1e9 More DNS cleanup. 2025-06-20 23:45:53 +07:00
XMRig 545aef0937 v6.24.0-dev 2025-06-20 08:34:58 +07:00
xmrig 9fa66d3242 Merge pull request #3678 from xmrig/dns_ip_version: Improved IPv6 support. 2025-06-20 08:33:50 +07:00
XMRig ec286c7fef Improved IPv6 support. 2025-06-20 07:39:52 +07:00
xmrig e28d663d80 Merge pull request #3677 from SChernykh/dev: Tweaked autoconfig for AMD CPUs with < 2 MB L3 cache per thread, again (hopefully the last time) 2025-06-19 18:07:54 +07:00
SChernykh aba1ad8cfc Tweaked autoconfig for AMD CPUs with < 2 MB L3 cache per thread, again (hopefully the last time) 2025-06-19 12:58:31 +02:00
xmrig bf44ed52e9 Merge pull request #3674 from benthetechguy/armhf: cflags: Add lax-vector-conversions on ARMv7 2025-06-19 04:46:02 +07:00
Ben Westover 762c435fa8 cflags: Add lax-vector-conversions on ARMv7 (lax-vector-conversions is enabled in the CXXFLAGS but not CFLAGS for ARMv7; this commit adds it to CFLAGS, which fixes the ARMv7 build. Fixes: #3673) 2025-06-18 16:38:05 -04:00
xmrig 48faf0a11b Merge pull request #3671 from SChernykh/dev: Hwloc: fixed detection of L2 cache size for some complex NUMA topologies 2025-06-17 18:52:43 +07:00
SChernykh d125d22d27 Hwloc: fixed detection of L2 cache size for some complex NUMA topologies 2025-06-17 13:49:02 +02:00
XMRig 9f3591ae0d v6.23.1-dev 2025-06-16 21:29:17 +07:00
XMRig 6bbbcc71f1 Merge branch 'master' into dev 2025-06-16 21:28:48 +07:00
XMRig e5a7a69cc0 v6.23.0 2025-06-16 21:00:42 +07:00
XMRig f354b85a7b Merge branch 'dev' 2025-06-16 21:00:12 +07:00
xmrig 5ed8d79574 Update CHANGELOG.md 2025-06-16 20:46:33 +07:00
XMRig fc395a5800 Update ARM CPUs database. 2025-06-16 19:54:08 +07:00
XMRig 9138690126 v6.23.0-dev 2025-06-16 02:05:43 +07:00
XMRig d58061c903 Add detection for _aligned_malloc. 2025-06-15 20:06:19 +07:00
XMRig 3b863cf88f Fixed __umul128 for MSVC ARM64. 2025-06-15 04:58:03 +07:00
XMRig 9c7468df64 Fixed user agent string. 2025-06-15 00:21:23 +07:00
xmrig a18fa269a6 Merge pull request #3666 from SChernykh/dev: Better detection of aligned malloc functions 2025-06-14 23:09:05 +07:00
SChernykh bcc5581535 Better detection of aligned malloc functions 2025-06-14 18:00:27 +02:00
XMRig dba336aa04 Update hwloc for MSVC. 2025-06-14 22:11:33 +07:00
XMRig 3ff41f7c94 Fixed UTF-8 paths support for the config file with Clang compiler on Windows ARM64. 2025-06-14 15:38:25 +07:00
XMRig faa3d55123 Remove deprecated -Ofast for Clang. 2025-06-13 21:53:03 +07:00
XMRig 9e7cf69ac3 Detect CPU name and AES instructions on Windows ARM64. 2025-06-13 21:02:10 +07:00
XMRig 57a4998ae2 Fix Linux build. 2025-06-13 04:05:30 +07:00
XMRig 34b4448a81 Split BasicCpuInfo_arm. 2025-06-13 03:57:13 +07:00
XMRig 650d794fb1 Initial Windows ARM64 support via MSYS2. 2025-06-13 03:00:34 +07:00
XMRig 064a61988a Update deps scripts. 2025-06-12 00:52:49 +07:00
xmrig 2ab7f85ccd Merge pull request #3665 from SChernykh/dev: Tweaked autoconfig for AMD CPUs with < 2 MB L3 cache per thread 2025-06-11 23:40:46 +07:00
SChernykh e4c30eb0dd Tweaked autoconfig for AMD CPUs with < 2 MB L3 cache per thread 2025-06-11 18:34:50 +02:00
XMRig d4e57d9427 Fix LLHTTP_EXPORT 2025-06-10 03:13:34 +07:00
XMRig 9a71190ca1 Update llhttp to 9.3.0 2025-06-09 03:02:26 +07:00
XMRig a7dcbb143e Bump minimum CMake version to 3.10 2025-06-08 23:23:40 +07:00
XMRig a6a0f80b12 Fix header path. 2025-06-06 14:42:49 +07:00
XMRig 682834b87d Universal fix for NaN and Infinity in JSON output 2025-06-06 14:36:21 +07:00
XMRig 184d6100dc Update rapidjson 2025-06-05 01:22:31 +07:00
XMRig 0c52d789a9 v6.22.4-dev 2025-06-04 18:59:39 +07:00
XMRig e33334f11a Merge branch 'master' into dev 2025-06-04 18:58:55 +07:00
XMRig 6184224a66 v6.22.3 2025-06-04 18:11:51 +07:00
XMRig f499155032 Merge branch 'dev' 2025-06-04 18:11:14 +07:00
xmrig a32b688dcf Update CHANGELOG.md 2025-06-04 01:47:57 +07:00
XMRig 35b334d58a Fixed compile warning. 2025-05-31 01:12:00 +07:00
XMRig 33623492fe Allow run generate_cl.js from the scripts directory. 2025-05-30 01:47:08 +07:00
xmrig 77009bd0d1 Merge pull request #3662 from ybh1998/keccak_f800: Fix type of `keccak_f800` 2025-05-30 01:04:22 +07:00
ybh1998 46572dcb3d Fix type of keccak_f800 2025-05-30 01:57:08 +08:00
xmrig 0d9af3347d Merge pull request #3652 from SChernykh/dev: Fixed HttpsClient::flush logic 2025-04-17 16:12:31 +07:00
SChernykh d24e13e605 Fixed HttpsClient::flush logic (don't write empty buffers; don't write if an error was returned) 2025-04-17 10:32:14 +02:00
xmrig 36fdfa2694 Merge pull request #3646 from SChernykh/dev: Optimized autoconfig for AMD CPUs with < 2 MB L3 cache per thread 2025-03-22 18:36:09 +07:00
SChernykh 6cfc02d24f Optimized autoconfig for AMD CPUs with < 2 MB L3 cache per thread 2025-03-22 11:34:23 +01:00
XMRig 16ecb8f085 Allow use of the previous CUDA plugin version with a warning. 2024-12-23 23:14:06 +07:00
xmrig 0229c65232 Merge pull request #3605 from SChernykh/dev: CUDA backend: update RandomX dataset when it changes 2024-12-18 22:36:08 +07:00
SChernykh 4a13a8a75c CUDA backend: update RandomX dataset when it changes 2024-12-18 13:45:10 +01:00
XMRig cd2fd9d7a6 Simplified getting PCI topology for the OpenCL backend. 2024-11-08 13:03:35 +07:00
XMRig 064cd3ef20 Fixed and simplified OpenCL GPU type detection. 2024-11-08 07:09:35 +07:00
XMRig e8bbd134f9 v6.22.3-dev 2024-11-03 15:06:54 +07:00
XMRig cf86a1e05c Merge branch 'master' into dev 2024-11-03 15:06:22 +07:00
XMRig f9e990d0f0 v6.22.2 2024-11-03 14:38:44 +07:00
XMRig 200f23bba7 Merge branch 'dev' 2024-11-03 14:38:00 +07:00
xmrig 4234b20e21 Update CHANGELOG.md 2024-11-03 14:31:17 +07:00
xmrig c5d8b8265b Merge pull request #3571 from SChernykh/dev: Fix number of threads on the new Intel Core Ultra CPUs 2024-10-25 20:55:35 +07:00
SChernykh 77c14c8362 Fix number of threads on the new Intel Core Ultra CPUs 2024-10-25 13:44:24 +02:00
xmrig 8b03750806 Merge pull request #3569 from SChernykh/dev: Fix: don't use NaN in hashrate calculations 2024-10-23 17:18:36 +07:00
SChernykh 40949f2767 Fix: don't use NaN in hashrate calculations 2024-10-23 11:40:27 +02:00
XMRig 56c447e02a v6.22.2-dev 2024-10-23 13:36:56 +07:00
XMRig 21c206f05d Merge branch 'master' into dev 2024-10-23 13:36:19 +07:00
XMRig ee65b3d159 v6.22.1 2024-10-23 12:53:06 +07:00
XMRig 1f75d198d8 Merge branch 'dev' 2024-10-23 12:52:16 +07:00
xmrig 5cf2422766 Update CHANGELOG.md 2024-10-22 17:34:07 +07:00
XMRig a32f9b5b04 Fixed --version output on ARM. 2024-10-21 08:48:58 +07:00
XMRig 8a4792f638 Update hwloc for MSVC. 2024-10-21 08:31:52 +07:00
XMRig e32731b60b Update deps 2024-10-20 09:49:06 +07:00
xmrig e1ae367084 Merge pull request #3540 from SChernykh/dev: Detect AMD engineering samples in randomx_boost.sh 2024-08-29 19:50:43 +07:00
SChernykh bc1c8358c4 Detect AMD engineering samples in randomx_boost.sh 2024-08-29 14:47:30 +02:00
xmrig e0af8f0c6b Merge pull request #3539 from SChernykh/dev: Added Zen5 to randomx_boost.sh 2024-08-28 18:51:39 +07:00
SChernykh 29f9c8cf4c Added Zen5 to randomx_boost.sh 2024-08-28 13:49:27 +02:00
xmrig 26f4936f6f Merge pull request #3535 from SChernykh/dev: RandomX: tweaks for Zen5 2024-08-20 06:47:30 +07:00
SChernykh a411ee3565 RandomX: tweaks for Zen5 2024-08-19 21:01:49 +02:00
xmrig 01bd0d48a1 Merge pull request #3534 from SChernykh/dev: Fixed threads auto-config on Zen5 2024-08-17 06:23:49 +07:00
SChernykh 20d555668b Fixed threads auto-config on Zen5 2024-08-16 23:36:22 +02:00
xmrig 56baec762f Merge pull request #3531 from SChernykh/dev: Always reset nonce on RandomX dataset change 2024-08-14 22:16:34 +07:00
SChernykh 17a52fb418 Always reset nonce on RandomX dataset change (also never get a new job when mining is paused) 2024-08-14 16:41:03 +02:00
XMRig 7e4caa8929 Merge remote-tracking branch 'remotes/origin/master' into dev 2024-08-12 03:02:19 +07:00
xmrig ef14d55aa5 Merge pull request #3529 from eltociear/patch-1: docs: update ghostrider/README.md 2024-08-12 03:01:13 +07:00
XMRig 5776fdcc20 v6.22.1-dev 2024-08-12 02:15:08 +07:00
XMRig fe0f69031b Merge branch 'master' into dev 2024-08-12 02:14:40 +07:00
Ikko Eltociear Ashimine e682f89298 docs: update ghostrider/README.md (nubmer -> number) 2024-08-12 03:54:26 +09:00
XMRig 544c393f78 v6.22.0 2024-08-12 01:13:51 +07:00
XMRig 9da6ea07bd Merge branch 'dev' 2024-08-12 01:13:29 +07:00
XMRig 62bcd6e5dc v6.22.0-dev 2024-08-10 22:00:42 +07:00
xmrig c5f98fc5c7 Merge pull request #3528 from SChernykh/dev: Added rx/yada OpenCL support 2024-08-07 13:36:55 +07:00
SChernykh ecb3ec0317 Added rx/yada OpenCL support 2024-08-07 00:18:51 +02:00
XMRig 3dfeed475f Sync changes with the proxy. 2024-08-06 23:32:20 +07:00
XMRig 98c775703e Don't generate "rx/yada" profile, use the "rx" profile by default. 2024-08-04 20:00:12 +07:00
XMRig 8da49f2650 More clean target parse. 2024-08-04 19:51:11 +07:00
xmrig 4570187459 Merge pull request #3525 from SChernykh/dev: Added Zen5 detection 2024-08-03 22:58:00 +07:00
SChernykh 748365d6e3 Added Zen5 detection (preliminary Zen5 support, MSR mod is not ready yet) 2024-08-03 11:01:18 +02:00
xmrig dd7e0e520d Merge pull request #3524 from SChernykh/dev: Fixed ARMv8 compilation 2024-08-02 23:47:21 +07:00
SChernykh ef6fb728b5 Fixed ARMv8 compilation 2024-08-02 17:51:08 +02:00
xmrig 92ffcd34d6 Merge pull request #2411 from pdxwebdev/feature/yadacoin: Added support for Yada (rx/yada algorithm) 2024-08-02 16:22:50 +07:00
Matthew Vogel b108845627 fix yada nonce offset 2024-08-01 15:10:20 -07:00
Matthew Vogel 046b2a17d3 finish updating for yadacoin 2024-08-01 00:01:09 -07:00
Matthew Vogel 5342f25fbf update constants for yadacoin 2024-07-31 23:45:34 -07:00
Matthew Vogel 5f6bcfe949 add yada constants 2024-07-31 23:26:37 -07:00
xmrig ecef382326 Merge pull request #3522 from SChernykh/dev: Removed rx/keva 2024-07-31 15:41:25 +07:00
SChernykh 86f5db19d2 Removed rx/keva (Keva coin is too small now) 2024-07-31 08:28:05 +02:00
xmrig b4a47d6ed0 Merge pull request #3518 from SChernykh/dev: Make Json::normalize more strict 2024-07-29 22:27:29 +07:00
SChernykh f5095247e8 Make Json::normalize more strict (rounding a regular FP value can give an invalid result; check the result too) 2024-07-29 17:14:21 +02:00
XMRig 2bb07fe633 #3515 Update build scripts for OpenSSL. 2024-07-24 21:02:53 +07:00
XMRig a7be8cb80c Remove chdir call after fork. 2024-06-05 03:45:37 +07:00
XMRig 2ce16df423 Create signal handles after fork() call, replace #3492. 2024-06-05 03:23:58 +07:00
XMRig 5eaa6c152e v6.21.4-dev 2024-04-23 16:51:58 +07:00
XMRig 6972f727c1 Merge branch 'master' into dev 2024-04-23 16:50:58 +07:00
XMRig 7897f10c48 v6.21.3 2024-04-23 16:27:24 +07:00
XMRig da2fb331b3 Merge branch 'dev' 2024-04-23 16:26:18 +07:00
xmrig 57f3e9c3da Update CHANGELOG.md 2024-04-23 16:17:26 +07:00
xmrig 1efe7e9562 Merge pull request #3462 from SChernykh/dev: RandomX: correct memcpy size for JIT initialization 2024-04-14 17:01:16 +07:00
SChernykh caae7c64f0 RandomX: correct memcpy size for JIT initialization (no buffer overflow; better fix for `_FORTIFY_SOURCE`) 2024-04-14 09:13:00 +02:00
xmrig 9fbdcc0ef0 Merge pull request #3461 from SChernykh/dev: RandomX: check pointer sizes during JIT initialization 2024-04-14 05:38:53 +07:00
SChernykh c7c26d97fe RandomX: check pointer sizes during JIT initialization 2024-04-13 20:32:16 +02:00
XMRig 1f7e635b04 Use internal logger for error message. 2024-03-26 21:46:18 +07:00
XMRig 1c5786e3c5 v6.21.3-dev 2024-03-23 16:21:54 +07:00
XMRig 44eb4f0038 Merge branch 'master' into dev 2024-03-23 16:20:24 +07:00
XMRig 4ab9329dda v6.21.2 2024-03-23 13:38:42 +07:00
XMRig 0c2ee013a7 Merge branch 'dev' 2024-03-23 13:38:05 +07:00
xmrig 3347537635 Update CHANGELOG.md 2024-03-23 00:46:15 +07:00
XMRig 7a85257ad4 Update hwloc for MSVC builds. 2024-03-22 18:14:39 +07:00
XMRig 850b43c079 Fix build with recent libuv. 2024-03-22 01:22:54 +07:00
XMRig b8e4eaac87 Fix rapidjson assert. 2024-03-21 21:03:35 +07:00
xmrig b9dd5e3eae Merge pull request #3450 from SChernykh/dev: Fix RandomX crash when compiled with fortify_source 2024-03-21 04:09:05 +07:00
SChernykh 032c28d50a Merge remote-tracking branch 'upstream/dev' into dev 2024-03-20 21:24:58 +01:00
SChernykh f6c50b5393 Fix RandomX crash when compiled with fortify_source 2024-03-20 21:24:02 +01:00
SChernykh e65e283aac Merge remote-tracking branch 'upstream/dev' into dev 2024-03-20 21:22:11 +01:00
XMRig 5552e1f864 Fix scripts for systems without bash. 2024-03-21 02:13:01 +07:00
XMRig 3beccae136 Merge branch 'goodmost-master' into dev 2024-03-20 14:11:53 +07:00
XMRig ef9bf2aa8c Merge branch 'master' of https://github.com/goodmost/xmrig into goodmost-master 2024-03-20 14:11:28 +07:00
XMRig 42f645fa3b Merge branch 'dev' of github.com:xmrig/xmrig into dev 2024-03-20 00:25:21 +07:00
XMRig 1fb5be6c1d Update deps. 2024-03-20 00:24:46 +07:00
goodmost 08c43b7e58 chore: remove repetitive words (Signed-off-by: goodmost <zhaohaiyang@outlook.com>) 2024-03-19 23:19:36 +08:00
xmrig 7b016fd9ce Merge pull request #3436 from SChernykh/dev: Thread-safe FileLogWriter 2024-03-14 21:46:45 +07:00
SChernykh 688d4f5ee1 Thread-safe FileLogWriter 2024-03-04 08:45:22 +01:00
xmrig 64913e3163 Merge pull request #3434 from SChernykh/dev: Update bug_report.md 2024-02-29 14:33:07 +07:00
SChernykh 48fa095e3e Update bug_report.md 2024-02-29 08:31:16 +01:00
XMRig c9b9ef51ee #2800 Fixed donation with ghostrider algorithm for builds without KawPow algorithm. 2024-02-29 09:38:47 +07:00
xmrig dd782c7001 Merge pull request #3431 from SChernykh/dev: Stratum: better check of the login response 2024-02-28 11:25:34 +07:00
SChernykh b49197f808 Stratum: better check of the login response 2024-02-27 23:39:23 +01:00
XMRig f9c4c57216 v6.21.2-dev 2024-02-25 23:00:45 +07:00
XMRig a5b8b85967 Merge branch 'master' into dev 2024-02-25 23:00:11 +07:00
188 changed files with 15914 additions and 17388 deletions

View file

@@ -17,6 +17,9 @@ Steps to reproduce the behavior.
A clear and concise description of what you expected to happen.
**Required data**
- XMRig version
- Either the exact link to a release you downloaded from https://github.com/xmrig/xmrig/releases
- Or the exact command lines that you used to build XMRig
- Miner log as text or screenshot
- Config file or command line (without wallets)
- OS: [e.g. Windows]

View file

@@ -1,3 +1,47 @@
# v6.24.0
- [#3671](https://github.com/xmrig/xmrig/pull/3671) Fixed detection of L2 cache size for some complex NUMA topologies.
- [#3674](https://github.com/xmrig/xmrig/pull/3674) Fixed ARMv7 build.
- [#3677](https://github.com/xmrig/xmrig/pull/3677) Fixed auto-config for AMD CPUs with less than 2 MB L3 cache per thread.
- [#3678](https://github.com/xmrig/xmrig/pull/3678) Improved IPv6 support: the new default settings use IPv6 equally with IPv4.
# v6.23.0
- [#3668](https://github.com/xmrig/xmrig/issues/3668) Added support for Windows ARM64.
- [#3665](https://github.com/xmrig/xmrig/pull/3665) Tweaked auto-config for AMD CPUs with < 2 MB L3 cache per thread.
# v6.22.3
- [#3605](https://github.com/xmrig/xmrig/pull/3605) CUDA backend: added missing RandomX dataset update.
- [#3646](https://github.com/xmrig/xmrig/pull/3646) Optimized auto-config for AMD CPUs with less than 2 MB L3 cache per thread.
- [#3652](https://github.com/xmrig/xmrig/pull/3652) Fixed possible crash when submitting RandomX benchmark.
- [#3662](https://github.com/xmrig/xmrig/pull/3662) Fixed OpenCL kernel compilation error on some platforms.
# v6.22.2
- [#3569](https://github.com/xmrig/xmrig/pull/3569) Fixed corrupted API output in some rare conditions.
- [#3571](https://github.com/xmrig/xmrig/pull/3571) Fixed number of threads on the new Intel Core Ultra CPUs.
# v6.22.1
- [#3531](https://github.com/xmrig/xmrig/pull/3531) Always reset nonce on RandomX dataset change.
- [#3534](https://github.com/xmrig/xmrig/pull/3534) Fixed threads auto-config on Zen5.
- [#3535](https://github.com/xmrig/xmrig/pull/3535) RandomX: tweaks for Zen5.
- [#3539](https://github.com/xmrig/xmrig/pull/3539) Added Zen5 to `randomx_boost.sh`.
- [#3540](https://github.com/xmrig/xmrig/pull/3540) Detect AMD engineering samples in `randomx_boost.sh`.
# v6.22.0
- [#2411](https://github.com/xmrig/xmrig/pull/2411) Added support for [Yada](https://yadacoin.io/) (`rx/yada` algorithm).
- [#3492](https://github.com/xmrig/xmrig/pull/3492) Fixed `--background` option on Unix systems.
- [#3518](https://github.com/xmrig/xmrig/pull/3518) Possible fix for corrupted API output in rare cases.
- [#3522](https://github.com/xmrig/xmrig/pull/3522) Removed `rx/keva` algorithm.
- [#3525](https://github.com/xmrig/xmrig/pull/3525) Added Zen5 detection.
- [#3528](https://github.com/xmrig/xmrig/pull/3528) Added `rx/yada` OpenCL support.
# v6.21.3
- [#3462](https://github.com/xmrig/xmrig/pull/3462) RandomX: correct memcpy size for JIT initialization.
# v6.21.2
- The dependencies of all prebuilt releases have been updated. Support for old Ubuntu releases has been dropped.
- [#2800](https://github.com/xmrig/xmrig/issues/2800) Fixed donation with GhostRider algorithm for builds without KawPow algorithm.
- [#3436](https://github.com/xmrig/xmrig/pull/3436) Fixed, the file log writer was not thread-safe.
- [#3450](https://github.com/xmrig/xmrig/pull/3450) Fixed RandomX crash when compiled with fortify_source.
# v6.21.1
- [#3391](https://github.com/xmrig/xmrig/pull/3391) Added support for townforge (monero fork using randomx).
- [#3399](https://github.com/xmrig/xmrig/pull/3399) Fixed Zephyr mining (OpenCL).

View file

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.1)
cmake_minimum_required(VERSION 3.10)
project(xmrig)
option(WITH_HWLOC "Enable hwloc support" ON)
@@ -162,7 +162,7 @@ if (XMRIG_OS_WIN)
src/crypto/common/VirtualMemory_win.cpp
)
set(EXTRA_LIBS ws2_32 psapi iphlpapi userenv)
set(EXTRA_LIBS ws2_32 psapi iphlpapi userenv dbghelp)
elseif (XMRIG_OS_APPLE)
list(APPEND SOURCES_OS
src/App_unix.cpp
@@ -240,7 +240,10 @@ add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${HEADE
target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${ARGON2_LIBRARY} ${ETHASH_LIBRARY} ${GHOSTRIDER_LIBRARY})
if (WIN32)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/bin/WinRing0/WinRing0x64.sys" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
if (NOT ARM_TARGET)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/bin/WinRing0/WinRing0x64.sys" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
endif()
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/benchmark_1M.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/benchmark_10M.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/pool_mine_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
@@ -249,5 +252,5 @@ if (WIN32)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_BUILD_TYPE STREQUAL Release AND NOT CMAKE_GENERATOR STREQUAL Xcode)
add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} ${CMAKE_PROJECT_NAME})
add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} "$<TARGET_FILE:${CMAKE_PROJECT_NAME}>")
endif()

View file

@@ -29,6 +29,8 @@ else()
set(WITH_VAES OFF)
endif()
add_definitions(-DRAPIDJSON_WRITE_DEFAULT_FLAGS=6) # rapidjson::kWriteNanAndInfFlag | rapidjson::kWriteNanAndInfNullFlag
if (ARM_V8)
set(ARM_TARGET 8)
elseif (ARM_V7)
@@ -36,7 +38,7 @@ elseif (ARM_V7)
endif()
if (NOT ARM_TARGET)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64|armv8-a)$")
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64|ARM64|armv8-a)$")
set(ARM_TARGET 8)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7|armv7f|armv7s|armv7k|armv7-a|armv7l|armv7ve)$")
set(ARM_TARGET 7)
@@ -49,7 +51,7 @@ if (ARM_TARGET AND ARM_TARGET GREATER 6)
message(STATUS "Use ARM_TARGET=${ARM_TARGET} (${CMAKE_SYSTEM_PROCESSOR})")
if (ARM_TARGET EQUAL 8)
if (ARM_TARGET EQUAL 8 AND (CMAKE_CXX_COMPILER_ID MATCHES GNU OR CMAKE_CXX_COMPILER_ID MATCHES Clang))
CHECK_CXX_COMPILER_FLAG(-march=armv8-a+crypto XMRIG_ARM_CRYPTO)
if (XMRIG_ARM_CRYPTO)

View file

@@ -26,7 +26,7 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ARM8_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARM8_CXX_FLAGS} -flax-vector-conversions")
elseif (ARM_TARGET EQUAL 7)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a -mfpu=neon")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a -mfpu=neon -flax-vector-conversions")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv7-a -mfpu=neon -flax-vector-conversions")
else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes")
@@ -63,10 +63,10 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Ofast -funroll-loops -fmerge-all-constants")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -funroll-loops -fmerge-all-constants")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti -Wno-missing-braces")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast -funroll-loops -fmerge-all-constants")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -funroll-loops -fmerge-all-constants")
if (ARM_TARGET EQUAL 8)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ARM8_CXX_FLAGS}")
@@ -84,10 +84,9 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
endif()
endif()
if (BUILD_STATIC)
if ((WIN32 AND ARM_TARGET) OR BUILD_STATIC)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static")
endif()
endif()
if (NOT WIN32)

View file

@@ -20,7 +20,6 @@ else()
endif()
endif()
if (XMRIG_OS_WIN)
add_definitions(-DWIN32 -DXMRIG_OS_WIN)
elseif(XMRIG_OS_APPLE)

View file

@@ -1,4 +1,18 @@
if (WITH_RANDOMX)
include(CheckSymbolExists)
if (WIN32)
check_symbol_exists(_aligned_malloc "stdlib.h" HAVE_ALIGNED_MALLOC)
if (HAVE_ALIGNED_MALLOC)
add_compile_definitions(HAVE_ALIGNED_MALLOC)
endif()
else()
check_symbol_exists(posix_memalign "stdlib.h" HAVE_POSIX_MEMALIGN)
if (HAVE_POSIX_MEMALIGN)
add_compile_definitions(HAVE_POSIX_MEMALIGN)
endif()
endif()
add_definitions(/DXMRIG_ALGO_RANDOMX)
set(WITH_ARGON2 ON)
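
The checks above export HAVE_ALIGNED_MALLOC (Windows) or HAVE_POSIX_MEMALIGN (elsewhere) as compile definitions. A minimal C sketch of how code built with these definitions might branch on them; the helper name is hypothetical, not XMRig's actual implementation:

/* Hypothetical portable aligned allocation, guarded by the macros
 * defined by the CMake checks above. A sketch, not XMRig source. */
#include <stdlib.h>
#ifdef HAVE_ALIGNED_MALLOC
#include <malloc.h> /* _aligned_malloc / _aligned_free on Windows */
#endif

static void *alloc_aligned(size_t size, size_t alignment)
{
#if defined(HAVE_ALIGNED_MALLOC)
    return _aligned_malloc(size, alignment);   /* release with _aligned_free() */
#elif defined(HAVE_POSIX_MEMALIGN)
    void *ptr = NULL;
    /* posix_memalign returns 0 on success; alignment must be a power of
       two and a multiple of sizeof(void *). Release with free(). */
    return posix_memalign(&ptr, alignment, size) == 0 ? ptr : NULL;
#else
    return malloc(size);                       /* fallback: no alignment guarantee */
#endif
}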

View file

@@ -13,7 +13,6 @@ Option `coin` useful for pools without [algorithm negotiation](https://xmrig.com
| Name | Memory | Version | Description | Notes |
|------|--------|---------|-------------|-------|
| `kawpow` | - | 6.0.0+ | KawPow (Ravencoin) | GPU only |
| `rx/keva` | 1 MB | 5.9.0+ | RandomKEVA (RandomX variant for Keva). | |
| `astrobwt` | 20 MB | 5.8.0+ | AstroBWT (Dero). | |
| `cn-pico/tlo` | 256 KB | 5.5.0+ | CryptoNight-Pico (Talleo). | |
| `rx/sfx` | 2 MB | 5.4.0+ | RandomSFX (RandomX variant for Safex). | |

View file

@@ -256,7 +256,7 @@
# v2.8.0
- **[#753](https://github.com/xmrig/xmrig/issues/753) Added new algorithm [CryptoNight variant 2](https://github.com/xmrig/xmrig/issues/753) for Monero fork, thanks [@SChernykh](https://github.com/SChernykh).**
- Added global and per thread option `"asm"` and and command line equivalent.
- Added global and per thread option `"asm"` and command line equivalent.
- **[#758](https://github.com/xmrig/xmrig/issues/758) Added SSL/TLS support for secure connections to pools.**
- Added per pool options `"tls"` and `"tls-fingerprint"` and command line equivalents.
- [#767](https://github.com/xmrig/xmrig/issues/767) Added config autosave feature, same with GPU miners.

View file

@@ -1,8 +1,8 @@
#!/bin/bash -e
#!/bin/sh -e
HWLOC_VERSION_MAJOR="2"
HWLOC_VERSION_MINOR="9"
HWLOC_VERSION_PATCH="0"
HWLOC_VERSION_MINOR="12"
HWLOC_VERSION_PATCH="1"
HWLOC_VERSION="${HWLOC_VERSION_MAJOR}.${HWLOC_VERSION_MINOR}.${HWLOC_VERSION_PATCH}"

View file

@@ -1,4 +1,4 @@
#!/bin/bash -e
#!/bin/sh -e
HWLOC_VERSION="1.11.13"

View file

@@ -1,4 +1,4 @@
#!/bin/bash -e
#!/bin/sh -e
LIBRESSL_VERSION="3.5.2"

View file

@@ -1,6 +1,6 @@
#!/bin/bash -e
#!/bin/sh -e
OPENSSL_VERSION="1.1.1s"
OPENSSL_VERSION="1.1.1u"
mkdir -p deps
mkdir -p deps/include
@@ -8,7 +8,7 @@ mkdir -p deps/lib
mkdir -p build && cd build
wget https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz -O openssl-${OPENSSL_VERSION}.tar.gz
wget https://openssl.org/source/old/1.1.1/openssl-${OPENSSL_VERSION}.tar.gz -O openssl-${OPENSSL_VERSION}.tar.gz
tar -xzf openssl-${OPENSSL_VERSION}.tar.gz
cd openssl-${OPENSSL_VERSION}

View file

@@ -1,6 +1,6 @@
#!/bin/bash -e
#!/bin/sh -e
OPENSSL_VERSION="3.0.7"
OPENSSL_VERSION="3.0.16"
mkdir -p deps
mkdir -p deps/include
@@ -8,7 +8,7 @@ mkdir -p deps/lib
mkdir -p build && cd build
wget https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz -O openssl-${OPENSSL_VERSION}.tar.gz
wget https://github.com/openssl/openssl/releases/download/openssl-${OPENSSL_VERSION}/openssl-${OPENSSL_VERSION}.tar.gz -O openssl-${OPENSSL_VERSION}.tar.gz
tar -xzf openssl-${OPENSSL_VERSION}.tar.gz
cd openssl-${OPENSSL_VERSION}

View file

@@ -1,6 +1,6 @@
#!/bin/bash -e
#!/bin/sh -e
UV_VERSION="1.44.2"
UV_VERSION="1.51.0"
mkdir -p deps
mkdir -p deps/include
@@ -8,10 +8,10 @@ mkdir -p deps/lib
mkdir -p build && cd build
wget https://github.com/libuv/libuv/archive/v${UV_VERSION}.tar.gz -O v${UV_VERSION}.tar.gz
wget https://dist.libuv.org/dist/v${UV_VERSION}/libuv-v${UV_VERSION}.tar.gz -O v${UV_VERSION}.tar.gz
tar -xzf v${UV_VERSION}.tar.gz
cd libuv-${UV_VERSION}
cd libuv-v${UV_VERSION}
sh autogen.sh
./configure --disable-shared
make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)

View file

@@ -1,5 +1,5 @@
#!/bin/bash -e
#!/bin/sh -e
./build.uv.sh
./build.hwloc.sh
./build.openssl.sh
./build.openssl3.sh

View file

@@ -1,4 +1,4 @@
#!/bin/bash -e
#!/bin/sh -e
# https://xmrig.com/docs/miner/hugepages#onegb-huge-pages

View file

@@ -6,7 +6,6 @@ const fs = require('fs');
const path = require('path');
const { text2h, text2h_bundle, addIncludes } = require('./js/opencl');
const { opencl_minify } = require('./js/opencl_minify');
const cwd = process.cwd();
function cn()
@@ -50,7 +49,6 @@ function rx()
'randomx_constants_monero.h',
'randomx_constants_wow.h',
'randomx_constants_arqma.h',
'randomx_constants_keva.h',
'randomx_constants_graft.h',
'aes.cl',
'blake2b.cl',
@@ -77,18 +75,24 @@ function kawpow()
fs.writeFileSync('kawpow_dag_cl.h', text2h(kawpow_dag, 'xmrig', 'kawpow_dag_cl'));
}
for (let i = 0; i < 2; i++) {
if (fs.existsSync('src/backend/opencl/cl/OclSource.h')) {
break;
}
process.chdir(path.resolve('src/backend/opencl/cl/cn'));
process.chdir('..');
}
process.chdir(path.resolve('src/backend/opencl/cl'));
const cwd = process.cwd();
process.chdir(path.resolve(cwd, 'cn'));
cn();
cn_r();
process.chdir(cwd);
process.chdir(path.resolve('src/backend/opencl/cl/rx'));
process.chdir(path.resolve(cwd, 'rx'));
rx();
process.chdir(cwd);
process.chdir(path.resolve('src/backend/opencl/cl/kawpow'));
process.chdir(path.resolve(cwd, 'kawpow'));
kawpow();

View file

@@ -8,7 +8,7 @@ else
modprobe msr allow_writes=on
fi
if grep -E 'AMD Ryzen|AMD EPYC' /proc/cpuinfo > /dev/null;
if grep -E 'AMD Ryzen|AMD EPYC|AuthenticAMD' /proc/cpuinfo > /dev/null;
then
if grep "cpu family[[:space:]]\{1,\}:[[:space:]]25" /proc/cpuinfo > /dev/null;
then
@@ -28,6 +28,14 @@ if grep -E 'AMD Ryzen|AMD EPYC' /proc/cpuinfo > /dev/null;
wrmsr -a 0xc001102b 0x2000cc10
echo "MSR register values for Zen3 applied"
fi
elif grep "cpu family[[:space:]]\{1,\}:[[:space:]]26" /proc/cpuinfo > /dev/null;
then
echo "Detected Zen5 CPU"
wrmsr -a 0xc0011020 0x4400000000000
wrmsr -a 0xc0011021 0x4000000000040
wrmsr -a 0xc0011022 0x8680000401570000
wrmsr -a 0xc001102b 0x2040cc10
echo "MSR register values for Zen5 applied"
else
echo "Detected Zen1/Zen2 CPU"
wrmsr -a 0xc0011020 0

View file

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.1)
cmake_minimum_required(VERSION 3.10)
project(argon2 C)
set(CMAKE_C_STANDARD 99)

View file

@@ -1 +1 @@
epee - is a small library of helpers, wrappers, tools and and so on, used to make my life easier.
epee - is a small library of helpers, wrappers, tools and so on, used to make my life easier.

View file

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.1)
cmake_minimum_required(VERSION 3.10)
project (hwloc C)
include_directories(include)

View file

@@ -1,5 +1,5 @@
Copyright © 2009 CNRS
Copyright © 2009-2022 Inria. All rights reserved.
Copyright © 2009-2025 Inria. All rights reserved.
Copyright © 2009-2013 Université Bordeaux
Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
Copyright © 2020 Hewlett Packard Enterprise. All rights reserved.
@@ -17,6 +17,214 @@ bug fixes (and other actions) for each version of hwloc since version
0.9.
Version 2.12.1
--------------
* Add hwloc-calc's --default-nodes option to hwloc-bind and hwloc-info.
* Improve the --best-memattr "default" fallback, try to use "default"
memory nodes, and add verbose messages and warnings if some
performance info are incomplete or missing.
Thanks to Antoine Morvan for the report.
* Fix CPU and memory binding on different locations,
thanks to Antoine Morvan for the report.
* Add HWLOC_LOCAL_NUMANODE_FLAG_INTERSECT_LOCALITY and enable it by
default in hwloc-calc --local-memory for finding local NUMA nodes
that do not exactly match input locations.
Thanks to Antoine Morvan for the report.
* Fix a possible crash in the x86 backend when Qemu is configured to
expose multicore/thread CPUs that are actually single-core/thread.
Thanks to Georg Pfuetzenreuter.
Version 2.12.0
--------------
* Add hwloc_topology_get_default_nodeset() for the set of default
NUMA nodes.
- hwloc-calc now has --default-nodes option.
* Rework oneAPI LevelZero support to use zesInit() and avoid the need
to set ZES_ENABLE_SYSMAN=1 in the environment.
- zesDriverGetDeviceByUuidExp() is now required in the L0 runtime.
- ZES/Sysman variants were added in hwloc/levelzero.h to specifically
handle ZES/Sysman device handles.
* Fix the locality of AMD GPU partitions, thanks to Edgar Leon for
reporting and debugging the issue.
* Better detect Cray Slingshot NICs, thanks to Edgar Leon.
* Add support for Die objects and Module groups on Windows.
* Only filter-out Dies that are identical to their Packages
when it applies to all Dies.
* Improve hwloc-calc to handle CPU-less NUMA nodes or platforms with
heterogeneous memory without requiring --nodeset-output.
* hwloc-calc now accepts counting/listing cpukinds and memory tiers
with -N and -I cpukind/memorytier.
* The systemd-dbus-api output of hwloc-calc has changed, and
--nodeset-output-format was added, to support NUMA node outputs.
Thanks to Pierre Neyron.
* Update NVLink bandwidth and CUDA capabilities up to NVIDIA Blackwell.
* Fix some NUMA syscalls on Linux for platforms with old libc headers.
* Some minor fixes in distances.
Version 2.11.2
--------------
* Add missing CPU info attrs on aarch64 on Linux.
* Use ACPI CPPC on Linux to get better information about cpukinds,
at least on AMD CPUs.
* Fix crash when manipulating cpukinds after topology
duplication, thanks to Hadrien Grasland for the report.
* Fix missing input target checks in memattr functions,
thanks to Hadrien Grasland for the report.
* Fix a memory leak when ignoring NUMA distances on FreeBSD.
* Fix build failure on old Linux distributions without accessat().
* Fix non-Windows importing of XML topologies and CPUID dumps exported
on Windows.
* hwloc-calc --cpuset-output-format systemd-dbus-api now allows
to generate AllowedCPUs information for systemd slices.
See the hwloc-calc manpage for examples. Thanks to Pierre Neyron.
* Some fixes in manpage EXAMPLES and split them into subsections.
Version 2.11.1
--------------
* Fix bash completions, thanks Tavis Rudd.
Version 2.11.0
--------------
* API
+ Add HWLOC_MEMBIND_WEIGHTED_INTERLEAVE memory binding policy on
Linux 6.9+. Thanks to Honggyu Kim for the patch.
- weighted_interleave_membind is added to membind support bits.
- The "weighted" policy is added to the hwloc-bind tool.
+ Add hwloc_obj_set_subtype(). Thanks to Hadrien Grasland for the report.
* GPU support
+ Don't hide the GPU NUMA node on NVIDIA Grace Hopper.
+ Get Intel GPU OpenCL device locality.
+ Add bandwidths between subdevices in the LevelZero XeLinkBandwidth
matrix.
+ Fix PCI Gen4+ link speed of NVIDIA GPU obtained from NVML,
thanks to Akram Sbaih for the report.
* Windows support
+ Fix Windows support when UNICODE is enabled, several hwloc features
were missing, thanks to Martin for the report.
+ Fix the enabling of CUDA in Windows CMake build,
Thanks to Moritz Kreutzer for the patch.
+ Fix CUDA/OpenCL test source path in Windows CMake.
* Tools
+ Option --best-memattr may now return multiple nodes. Additional
configuration flags may be given to tweak its behavior.
+ hwloc-info has a new --get-attr option to get a single attribute.
+ hwloc-info now supports "levels", "support" and "topology"
special keywords for backward compatibility for hwloc 3.0.
+ The --taskset command-line option is superseded by the new
--cpuset-output-format which also allows to export as list.
+ hwloc-calc may now import bitmasks described as a list of bits
with the new "--cpuset-input-format list".
* Misc
+ The MemoryTiersNr info attribute in the root object now says how many
memory tiers were built. Thanks to Antoine Morvan for the report.
+ Fix the management of infinite cpusets in the bitmap printf/sscanf
API as well as in command-line tools.
+ Add section "Compiling software on top of hwloc's C API" in the
documentation with examples for GNU Make and CMake,
thanks to Florent Pruvost for the help.
Version 2.10.0
--------------
* Heterogeneous Memory core improvements
+ Better heuristics to identify the subtype of memory such as HBM,
DRAM, NVM, CXL-DRAM, etc.
+ Build memory tiers, i.e. sets of NUMA nodes with the same subtype
and similar performance.
- NUMA node tier ranks are exposed in the new MemoryTier info
attribute (starts from 0 for highest bandwidth tier).
+ See the new Heterogeneous Memory section in the documentation.
* API
+ Add hwloc_topology_free_group_object() to discard a Group created
by hwloc_topology_alloc_group_object().
* Linux backend
+ Fix cpukinds on NVIDIA Grace to report identical cores even if they
actually have very small frequency differences.
Thanks to John C. Linford for the report.
+ Add CXLDevice attributes to CXL DAX objects and NUMA nodes to show
which PCI device implements which window.
+ Ignore buggy memory-side caches and memory attributes when fake NUMA
emulation is enabled on the Linux kernel command-line.
+ Add more info attributes in MemoryModule Misc objects,
thanks to Zubiao Xiong for the patch.
+ Get CPUModel and CPUFamily info attributes on LoongArch platforms.
* x86 backend
+ Add support for new AMD CPUID leaf 0x80000026 for better detection
of Core Complex and Die on Zen4 processors.
+ Improve Zhaoxin CPU topology detection.
* Tools
+ Input locations and many command-line options (e.g. hwloc-calc -I -N -H,
lstopo --only) now accept filters such as "NUMA[HBM]" so that only
objects of that type and subtype are considered.
- NUMA[tier=1] is also accepted for selecting NUMA nodes depending
on their MemoryTier info attribute.
+ Add --object-output to hwloc-calc to report the type as a prefix to
object indexes, e.g. Core:2 instead of 2 in the output of -I.
+ hwloc-info --ancestor and --descendants now accepts kinds of objects
instead of single types.
- The new --first option only shows the first matching object.
+ Add --children-of-pid to hwloc-ps to show a hierarchy of processes.
Thanks to Antoine Morvan for the suggestion.
+ Add --misc-from to lstopo to add Misc objects described in a file.
- To be combined with the new hwloc-ps --lstopo-misc for a customizable
lstopo --top replacement.
* Misc
+ lstopo may now configure the layout of memory object placed above,
for instance with --children-order memory:above:vert.
+ Fix XML import from memory or stdin when using libxml2 2.12.
+ Fix installation failures when configuring with --target,
thanks to Clement Foyer for the patch.
+ Fix support for 128bit pointer architectures.
+ Remove Netloc.
Version 2.9.3
-------------
* Handle Linux glibc allocation errors in binding routines (CVE-2022-47022).
* Fix hwloc-calc when searching objects on heterogeneous memory platforms,
thanks to Antoine Morvan for the report.
* Fix hwloc_get_next_child() when there are some memory-side caches.
* Don't crash if the topology is empty because Linux cgroups are wrong.
* Improve some hwloc-bind warnings in case of command-line parsing errors.
* Many documentation improvements all over the place, including:
+ hwloc_topology_restrict() and hwloc_topology_insert_group() may reorder
children, causing the logical indexes of objects to change.
Version 2.9.2
-------------
* Don't forget L3i when defining filters for multiple levels of caches
with hwloc_topology_set_cache/icache_types_filter().
* Fix object total_memory after hwloc_topology_insert_group_object().
* Fix the (non-yet) exporting in synthetic description for complex memory
hierarchies with memory-side caches, etc.
* Fix some default size attributes when building synthetic topologies.
* Fix size units in hwloc-annotate.
* Improve bitmap reallocation error management in many functions.
* Documentation improvements:
+ Better document return values of functions.
+ Add "Error reporting" section (in hwloc.h and in the doxygen doc).
+ Add FAQ entry "What may I disable to make hwloc faster?"
+ Improve FAQ entries "Why is lstopo slow?" and
"I only need ..., why should I use hwloc?"
+ Clarify how to deal with cpukinds in hwloc-calc and hwloc-bind
manpages.
Version 2.9.1
-------------
* Don't forget to apply object type filters to "perflevel" caches detected
on recent Mac OS X releases, thanks to Michel Lesoinne for the report.
* Fix a failed assertion in hwloc_topology_restrict() when some NUMA nodes
are removed because of HWLOC_RESTRICT_FLAG_REMOVE_CPULESS but no PUs are.
Thanks to Mark Grondona for reporting the issue.
* Mark HPE Cray Slingshot NICs with subtype "Slingshot".
Version 2.9.0
-------------
* Backends
@@ -61,6 +269,14 @@ Version 2.8.0
file from the documentation.
Version 2.7.2
-------------
* Fix a crash when LevelZero devices have multiple subdevices,
e.g. on PonteVecchio GPUs, thanks to Jonathan Peyton.
* Fix a leak when importing cpukinds from XML,
thanks to Hui Zhou.
Version 2.7.1
-------------
* Workaround crashes when virtual machines report incoherent x86 CPUID

View file

@@ -1,4 +1,8 @@
Introduction
This is a truncated and poorly-formatted version of the documentation main page.
See https://www.open-mpi.org/projects/hwloc/doc/ for more.
hwloc Overview
The Hardware Locality (hwloc) software project aims at easing the process of
discovering hardware resources in parallel architectures. It offers
@@ -8,66 +12,450 @@ high-performance computing (HPC) applications, but is also applicable to any
project seeking to exploit code and/or data locality on modern computing
platforms.
hwloc is actually made of two subprojects distributed together:
hwloc provides command line tools and a C API to obtain the hierarchical map of
key computing elements within a node, such as: NUMA memory nodes, shared
caches, processor packages, dies and cores, processing units (logical
processors or "threads") and even I/O devices. hwloc also gathers various
attributes such as cache and memory information, and is portable across a
variety of different operating systems and platforms.
* The original hwloc project for describing the internals of computing nodes.
It is described in details starting at section Hardware Locality (hwloc)
Introduction.
* The network-oriented companion called netloc (Network Locality), described
in details starting with section Network Locality (netloc).
hwloc primarily aims at helping high-performance computing (HPC) applications,
but is also applicable to any project seeking to exploit code and/or data
locality on modern computing platforms.
See also the Related pages tab above for links to other sections.
hwloc supports the following operating systems:
Netloc may be disabled, but the original hwloc cannot. Both hwloc and netloc
APIs are documented after these sections.
* Linux (with knowledge of cgroups and cpusets, memory targets/initiators,
etc.) on all supported hardware, including Intel Xeon Phi, ScaleMP vSMP,
and NumaScale NumaConnect.
* Solaris (with support for processor sets and logical domains)
* AIX
* Darwin / OS X
* FreeBSD and its variants (such as kFreeBSD/GNU)
* NetBSD
* HP-UX
* Microsoft Windows
* IBM BlueGene/Q Compute Node Kernel (CNK)
Installation
Since it uses standard Operating System information, hwloc's support is mostly
independent from the processor type (x86, powerpc, ...) and just relies on the
Operating System support. The main exception is BSD operating systems (NetBSD,
FreeBSD, etc.) because they do not provide topology information, hence
hwloc uses an x86-only CPUID-based backend (which can be used for other OSes
too, see the Components and plugins section).
hwloc (https://www.open-mpi.org/projects/hwloc/) is available under the BSD
license. It is hosted as a sub-project of the overall Open MPI project (https:/
/www.open-mpi.org/). Note that hwloc does not require any functionality from
Open MPI -- it is a wholly separate (and much smaller!) project and code base.
It just happens to be hosted as part of the overall Open MPI project.
To check whether hwloc works on a particular machine, just try to build it and
run lstopo or lstopo-no-graphics. If some things do not look right (e.g. bogus
or missing cache information), see Questions and Bugs.
Basic Installation
hwloc only reports the number of processors on unsupported operating systems;
no topology information is available.
Installation is the fairly common GNU-based process:
For development and debugging purposes, hwloc also offers the ability to work
on "fake" topologies:
shell$ ./configure --prefix=...
shell$ make
shell$ make install
* Symmetrical tree of resources generated from a list of level arities, see
Synthetic topologies.
* Remote machine simulation through the gathering of topology as XML files,
see Importing and exporting topologies from/to XML files.
hwloc- and netloc-specific configure options and requirements are documented in
sections hwloc Installation and Netloc Installation respectively.
hwloc can display the topology in a human-readable format, either in graphical
mode (X11), or by exporting in one of several different formats, including:
plain text, LaTeX tikzpicture, PDF, PNG, and FIG (see Command-line Examples
below). Note that some of the export formats require additional support
libraries.
Also note that if you install supplemental libraries in non-standard locations,
hwloc's configure script may not be able to find them without some help. You
may need to specify additional CPPFLAGS, LDFLAGS, or PKG_CONFIG_PATH values on
the configure command line.
hwloc offers a programming interface for manipulating topologies and objects.
It also brings a powerful CPU bitmap API that is used to describe topology
objects location on physical/logical processors. See the Programming Interface
below. It may also be used to binding applications onto certain cores or memory
nodes. Several utility programs are also provided to ease command-line
manipulation of topology objects, binding of processes, and so on.
For example, if libpciaccess was installed into /opt/pciaccess, hwloc's
configure script may not find it by default. Try adding PKG_CONFIG_PATH to the
./configure command line, like this:
Bindings for several other languages are available from the project website.
./configure PKG_CONFIG_PATH=/opt/pciaccess/lib/pkgconfig ...
Command-line Examples
Running the "lstopo" tool is a good way to check as a graphical output whether
hwloc properly detected the architecture of your node. Netloc command-line
tools can be used to display the network topology interconnecting your nodes.
On a 4-package 2-core machine with hyper-threading, the lstopo tool may show
the following graphical output:
Installing from a Git clone
[dudley]
Additionally, the code can be directly cloned from Git:
Here's the equivalent output in textual form:
shell$ git clone https://github.com/open-mpi/hwloc.git
shell$ cd hwloc
shell$ ./autogen.sh
Machine
NUMANode L#0 (P#0)
Package L#0 + L3 L#0 (4096KB)
L2 L#0 (1024KB) + L1 L#0 (16KB) + Core L#0
PU L#0 (P#0)
PU L#1 (P#8)
L2 L#1 (1024KB) + L1 L#1 (16KB) + Core L#1
PU L#2 (P#4)
PU L#3 (P#12)
Package L#1 + L3 L#1 (4096KB)
L2 L#2 (1024KB) + L1 L#2 (16KB) + Core L#2
PU L#4 (P#1)
PU L#5 (P#9)
L2 L#3 (1024KB) + L1 L#3 (16KB) + Core L#3
PU L#6 (P#5)
PU L#7 (P#13)
Package L#2 + L3 L#2 (4096KB)
L2 L#4 (1024KB) + L1 L#4 (16KB) + Core L#4
PU L#8 (P#2)
PU L#9 (P#10)
L2 L#5 (1024KB) + L1 L#5 (16KB) + Core L#5
PU L#10 (P#6)
PU L#11 (P#14)
Package L#3 + L3 L#3 (4096KB)
L2 L#6 (1024KB) + L1 L#6 (16KB) + Core L#6
PU L#12 (P#3)
PU L#13 (P#11)
L2 L#7 (1024KB) + L1 L#7 (16KB) + Core L#7
PU L#14 (P#7)
PU L#15 (P#15)
Note that GNU Autoconf >=2.63, Automake >=1.11 and Libtool >=2.2.6 are required
when building from a Git clone.
Note that there is also an equivalent output in XML that is meant for exporting
/importing topologies but it is hardly readable to human-beings (see Importing
and exporting topologies from/to XML files for details).
Nightly development snapshots are available on the web site, they can be
configured and built without any need for Git or GNU Autotools.
On a 4-package 2-core Opteron NUMA machine (with two cores disallowed by
the administrator), the lstopo tool may show the following graphical output
(with --disallowed for displaying disallowed objects):
[hagrid]
Here's the equivalent output in textual form:
Machine (32GB total)
Package L#0
NUMANode L#0 (P#0 8190MB)
L2 L#0 (1024KB) + L1 L#0 (64KB) + Core L#0 + PU L#0 (P#0)
L2 L#1 (1024KB) + L1 L#1 (64KB) + Core L#1 + PU L#1 (P#1)
Package L#1
NUMANode L#1 (P#1 8192MB)
L2 L#2 (1024KB) + L1 L#2 (64KB) + Core L#2 + PU L#2 (P#2)
L2 L#3 (1024KB) + L1 L#3 (64KB) + Core L#3 + PU L#3 (P#3)
Package L#2
NUMANode L#2 (P#2 8192MB)
L2 L#4 (1024KB) + L1 L#4 (64KB) + Core L#4 + PU L#4 (P#4)
L2 L#5 (1024KB) + L1 L#5 (64KB) + Core L#5 + PU L#5 (P#5)
Package L#3
NUMANode L#3 (P#3 8192MB)
L2 L#6 (1024KB) + L1 L#6 (64KB) + Core L#6 + PU L#6 (P#6)
L2 L#7 (1024KB) + L1 L#7 (64KB) + Core L#7 + PU L#7 (P#7)
On a 2-package quad-core Xeon (pre-Nehalem, with 2 dual-core dies into each
package):
[emmett]
Here's the same output in textual form:
Machine (total 16GB)
NUMANode L#0 (P#0 16GB)
Package L#0
L2 L#0 (4096KB)
L1 L#0 (32KB) + Core L#0 + PU L#0 (P#0)
L1 L#1 (32KB) + Core L#1 + PU L#1 (P#4)
L2 L#1 (4096KB)
L1 L#2 (32KB) + Core L#2 + PU L#2 (P#2)
L1 L#3 (32KB) + Core L#3 + PU L#3 (P#6)
Package L#1
L2 L#2 (4096KB)
L1 L#4 (32KB) + Core L#4 + PU L#4 (P#1)
L1 L#5 (32KB) + Core L#5 + PU L#5 (P#5)
L2 L#3 (4096KB)
L1 L#6 (32KB) + Core L#6 + PU L#6 (P#3)
L1 L#7 (32KB) + Core L#7 + PU L#7 (P#7)
Programming Interface
The basic interface is available in hwloc.h. Some higher-level functions are
available in hwloc/helper.h to reduce the need to manually manipulate objects
and follow links between them. Documentation for all these is provided later in
this document. Developers may also want to look at hwloc/inlines.h which
contains the actual inline code of some hwloc.h routines, and at this document,
which provides good higher-level topology traversal examples.
To precisely define the vocabulary used by hwloc, a Terms and Definitions
section is available and should probably be read first.
Each hwloc object contains a cpuset describing the list of processing units
that it contains. These bitmaps may be used for CPU binding and Memory binding.
hwloc offers an extensive bitmap manipulation interface in hwloc/bitmap.h.
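
As a short illustration of that bitmap interface, here is a sketch using real hwloc/bitmap.h calls (the exact printed string is an assumption):

/* Build a cpuset by hand and print it; error handling omitted for brevity. */
#include "hwloc.h"
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    hwloc_bitmap_t set = hwloc_bitmap_alloc(); /* starts empty */
    hwloc_bitmap_set(set, 0);                  /* add PU index 0 */
    hwloc_bitmap_set_range(set, 4, 7);         /* add PU indexes 4-7 */

    char *str;
    hwloc_bitmap_asprintf(&str, set);          /* e.g. "0x000000f1" */
    printf("cpuset: %s\n", str);

    free(str);
    hwloc_bitmap_free(set);
    return 0;
}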
Moreover, hwloc also comes with additional helpers for interoperability with
several commonly used environments. See the Interoperability With Other
Software section for details.
The complete API documentation is available in a full set of HTML pages, man
pages, and self-contained PDF files (formatted for both US letter and A4
formats) in the source tarball in doc/doxygen-doc/.
NOTE: If you are building the documentation from a Git clone, you will need to
have Doxygen and pdflatex installed -- the documentation will be built during
the normal "make" process. The documentation is installed during "make install"
to $prefix/share/doc/hwloc/ and your system's default man page tree (under
$prefix, of course).
Portability
Operating Systems have varying support for CPU and memory binding, e.g. while
some Operating Systems provide interfaces for all kinds of CPU and memory
bindings, some others provide only interfaces for a limited number of kinds of
CPU and memory binding, and some do not provide any binding interface at all.
Hwloc's binding functions would then simply return the ENOSYS error (Function
not implemented), meaning that the underlying Operating System does not provide
any interface for them. CPU binding and Memory binding provide more information
on which hwloc binding functions should be preferred because interfaces for
them are usually available on the supported Operating Systems.
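
A small sketch of that pattern, using the real hwloc_topology_get_support() and hwloc_set_cpubind() calls (the wrapper function itself is hypothetical):

/* Probe binding support first, then fall back gracefully on ENOSYS. */
#include "hwloc.h"
#include <errno.h>
#include <stdio.h>

static void bind_this_thread(hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
    const struct hwloc_topology_support *support =
        hwloc_topology_get_support(topology);

    if (!support->cpubind->set_thisthread_cpubind) {
        fprintf(stderr, "CPU binding is not supported on this OS\n");
        return;
    }
    if (hwloc_set_cpubind(topology, set, HWLOC_CPUBIND_THREAD) < 0 &&
        errno == ENOSYS) {
        fprintf(stderr, "binding interface not implemented here\n");
    }
}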
Similarly, the ability of reporting topology information varies from one
platform to another. As shown in Command-line Examples, hwloc can obtain
information on a wide variety of hardware topologies. However, some platforms
and/or operating system versions will only report a subset of this information.
For example, on a PPC64-based system with 8 cores (each with 2 hardware
threads) running a default 2.6.18-based kernel from RHEL 5.4, hwloc is only
able to glean information about NUMA nodes and processor units (PUs). No
information about caches, packages, or cores is available.
Here's the graphical output from lstopo on this platform when Simultaneous
Multi-Threading (SMT) is enabled:
[ppc64-with]
And here's the graphical output from lstopo on this platform when SMT is
disabled:
[ppc64-with]
Notice that hwloc only sees half the PUs when SMT is disabled. PU L#6, for
example, seems to change location from NUMA node #0 to #1. In reality, no PUs
"moved" -- they were simply re-numbered when hwloc only saw half as many (see
also Logical index in Indexes and Sets). Hence, PU L#6 in the SMT-disabled
picture probably corresponds to PU L#12 in the SMT-enabled picture.
This same "PUs have disappeared" effect can be seen on other platforms -- even
platforms / OSs that provide much more information than the above PPC64 system.
This is an unfortunate side-effect of how operating systems report information
to hwloc.
Note that after upgrading the Linux kernel on the same PPC64 system mentioned above
to 2.6.34, hwloc is able to discover all the topology information. The
following picture shows the entire topology layout when SMT is enabled:
[ppc64-full]
Developers using the hwloc API or XML output for portable applications should
therefore be extremely careful to not make any assumptions about the structure
of data that is returned. For example, per the above reported PPC topology, it
is not safe to assume that PUs will always be descendants of cores.
Additionally, future hardware may insert new topology elements that are not
available in this version of hwloc. Long-lived applications that are meant to
span multiple different hardware platforms should also be careful about making
structure assumptions. For example, a new element may someday exist between a
core and a PU.
API Example
The following small C example (available in the source tree as ``doc/examples/
hwloc-hello.c'') prints the topology of the machine and performs some thread
and memory binding. More examples are available in the doc/examples/ directory
of the source tree.
/* Example hwloc API program.
*
* See other examples under doc/examples/ in the source tree
* for more details.
*
* Copyright (c) 2009-2016 Inria. All rights reserved.
* Copyright (c) 2009-2011 Université Bordeaux
* Copyright (c) 2009-2010 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*
* hwloc-hello.c
*/
#include "hwloc.h"
#include <errno.h>
#include <stdio.h>
#include <string.h>
static void print_children(hwloc_topology_t topology, hwloc_obj_t obj,
int depth)
{
char type[32], attr[1024];
unsigned i;
hwloc_obj_type_snprintf(type, sizeof(type), obj, 0);
printf("%*s%s", 2*depth, "", type);
if (obj->os_index != (unsigned) -1)
printf("#%u", obj->os_index);
hwloc_obj_attr_snprintf(attr, sizeof(attr), obj, " ", 0);
if (*attr)
printf("(%s)", attr);
printf("\n");
for (i = 0; i < obj->arity; i++) {
print_children(topology, obj->children[i], depth + 1);
}
}
int main(void)
{
int depth;
unsigned i, n;
unsigned long size;
int levels;
char string[128];
int topodepth;
void *m;
hwloc_topology_t topology;
hwloc_cpuset_t cpuset;
hwloc_obj_t obj;
/* Allocate and initialize topology object. */
hwloc_topology_init(&topology);
/* ... Optionally, put detection configuration here to ignore
some objects types, define a synthetic topology, etc....
The default is to detect all the objects of the machine that
the caller is allowed to access. See Configure Topology
Detection. */
/* Perform the topology detection. */
hwloc_topology_load(topology);
/* Optionally, get some additional topology information
in case we need the topology depth later. */
topodepth = hwloc_topology_get_depth(topology);
/*****************************************************************
* First example:
* Walk the topology with an array style, from level 0 (always
* the system level) to the lowest level (always the proc level).
*****************************************************************/
for (depth = 0; depth < topodepth; depth++) {
printf("*** Objects at level %d\n", depth);
for (i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth);
i++) {
hwloc_obj_type_snprintf(string, sizeof(string),
hwloc_get_obj_by_depth(topology, depth, i), 0);
printf("Index %u: %s\n", i, string);
}
}
/*****************************************************************
* Second example:
* Walk the topology with a tree style.
*****************************************************************/
printf("*** Printing overall tree\n");
print_children(topology, hwloc_get_root_obj(topology), 0);
/*****************************************************************
* Third example:
* Print the number of packages.
*****************************************************************/
depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PACKAGE);
if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
printf("*** The number of packages is unknown\n");
} else {
printf("*** %u package(s)\n",
hwloc_get_nbobjs_by_depth(topology, depth));
}
/*****************************************************************
* Fourth example:
* Compute the amount of cache that the first logical processor
* has above it.
*****************************************************************/
levels = 0;
size = 0;
for (obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
obj;
obj = obj->parent)
if (hwloc_obj_type_is_cache(obj->type)) {
levels++;
size += obj->attr->cache.size;
}
printf("*** Logical processor 0 has %d caches totaling %luKB\n",
levels, size / 1024);
/*****************************************************************
* Fifth example:
* Bind to only one thread of the last core of the machine.
*
* First find out where cores are, or else smaller sets of CPUs if
* the OS doesn't have the notion of a "core".
*****************************************************************/
depth = hwloc_get_type_or_below_depth(topology, HWLOC_OBJ_CORE);
/* Get last core. */
obj = hwloc_get_obj_by_depth(topology, depth,
hwloc_get_nbobjs_by_depth(topology, depth) - 1);
if (obj) {
/* Get a copy of its cpuset that we may modify. */
cpuset = hwloc_bitmap_dup(obj->cpuset);
/* Get only one logical processor (in case the core is
SMT/hyper-threaded). */
hwloc_bitmap_singlify(cpuset);
/* And try to bind ourself there. */
if (hwloc_set_cpubind(topology, cpuset, 0)) {
char *str;
int error = errno;
hwloc_bitmap_asprintf(&str, obj->cpuset);
printf("Couldn't bind to cpuset %s: %s\n", str, strerror(error));
free(str);
}
/* Free our cpuset copy */
hwloc_bitmap_free(cpuset);
}
/*****************************************************************
* Sixth example:
* Allocate some memory on the last NUMA node, bind some existing
* memory to the last NUMA node.
*****************************************************************/
/* Get last node. There's always at least one. */
n = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_NUMANODE);
obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, n - 1);
size = 1024*1024;
m = hwloc_alloc_membind(topology, size, obj->nodeset,
HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
hwloc_free(topology, m, size);
m = malloc(size);
hwloc_set_area_membind(topology, m, size, obj->nodeset,
HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
free(m);
/* Destroy topology object. */
hwloc_topology_destroy(topology);
return 0;
}
hwloc provides a pkg-config executable to obtain relevant compiler and linker
flags. See Compiling software on top of hwloc's C API for details on building
programs on top of hwloc's API using GNU Make or CMake.
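For instance, a single-file program such as hwloc-hello.c can typically be
compiled with something like:
shell$ cc hwloc-hello.c $(pkg-config --cflags --libs hwloc) -o hwloc-hello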
On a machine with 2 processor packages -- each of which has two processing
cores -- the output from running hwloc-hello could be something like the
following:
shell$ ./hwloc-hello
*** Objects at level 0
Index 0: Machine
*** Objects at level 1
Index 0: Package#0
Index 1: Package#1
*** Objects at level 2
Index 0: Core#0
Index 1: Core#1
Index 2: Core#3
Index 3: Core#2
*** Objects at level 3
Index 0: PU#0
Index 1: PU#1
Index 2: PU#2
Index 3: PU#3
*** Printing overall tree
Machine
Package#0
Core#0
PU#0
Core#1
PU#1
Package#1
Core#3
PU#2
Core#2
PU#3
*** 2 package(s)
*** Logical processor 0 has 0 caches totaling 0KB
shell$
Questions and Bugs
@ -80,6 +468,20 @@ www.open-mpi.org/community/lists/hwloc.php).
There is also a #hwloc IRC channel on Libera Chat (irc.libera.chat).
History / Credits
hwloc is the evolution and merger of the libtopology project and the Portable
Linux Processor Affinity (PLPA) (https://www.open-mpi.org/projects/plpa/)
project. Because of functional and ideological overlap, these two code bases
and ideas were merged and released under the name "hwloc" as an Open MPI
sub-project.
libtopology was initially developed by the Inria Runtime Team-Project. PLPA was
initially developed by the Open MPI development team as a sub-project. Both are
now deprecated in favor of hwloc, which is distributed as an Open MPI
sub-project.
See https://www.open-mpi.org/projects/hwloc/doc/ for more hwloc documentation.
See https://www.open-mpi.org/projects/hwloc/doc/ for more hwloc documentation,
actual links to related pages, images, etc.


@ -8,8 +8,8 @@
# Please update HWLOC_VERSION* in contrib/windows/hwloc_config.h too.
major=2
minor=9
release=0
minor=12
release=1
# greek is used for alpha or beta release tags. If it is non-empty,
# it will be appended to the version number. It does not have to be
@ -22,7 +22,7 @@ greek=
# The date when this release was created
date="Dec 14, 2022"
date="May 12, 2025"
# If snapshot=1, then use the value from snapshot_version as the
# entire hwloc version (i.e., ignore major, minor, release, and
@ -41,7 +41,6 @@ snapshot_version=${major}.${minor}.${release}${greek}-git
# 2. Version numbers are described in the Libtool current:revision:age
# format.
libhwloc_so_version=21:1:6
libnetloc_so_version=0:0:0
libhwloc_so_version=25:0:10
# Please also update the <TargetName> lines in contrib/windows/libhwloc.vcxproj

File diff suppressed because it is too large.


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2025 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -11,10 +11,10 @@
#ifndef HWLOC_CONFIG_H
#define HWLOC_CONFIG_H
#define HWLOC_VERSION "2.9.0"
#define HWLOC_VERSION "2.12.1"
#define HWLOC_VERSION_MAJOR 2
#define HWLOC_VERSION_MINOR 9
#define HWLOC_VERSION_RELEASE 0
#define HWLOC_VERSION_MINOR 12
#define HWLOC_VERSION_RELEASE 1
#define HWLOC_VERSION_GREEK ""
#define __hwloc_restrict


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -50,9 +50,10 @@ extern "C" {
* hwloc_bitmap_free(set);
* \endcode
*
* \note Most functions below return an int that may be negative in case of
* error. The usual error case would be an internal failure to realloc/extend
* \note Most functions below return 0 on success and -1 on error.
* The usual error case would be an internal failure to realloc/extend
* the storage of the bitmap (\p errno would be set to \c ENOMEM).
* See also \ref hwlocality_api_error_reporting.
*
* \note Several examples of using the bitmap API are available under the
* doc/examples/ directory in the source tree.
@ -83,7 +84,13 @@ typedef const struct hwloc_bitmap_s * hwloc_const_bitmap_t;
*/
HWLOC_DECLSPEC hwloc_bitmap_t hwloc_bitmap_alloc(void) __hwloc_attribute_malloc;
/** \brief Allocate a new full bitmap. */
/** \brief Allocate a new full bitmap.
*
* \returns A valid bitmap or \c NULL.
*
* The bitmap should be freed by a corresponding call to
* hwloc_bitmap_free().
*/
HWLOC_DECLSPEC hwloc_bitmap_t hwloc_bitmap_alloc_full(void) __hwloc_attribute_malloc;
/** \brief Free bitmap \p bitmap.
@ -106,73 +113,134 @@ HWLOC_DECLSPEC int hwloc_bitmap_copy(hwloc_bitmap_t dst, hwloc_const_bitmap_t sr
* Bitmap/String Conversion
*/
/** \brief Stringify a bitmap.
/** \brief Stringify a bitmap in the default hwloc format.
*
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* Print the bits set inside a bitmap as a comma-separated list of hexadecimal 32-bit blocks.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"0xffffffff,0x00000006,0x00000002"</tt>.
*
* Up to \p buflen characters may be written in buffer \p buf.
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
* or that would have been written (not including the ending \c \0).
* \return -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
/** \brief Stringify a bitmap into a newly allocated string.
/** \brief Stringify a bitmap into a newly allocated string in the default hwloc format.
*
* \return -1 on error.
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* Print the bits set inside a bitmap as a comma-separated list of hexadecimal 32-bit blocks.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"0xffffffff,0x00000006,0x00000002"</tt>.
*
* \return the number of characters that were written (not including the ending \c \0).
* \return -1 on error, for instance with \p errno set to \c ENOMEM on failure to allocate the output string.
*/
HWLOC_DECLSPEC int hwloc_bitmap_asprintf(char ** strp, hwloc_const_bitmap_t bitmap);
/** \brief Parse a bitmap string and stores it in bitmap \p bitmap.
/** \brief Parse a bitmap string as the default hwloc format and stores it in bitmap \p bitmap.
*
* <b>Note that if the bitmap is a CPU or nodeset, the input string must contain physical indexes.</b>
*
* The input string should be a comma-separated list of hexadecimal 32-bit blocks.
* String <tt>"0xffffffff,0x6,0x2"</tt> is parsed as a bitmap containing all bits between 64 and 95,
* and bits 33, 34 and 1.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string);
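As a minimal sketch of the round trip in the default format (error checks
omitted; the exact zero-padding of the printed blocks is indicative):
hwloc_bitmap_t in = hwloc_bitmap_alloc();
hwloc_bitmap_t out = hwloc_bitmap_alloc();
char buf[128];
hwloc_bitmap_set(in, 1);            /* bit 1 */
hwloc_bitmap_set_range(in, 64, 95); /* bits 64-95 */
hwloc_bitmap_snprintf(buf, sizeof(buf), in); /* e.g. "0xffffffff,0x00000000,0x00000002" */
hwloc_bitmap_sscanf(out, buf);      /* out is now equal to in */
hwloc_bitmap_free(in);
hwloc_bitmap_free(out);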
/** \brief Stringify a bitmap in the list format.
*
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* Lists are comma-separated indexes or ranges.
* Ranges are dash separated indexes.
* The last range may not have an ending indexes if the bitmap is infinitely set.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"1,33-34,64-95"</tt>.
* The last range may not have an ending index if the bitmap is infinitely set.
*
* Up to \p buflen characters may be written in buffer \p buf.
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
* or that would have been written (not including the ending \c \0).
* \return -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_list_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
/** \brief Stringify a bitmap into a newly allocated list string.
*
* \return -1 on error.
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* Lists are comma-separated indexes or ranges.
* Ranges are dash separated indexes.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"1,33-34,64-95"</tt>.
* The last range may not have an ending index if the bitmap is infinitely set.
*
* \return the number of characters that were written (not including the ending \c \0).
* \return -1 on error, for instance with \p errno set to \c ENOMEM on failure to allocate the output string.
*/
HWLOC_DECLSPEC int hwloc_bitmap_list_asprintf(char ** strp, hwloc_const_bitmap_t bitmap);
/** \brief Parse a list string and stores it in bitmap \p bitmap.
*
* <b>Note that if the bitmap is a CPU or nodeset, the input string must contain physical indexes.</b>
*
* Lists are comma-separated indexes or ranges.
* Ranges are dash separated indexes.
* String <tt>"1,33-34,64-95"</tt> is parsed as a bitmap containing bits 1, 33, 34, and all from 64 to 95.
* The last range may not have an ending index if the bitmap is infinitely set.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_list_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string);
/** \brief Stringify a bitmap in the taskset-specific format.
*
* The taskset command manipulates bitmap strings that contain a single
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* The taskset program manipulates bitmap strings that contain a single
* (possibly very long) hexadecimal number starting with 0x.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"0xffffffff0000000600000002"</tt>.
*
* Up to \p buflen characters may be written in buffer \p buf.
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
* or that would have been written (not including the ending \c \0).
* \return -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
/** \brief Stringify a bitmap into a newly allocated taskset-specific string.
*
* \return -1 on error.
* <b>Note that if the bitmap is a CPU or nodeset, it contains physical indexes.</b>
*
* The taskset program manipulates bitmap strings that contain a single
* (possibly very long) hexadecimal number starting with 0x.
* A bitmap containing bits 1, 33, 34, and all from 64 to 95 is printed as <tt>"0xffffffff0000000600000002"</tt>.
*
* \return the number of characters that were written (not including the ending \c \0).
* \return -1 on error, for instance with \p errno set to \c ENOMEM on failure to allocate the output string.
*/
HWLOC_DECLSPEC int hwloc_bitmap_taskset_asprintf(char ** strp, hwloc_const_bitmap_t bitmap);
/** \brief Parse a taskset-specific bitmap string and stores it in bitmap \p bitmap.
*
* <b>Note that if the bitmap is a CPU or nodeset, the input string must contain physical indexes.</b>
*
* The taskset program manipulates bitmap strings that contain a single
* (possibly very long) hexadecimal number starting with 0x.
* String <tt>"0xffffffff0000000600000002"</tt> is parsed as a bitmap containing all bits between 64 and 95,
* and bits 33, 34 and 1.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_bitmap_taskset_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string);
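The three formats can be compared side by side; this sketch prints the bitmap
used in the examples above (expected strings shown as comments):
char def[64], list[64], task[64];
hwloc_bitmap_t b = hwloc_bitmap_alloc();
hwloc_bitmap_set(b, 1);
hwloc_bitmap_set(b, 33);
hwloc_bitmap_set(b, 34);
hwloc_bitmap_set_range(b, 64, 95);
hwloc_bitmap_snprintf(def, sizeof(def), b);           /* "0xffffffff,0x00000006,0x00000002" */
hwloc_bitmap_list_snprintf(list, sizeof(list), b);    /* "1,33-34,64-95" */
hwloc_bitmap_taskset_snprintf(task, sizeof(task), b); /* "0xffffffff0000000600000002" */
hwloc_bitmap_free(b);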
@ -279,6 +347,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_to_ulongs(hwloc_const_bitmap_t bitmap, unsigned
* When called on the output of hwloc_topology_get_topology_cpuset(),
* the returned number is large enough for all cpusets of the topology.
*
* \return the number of unsigned longs required.
* \return -1 if \p bitmap is infinite.
*/
HWLOC_DECLSPEC int hwloc_bitmap_nr_ulongs(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
@ -305,21 +374,23 @@ HWLOC_DECLSPEC int hwloc_bitmap_isfull(hwloc_const_bitmap_t bitmap) __hwloc_attr
/** \brief Compute the first index (least significant bit) in bitmap \p bitmap
*
* \return -1 if no index is set in \p bitmap.
* \return the first index set in \p bitmap.
* \return -1 if \p bitmap is empty.
*/
HWLOC_DECLSPEC int hwloc_bitmap_first(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
/** \brief Compute the next index in bitmap \p bitmap which is after index \p prev
*
* If \p prev is -1, the first index is returned.
*
* \return the first index set in \p bitmap if \p prev is \c -1.
* \return the next index set in \p bitmap if \p prev is not \c -1.
* \return -1 if no index with higher index is set in \p bitmap.
*/
HWLOC_DECLSPEC int hwloc_bitmap_next(hwloc_const_bitmap_t bitmap, int prev) __hwloc_attribute_pure;
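A common idiom built on these two functions walks all set indexes of a
(finite) bitmap b, as in this sketch:
int i;
for (i = hwloc_bitmap_first(b); i != -1; i = hwloc_bitmap_next(b, i))
  printf("index %d is set\n", i);
The hwloc_bitmap_foreach_begin()/hwloc_bitmap_foreach_end() macros provide the
same traversal.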
/** \brief Compute the last index (most significant bit) in bitmap \p bitmap
*
* \return -1 if no index is set in \p bitmap, or if \p bitmap is infinitely set.
* \return the last index set in \p bitmap.
* \return -1 if \p bitmap is empty, or if \p bitmap is infinitely set.
*/
HWLOC_DECLSPEC int hwloc_bitmap_last(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
@ -327,28 +398,29 @@ HWLOC_DECLSPEC int hwloc_bitmap_last(hwloc_const_bitmap_t bitmap) __hwloc_attrib
* indexes that are in the bitmap).
*
* \return the number of indexes that are in the bitmap.
*
* \return -1 if \p bitmap is infinitely set.
*/
HWLOC_DECLSPEC int hwloc_bitmap_weight(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
/** \brief Compute the first unset index (least significant bit) in bitmap \p bitmap
*
* \return -1 if no index is unset in \p bitmap.
* \return the first unset index in \p bitmap.
* \return -1 if \p bitmap is full.
*/
HWLOC_DECLSPEC int hwloc_bitmap_first_unset(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
/** \brief Compute the next unset index in bitmap \p bitmap which is after index \p prev
*
* If \p prev is -1, the first unset index is returned.
*
* \return the first index unset in \p bitmap if \p prev is \c -1.
* \return the next index unset in \p bitmap if \p prev is not \c -1.
* \return -1 if no index with higher index is unset in \p bitmap.
*/
HWLOC_DECLSPEC int hwloc_bitmap_next_unset(hwloc_const_bitmap_t bitmap, int prev) __hwloc_attribute_pure;
/** \brief Compute the last unset index (most significant bit) in bitmap \p bitmap
*
* \return -1 if no index is unset in \p bitmap, or if \p bitmap is infinitely set.
* \return the last index unset in \p bitmap.
* \return -1 if \p bitmap is full, or if \p bitmap is not infinitely set.
*/
HWLOC_DECLSPEC int hwloc_bitmap_last_unset(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure;
@ -428,6 +500,8 @@ HWLOC_DECLSPEC int hwloc_bitmap_not (hwloc_bitmap_t res, hwloc_const_bitmap_t bi
/** \brief Test whether bitmaps \p bitmap1 and \p bitmap2 intersects.
*
* \return 1 if bitmaps intersect, 0 otherwise.
*
* \note The empty bitmap does not intersect any other bitmap.
*/
HWLOC_DECLSPEC int hwloc_bitmap_intersects (hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure;


@ -1,5 +1,5 @@
/*
* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2023 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -42,6 +42,9 @@ extern "C" {
/** \brief Return the domain, bus and device IDs of the CUDA device \p cudevice.
*
* Device \p cudevice must match the local machine.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_cuda_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused,
@ -87,6 +90,9 @@ hwloc_cuda_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_cuda_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,5 +1,5 @@
/*
* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2023 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -43,6 +43,9 @@ extern "C" {
/** \brief Return the domain, bus and device IDs of the CUDA device whose index is \p idx.
*
* Device index \p idx must match the local machine.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_cudart_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused,
@ -84,6 +87,9 @@ hwloc_cudart_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unus
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_cudart_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,5 +1,5 @@
/*
* Copyright © 2013-2020 Inria. All rights reserved.
* Copyright © 2013-2024 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -222,6 +222,8 @@ enum hwloc_topology_diff_apply_flags_e {
HWLOC_DECLSPEC int hwloc_topology_diff_apply(hwloc_topology_t topology, hwloc_topology_diff_t diff, unsigned long flags);
/** \brief Destroy a list of topology differences.
*
* \return 0.
*/
HWLOC_DECLSPEC int hwloc_topology_diff_destroy(hwloc_topology_diff_t diff);
@ -233,6 +235,8 @@ HWLOC_DECLSPEC int hwloc_topology_diff_destroy(hwloc_topology_diff_t diff);
* This identifier is usually the name of the other XML file
* that contains the reference topology.
*
* \return 0 on success, -1 on error.
*
* \note the pointer returned in refname should later be freed
* by the caller.
*/
@ -246,10 +250,17 @@ HWLOC_DECLSPEC int hwloc_topology_diff_load_xml(const char *xmlpath, hwloc_topol
* This identifier is usually the name of the other XML file
* that contains the reference topology.
* This attribute is given back when reading the diff from XML.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_topology_diff_export_xml(hwloc_topology_diff_t diff, const char *refname, const char *xmlpath);
/** \brief Load a list of topology differences from a XML buffer.
*
* Build a list of differences from the XML memory buffer given
* at \p xmlbuffer and of length \p buflen (including an ending \c \0).
* This buffer may have been filled earlier with
* hwloc_topology_diff_export_xmlbuffer().
*
* If not \c NULL, \p refname will be filled with the identifier
* string of the reference topology for the difference file,
@ -257,6 +268,8 @@ HWLOC_DECLSPEC int hwloc_topology_diff_export_xml(hwloc_topology_diff_t diff, co
* This identifier is usually the name of the other XML file
* that contains the reference topology.
*
* \return 0 on success, -1 on error.
*
* \note the pointer returned in refname should later be freed
* by the caller.
*/
@ -271,9 +284,11 @@ HWLOC_DECLSPEC int hwloc_topology_diff_load_xmlbuffer(const char *xmlbuffer, int
* that contains the reference topology.
* This attribute is given back when reading the diff from XML.
*
* The returned buffer ends with a \0 that is included in the returned
* The returned buffer ends with a \c \0 that is included in the returned
* length.
*
* \return 0 on success, -1 on error.
*
* \note The XML buffer should later be freed with hwloc_free_xmlbuffer().
*/
HWLOC_DECLSPEC int hwloc_topology_diff_export_xmlbuffer(hwloc_topology_diff_t diff, const char *refname, char **xmlbuffer, int *buflen);


@ -1,5 +1,5 @@
/*
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2010-2025 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -28,18 +28,18 @@ extern "C" {
/** \brief Matrix of distances between a set of objects.
*
* This matrix often contains latencies between NUMA nodes
* The most common matrix contains latencies between NUMA nodes
* (as reported in the System Locality Distance Information Table (SLIT)
* in the ACPI specification), which may or may not be physically accurate.
* It corresponds to the latency for accessing the memory of one node
* from a core in another node.
* The corresponding kind is ::HWLOC_DISTANCES_KIND_FROM_OS | ::HWLOC_DISTANCES_KIND_FROM_USER.
* The corresponding kind is ::HWLOC_DISTANCES_KIND_MEANS_LATENCY | ::HWLOC_DISTANCES_KIND_FROM_USER.
* The name of this distances structure is "NUMALatency".
* Others distance structures include and "XGMIBandwidth", "XGMIHops",
* "XeLinkBandwidth" and "NVLinkBandwidth".
*
* The matrix may also contain bandwidths between random sets of objects,
* possibly provided by the user, as specified in the \p kind attribute.
* Other common distance structures include "XGMIBandwidth", "XGMIHops",
* "XeLinkBandwidth" and "NVLinkBandwidth".
*
* Pointers \p objs and \p values should not be replaced, reallocated, freed, etc.
* However callers are allowed to modify \p kind as well as the contents
@ -70,11 +70,10 @@ struct hwloc_distances_s {
* The \p kind attribute of struct hwloc_distances_s is a OR'ed set
* of kinds.
*
* A kind of format HWLOC_DISTANCES_KIND_FROM_* specifies where the
* distance information comes from, if known.
*
* A kind of format HWLOC_DISTANCES_KIND_MEANS_* specifies whether
* values are latencies or bandwidths, if applicable.
* Each distance matrix may have only one kind among HWLOC_DISTANCES_KIND_FROM_*
* specifying where distance information comes from,
* and one kind among HWLOC_DISTANCES_KIND_MEANS_* specifying
* whether values are latencies or bandwidths.
*/
enum hwloc_distances_kind_e {
/** \brief These distances were obtained from the operating system or hardware.
@ -131,6 +130,8 @@ enum hwloc_distances_kind_e {
*
* Each distance matrix returned in the \p distances array should be released
* by the caller using hwloc_distances_release().
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int
hwloc_distances_get(hwloc_topology_t topology,
@ -140,6 +141,8 @@ hwloc_distances_get(hwloc_topology_t topology,
/** \brief Retrieve distance matrices for object at a specific depth in the topology.
*
* Identical to hwloc_distances_get() with the additional \p depth filter.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int
hwloc_distances_get_by_depth(hwloc_topology_t topology, int depth,
@ -149,6 +152,8 @@ hwloc_distances_get_by_depth(hwloc_topology_t topology, int depth,
/** \brief Retrieve distance matrices for object of a specific type.
*
* Identical to hwloc_distances_get() with the additional \p type filter.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int
hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
@ -162,6 +167,8 @@ hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
* The name of the most common structure is "NUMALatency".
* Others include "XGMIBandwidth", "XGMIHops", "XeLinkBandwidth",
* and "NVLinkBandwidth".
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int
hwloc_distances_get_by_name(hwloc_topology_t topology, const char *name,
@ -171,7 +178,12 @@ hwloc_distances_get_by_name(hwloc_topology_t topology, const char *name,
/** \brief Get a description of what a distances structure contains.
*
* For instance "NUMALatency" for hardware-provided NUMA distances (ACPI SLIT),
* or NULL if unknown.
* or \c NULL if unknown.
*
* \return the constant string with the name of the distance structure.
*
* \note The returned name should not be freed by the caller,
* it belongs to the hwloc library.
*/
HWLOC_DECLSPEC const char *
hwloc_distances_get_name(hwloc_topology_t topology, struct hwloc_distances_s *distances);
@ -215,17 +227,24 @@ enum hwloc_distances_transform_e {
HWLOC_DISTANCES_TRANSFORM_LINKS = 1,
/** \brief Merge switches with multiple ports into a single object.
* This currently only applies to NVSwitches where GPUs seem connected to different
* separate switch ports in the NVLinkBandwidth matrix. This transformation will
* replace all of them with the same port connected to all GPUs.
* Other ports are removed by applying ::HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL internally.
*
* This currently only applies to NVSwitches where GPUs seem connected
* to different switch ports. Switch ports must be objects with subtype
* "NVSwitch" as in the NVLinkBandwidth matrix.
*
* This transformation will replace all ports with only the first one,
* now connected to all GPUs. Other ports are removed by applying
* ::HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL internally.
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS = 2,
/** \brief Apply a transitive closure to the matrix to connect objects across switches.
* This currently only applies to GPUs and NVSwitches in the NVLinkBandwidth matrix.
* All pairs of GPUs will be reported as directly connected.
*
* All pairs of GPUs will be reported as directly connected instead of GPUs
* being connected only to switches.
*
* Switch ports must be objects with subtype "NVSwitch" as in the NVLinkBandwidth matrix.
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE = 3
@ -252,6 +271,8 @@ enum hwloc_distances_transform_e {
*
* \p flags must be \c 0 for now.
*
* \return 0 on success, -1 on error for instance if flags are invalid.
*
* \note Objects in distances array \p objs may be directly modified
* in place without using hwloc_distances_transform().
* One may use hwloc_get_obj_with_same_locality() to easily convert
@ -272,6 +293,7 @@ HWLOC_DECLSPEC int hwloc_distances_transform(hwloc_topology_t topology, struct h
/** \brief Find the index of an object in a distances structure.
*
* \return the index of the object in the distances structure if any.
* \return -1 if object \p obj is not involved in structure \p distances.
*/
static __hwloc_inline int
@ -289,6 +311,7 @@ hwloc_distances_obj_index(struct hwloc_distances_s *distances, hwloc_obj_t obj)
* The distance from \p obj1 to \p obj2 is stored in the value pointed by
* \p value1to2 and reciprocally.
*
* \return 0 on success.
* \return -1 if object \p obj1 or \p obj2 is not involved in structure \p distances.
*/
static __hwloc_inline int
@ -340,6 +363,8 @@ typedef void * hwloc_distances_add_handle_t;
* Otherwise, it will be copied internally and may later be freed by the caller.
*
* \p kind specifies the kind of distance as a OR'ed set of ::hwloc_distances_kind_e.
* Only one kind of meaning and one kind of provenance may be given if appropriate
* (e.g. ::HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH and ::HWLOC_DISTANCES_KIND_FROM_USER).
* Kind ::HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES will be automatically set
* according to objects having different types in hwloc_distances_add_values().
*
@ -374,8 +399,8 @@ hwloc_distances_add_create(hwloc_topology_t topology,
*
* \p flags must be \c 0 for now.
*
* \return \c 0 on success.
* \return \c -1 on error.
* \return 0 on success.
* \return -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_add_values(hwloc_topology_t topology,
hwloc_distances_add_handle_t handle,
@ -386,7 +411,8 @@ HWLOC_DECLSPEC int hwloc_distances_add_values(hwloc_topology_t topology,
/** \brief Flags for adding a new distances to a topology. */
enum hwloc_distances_add_flag_e {
/** \brief Try to group objects based on the newly provided distance information.
* This is ignored for distances between objects of different types.
* Grouping is only performed when the distances structure contains latencies,
* and when all objects are of the same type.
* \hideinitializer
*/
HWLOC_DISTANCES_ADD_FLAG_GROUP = (1UL<<0),
@ -411,8 +437,8 @@ enum hwloc_distances_add_flag_e {
*
* On error, the temporary distances structure and its content are destroyed.
*
* \return \c 0 on success.
* \return \c -1 on error.
* \return 0 on success.
* \return -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_add_commit(hwloc_topology_t topology,
hwloc_distances_add_handle_t handle,
@ -433,18 +459,24 @@ HWLOC_DECLSPEC int hwloc_distances_add_commit(hwloc_topology_t topology,
*
* If these distances were used to group objects, these additional
* Group objects are not removed from the topology.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_remove(hwloc_topology_t topology);
/** \brief Remove distance matrices for objects at a specific depth in the topology.
*
* Identical to hwloc_distances_remove() but only applies to one level of the topology.
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_remove_by_depth(hwloc_topology_t topology, int depth);
/** \brief Remove distance matrices for objects of a specific type in the topology.
*
* Identical to hwloc_distances_remove() but only applies to one level of the topology.
*
* \return 0 on success, -1 on error.
*/
static __hwloc_inline int
hwloc_distances_remove_by_type(hwloc_topology_t topology, hwloc_obj_type_t type)
@ -458,6 +490,8 @@ hwloc_distances_remove_by_type(hwloc_topology_t topology, hwloc_obj_type_t type)
/** \brief Release and remove the given distance matrix from the topology.
*
* This function includes a call to hwloc_distances_release().
*
* \return 0 on success, -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_release_remove(hwloc_topology_t topology, struct hwloc_distances_s *distances);


@ -55,7 +55,7 @@ enum hwloc_topology_export_xml_flags_e {
*
* \p flags is a OR'ed set of ::hwloc_topology_export_xml_flags_e.
*
* \return -1 if a failure occured.
* \return 0 on success, or -1 on error.
*
* \note See also hwloc_topology_set_userdata_export_callback()
* for exporting application-specific object userdata.
@ -91,7 +91,7 @@ HWLOC_DECLSPEC int hwloc_topology_export_xml(hwloc_topology_t topology, const ch
*
* \p flags is a OR'ed set of ::hwloc_topology_export_xml_flags_e.
*
* \return -1 if a failure occured.
* \return 0 on success, or -1 on error.
*
* \note See also hwloc_topology_set_userdata_export_callback()
* for exporting application-specific object userdata.
@ -145,13 +145,15 @@ HWLOC_DECLSPEC void hwloc_topology_set_userdata_export_callback(hwloc_topology_t
* that were given to the export callback.
*
* Only printable characters may be exported to XML string attributes.
* If a non-printable character is passed in \p name or \p buffer,
* the function returns -1 with errno set to EINVAL.
*
* If exporting binary data, the application should first encode into
* printable characters only (or use hwloc_export_obj_userdata_base64()).
* It should also take care of portability issues if the export may
* be reimported on a different architecture.
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if a non-printable character is
* passed in \p name or \p buffer.
*/
HWLOC_DECLSPEC int hwloc_export_obj_userdata(void *reserved, hwloc_topology_t topology, hwloc_obj_t obj, const char *name, const void *buffer, size_t length);
@ -165,8 +167,14 @@ HWLOC_DECLSPEC int hwloc_export_obj_userdata(void *reserved, hwloc_topology_t to
* This function may only be called from within the export() callback passed
* to hwloc_topology_set_userdata_export_callback().
*
* The name must be made of printable characters for export to XML string attributes.
*
* The function does not take care of portability issues if the export
* may be reimported on a different architecture.
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if a non-printable character is
* passed in \p name.
*/
HWLOC_DECLSPEC int hwloc_export_obj_userdata_base64(void *reserved, hwloc_topology_t topology, hwloc_obj_t obj, const char *name, const void *buffer, size_t length);


@ -1,6 +1,6 @@
/*
* Copyright © 2012 Blue Brain Project, EPFL. All rights reserved.
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright © 2012-2023 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -102,7 +102,8 @@ hwloc_gl_get_display_osdev_by_name(hwloc_topology_t topology,
* Retrieves the OpenGL display port (server) in \p port and device (screen)
* in \p screen that correspond to the given hwloc OS device object.
*
* \return \c -1 if none could be found.
* \return 0 on success.
* \return -1 if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2023 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -52,6 +52,8 @@ extern "C" {
* that takes a cpu_set_t as input parameter.
*
* \p schedsetsize should be sizeof(cpu_set_t) unless \p schedset was dynamically allocated with CPU_ALLOC
*
* \return 0.
*/
static __hwloc_inline int
hwloc_cpuset_to_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t hwlocset,
@ -80,6 +82,9 @@ hwloc_cpuset_to_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribute
* that takes a cpu_set_t as input parameter.
*
* \p schedsetsize should be sizeof(cpu_set_t) unless \p schedset was dynamically allocated with CPU_ALLOC
*
* \return 0 on success.
* \return -1 with errno set to \c ENOMEM if some internal reallocation failed.
*/
static __hwloc_inline int
hwloc_cpuset_from_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_cpuset_t hwlocset,
@ -95,7 +100,8 @@ hwloc_cpuset_from_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribu
cpu = 0;
while (count) {
if (CPU_ISSET_S(cpu, schedsetsize, schedset)) {
hwloc_bitmap_set(hwlocset, cpu);
if (hwloc_bitmap_set(hwlocset, cpu) < 0)
return -1;
count--;
}
cpu++;
@ -107,7 +113,8 @@ hwloc_cpuset_from_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribu
assert(schedsetsize == sizeof(cpu_set_t));
for(cpu=0; cpu<CPU_SETSIZE; cpu++)
if (CPU_ISSET(cpu, schedset))
hwloc_bitmap_set(hwlocset, cpu);
if (hwloc_bitmap_set(hwlocset, cpu) < 0)
return -1;
#endif /* !CPU_ZERO_S */
return 0;
}
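A sketch of the reverse direction, importing the calling thread's affinity
(Linux/glibc assumed; error checks omitted):
cpu_set_t schedset;
hwloc_cpuset_t set = hwloc_bitmap_alloc();
sched_getaffinity(0, sizeof(schedset), &schedset);
hwloc_cpuset_from_glibc_sched_affinity(topology, set, &schedset, sizeof(schedset));
/* set now mirrors the thread's scheduler affinity */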

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright © 2021 Inria. All rights reserved.
* Copyright © 2021-2024 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -32,7 +32,8 @@ extern "C" {
/** \defgroup hwlocality_levelzero Interoperability with the oneAPI Level Zero interface.
*
* This interface offers ways to retrieve topology information about
* devices managed by the Level Zero API.
* devices managed by the Level Zero API, both for main Core devices (ZE API)
* and the Sysman devices (ZES API).
*
* @{
*/
@ -44,8 +45,7 @@ extern "C" {
* the Level Zero device \p device.
*
* Topology \p topology and device \p device must match the local machine.
* The Level Zero must have been initialized with Sysman enabled
* (ZES_ENABLE_SYSMAN=1 in the environment).
* The Level Zero library must have been initialized with zeInit().
* I/O devices detection and the Level Zero component are not needed in the
* topology.
*
@ -55,6 +55,12 @@ extern "C" {
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*
* \note zeDevicePciGetPropertiesExt() must be supported, or the entire machine
* locality will be returned.
*/
static __hwloc_inline int
hwloc_levelzero_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
@ -64,8 +70,7 @@ hwloc_levelzero_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_un
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
#define HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX 128
char path[HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX];
zes_pci_properties_t pci;
zes_device_handle_t sdevice = device;
ze_pci_ext_properties_t pci;
ze_result_t res;
if (!hwloc_topology_is_thissystem(topology)) {
@ -73,7 +78,65 @@ hwloc_levelzero_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_un
return -1;
}
res = zesDevicePciGetProperties(sdevice, &pci);
pci.stype = ZE_STRUCTURE_TYPE_PCI_EXT_PROPERTIES;
pci.pNext = NULL;
res = zeDevicePciGetPropertiesExt(device, &pci);
if (res != ZE_RESULT_SUCCESS) {
errno = EINVAL;
return -1;
}
sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/local_cpus",
pci.address.domain, pci.address.bus, pci.address.device, pci.address.function);
if (hwloc_linux_read_path_as_cpumask(path, set) < 0
|| hwloc_bitmap_iszero(set))
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#else
/* Non-Linux systems simply get a full cpuset */
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#endif
return 0;
}
/** \brief Get the CPU set of logical processors that are physically
* close to the Level Zero Sysman device \p device
*
* Store in \p set the CPU-set describing the locality of
* the Level Zero device \p device.
*
* Topology \p topology and device \p device must match the local machine.
* The Level Zero library must have been initialized with Sysman enabled
* with zesInit().
* I/O devices detection and the Level Zero component are not needed in the
* topology.
*
* The function only returns the locality of the device.
* If more information about the device is needed, OS objects should
* be used instead, see hwloc_levelzero_get_device_osdev().
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_levelzero_get_sysman_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
zes_device_handle_t device, hwloc_cpuset_t set)
{
#ifdef HWLOC_LINUX_SYS
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
#define HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX 128
char path[HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX];
zes_pci_properties_t pci;
ze_result_t res;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return -1;
}
res = zesDevicePciGetProperties(device, &pci);
if (res != ZE_RESULT_SUCCESS) {
errno = EINVAL;
return -1;
@ -98,17 +161,90 @@ hwloc_levelzero_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_un
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p device must match the local machine.
* The Level Zero library must have been initialized with zeInit().
* I/O devices detection and the Level Zero component must be enabled in the
* topology. If not, the locality of the object may still be found using
* hwloc_levelzero_get_device_cpuset().
*
* \note If the input ZE device is actually a subdevice, then its parent
* (root device) is actually translated, i.e. the main hwloc OS device
* is returned instead of one of its children.
*
* \note The corresponding hwloc PCI device may be found by looking
* at the result parent pointer (unless PCI devices are filtered out).
*
* \note zeDevicePciGetPropertiesExt() must be supported.
*/
static __hwloc_inline hwloc_obj_t
hwloc_levelzero_get_device_osdev(hwloc_topology_t topology, ze_device_handle_t device)
{
ze_pci_ext_properties_t pci;
ze_result_t res;
hwloc_obj_t osdev;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return NULL;
}
pci.stype = ZE_STRUCTURE_TYPE_PCI_EXT_PROPERTIES;
pci.pNext = NULL;
res = zeDevicePciGetPropertiesExt(device, &pci);
if (res != ZE_RESULT_SUCCESS) {
errno = EINVAL;
return NULL;
}
osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
hwloc_obj_t pcidev;
if (strncmp(osdev->name, "ze", 2))
continue;
pcidev = osdev;
while (pcidev && pcidev->type != HWLOC_OBJ_PCI_DEVICE)
pcidev = pcidev->parent;
if (!pcidev)
continue;
if (pcidev
&& pcidev->type == HWLOC_OBJ_PCI_DEVICE
&& pcidev->attr->pcidev.domain == pci.address.domain
&& pcidev->attr->pcidev.bus == pci.address.bus
&& pcidev->attr->pcidev.dev == pci.address.device
&& pcidev->attr->pcidev.func == pci.address.function)
return osdev;
/* FIXME: when we'll have serialnumber, try it in case PCI is filtered-out */
}
return NULL;
}
/** \brief Get the hwloc OS device object corresponding to Level Zero Sysman device
* \p device.
*
* \return The hwloc OS device object that describes the given Level Zero device \p device.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p device must match the local machine.
* The Level Zero library must have been initialized with Sysman enabled
* with zesInit().
* I/O devices detection and the Level Zero component must be enabled in the
* topology. If not, the locality of the object may still be found using
* hwloc_levelzero_get_device_cpuset().
*
* \note If the input ZES device is actually a subdevice, then its parent
* (root device) is actually translated, i.e. the main hwloc OS device
* is returned instead of one of its children.
*
* \note The corresponding hwloc PCI device may be found by looking
* at the result parent pointer (unless PCI devices are filtered out).
*/
static __hwloc_inline hwloc_obj_t
hwloc_levelzero_get_device_osdev(hwloc_topology_t topology, ze_device_handle_t device)
hwloc_levelzero_get_sysman_device_osdev(hwloc_topology_t topology, zes_device_handle_t device)
{
zes_device_handle_t sdevice = device;
zes_pci_properties_t pci;
ze_result_t res;
hwloc_obj_t osdev;
@ -118,20 +254,25 @@ hwloc_levelzero_get_device_osdev(hwloc_topology_t topology, ze_device_handle_t d
return NULL;
}
res = zesDevicePciGetProperties(sdevice, &pci);
res = zesDevicePciGetProperties(device, &pci);
if (res != ZE_RESULT_SUCCESS) {
/* L0 was likely initialized without sysman, don't bother */
errno = EINVAL;
return NULL;
}
osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
hwloc_obj_t pcidev = osdev->parent;
hwloc_obj_t pcidev;
if (strncmp(osdev->name, "ze", 2))
continue;
pcidev = osdev;
while (pcidev && pcidev->type != HWLOC_OBJ_PCI_DEVICE)
pcidev = pcidev->parent;
if (!pcidev)
continue;
if (pcidev
&& pcidev->type == HWLOC_OBJ_PCI_DEVICE
&& pcidev->attr->pcidev.domain == pci.address.domain


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2017 Inria. All rights reserved.
* Copyright © 2009-2023 Inria. All rights reserved.
* Copyright © 2009-2010, 2012 Université Bordeaux
* See COPYING in top-level directory.
*/
@ -50,6 +50,8 @@ extern "C" {
* This function may be used before calling set_mempolicy, mbind, migrate_pages
* or any other function that takes an array of unsigned long and a maximal
* node number as input parameter.
*
* \return 0.
*/
static __hwloc_inline int
hwloc_cpuset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset,
@ -84,6 +86,8 @@ hwloc_cpuset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_cpus
* This function may be used before calling set_mempolicy, mbind, migrate_pages
* or any other function that takes an array of unsigned long and a maximal
* node number as input parameter.
*
* \return 0.
*/
static __hwloc_inline int
hwloc_nodeset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset,
@ -119,6 +123,9 @@ hwloc_nodeset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_nod
* This function may be used after calling get_mempolicy or any other function
* that takes an array of unsigned long as output parameter (and possibly
* a maximal node number as input parameter).
*
* \return 0 on success.
* \return -1 on error, for instance if failing an internal reallocation.
*/
static __hwloc_inline int
hwloc_cpuset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_cpuset_t cpuset,
@ -130,7 +137,8 @@ hwloc_cpuset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_cpuset_t
while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL)
if (node->os_index < maxnode
&& (mask[node->os_index/sizeof(*mask)/8] & (1UL << (node->os_index % (sizeof(*mask)*8)))))
hwloc_bitmap_or(cpuset, cpuset, node->cpuset);
if (hwloc_bitmap_or(cpuset, cpuset, node->cpuset) < 0)
return -1;
return 0;
}
@ -142,6 +150,9 @@ hwloc_cpuset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_cpuset_t
* This function may be used after calling get_mempolicy or any other function
* that takes an array of unsigned long as output parameter (and possibly
* a maximal node number as input parameter).
*
* \return 0 on success.
* \return -1 with errno set to \c ENOMEM if some internal reallocation failed.
*/
static __hwloc_inline int
hwloc_nodeset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_nodeset_t nodeset,
@ -153,7 +164,8 @@ hwloc_nodeset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_nodeset
while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL)
if (node->os_index < maxnode
&& (mask[node->os_index/sizeof(*mask)/8] & (1UL << (node->os_index % (sizeof(*mask)*8)))))
hwloc_bitmap_set(nodeset, node->os_index);
if (hwloc_bitmap_set(nodeset, node->os_index) < 0)
return -1;
return 0;
}
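A sketch combining the ulongs conversion with set_mempolicy() from <numaif.h>,
binding the current task's memory to the nodes local to some cpuset (error
checks omitted):
unsigned long mask[64] = { 0 };
unsigned long maxnode = sizeof(mask) * 8; /* size of mask in bits */
hwloc_cpuset_to_linux_libnuma_ulongs(topology, cpuset, mask, &maxnode);
set_mempolicy(MPOL_BIND, mask, maxnode);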
@ -184,7 +196,7 @@ hwloc_nodeset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_nodeset
* This function may be used before calling many numa_ functions
* that use a struct bitmask as an input parameter.
*
* \return newly allocated struct bitmask.
* \return newly allocated struct bitmask, or \c NULL on error.
*/
static __hwloc_inline struct bitmask *
hwloc_cpuset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset) __hwloc_attribute_malloc;
@ -209,7 +221,7 @@ hwloc_cpuset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_cpu
* This function may be used before calling many numa_ functions
* that use a struct bitmask as an input parameter.
*
* \return newly allocated struct bitmask.
* \return newly allocated struct bitmask, or \c NULL on error.
*/
static __hwloc_inline struct bitmask *
hwloc_nodeset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset) __hwloc_attribute_malloc;
@ -231,6 +243,9 @@ hwloc_nodeset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_no
*
* This function may be used after calling many numa_ functions
* that use a struct bitmask as an output parameter.
*
* \return 0 on success.
* \return -1 with errno set to \c ENOMEM if some internal reallocation failed.
*/
static __hwloc_inline int
hwloc_cpuset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_cpuset_t cpuset,
@ -241,7 +256,8 @@ hwloc_cpuset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_cpuset_
hwloc_bitmap_zero(cpuset);
while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL)
if (numa_bitmask_isbitset(bitmask, node->os_index))
hwloc_bitmap_or(cpuset, cpuset, node->cpuset);
if (hwloc_bitmap_or(cpuset, cpuset, node->cpuset) < 0)
return -1;
return 0;
}
@ -249,6 +265,9 @@ hwloc_cpuset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_cpuset_
*
* This function may be used after calling many numa_ functions
* that use a struct bitmask as an output parameter.
*
* \return 0 on success.
* \return -1 with errno set to \c ENOMEM if some internal reallocation failed.
*/
static __hwloc_inline int
hwloc_nodeset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_nodeset_t nodeset,
@ -259,7 +278,8 @@ hwloc_nodeset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_nodese
hwloc_bitmap_zero(nodeset);
while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL)
if (numa_bitmask_isbitset(bitmask, node->os_index))
hwloc_bitmap_set(nodeset, node->os_index);
if (hwloc_bitmap_set(nodeset, node->os_index) < 0)
return -1;
return 0;
}
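The bitmask variants pair naturally with libnuma calls, as in this sketch
(nodeset assumed valid):
struct bitmask *bm = hwloc_nodeset_to_linux_libnuma_bitmask(topology, nodeset);
if (bm) {
  numa_set_membind(bm);  /* bind future allocations to these nodes */
  numa_bitmask_free(bm);
}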


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2023 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* See COPYING in top-level directory.
*/
@ -38,6 +38,8 @@ extern "C" {
* The behavior is exactly the same as the Linux sched_setaffinity system call,
* but uses a hwloc cpuset.
*
* \return 0 on success, -1 on error.
*
* \note This is equivalent to calling hwloc_set_proc_cpubind() with
* HWLOC_CPUBIND_THREAD as flags.
*/
@ -52,6 +54,8 @@ HWLOC_DECLSPEC int hwloc_linux_set_tid_cpubind(hwloc_topology_t topology, pid_t
* The behavior is exactly the same as the Linux sched_getaffinity system call,
* but uses a hwloc cpuset.
*
* \return 0 on success, -1 on error.
*
* \note This is equivalent to calling hwloc_get_proc_cpubind() with
* ::HWLOC_CPUBIND_THREAD as flags.
*/
@ -62,6 +66,8 @@ HWLOC_DECLSPEC int hwloc_linux_get_tid_cpubind(hwloc_topology_t topology, pid_t
* The CPU-set \p set (previously allocated by the caller)
* is filled with the PU which the thread last ran on.
*
* \return 0 on success, -1 on error.
*
* \note This is equivalent to calling hwloc_get_proc_last_cpu_location() with
* ::HWLOC_CPUBIND_THREAD as flags.
*/
@ -72,6 +78,8 @@ HWLOC_DECLSPEC int hwloc_linux_get_tid_last_cpu_location(hwloc_topology_t topolo
* Might be used when reading CPU set from sysfs attributes such as topology
* and caches for processors, or local_cpus for devices.
*
* \return 0 on success, -1 on error.
*
* \note This function ignores the HWLOC_FSROOT environment variable.
*/
HWLOC_DECLSPEC int hwloc_linux_read_path_as_cpumask(const char *path, hwloc_bitmap_t set);
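A sketch using an illustrative sysfs path (the actual PCI address depends on
the machine):
hwloc_bitmap_t set = hwloc_bitmap_alloc();
if (hwloc_linux_read_path_as_cpumask("/sys/bus/pci/devices/0000:03:00.0/local_cpus", set) == 0) {
  /* set now contains the CPUs local to that device */
}
hwloc_bitmap_free(set);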


@ -1,5 +1,5 @@
/*
* Copyright © 2019-2022 Inria. All rights reserved.
* Copyright © 2019-2025 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -54,6 +54,15 @@ extern "C" {
* Attribute values for these nodes, if any, may then be obtained with
* hwloc_memattr_get_value() and manually compared with the desired criteria.
*
* Memory attributes are also used internally to build Memory Tiers which provide
* an easy way to distinguish NUMA nodes of different kinds, as explained
* in \ref heteromem.
*
* Besides tiers, hwloc defines a set of "default" nodes from which normal
* memory allocations should be made (see hwloc_topology_get_default_nodeset()).
* This is also useful for dividing the machine into a set of non-overlapping
* NUMA domains, for instance for binding tasks per domain.
*
* \sa An example is available in doc/examples/memory-attributes.c in the source tree.
*
* \note The API also supports specific objects as initiator,
@ -65,7 +74,10 @@ extern "C" {
* @{
*/
/** \brief Memory node attributes. */
/** \brief Predefined memory attribute IDs.
* See ::hwloc_memattr_id_t for the generic definition of IDs
* for predefined or custom attributes.
*/
enum hwloc_memattr_id_e {
/** \brief
* The \"Capacity\" is returned in bytes (local_memory attribute in objects).
@ -74,6 +86,8 @@ enum hwloc_memattr_id_e {
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
*
* Capacity values may not be modified using hwloc_memattr_set_value().
* \hideinitializer
*/
HWLOC_MEMATTR_ID_CAPACITY = 0,
@ -89,6 +103,8 @@ enum hwloc_memattr_id_e {
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
* Locality values may not be modified using hwloc_memattr_set_value().
* \hideinitializer
*/
HWLOC_MEMATTR_ID_LOCALITY = 1,
@ -169,15 +185,26 @@ enum hwloc_memattr_id_e {
/* TODO persistence? */
HWLOC_MEMATTR_ID_MAX /**< \private Sentinel value */
HWLOC_MEMATTR_ID_MAX /**< \private
* Sentinel value for predefined attributes.
* Dynamically registered custom attributes start here.
*/
};
/** \brief A memory attribute identifier.
* May be either one of ::hwloc_memattr_id_e or a new id returned by hwloc_memattr_register().
*
* hwloc predefines some commonly-used attributes in ::hwloc_memattr_id_e.
* One may then dynamically register custom ones with hwloc_memattr_register(),
* they will be assigned IDs immediately after the predefined ones.
* See \ref hwlocality_memattrs_manage for more information about
* existing attribute IDs.
*/
typedef unsigned hwloc_memattr_id_t;
/** \brief Return the identifier of the memory attribute with the given name.
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if no such attribute exists.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_by_name(hwloc_topology_t topology,
@ -223,6 +250,16 @@ enum hwloc_local_numanode_flag_e {
*/
HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY = (1UL<<1),
/** \brief Select NUMA nodes whose locality intersects the given cpuset.
* This includes larger and smaller localities as well as localities
* that are partially included.
* For instance, if the locality is one core of both packages, a NUMA node
* local to one package is neither larger nor smaller than this locality,
* but it intersects it.
* \hideinitializer
*/
HWLOC_LOCAL_NUMANODE_FLAG_INTERSECT_LOCALITY = (1UL<<3),
/** \brief Select all NUMA nodes in the topology.
* The initiator \p initiator is ignored.
* \hideinitializer
@ -247,6 +284,8 @@ enum hwloc_local_numanode_flag_e {
* or the number of nodes that would have been stored if there were
* enough room.
*
* \return 0 on success or -1 on error.
*
* \note Some of these NUMA nodes may not have any memory attribute
* values and hence not be reported as actual targets in other functions.
*
@ -266,7 +305,57 @@ hwloc_get_local_numanode_objs(hwloc_topology_t topology,
hwloc_obj_t *nodes,
unsigned long flags);
/** \brief Return the set of default NUMA nodes
*
* In machines with heterogeneous memory, some NUMA nodes are considered
* the default ones, i.e. where basic allocations should be made from.
* These are usually DRAM nodes.
*
* Other nodes may be reserved for specific use (I/O device memory, e.g. GPU memory),
* small but high performance (HBM), large but slow memory (NVM), etc.
* Buffers should usually not be allocated from there unless explicitly required.
*
* This function fills \p nodeset with the bits of NUMA nodes considered default.
*
* It is guaranteed that these nodes have non-intersecting CPU sets,
* i.e. cores may not have multiple local NUMA nodes anymore.
* Hence this may be used to iterate over the platform divided into separate
* NUMA localities, for instance for binding one task per NUMA domain.
*
* Any core that had some local NUMA node(s) in the initial topology should
* still have one in the default nodeset. Corner cases where this would be
* wrong are asymmetric platforms with missing DRAM nodes, or topologies
* that were already restricted to fewer NUMA nodes.
*
* The returned nodeset may be passed to hwloc_topology_restrict() with
* ::HWLOC_RESTRICT_FLAG_BYNODESET to remove all non-default nodes from
* the topology. The resulting topology will be easier to use when iterating
* over (now homogeneous) NUMA nodes.
*
* The heuristics for finding default nodes rely on memory tiers and subtypes
* (see \ref heteromem) as well as the assumption that hardware vendors list
* default nodes first in hardware tables.
*
* \p flags must be \c 0 for now.
*
* \return 0 on success.
* \return -1 on error.
*
* \note The returned nodeset usually contains all nodes from a single memory
* tier, likely the DRAM one.
*
* \note The returned nodeset is included in the list of available nodes
* returned by hwloc_topology_get_topology_nodeset(). It is strictly smaller
* if the machine has heterogeneous memory.
*
* \note The heuristics may return a suboptimal set of nodes if hwloc could
* not guess memory types and/or if some default nodes were removed earlier
* from the topology (e.g. with hwloc_topology_restrict()).
*/
HWLOC_DECLSPEC int
hwloc_topology_get_default_nodeset(hwloc_topology_t topology,
hwloc_nodeset_t nodeset,
unsigned long flags);
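A sketch of the restriction use case described above (error checks omitted):
hwloc_nodeset_t def = hwloc_bitmap_alloc();
if (!hwloc_topology_get_default_nodeset(topology, def, 0))
  hwloc_topology_restrict(topology, def, HWLOC_RESTRICT_FLAG_BYNODESET);
hwloc_bitmap_free(def);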
/** \brief Return an attribute value for a specific target NUMA node.
*
@ -274,8 +363,16 @@ hwloc_get_local_numanode_objs(hwloc_topology_t topology,
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* \p target_node cannot be \c NULL. If \p attribute is ::HWLOC_MEMATTR_ID_CAPACITY,
* \p target_node must be a NUMA node. If it is ::HWLOC_MEMATTR_ID_LOCALITY,
* \p target_node must have a CPU set.
*
* \p flags must be \c 0 for now.
*
* \return 0 on success.
* \return -1 on error, for instance with errno set to \c EINVAL if flags
* are invalid or no such attribute exists.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
@ -307,7 +404,10 @@ hwloc_memattr_get_value(hwloc_topology_t topology,
*
* \p flags must be \c 0 for now.
*
* If there are no matching targets, \c -1 is returned with \p errno set to \c ENOENT;
* \return 0 on success.
* \return -1 with errno set to \c ENOENT if there are no matching targets.
* \return -1 with errno set to \c EINVAL if flags are invalid,
* or no such attribute exists.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
@ -323,10 +423,6 @@ hwloc_memattr_get_best_target(hwloc_topology_t topology,
hwloc_obj_t *best_target, hwloc_uint64_t *value);
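A hedged sketch of the best-target query above with the predefined bandwidth attribute; the helper name is made up and error handling is minimal:

static hwloc_obj_t best_bandwidth_node(hwloc_topology_t topology,
                                       hwloc_cpuset_t cpuset)
{
    struct hwloc_location initiator;
    hwloc_obj_t best = NULL;
    hwloc_uint64_t bw;

    initiator.type = HWLOC_LOCATION_TYPE_CPUSET;
    initiator.location.cpuset = cpuset;
    if (hwloc_memattr_get_best_target(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
                                      &initiator, 0, &best, &bw) < 0)
        return NULL; /* e.g. ENOENT if no target has bandwidth values */
    return best;
}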
/** \brief Return the best initiator for the given attribute and target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* \c -1 is returned and \p errno is set to \c EINVAL.
*
* If \p value is non \c NULL, the corresponding value is returned there.
*
@ -340,96 +436,22 @@ hwloc_memattr_get_best_target(hwloc_topology_t topology,
* The returned initiator should not be modified or freed,
* it belongs to the topology.
*
* \p target_node cannot be \c NULL.
*
* \p flags must be \c 0 for now.
*
* If there are no matching initiators, \c -1 is returned with \p errno set to \c ENOENT;
* \return 0 on success.
* \return -1 with errno set to \c ENOENT if there are no matching initiators.
* \return -1 with errno set to \c EINVAL if the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR).
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_best_initiator(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target,
hwloc_obj_t target_node,
unsigned long flags,
struct hwloc_location *best_initiator, hwloc_uint64_t *value);
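And the symmetric query, sketched against the declaration just above (assuming <stdio.h>): which CPUs see the best bandwidth to a given NUMA node.

static int print_best_initiator(hwloc_topology_t topology, hwloc_obj_t node)
{
    struct hwloc_location best;
    hwloc_uint64_t bw;
    if (hwloc_memattr_get_best_initiator(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
                                         node, 0, &best, &bw) < 0)
        return -1; /* ENOENT, or EINVAL for initiator-less attributes */
    if (best.type == HWLOC_LOCATION_TYPE_CPUSET) {
        char buf[256];
        /* best.location.cpuset belongs to the topology; do not free it */
        hwloc_bitmap_snprintf(buf, sizeof buf, best.location.cpuset);
        printf("best initiator cpuset %s, bandwidth %llu\n",
               buf, (unsigned long long) bw);
    }
    return 0;
}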
/** @} */
/** \defgroup hwlocality_memattrs_manage Managing memory attributes
* @{
*/
/** \brief Return the name of a memory attribute.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_name(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
const char **name);
/** \brief Return the flags of the given attribute.
*
* Flags are an OR'ed set of ::hwloc_memattr_flag_e.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_flags(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
unsigned long *flags);
/** \brief Memory attribute flags.
* Given to hwloc_memattr_register() and returned by hwloc_memattr_get_flags().
*/
enum hwloc_memattr_flag_e {
/** \brief The best nodes for this memory attribute are those with the higher values.
* For instance Bandwidth.
*/
HWLOC_MEMATTR_FLAG_HIGHER_FIRST = (1UL<<0),
/** \brief The best nodes for this memory attribute are those with the lower values.
* For instance Latency.
*/
HWLOC_MEMATTR_FLAG_LOWER_FIRST = (1UL<<1),
/** \brief The value returned for this memory attribute depends on the given initiator.
* For instance Bandwidth and Latency, but not Capacity.
*/
HWLOC_MEMATTR_FLAG_NEED_INITIATOR = (1UL<<2)
};
/** \brief Register a new memory attribute.
*
* Add a specific memory attribute that is not defined in ::hwloc_memattr_id_e.
* Flags are an OR'ed set of ::hwloc_memattr_flag_e. It must contain at least
* one of ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST or ::HWLOC_MEMATTR_FLAG_LOWER_FIRST.
*/
HWLOC_DECLSPEC int
hwloc_memattr_register(hwloc_topology_t topology,
const char *name,
unsigned long flags,
hwloc_memattr_id_t *id);
/** \brief Set an attribute value for a specific target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* The initiator will be copied into the topology,
* the caller should free anything allocated to store the initiator,
* for instance the cpuset.
*
* \p flags must be \c 0 for now.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*/
HWLOC_DECLSPEC int
hwloc_memattr_set_value(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target_node,
struct hwloc_location *initiator,
unsigned long flags,
hwloc_uint64_t value);
/** \brief Return the target NUMA nodes that have some values for a given attribute.
*
* Return targets for the given attribute in the \p targets array
@ -460,6 +482,8 @@ hwloc_memattr_set_value(hwloc_topology_t topology,
* NUMA nodes with hwloc_get_local_numanode_objs() and then look at their attribute
* values.
*
* \return 0 on success or -1 on error.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
@ -491,12 +515,16 @@ hwloc_memattr_get_targets(hwloc_topology_t topology,
* The returned initiators should not be modified or freed,
* they belong to the topology.
*
* \p target_node cannot be \c NULL.
*
* \p flags must be \c 0 for now.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* no initiator is returned.
*
* \return 0 on success or -1 on error.
*
* \note This function is meant for tools and debugging (listing internal information)
* rather than for application queries. Applications should rather select useful
* NUMA nodes with hwloc_get_local_numanode_objs() and then look at their attribute
@ -508,6 +536,131 @@ hwloc_memattr_get_initiators(hwloc_topology_t topology,
hwloc_obj_t target_node,
unsigned long flags,
unsigned *nr, struct hwloc_location *initiators, hwloc_uint64_t *values);
/** @} */
/** \defgroup hwlocality_memattrs_manage Managing memory attributes
*
* Memory attributes are identified by an ID (::hwloc_memattr_id_t)
* and a name. hwloc_memattr_get_name() and hwloc_memattr_get_by_name()
* convert between them (or return an error if the attribute does not exist).
*
* The set of valid ::hwloc_memattr_id_t is a contiguous range starting at \c 0.
* It first contains predefined attributes, as listed
* in ::hwloc_memattr_id_e (from \c 0 to \c HWLOC_MEMATTR_ID_MAX-1).
* Custom attributes may then be dynamically registered with
* hwloc_memattr_register(); they receive the subsequent IDs
* (\c HWLOC_MEMATTR_ID_MAX for the first one, and so on).
*
* To iterate over all valid attributes
* (either predefined or dynamically registered custom ones),
* one may iterate over IDs starting from \c 0 until hwloc_memattr_get_name()
* or hwloc_memattr_get_flags() returns an error.
*
* The values for an existing attribute or for custom dynamically registered ones
* may be set or modified with hwloc_memattr_set_value().
*
* @{
*/
/** \brief Return the name of a memory attribute.
*
* The output pointer \p name cannot be \c NULL.
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if the attribute does not exist.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_name(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
const char **name);
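A sketch of the ID-range iteration described in the group comment above, assuming <stdio.h> is included:

static void print_all_memattrs(hwloc_topology_t topology)
{
    hwloc_memattr_id_t id;
    const char *name;
    /* IDs are contiguous from 0; the first failing ID ends the range. */
    for (id = 0; hwloc_memattr_get_name(topology, id, &name) == 0; id++)
        printf("attribute #%u: %s\n", (unsigned) id, name);
}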
/** \brief Return the flags of the given attribute.
*
* Flags are an OR'ed set of ::hwloc_memattr_flag_e.
*
* The output pointer \p flags cannot be \c NULL.
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if the attribute does not exist.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_flags(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
unsigned long *flags);
/** \brief Memory attribute flags.
* Given to hwloc_memattr_register() and returned by hwloc_memattr_get_flags().
*/
enum hwloc_memattr_flag_e {
/** \brief The best nodes for this memory attribute are those with the higher values.
* For instance Bandwidth.
*/
HWLOC_MEMATTR_FLAG_HIGHER_FIRST = (1UL<<0),
/** \brief The best nodes for this memory attribute are those with the lower values.
* For instance Latency.
*/
HWLOC_MEMATTR_FLAG_LOWER_FIRST = (1UL<<1),
/** \brief The value returned for this memory attribute depends on the given initiator.
* For instance Bandwidth and Latency, but not Capacity.
*/
HWLOC_MEMATTR_FLAG_NEED_INITIATOR = (1UL<<2)
};
/** \brief Register a new memory attribute.
*
* Add a new custom memory attribute.
* Flags are an OR'ed set of ::hwloc_memattr_flag_e. It must contain one of
* ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST or ::HWLOC_MEMATTR_FLAG_LOWER_FIRST but not both.
*
* The new attribute ID, returned in \p id, immediately follows the last
* existing attribute ID (either the ID of the last registered attribute,
* if any, or the ID of the last predefined attribute in ::hwloc_memattr_id_e).
*
* \return 0 on success.
* \return -1 with errno set to \c EINVAL if an invalid set of flags is given.
* \return -1 with errno set to \c EBUSY if another attribute already uses this name.
*/
HWLOC_DECLSPEC int
hwloc_memattr_register(hwloc_topology_t topology,
const char *name,
unsigned long flags,
hwloc_memattr_id_t *id);
/** \brief Set an attribute value for a specific target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* The initiator will be copied into the topology,
* the caller should free anything allocated to store the initiator,
* for instance the cpuset.
*
* \p target_node cannot be \c NULL.
*
* \p attribute cannot be ::HWLOC_MEMATTR_ID_CAPACITY or
* ::HWLOC_MEMATTR_ID_LOCALITY.
*
* \p flags must be \c 0 for now.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*
* \return 0 on success or -1 on error.
*/
HWLOC_DECLSPEC int
hwloc_memattr_set_value(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target_node,
struct hwloc_location *initiator,
unsigned long flags,
hwloc_uint64_t value);
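A sketch tying hwloc_memattr_register() and hwloc_memattr_set_value() together; the "HypotheticalScore" name and the value 42 are illustrative only:

static int add_custom_score(hwloc_topology_t topology)
{
    hwloc_memattr_id_t id;
    hwloc_obj_t node;
    if (hwloc_memattr_register(topology, "HypotheticalScore",
                               HWLOC_MEMATTR_FLAG_HIGHER_FIRST, &id) < 0)
        return -1; /* EINVAL (bad flags) or EBUSY (name already taken) */
    node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
    if (!node)
        return -1;
    /* NULL initiator is fine: HWLOC_MEMATTR_FLAG_NEED_INITIATOR was not set. */
    return hwloc_memattr_set_value(topology, id, node, NULL, 0, 42);
}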
/** @} */
#ifdef __cplusplus


@ -1,5 +1,5 @@
/*
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright © 2012-2023 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -51,6 +51,9 @@ extern "C" {
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_nvml_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,5 +1,5 @@
/*
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright © 2012-2023 Inria. All rights reserved.
* Copyright © 2013, 2018 Université Bordeaux. All rights reserved.
* See COPYING in top-level directory.
*/
@ -41,6 +41,15 @@ extern "C" {
*/
/* Copyright (c) 2008-2018 The Khronos Group Inc. */
/* needs "cl_khr_pci_bus_info" device extension, but not strictly required for clGetDeviceInfo() */
typedef struct {
cl_uint pci_domain;
cl_uint pci_bus;
cl_uint pci_device;
cl_uint pci_function;
} hwloc_cl_device_pci_bus_info_khr;
#define HWLOC_CL_DEVICE_PCI_BUS_INFO_KHR 0x410F
/* needs "cl_amd_device_attribute_query" device extension, but not strictly required for clGetDeviceInfo() */
#define HWLOC_CL_DEVICE_TOPOLOGY_AMD 0x4037
typedef union {
@ -69,15 +78,28 @@ typedef union {
/** \brief Return the domain, bus and device IDs of the OpenCL device \p device.
*
* Device \p device must match the local machine.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_opencl_get_device_pci_busid(cl_device_id device,
unsigned *domain, unsigned *bus, unsigned *dev, unsigned *func)
{
hwloc_cl_device_topology_amd amdtopo;
hwloc_cl_device_pci_bus_info_khr khrbusinfo;
cl_uint nvbus, nvslot, nvdomain;
cl_int clret;
clret = clGetDeviceInfo(device, HWLOC_CL_DEVICE_PCI_BUS_INFO_KHR, sizeof(khrbusinfo), &khrbusinfo, NULL);
if (CL_SUCCESS == clret) {
*domain = (unsigned) khrbusinfo.pci_domain;
*bus = (unsigned) khrbusinfo.pci_bus;
*dev = (unsigned) khrbusinfo.pci_device;
*func = (unsigned) khrbusinfo.pci_function;
return 0;
}
clret = clGetDeviceInfo(device, HWLOC_CL_DEVICE_TOPOLOGY_AMD, sizeof(amdtopo), &amdtopo, NULL);
if (CL_SUCCESS == clret
&& HWLOC_CL_DEVICE_TOPOLOGY_TYPE_PCIE_AMD == amdtopo.raw.type) {
@ -126,6 +148,9 @@ hwloc_opencl_get_device_pci_busid(cl_device_id device,
* This function is currently only implemented in a meaningful way for
* Linux with the AMD or NVIDIA OpenCL implementation; other systems will simply
* get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if the device could not be found.
*/
static __hwloc_inline int
hwloc_opencl_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2023 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -57,6 +57,9 @@ extern "C" {
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_ibv_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,5 +1,5 @@
/*
* Copyright © 2013-2022 Inria. All rights reserved.
* Copyright © 2013-2024 Inria. All rights reserved.
* Copyright © 2016 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*/
@ -26,7 +26,7 @@ struct hwloc_backend;
/** \defgroup hwlocality_disc_components Components and Plugins: Discovery components
/** \defgroup hwlocality_disc_components Components and Plugins: Discovery components and backends
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
@ -90,18 +90,6 @@ struct hwloc_disc_component {
struct hwloc_disc_component * next;
};
/** @} */
/** \defgroup hwlocality_disc_backends Components and Plugins: Discovery backends
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
/** \brief Discovery phase */
typedef enum hwloc_disc_phase_e {
/** \brief xml or synthetic, platform-specific components such as bgq.
@ -164,7 +152,7 @@ struct hwloc_disc_status {
*/
unsigned excluded_phases;
/** \brief OR'ed set of hwloc_disc_status_flag_e */
/** \brief OR'ed set of ::hwloc_disc_status_flag_e */
unsigned long flags;
};
@ -313,6 +301,64 @@ struct hwloc_component {
void * data;
};
/** \brief Make sure that plugins can lookup core symbols.
*
* This is a sanity check to avoid lazy-lookup failures when libhwloc
* is loaded within a plugin, and later tries to load its own plugins.
* This may fail (and abort the program) if libhwloc symbols are in a
* private namespace.
*
* \return 0 on success.
* \return -1 if the plugin cannot be successfully loaded. The caller
* plugin init() callback should return a negative error code as well.
*
* Plugins should call this function in their init() callback to avoid
* later crashes if lazy symbol resolution is used by the upper layer that
* loaded hwloc (e.g. OpenCL implementations using dlopen with RTLD_LAZY).
*
* \note The build system must define HWLOC_INSIDE_PLUGIN if and only if
* building the caller as a plugin.
*
* \note This function should remain inline so plugins can call it even
* when they cannot find libhwloc symbols.
*/
static __hwloc_inline int
hwloc_plugin_check_namespace(const char *pluginname __hwloc_attribute_unused, const char *symbol __hwloc_attribute_unused)
{
#ifdef HWLOC_INSIDE_PLUGIN
void *sym;
#ifdef HWLOC_HAVE_LTDL
lt_dlhandle handle = lt_dlopen(NULL);
#else
void *handle = dlopen(NULL, RTLD_NOW|RTLD_LOCAL);
#endif
if (!handle)
/* cannot check, assume things will work */
return 0;
#ifdef HWLOC_HAVE_LTDL
sym = lt_dlsym(handle, symbol);
lt_dlclose(handle);
#else
sym = dlsym(handle, symbol);
dlclose(handle);
#endif
if (!sym) {
static int verboseenv_checked = 0;
static int verboseenv_value = 0;
if (!verboseenv_checked) {
const char *verboseenv = getenv("HWLOC_PLUGINS_VERBOSE");
verboseenv_value = verboseenv ? atoi(verboseenv) : 0;
verboseenv_checked = 1;
}
if (verboseenv_value)
fprintf(stderr, "Plugin `%s' disabling itself because it cannot find the `%s' core symbol.\n",
pluginname, symbol);
return -1;
}
#endif /* HWLOC_INSIDE_PLUGIN */
return 0;
}
/** @} */
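A sketch of the init() pattern prescribed above; the plugin name and the checked core symbol are illustrative assumptions (hwloc's own plugins check a core symbol such as hwloc_backend_alloc):

static int my_component_init(unsigned long flags __hwloc_attribute_unused)
{
    /* Bail out early if core symbols cannot be resolved, as advised above. */
    if (hwloc_plugin_check_namespace("myplugin", "hwloc_backend_alloc") < 0)
        return -1;
    return 0;
}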
@ -422,64 +468,6 @@ HWLOC_DECLSPEC int hwloc_obj_add_children_sets(hwloc_obj_t obj);
*/
HWLOC_DECLSPEC int hwloc_topology_reconnect(hwloc_topology_t topology, unsigned long flags __hwloc_attribute_unused);
/** \brief Make sure that plugins can lookup core symbols.
*
* This is a sanity check to avoid lazy-lookup failures when libhwloc
* is loaded within a plugin, and later tries to load its own plugins.
* This may fail (and abort the program) if libhwloc symbols are in a
* private namespace.
*
* \return 0 on success.
* \return -1 if the plugin cannot be successfully loaded. The caller
* plugin init() callback should return a negative error code as well.
*
* Plugins should call this function in their init() callback to avoid
* later crashes if lazy symbol resolution is used by the upper layer that
* loaded hwloc (e.g. OpenCL implementations using dlopen with RTLD_LAZY).
*
* \note The build system must define HWLOC_INSIDE_PLUGIN if and only if
* building the caller as a plugin.
*
* \note This function should remain inline so plugins can call it even
* when they cannot find libhwloc symbols.
*/
static __hwloc_inline int
hwloc_plugin_check_namespace(const char *pluginname __hwloc_attribute_unused, const char *symbol __hwloc_attribute_unused)
{
#ifdef HWLOC_INSIDE_PLUGIN
void *sym;
#ifdef HWLOC_HAVE_LTDL
lt_dlhandle handle = lt_dlopen(NULL);
#else
void *handle = dlopen(NULL, RTLD_NOW|RTLD_LOCAL);
#endif
if (!handle)
/* cannot check, assume things will work */
return 0;
#ifdef HWLOC_HAVE_LTDL
sym = lt_dlsym(handle, symbol);
lt_dlclose(handle);
#else
sym = dlsym(handle, symbol);
dlclose(handle);
#endif
if (!sym) {
static int verboseenv_checked = 0;
static int verboseenv_value = 0;
if (!verboseenv_checked) {
const char *verboseenv = getenv("HWLOC_PLUGINS_VERBOSE");
verboseenv_value = verboseenv ? atoi(verboseenv) : 0;
verboseenv_checked = 1;
}
if (verboseenv_value)
fprintf(stderr, "Plugin `%s' disabling itself because it cannot find the `%s' core symbol.\n",
pluginname, symbol);
return -1;
}
#endif /* HWLOC_INSIDE_PLUGIN */
return 0;
}
/** @} */
@ -645,6 +633,19 @@ HWLOC_DECLSPEC struct hwloc_obj * hwloc_pci_find_parent_by_busid(struct hwloc_to
*/
HWLOC_DECLSPEC struct hwloc_obj * hwloc_pci_find_by_busid(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func);
/** @} */
/** \defgroup hwlocality_components_distances Components and Plugins: distances
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
/** \brief Handle to a new distances structure during its addition to the topology. */
typedef void * hwloc_backend_distances_add_handle_t;


@ -1,6 +1,6 @@
/*
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2010-2025 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -176,6 +176,7 @@ extern "C" {
#define hwloc_topology_insert_misc_object HWLOC_NAME(topology_insert_misc_object)
#define hwloc_topology_alloc_group_object HWLOC_NAME(topology_alloc_group_object)
#define hwloc_topology_free_group_object HWLOC_NAME(topology_free_group_object)
#define hwloc_topology_insert_group_object HWLOC_NAME(topology_insert_group_object)
#define hwloc_obj_add_other_obj_sets HWLOC_NAME(obj_add_other_obj_sets)
#define hwloc_topology_refresh HWLOC_NAME(topology_refresh)
@ -209,6 +210,7 @@ extern "C" {
#define hwloc_obj_get_info_by_name HWLOC_NAME(obj_get_info_by_name)
#define hwloc_obj_add_info HWLOC_NAME(obj_add_info)
#define hwloc_obj_set_subtype HWLOC_NAME(obj_set_subtype)
#define HWLOC_CPUBIND_PROCESS HWLOC_NAME_CAPS(CPUBIND_PROCESS)
#define HWLOC_CPUBIND_THREAD HWLOC_NAME_CAPS(CPUBIND_THREAD)
@ -231,6 +233,7 @@ extern "C" {
#define HWLOC_MEMBIND_FIRSTTOUCH HWLOC_NAME_CAPS(MEMBIND_FIRSTTOUCH)
#define HWLOC_MEMBIND_BIND HWLOC_NAME_CAPS(MEMBIND_BIND)
#define HWLOC_MEMBIND_INTERLEAVE HWLOC_NAME_CAPS(MEMBIND_INTERLEAVE)
#define HWLOC_MEMBIND_WEIGHTED_INTERLEAVE HWLOC_NAME_CAPS(MEMBIND_WEIGHTED_INTERLEAVE)
#define HWLOC_MEMBIND_NEXTTOUCH HWLOC_NAME_CAPS(MEMBIND_NEXTTOUCH)
#define HWLOC_MEMBIND_MIXED HWLOC_NAME_CAPS(MEMBIND_MIXED)
@ -406,8 +409,10 @@ extern "C" {
#define hwloc_local_numanode_flag_e HWLOC_NAME(local_numanode_flag_e)
#define HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_LARGER_LOCALITY)
#define HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY)
#define HWLOC_LOCAL_NUMANODE_FLAG_INTERSECT_LOCALITY HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_INTERSECT_LOCALITY)
#define HWLOC_LOCAL_NUMANODE_FLAG_ALL HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_ALL)
#define hwloc_get_local_numanode_objs HWLOC_NAME(get_local_numanode_objs)
#define hwloc_topology_get_default_nodeset HWLOC_NAME(topology_get_default_nodeset)
#define hwloc_memattr_get_name HWLOC_NAME(memattr_get_name)
#define hwloc_memattr_get_flags HWLOC_NAME(memattr_get_flags)
@ -559,6 +564,7 @@ extern "C" {
/* opencl.h */
#define hwloc_cl_device_pci_bus_info_khr HWLOC_NAME(cl_device_pci_bus_info_khr)
#define hwloc_cl_device_topology_amd HWLOC_NAME(cl_device_topology_amd)
#define hwloc_opencl_get_device_pci_busid HWLOC_NAME(opencl_get_device_pci_ids)
#define hwloc_opencl_get_device_cpuset HWLOC_NAME(opencl_get_device_cpuset)
@ -595,7 +601,9 @@ extern "C" {
/* levelzero.h */
#define hwloc_levelzero_get_device_cpuset HWLOC_NAME(levelzero_get_device_cpuset)
#define hwloc_levelzero_get_sysman_device_cpuset HWLOC_NAME(levelzero_get_sysman_device_cpuset)
#define hwloc_levelzero_get_device_osdev HWLOC_NAME(levelzero_get_device_osdev)
#define hwloc_levelzero_get_sysman_device_osdev HWLOC_NAME(levelzero_get_sysman_device_osdev)
/* gl.h */
@ -714,6 +722,8 @@ extern "C" {
#define hwloc__obj_type_is_dcache HWLOC_NAME(_obj_type_is_dcache)
#define hwloc__obj_type_is_icache HWLOC_NAME(_obj_type_is_icache)
#define hwloc__pci_link_speed HWLOC_NAME(_pci_link_speed)
/* private/cpuid-x86.h */
#define hwloc_have_x86_cpuid HWLOC_NAME(have_x86_cpuid)
@ -807,6 +817,8 @@ extern "C" {
#define hwloc_topology_setup_defaults HWLOC_NAME(topology_setup_defaults)
#define hwloc_topology_clear HWLOC_NAME(topology_clear)
#define hwloc__reconnect HWLOC_NAME(_reconnect)
#define hwloc__attach_memory_object HWLOC_NAME(insert_memory_object)
#define hwloc_get_obj_by_type_and_gp_index HWLOC_NAME(get_obj_by_type_and_gp_index)


@ -1,5 +1,5 @@
/*
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright © 2012-2023 Inria. All rights reserved.
* Copyright (c) 2020, Advanced Micro Devices, Inc. All rights reserved.
* Written by Advanced Micro Devices,
* See COPYING in top-level directory.
@ -55,6 +55,9 @@ extern "C" {
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*
* \return 0 on success.
* \return -1 on error, for instance if device information could not be found.
*/
static __hwloc_inline int
hwloc_rsmi_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,


@ -1,5 +1,5 @@
/*
* Copyright © 2013-2018 Inria. All rights reserved.
* Copyright © 2013-2023 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -48,6 +48,8 @@ extern "C" {
* This length (in bytes) must be used in hwloc_shmem_topology_write()
* and hwloc_shmem_topology_adopt() later.
*
* \return the length, or -1 on error, for instance if flags are invalid.
*
* \note Flags \p flags are currently unused, must be 0.
*/
HWLOC_DECLSPEC int hwloc_shmem_topology_get_length(hwloc_topology_t topology,
@ -74,9 +76,10 @@ HWLOC_DECLSPEC int hwloc_shmem_topology_get_length(hwloc_topology_t topology,
* is not. However the caller may also allocate it manually in shared memory
* to share it as well.
*
* \return -1 with errno set to EBUSY if the virtual memory mapping defined
* \return 0 on success.
* \return -1 with errno set to \c EBUSY if the virtual memory mapping defined
* by \p mmap_address and \p length isn't available in the process.
* \return -1 with errno set to EINVAL if \p fileoffset, \p mmap_address
* \return -1 with errno set to \c EINVAL if \p fileoffset, \p mmap_address
* or \p length aren't page-aligned.
*/
HWLOC_DECLSPEC int hwloc_shmem_topology_write(hwloc_topology_t topology,
@ -112,14 +115,16 @@ HWLOC_DECLSPEC int hwloc_shmem_topology_write(hwloc_topology_t topology,
*
* \note This function takes care of calling hwloc_topology_abi_check().
*
* \return -1 with errno set to EBUSY if the virtual memory mapping defined
* \return 0 on success.
*
* \return -1 with errno set to \c EBUSY if the virtual memory mapping defined
* by \p mmap_address and \p length isn't available in the process.
*
* \return -1 with errno set to EINVAL if \p fileoffset, \p mmap_address
* \return -1 with errno set to \c EINVAL if \p fileoffset, \p mmap_address
* or \p length aren't page-aligned, or do not match what was given to
* hwloc_shmem_topology_write() earlier.
*
* \return -1 with errno set to EINVAL if the layout of the topology structure
* \return -1 with errno set to \c EINVAL if the layout of the topology structure
* is different between the writer process and the adopter process.
*/
HWLOC_DECLSPEC int hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,


@ -11,6 +11,22 @@
#ifndef HWLOC_PRIVATE_CPUID_X86_H
#define HWLOC_PRIVATE_CPUID_X86_H
/* A macro for annotating memory as initialized when building with MSAN
* (and otherwise having no effect). See below for why this is used with
* our custom assembly.
*/
#ifdef __has_feature
#define HWLOC_HAS_FEATURE(name) __has_feature(name)
#else
#define HWLOC_HAS_FEATURE(name) 0
#endif
#if HWLOC_HAS_FEATURE(memory_sanitizer) || defined(MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h>
#define HWLOC_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, len) __msan_unpoison(ptr, len)
#else
#define HWLOC_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, len)
#endif
#if (defined HWLOC_X86_32_ARCH) && (!defined HWLOC_HAVE_MSVC_CPUIDEX)
static __hwloc_inline int hwloc_have_x86_cpuid(void)
{
@ -71,12 +87,18 @@ static __hwloc_inline void hwloc_x86_cpuid(unsigned *eax, unsigned *ebx, unsigne
"movl %k2,%1\n\t"
: "+a" (*eax), "=m" (*ebx), "=&r"(sav_rbx),
"+c" (*ecx), "=&d" (*edx));
/* MSAN does not recognize the effect of the above assembly on the memory operand
* (`"=m"(*ebx)`). This may get improved in MSAN at some point in the future, e.g.
* see https://github.com/llvm/llvm-project/pull/77393. */
HWLOC_ANNOTATE_MEMORY_IS_INITIALIZED(ebx, sizeof *ebx);
#elif defined(HWLOC_X86_32_ARCH)
__asm__(
"mov %%ebx,%1\n\t"
"cpuid\n\t"
"xchg %%ebx,%1\n\t"
: "+a" (*eax), "=&SD" (*ebx), "+c" (*ecx), "=&d" (*edx));
/* See above. */
HWLOC_ANNOTATE_MEMORY_IS_INITIALIZED(ebx, sizeof *ebx);
#else
#error unknown architecture
#endif


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -186,7 +186,7 @@ hwloc_ffsl_from_ffs32(unsigned long x)
/**
* flsl helpers.
*/
#ifdef __GNUC_____
#ifdef __GNUC__
# if (__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))
# define hwloc_flsl(x) ((x) ? (8*sizeof(long) - __builtin_clzl(x)) : 0)
@ -573,4 +573,35 @@ typedef SSIZE_T ssize_t;
# endif
#endif
static __inline float
hwloc__pci_link_speed(unsigned generation, unsigned lanes)
{
float lanespeed;
/*
* These are single-direction bandwidths only.
*
* Gen1 used NRZ with 8/10 encoding.
* PCIe Gen1 = 2.5GT/s signal-rate per lane x 8/10 = 0.25GB/s data-rate per lane
* PCIe Gen2 = 5 GT/s signal-rate per lane x 8/10 = 0.5 GB/s data-rate per lane
* Gen3 switched to NRZ with 128/130 encoding.
* PCIe Gen3 = 8 GT/s signal-rate per lane x 128/130 = 1 GB/s data-rate per lane
* PCIe Gen4 = 16 GT/s signal-rate per lane x 128/130 = 2 GB/s data-rate per lane
* PCIe Gen5 = 32 GT/s signal-rate per lane x 128/130 = 4 GB/s data-rate per lane
* Gen6 switched to PAM with 242/256 FLIT (242B payload protected by 8B CRC + 6B FEC).
* PCIe Gen6 = 64 GT/s signal-rate per lane x 242/256 = 8 GB/s data-rate per lane
* PCIe Gen7 = 128GT/s signal-rate per lane x 242/256 = 16 GB/s data-rate per lane
*/
/* lanespeed in Gbit/s */
if (generation <= 2)
lanespeed = 2.5f * generation * 0.8f;
else if (generation <= 5)
lanespeed = 8.0f * (1<<(generation-3)) * 128/130;
else
lanespeed = 8.0f * (1<<(generation-3)) * 242/256; /* assume Gen8 will be 256 GT/s and so on */
/* linkspeed in GB/s */
return lanespeed * lanes / 8;
}
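A few spot checks of the formula above (unidirectional bandwidth, in GB/s); hwloc__pci_link_speed() is an hwloc-internal helper, so this fragment is purely illustrative and assumes <stdio.h>:

static void demo_link_speeds(void)
{
    printf("Gen1 x1  = %.2f GB/s\n", hwloc__pci_link_speed(1, 1));  /* 0.25  */
    printf("Gen3 x16 = %.2f GB/s\n", hwloc__pci_link_speed(3, 16)); /* 15.75 */
    printf("Gen4 x16 = %.2f GB/s\n", hwloc__pci_link_speed(4, 16)); /* 31.51 */
    printf("Gen6 x16 = %.2f GB/s\n", hwloc__pci_link_speed(6, 16)); /* 121.00 */
}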
#endif /* HWLOC_PRIVATE_MISC_H */


@ -1,578 +0,0 @@
/*
* Copyright © 2014 Cisco Systems, Inc. All rights reserved.
* Copyright © 2013-2014 University of Wisconsin-La Crosse.
* All rights reserved.
* Copyright © 2015-2017 Inria. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
* See COPYING in top-level directory.
*
* $HEADER$
*/
#ifndef _NETLOC_PRIVATE_H_
#define _NETLOC_PRIVATE_H_
#include <hwloc.h>
#include <netloc.h>
#include <netloc/uthash.h>
#include <netloc/utarray.h>
#include <private/autogen/config.h>
#define NETLOCFILE_VERSION 1
#ifdef NETLOC_SCOTCH
#include <stdint.h>
#include <scotch.h>
#define NETLOC_int SCOTCH_Num
#else
#define NETLOC_int int
#endif
/*
* "Import" a few things from hwloc
*/
#define __netloc_attribute_unused __hwloc_attribute_unused
#define __netloc_attribute_malloc __hwloc_attribute_malloc
#define __netloc_attribute_const __hwloc_attribute_const
#define __netloc_attribute_pure __hwloc_attribute_pure
#define __netloc_attribute_deprecated __hwloc_attribute_deprecated
#define __netloc_attribute_may_alias __hwloc_attribute_may_alias
#define NETLOC_DECLSPEC HWLOC_DECLSPEC
/**********************************************************************
* Types
**********************************************************************/
/**
* Definitions for Comparators
* \sa These are the return values from the following functions:
* netloc_network_compare, netloc_dt_edge_t_compare, netloc_dt_node_t_compare
*/
typedef enum {
NETLOC_CMP_SAME = 0, /**< Compared as the Same */
NETLOC_CMP_SIMILAR = -1, /**< Compared as Similar, but not the Same */
NETLOC_CMP_DIFF = -2 /**< Compared as Different */
} netloc_compare_type_t;
/**
* Enumerated type for the various types of supported networks
*/
typedef enum {
NETLOC_NETWORK_TYPE_ETHERNET = 1, /**< Ethernet network */
NETLOC_NETWORK_TYPE_INFINIBAND = 2, /**< InfiniBand network */
NETLOC_NETWORK_TYPE_INVALID = 3 /**< Invalid network */
} netloc_network_type_t;
/**
* Enumerated type for the various types of supported topologies
*/
typedef enum {
NETLOC_TOPOLOGY_TYPE_INVALID = -1, /**< Invalid */
NETLOC_TOPOLOGY_TYPE_TREE = 1, /**< Tree */
} netloc_topology_type_t;
/**
* Enumerated type for the various types of nodes
*/
typedef enum {
NETLOC_NODE_TYPE_HOST = 0, /**< Host (a.k.a., network addressable endpoint - e.g., MAC Address) node */
NETLOC_NODE_TYPE_SWITCH = 1, /**< Switch node */
NETLOC_NODE_TYPE_INVALID = 2 /**< Invalid node */
} netloc_node_type_t;
typedef enum {
NETLOC_ARCH_TREE = 0, /* Fat tree */
} netloc_arch_type_t;
/* Pre declarations to avoid inter dependency problems */
/** \cond IGNORE */
struct netloc_topology_t;
typedef struct netloc_topology_t netloc_topology_t;
struct netloc_node_t;
typedef struct netloc_node_t netloc_node_t;
struct netloc_edge_t;
typedef struct netloc_edge_t netloc_edge_t;
struct netloc_physical_link_t;
typedef struct netloc_physical_link_t netloc_physical_link_t;
struct netloc_path_t;
typedef struct netloc_path_t netloc_path_t;
struct netloc_arch_tree_t;
typedef struct netloc_arch_tree_t netloc_arch_tree_t;
struct netloc_arch_node_t;
typedef struct netloc_arch_node_t netloc_arch_node_t;
struct netloc_arch_node_slot_t;
typedef struct netloc_arch_node_slot_t netloc_arch_node_slot_t;
struct netloc_arch_t;
typedef struct netloc_arch_t netloc_arch_t;
/** \endcond */
/**
* \struct netloc_topology_t
* \brief Netloc Topology Context
*
* An opaque data structure used to reference a network topology.
*
* \note Must be initialized with \ref netloc_topology_construct()
*/
struct netloc_topology_t {
/** Topology path */
char *topopath;
/** Subnet ID */
char *subnet_id;
/** Node List */
netloc_node_t *nodes; /* Hash table of nodes by physical_id */
netloc_node_t *nodesByHostname; /* Hash table of nodes by hostname */
netloc_physical_link_t *physical_links; /* Hash table with physcial links */
/** Partition List */
UT_array *partitions;
/** Hwloc topology List */
char *hwlocpath;
UT_array *topos;
hwloc_topology_t *hwloc_topos;
/** Type of the graph */
netloc_topology_type_t type;
};
/**
* \brief Netloc Node Type
*
* Represents the concept of a node (a.k.a., vertex, endpoint) within a network
* graph. This could be a server or a network switch. The \ref node_type parameter
* will distinguish the exact type of node this represents in the graph.
*/
struct netloc_node_t {
UT_hash_handle hh; /* makes this structure hashable with physical_id */
UT_hash_handle hh2; /* makes this structure hashable with hostname */
/** Physical ID of the node */
char physical_id[20];
/** Logical ID of the node (if any) */
int logical_id;
/** Type of the node */
netloc_node_type_t type;
/* Pointer to physical_links */
UT_array *physical_links;
/** Description information from discovery (if any) */
char *description;
/**
* Application-given private data pointer.
* Initialized to NULL, and not used by the netloc library.
*/
void * userdata;
/** Outgoing edges from this node */
netloc_edge_t *edges;
UT_array *subnodes; /* the group of nodes for the virtual nodes */
netloc_path_t *paths;
char *hostname;
UT_array *partitions; /* index in the list from the topology */
hwloc_topology_t hwlocTopo;
int hwlocTopoIdx;
};
/**
* \brief Netloc Edge Type
*
* Represents the concept of a directed edge within a network graph.
*
* \note We do not point to the netloc_node_t structure directly to
* simplify the representation, and allow the information to more easily
* be entered into the data store without circular references.
* \todo JJH Is the note above still true?
*/
struct netloc_edge_t {
UT_hash_handle hh; /* makes this structure hashable */
netloc_node_t *dest;
int id;
/** Pointers to the parent node */
netloc_node_t *node;
/* Pointer to physical_links */
UT_array *physical_links;
/** total gbits of the links */
float total_gbits;
UT_array *partitions; /* index in the list from the topology */
UT_array *subnode_edges; /* for edges going to virtual nodes */
struct netloc_edge_t *other_way;
/**
* Application-given private data pointer.
* Initialized to NULL, and not used by the netloc library.
*/
void * userdata;
};
struct netloc_physical_link_t {
UT_hash_handle hh; /* makes this structure hashable */
int id; // TODO long long
netloc_node_t *src;
netloc_node_t *dest;
int ports[2];
char *width;
char *speed;
netloc_edge_t *edge;
int other_way_id;
struct netloc_physical_link_t *other_way;
UT_array *partitions; /* index in the list from the topology */
/** gbits of the link from speed and width */
float gbits;
/** Description information from discovery (if any) */
char *description;
};
struct netloc_path_t {
UT_hash_handle hh; /* makes this structure hashable */
char dest_id[20];
UT_array *links;
};
/**********************************************************************
* Architecture structures
**********************************************************************/
struct netloc_arch_tree_t {
NETLOC_int num_levels;
NETLOC_int *degrees;
NETLOC_int *cost;
};
struct netloc_arch_node_t {
UT_hash_handle hh; /* makes this structure hashable */
char *name; /* Hash key */
netloc_node_t *node; /* Corresponding node */
int idx_in_topo; /* idx with ghost hosts to have complete topo */
int num_slots; /* it is not the real number of slots but the maximum slot idx */
int *slot_idx; /* corresponding idx in slot_tree */
int *slot_os_idx; /* corresponding os index for each leaf in tree */
netloc_arch_tree_t *slot_tree; /* Tree built from hwloc */
int num_current_slots; /* Number of PUs */
NETLOC_int *current_slots; /* indices in the complete tree */
int *slot_ranks; /* corresponding MPI rank for each leaf in tree */
};
struct netloc_arch_node_slot_t {
netloc_arch_node_t *node;
int slot;
};
struct netloc_arch_t {
netloc_topology_t *topology;
int has_slots; /* if slots are included in the architecture */
netloc_arch_type_t type;
union {
netloc_arch_tree_t *node_tree;
netloc_arch_tree_t *global_tree;
} arch;
netloc_arch_node_t *nodes_by_name;
netloc_arch_node_slot_t *node_slot_by_idx; /* node_slot by index in complete topo */
NETLOC_int num_current_hosts; /* if has_slots, host is a slot, else host is a node */
NETLOC_int *current_hosts; /* indices in the complete topology */
};
/**********************************************************************
* Topology Functions
**********************************************************************/
/**
* Allocate a topology handle.
*
* User is responsible for calling \ref netloc_detach on the topology handle.
* The network parameter information is deep copied into the topology handle, so the
* user may destruct the network handle after calling this function and/or reuse
* the network handle.
*
* \returns NETLOC_SUCCESS on success
* \returns NETLOC_ERROR upon an error.
*/
netloc_topology_t *netloc_topology_construct(char *path);
/**
* Destruct a topology handle
*
* \param topology A valid pointer to a \ref netloc_topology_t handle created
* from a prior call to \ref netloc_topology_construct.
*
* \returns NETLOC_SUCCESS on success
* \returns NETLOC_ERROR upon an error.
*/
int netloc_topology_destruct(netloc_topology_t *topology);
int netloc_topology_find_partition_idx(netloc_topology_t *topology, char *partition_name);
int netloc_topology_read_hwloc(netloc_topology_t *topology, int num_nodes,
netloc_node_t **node_list);
#define netloc_topology_iter_partitions(topology,partition) \
for ((partition) = (char **)utarray_front(topology->partitions); \
(partition) != NULL; \
(partition) = (char **)utarray_next(topology->partitions, partition))
#define netloc_topology_iter_hwloctopos(topology,hwloctopo) \
for ((hwloctopo) = (char **)utarray_front(topology->topos); \
(hwloctopo) != NULL; \
(hwloctopo) = (char **)utarray_next(topology->topos, hwloctopo))
#define netloc_topology_find_node(topology,node_id,node) \
HASH_FIND_STR(topology->nodes, node_id, node)
#define netloc_topology_iter_nodes(topology,node,_tmp) \
HASH_ITER(hh, topology->nodes, node, _tmp)
#define netloc_topology_num_nodes(topology) \
HASH_COUNT(topology->nodes)
/*************************************************/
/**
* Constructor for netloc_node_t
*
* User is responsible for calling the destructor on the handle.
*
* Returns
* A newly allocated pointer to the network information.
*/
netloc_node_t *netloc_node_construct(void);
/**
* Destructor for netloc_node_t
*
* \param node A valid node handle
*
* Returns
* NETLOC_SUCCESS on success
* NETLOC_ERROR on error
*/
int netloc_node_destruct(netloc_node_t *node);
char *netloc_node_pretty_print(netloc_node_t* node);
#define netloc_node_get_num_subnodes(node) \
utarray_len((node)->subnodes)
#define netloc_node_get_subnode(node,i) \
(*(netloc_node_t **)utarray_eltptr((node)->subnodes, (i)))
#define netloc_node_get_num_edges(node) \
utarray_len((node)->edges)
#define netloc_node_get_edge(node,i) \
(*(netloc_edge_t **)utarray_eltptr((node)->edges, (i)))
#define netloc_node_iter_edges(node,edge,_tmp) \
HASH_ITER(hh, node->edges, edge, _tmp)
#define netloc_node_iter_paths(node,path,_tmp) \
HASH_ITER(hh, node->paths, path, _tmp)
#define netloc_node_is_host(node) \
(node->type == NETLOC_NODE_TYPE_HOST)
#define netloc_node_is_switch(node) \
(node->type == NETLOC_NODE_TYPE_SWITCH)
#define netloc_node_iter_paths(node, path,_tmp) \
HASH_ITER(hh, node->paths, path, _tmp)
int netloc_node_is_in_partition(netloc_node_t *node, int partition);
/*************************************************/
/**
* Constructor for netloc_edge_t
*
* User is responsible for calling the destructor on the handle.
*
* Returns
* A newly allocated pointer to the edge information.
*/
netloc_edge_t *netloc_edge_construct(void);
/**
* Destructor for netloc_edge_t
*
* \param edge A valid edge handle
*
* Returns
* NETLOC_SUCCESS on success
* NETLOC_ERROR on error
*/
int netloc_edge_destruct(netloc_edge_t *edge);
char * netloc_edge_pretty_print(netloc_edge_t* edge);
void netloc_edge_reset_uid(void);
int netloc_edge_is_in_partition(netloc_edge_t *edge, int partition);
#define netloc_edge_get_num_links(edge) \
utarray_len((edge)->physical_links)
#define netloc_edge_get_link(edge,i) \
(*(netloc_physical_link_t **)utarray_eltptr((edge)->physical_links, (i)))
#define netloc_edge_get_num_subedges(edge) \
utarray_len((edge)->subnode_edges)
#define netloc_edge_get_subedge(edge,i) \
(*(netloc_edge_t **)utarray_eltptr((edge)->subnode_edges, (i)))
/*************************************************/
/**
* Constructor for netloc_physical_link_t
*
* User is responsible for calling the destructor on the handle.
*
* Returns
* A newly allocated pointer to the physical link information.
*/
netloc_physical_link_t * netloc_physical_link_construct(void);
/**
* Destructor for netloc_physical_link_t
*
* Returns
* NETLOC_SUCCESS on success
* NETLOC_ERROR on error
*/
int netloc_physical_link_destruct(netloc_physical_link_t *link);
char * netloc_link_pretty_print(netloc_physical_link_t* link);
/*************************************************/
netloc_path_t *netloc_path_construct(void);
int netloc_path_destruct(netloc_path_t *path);
/**********************************************************************
* Architecture functions
**********************************************************************/
netloc_arch_t * netloc_arch_construct(void);
int netloc_arch_destruct(netloc_arch_t *arch);
int netloc_arch_build(netloc_arch_t *arch, int add_slots);
int netloc_arch_set_current_resources(netloc_arch_t *arch);
int netloc_arch_set_global_resources(netloc_arch_t *arch);
int netloc_arch_node_get_hwloc_info(netloc_arch_node_t *arch);
void netloc_arch_tree_complete(netloc_arch_tree_t *tree, UT_array **down_degrees_by_level,
int num_hosts, int **parch_idx);
NETLOC_int netloc_arch_tree_num_leaves(netloc_arch_tree_t *tree);
/**********************************************************************
* Access functions of various elements of the topology
**********************************************************************/
#define netloc_get_num_partitions(object) \
utarray_len((object)->partitions)
#define netloc_get_partition(object,i) \
(*(int *)utarray_eltptr((object)->partitions, (i)))
#define netloc_path_iter_links(path,link) \
for ((link) = (netloc_physical_link_t **)utarray_front(path->links); \
(link) != NULL; \
(link) = (netloc_physical_link_t **)utarray_next(path->links, link))
/**********************************************************************
* Misc functions
**********************************************************************/
/**
* Decode the network type
*
* \param net_type A valid member of the \ref netloc_network_type_t type
*
* \returns NULL if the type is invalid
* \returns A string for that \ref netloc_network_type_t type
*/
static inline const char * netloc_network_type_decode(netloc_network_type_t net_type) {
if( NETLOC_NETWORK_TYPE_ETHERNET == net_type ) {
return "ETH";
}
else if( NETLOC_NETWORK_TYPE_INFINIBAND == net_type ) {
return "IB";
}
else {
return NULL;
}
}
/**
* Decode the node type
*
* \param node_type A valid member of the \ref netloc_node_type_t type
*
* \returns NULL if the type is invalid
* \returns A string for that \ref netloc_node_type_t type
*/
static inline const char * netloc_node_type_decode(netloc_node_type_t node_type) {
if( NETLOC_NODE_TYPE_SWITCH == node_type ) {
return "SW";
}
else if( NETLOC_NODE_TYPE_HOST == node_type ) {
return "CA";
}
else {
return NULL;
}
}
ssize_t netloc_line_get(char **lineptr, size_t *n, FILE *stream);
char *netloc_line_get_next_token(char **string, char c);
int netloc_build_comm_mat(char *filename, int *pn, double ***pmat);
#define STRDUP_IF_NOT_NULL(str) (NULL == str ? NULL : strdup(str))
#define STR_EMPTY_IF_NULL(str) (NULL == str ? "" : str)
#endif // _NETLOC_PRIVATE_H_


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2025 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
*
@ -245,6 +245,12 @@ struct hwloc_topology {
* temporary variables during discovery
*/
/* set to 1 at the beginning of load() if the filter of any cpu cache type (L1 to L3i) is not NONE,
* may be checked by backends before querying caches
* (when they don't know the level of caches they are querying).
*/
int want_some_cpu_caches;
/* machine-wide memory.
* temporarily stored there by OSes that only provide this without NUMA information,
* and actually used later by the core.
@ -296,6 +302,9 @@ extern void hwloc__reorder_children(hwloc_obj_t parent);
extern void hwloc_topology_setup_defaults(struct hwloc_topology *topology);
extern void hwloc_topology_clear(struct hwloc_topology *topology);
#define _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE (1UL<<0)
extern int hwloc__reconnect(struct hwloc_topology *topology, unsigned long flags);
/* insert memory object as memory child of normal parent */
extern struct hwloc_obj * hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
hwloc_obj_t obj, const char *reason);
@ -420,7 +429,7 @@ extern void hwloc_internal_memattrs_need_refresh(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_refresh(hwloc_topology_t topology);
extern int hwloc_internal_memattrs_dup(hwloc_topology_t new, hwloc_topology_t old);
extern int hwloc_internal_memattr_set_value(hwloc_topology_t topology, hwloc_memattr_id_t id, hwloc_obj_type_t target_type, hwloc_uint64_t target_gp_index, unsigned target_os_index, struct hwloc_internal_location_s *initiator, hwloc_uint64_t value);
extern int hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology);
extern int hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology, int force_subtype);
extern void hwloc_internal_cpukinds_init(hwloc_topology_t topology);
extern int hwloc_internal_cpukinds_rank(hwloc_topology_t topology);
@ -477,6 +486,7 @@ extern char * hwloc_progname(struct hwloc_topology *topology);
#define HWLOC_GROUP_KIND_INTEL_DIE 104 /* no subkind */
#define HWLOC_GROUP_KIND_S390_BOOK 110 /* subkind 0 is book, subkind 1 is drawer (group of books) */
#define HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT 120 /* no subkind */
#define HWLOC_GROUP_KIND_AMD_COMPLEX 121 /* no subkind */
/* then, OS-specific groups */
#define HWLOC_GROUP_KIND_SOLARIS_PG_HW_PERF 200 /* subkind is group width */
#define HWLOC_GROUP_KIND_AIX_SDL_UNKNOWN 210 /* subkind is SDL level */


@ -19,13 +19,14 @@ HWLOC_DECLSPEC int hwloc__xml_verbose(void);
typedef struct hwloc__xml_import_state_s {
struct hwloc__xml_import_state_s *parent;
/* globals shared because the entire stack of states during import */
/* globals shared between the entire stack of states during import */
struct hwloc_xml_backend_data_s *global;
/* opaque data used to store backend-specific data.
* statically allocated to allow stack-allocation by the common code without knowing actual backend needs.
* libxml is 3 ptrs. nolibxml is 3 ptr + one int.
*/
char data[32];
char data[4 * SIZEOF_VOID_P];
} * hwloc__xml_import_state_t;
struct hwloc__xml_imported_v1distances_s {
@ -74,8 +75,9 @@ typedef struct hwloc__xml_export_state_s {
/* opaque data used to store backend-specific data.
* statically allocated to allow stack-allocation by the common code without knowing actual backend needs.
* libxml is 1 ptr. nolibxml is 1 ptr + 2 size_t + 3 ints.
*/
char data[40];
char data[6 * SIZEOF_VOID_P];
} * hwloc__xml_export_state_t;
HWLOC_DECLSPEC void hwloc__xml_export_topology(hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, unsigned long flags);


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* Copyright © 2009-2010, 2012 Université Bordeaux
* Copyright © 2011-2015 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -287,6 +287,7 @@ static __hwloc_inline int hwloc__check_membind_policy(hwloc_membind_policy_t pol
|| policy == HWLOC_MEMBIND_FIRSTTOUCH
|| policy == HWLOC_MEMBIND_BIND
|| policy == HWLOC_MEMBIND_INTERLEAVE
|| policy == HWLOC_MEMBIND_WEIGHTED_INTERLEAVE
|| policy == HWLOC_MEMBIND_NEXTTOUCH)
return 0;
return -1;


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -245,6 +245,7 @@ int hwloc_bitmap_copy(struct hwloc_bitmap_s * dst, const struct hwloc_bitmap_s *
/* Strings always use 32bit groups */
#define HWLOC_PRIxSUBBITMAP "%08lx"
#define HWLOC_BITMAP_SUBSTRING_SIZE 32
#define HWLOC_BITMAP_SUBSTRING_FULL_VALUE 0xFFFFFFFFUL
#define HWLOC_BITMAP_SUBSTRING_LENGTH (HWLOC_BITMAP_SUBSTRING_SIZE/4)
#define HWLOC_BITMAP_STRING_PER_LONG (HWLOC_BITS_PER_LONG/HWLOC_BITMAP_SUBSTRING_SIZE)
@ -261,6 +262,7 @@ int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, const stru
const unsigned long accum_mask = ~0UL;
#else /* HWLOC_BITS_PER_LONG != HWLOC_BITMAP_SUBSTRING_SIZE */
const unsigned long accum_mask = ((1UL << HWLOC_BITMAP_SUBSTRING_SIZE) - 1) << (HWLOC_BITS_PER_LONG - HWLOC_BITMAP_SUBSTRING_SIZE);
int merge_with_infinite_prefix = 0;
#endif /* HWLOC_BITS_PER_LONG != HWLOC_BITMAP_SUBSTRING_SIZE */
HWLOC__BITMAP_CHECK(set);
@ -279,6 +281,9 @@ int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, const stru
res = size>0 ? (int)size - 1 : 0;
tmp += res;
size -= res;
#if HWLOC_BITS_PER_LONG > HWLOC_BITMAP_SUBSTRING_SIZE
merge_with_infinite_prefix = 1;
#endif
}
i=(int) set->ulongs_count-1;
@ -294,16 +299,24 @@ int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, const stru
}
while (i>=0 || accumed) {
unsigned long value;
/* Refill accumulator */
if (!accumed) {
accum = set->ulongs[i--];
accumed = HWLOC_BITS_PER_LONG;
}
value = (accum & accum_mask) >> (HWLOC_BITS_PER_LONG - HWLOC_BITMAP_SUBSTRING_SIZE);
if (accum & accum_mask) {
#if HWLOC_BITS_PER_LONG > HWLOC_BITMAP_SUBSTRING_SIZE
if (merge_with_infinite_prefix && value == HWLOC_BITMAP_SUBSTRING_FULL_VALUE) {
/* first full subbitmap merged with infinite prefix */
res = 0;
} else
#endif
if (value) {
/* print the whole subset if not empty */
res = hwloc_snprintf(tmp, size, needcomma ? ",0x" HWLOC_PRIxSUBBITMAP : "0x" HWLOC_PRIxSUBBITMAP,
(accum & accum_mask) >> (HWLOC_BITS_PER_LONG - HWLOC_BITMAP_SUBSTRING_SIZE));
res = hwloc_snprintf(tmp, size, needcomma ? ",0x" HWLOC_PRIxSUBBITMAP : "0x" HWLOC_PRIxSUBBITMAP, value);
needcomma = 1;
} else if (i == -1 && accumed == HWLOC_BITMAP_SUBSTRING_SIZE) {
/* print a single 0 to mark the last subset */
@ -323,6 +336,7 @@ int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, const stru
#else
accum <<= HWLOC_BITMAP_SUBSTRING_SIZE;
accumed -= HWLOC_BITMAP_SUBSTRING_SIZE;
merge_with_infinite_prefix = 0;
#endif
if (res >= size)
@ -362,7 +376,8 @@ int hwloc_bitmap_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc_restric
{
const char * current = string;
unsigned long accum = 0;
int count=0;
int count = 0;
int ulongcount;
int infinite = 0;
/* count how many substrings there are */
@ -383,9 +398,20 @@ int hwloc_bitmap_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc_restric
count--;
}
if (hwloc_bitmap_reset_by_ulongs(set, (count + HWLOC_BITMAP_STRING_PER_LONG - 1) / HWLOC_BITMAP_STRING_PER_LONG) < 0)
ulongcount = (count + HWLOC_BITMAP_STRING_PER_LONG - 1) / HWLOC_BITMAP_STRING_PER_LONG;
if (hwloc_bitmap_reset_by_ulongs(set, ulongcount) < 0)
return -1;
set->infinite = 0;
set->infinite = 0; /* will be updated later */
#if HWLOC_BITS_PER_LONG != HWLOC_BITMAP_SUBSTRING_SIZE
if (infinite && (count % HWLOC_BITMAP_STRING_PER_LONG) != 0) {
/* accumulate substrings of the first ulong that are hidden in the infinite prefix */
int i;
for(i = (count % HWLOC_BITMAP_STRING_PER_LONG); i < HWLOC_BITMAP_STRING_PER_LONG; i++)
accum |= (HWLOC_BITMAP_SUBSTRING_FULL_VALUE << (i*HWLOC_BITMAP_SUBSTRING_SIZE));
}
#endif
while (*current != '\0') {
unsigned long val;
@ -544,6 +570,9 @@ int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, co
ssize_t size = buflen;
char *tmp = buf;
int res, ret = 0;
#if HWLOC_BITS_PER_LONG == 64
int merge_with_infinite_prefix = 0;
#endif
int started = 0;
int i;
@ -563,6 +592,9 @@ int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, co
res = size>0 ? (int)size - 1 : 0;
tmp += res;
size -= res;
#if HWLOC_BITS_PER_LONG == 64
merge_with_infinite_prefix = 1;
#endif
}
i=set->ulongs_count-1;
@ -582,7 +614,11 @@ int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, co
if (started) {
/* print the whole subset */
#if HWLOC_BITS_PER_LONG == 64
res = hwloc_snprintf(tmp, size, "%016lx", val);
if (merge_with_infinite_prefix && (val & 0xffffffff00000000UL) == 0xffffffff00000000UL) {
res = hwloc_snprintf(tmp, size, "%08lx", val & 0xffffffffUL);
} else {
res = hwloc_snprintf(tmp, size, "%016lx", val);
}
#else
res = hwloc_snprintf(tmp, size, "%08lx", val);
#endif
@ -599,6 +635,9 @@ int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, co
res = size>0 ? (int)size - 1 : 0;
tmp += res;
size -= res;
#if HWLOC_BITS_PER_LONG == 64
merge_with_infinite_prefix = 0;
#endif
}
/* if didn't display anything, display 0x0 */
@ -679,6 +718,10 @@ int hwloc_bitmap_taskset_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc
goto failed;
set->ulongs[count-1] = val;
if (infinite && tmpchars != HWLOC_BITS_PER_LONG/4) {
/* infinite prefix with partial substring, fill remaining bits */
set->ulongs[count-1] |= (~0ULL)<<(4*tmpchars);
}
current += tmpchars;
chars -= tmpchars;
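A round-trip sketch for the string formats affected by the infinite-prefix handling above; the exact output string is indicative only:

static void bitmap_string_roundtrip(void)
{
    char buf[128];
    hwloc_bitmap_t set = hwloc_bitmap_alloc_full(); /* infinitely-set bitmap */
    hwloc_bitmap_clr(set, 0);
    hwloc_bitmap_snprintf(buf, sizeof buf, set);    /* e.g. "0xf...f,0xfffffffe" */
    hwloc_bitmap_sscanf(set, buf);                  /* parses back to the same set */
    hwloc_bitmap_free(set);
}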


@ -94,8 +94,7 @@ static hwloc_dlhandle hwloc_dlopenext(const char *_filename)
{
hwloc_dlhandle handle;
char *filename = NULL;
(void) asprintf(&filename, "%s.so", _filename);
if (!filename)
if (asprintf(&filename, "%s.so", _filename) < 0)
return NULL;
handle = dlopen(filename, RTLD_NOW|RTLD_LOCAL);
free(filename);


@ -1,5 +1,5 @@
/*
* Copyright © 2020-2022 Inria. All rights reserved.
* Copyright © 2020-2024 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -50,6 +50,7 @@ hwloc_internal_cpukinds_dup(hwloc_topology_t new, hwloc_topology_t old)
return -1;
new->cpukinds = kinds;
new->nr_cpukinds = old->nr_cpukinds;
new->nr_cpukinds_allocated = old->nr_cpukinds;
memcpy(kinds, old->cpukinds, old->nr_cpukinds * sizeof(*kinds));
for(i=0;i<old->nr_cpukinds; i++) {


@ -1,5 +1,5 @@
/*
* Copyright © 2013-2022 Inria. All rights reserved.
* Copyright © 2013-2023 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -411,6 +411,30 @@ int hwloc_topology_diff_build(hwloc_topology_t topo1,
}
}
if (!err) {
/* cpukinds */
if (topo1->nr_cpukinds != topo2->nr_cpukinds)
goto roottoocomplex;
for(i=0; i<topo1->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *ic1 = &topo1->cpukinds[i];
struct hwloc_internal_cpukind_s *ic2 = &topo2->cpukinds[i];
unsigned j;
if (!hwloc_bitmap_isequal(ic1->cpuset, ic2->cpuset)
|| ic1->efficiency != ic2->efficiency
|| ic1->forced_efficiency != ic2->forced_efficiency
|| ic1->ranking_value != ic2->ranking_value
|| ic1->nr_infos != ic2->nr_infos)
goto roottoocomplex;
for(j=0; j<ic1->nr_infos; j++) {
struct hwloc_info_s *info1 = &ic1->infos[j], *info2 = &ic2->infos[j];
if (strcmp(info1->name, info2->name)
|| strcmp(info1->value, info2->value)) {
goto roottoocomplex;
}
}
}
}
return err;
roottoocomplex:


@ -1,5 +1,5 @@
/*
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2010-2025 Inria. All rights reserved.
* Copyright © 2011-2012 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -624,8 +624,8 @@ void * hwloc_distances_add_create(hwloc_topology_t topology,
return NULL;
}
if ((kind & ~HWLOC_DISTANCES_KIND_ALL)
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_FROM_ALL) != 1
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_MEANS_ALL) != 1) {
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_FROM_ALL) > 1
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_MEANS_ALL) > 1) {
errno = EINVAL;
return NULL;
}
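Effect of relaxing != 1 into > 1: callers may now omit the FROM_* or MEANS_* part of the kind entirely instead of being forced to pick exactly one of each, while passing more than one of either is still rejected with EINVAL. A hedged sketch against the public API (the "MyBandwidth" name is a placeholder):

#include <hwloc.h>

static void *start_bw_matrix(hwloc_topology_t topology)
{
    /* only a MEANS flag, no FROM flag: accepted after this change,
     * rejected with EINVAL before it */
    return hwloc_distances_add_create(topology, "MyBandwidth",
                                      HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH, 0);
}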
@ -699,7 +699,7 @@ hwloc_distances_add_commit(hwloc_topology_t topology,
}
/* in case we added some groups, see if we need to reconnect */
hwloc_topology_reconnect(topology, 0);
hwloc__reconnect(topology, 0);
return 0;
@ -1387,19 +1387,12 @@ static __hwloc_inline int is_nvswitch(hwloc_obj_t obj)
}
static int
hwloc__distances_transform_merge_switch_ports(hwloc_topology_t topology,
struct hwloc_distances_s *distances)
hwloc__distances_transform_merge_switch_ports(struct hwloc_distances_s *distances)
{
struct hwloc_internal_distances_s *dist = hwloc__internal_distances_from_public(topology, distances);
hwloc_obj_t *objs = distances->objs;
hwloc_uint64_t *values = distances->values;
unsigned first, i, j, nbobjs = distances->nbobjs;
if (strcmp(dist->name, "NVLinkBandwidth")) {
errno = EINVAL;
return -1;
}
/* find the first port */
first = (unsigned) -1;
for(i=0; i<nbobjs; i++)
@ -1435,20 +1428,13 @@ hwloc__distances_transform_merge_switch_ports(hwloc_topology_t topology,
}
static int
hwloc__distances_transform_transitive_closure(hwloc_topology_t topology,
struct hwloc_distances_s *distances)
hwloc__distances_transform_transitive_closure(struct hwloc_distances_s *distances)
{
struct hwloc_internal_distances_s *dist = hwloc__internal_distances_from_public(topology, distances);
hwloc_obj_t *objs = distances->objs;
hwloc_uint64_t *values = distances->values;
unsigned nbobjs = distances->nbobjs;
unsigned i, j, k;
if (strcmp(dist->name, "NVLinkBandwidth")) {
errno = EINVAL;
return -1;
}
for(i=0; i<nbobjs; i++) {
hwloc_uint64_t bw_i2sw = 0;
if (is_nvswitch(objs[i]))
@ -1467,8 +1453,8 @@ hwloc__distances_transform_transitive_closure(hwloc_topology_t topology,
if (is_nvswitch(objs[k]))
bw_sw2j += values[k*nbobjs+j];
/* bandwidth from i to j is now min(i2sw,sw2j) */
values[i*nbobjs+j] = bw_i2sw > bw_sw2j ? bw_sw2j : bw_i2sw;
/* bandwidth from i to j now gets indirect bandwidth too, min(i2sw,sw2j) */
values[i*nbobjs+j] += bw_i2sw > bw_sw2j ? bw_sw2j : bw_i2sw;
}
}
@ -1476,7 +1462,7 @@ hwloc__distances_transform_transitive_closure(hwloc_topology_t topology,
}
int
hwloc_distances_transform(hwloc_topology_t topology,
hwloc_distances_transform(hwloc_topology_t topology __hwloc_attribute_unused,
struct hwloc_distances_s *distances,
enum hwloc_distances_transform_e transform,
void *transform_attr,
@ -1495,13 +1481,13 @@ hwloc_distances_transform(hwloc_topology_t topology,
case HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS:
{
int err;
err = hwloc__distances_transform_merge_switch_ports(topology, distances);
err = hwloc__distances_transform_merge_switch_ports(distances);
if (!err)
err = hwloc__distances_transform_remove_null(distances);
return err;
}
case HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE:
return hwloc__distances_transform_transitive_closure(topology, distances);
return hwloc__distances_transform_transitive_closure(distances);
default:
errno = EINVAL;
return -1;
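A short usage sketch of the two transforms touched above, now applicable to any bandwidth matrix since the NVLinkBandwidth name check was dropped (error handling elided; dist is assumed to come from hwloc_distances_get()):

#include <hwloc.h>

static void route_through_switch(hwloc_topology_t topology,
                                 struct hwloc_distances_s *dist)
{
    /* merge per-port switch entries into one object and drop the NULLed ports */
    hwloc_distances_transform(topology, dist,
                              HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS, NULL, 0);
    /* then accumulate indirect bandwidth min(i->switch, switch->j) per pair */
    hwloc_distances_transform(topology, dist,
                              HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE, NULL, 0);
}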

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@ -886,36 +886,12 @@ hwloc_pcidisc_find_linkspeed(const unsigned char *config,
unsigned offset, float *linkspeed)
{
unsigned linksta, speed, width;
float lanespeed;
memcpy(&linksta, &config[offset + HWLOC_PCI_EXP_LNKSTA], 4);
speed = linksta & HWLOC_PCI_EXP_LNKSTA_SPEED; /* PCIe generation */
width = (linksta & HWLOC_PCI_EXP_LNKSTA_WIDTH) >> 4; /* how many lanes */
/*
* These are single-direction bandwidths only.
*
* Gen1 used NRZ with 8/10 encoding.
* PCIe Gen1 = 2.5GT/s signal-rate per lane x 8/10 = 0.25GB/s data-rate per lane
* PCIe Gen2 = 5 GT/s signal-rate per lane x 8/10 = 0.5 GB/s data-rate per lane
* Gen3 switched to NRZ with 128/130 encoding.
* PCIe Gen3 = 8 GT/s signal-rate per lane x 128/130 = 1 GB/s data-rate per lane
* PCIe Gen4 = 16 GT/s signal-rate per lane x 128/130 = 2 GB/s data-rate per lane
* PCIe Gen5 = 32 GT/s signal-rate per lane x 128/130 = 4 GB/s data-rate per lane
* Gen6 switched to PAM with 242/256 FLIT (242B payload protected by 8B CRC + 6B FEC).
* PCIe Gen6 = 64 GT/s signal-rate per lane x 242/256 = 8 GB/s data-rate per lane
* PCIe Gen7 = 128GT/s signal-rate per lane x 242/256 = 16 GB/s data-rate per lane
*/
/* lanespeed in Gbit/s */
if (speed <= 2)
lanespeed = 2.5f * speed * 0.8f;
else if (speed <= 5)
lanespeed = 8.0f * (1<<(speed-3)) * 128/130;
else
lanespeed = 8.0f * (1<<(speed-3)) * 242/256; /* assume Gen8 will be 256 GT/s and so on */
/* linkspeed in GB/s */
*linkspeed = lanespeed * width / 8;
*linkspeed = hwloc__pci_link_speed(speed, width);
return 0;
}
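Worked example of the formula being factored out into hwloc__pci_link_speed(): a Gen4 x16 link has speed=4 and width=16, so lanespeed = 8.0 × 2 × 128/130 ≈ 15.75 Gbit/s per lane, and linkspeed ≈ 15.75 × 16 / 8 ≈ 31.5 GB/s. A standalone sketch of the same arithmetic (pci_link_speed below is a local illustration, not the internal helper):

#include <stdio.h>

/* single-direction PCIe link speed in GB/s, following the table above */
static float pci_link_speed(unsigned speed /* generation */, unsigned width /* lanes */)
{
    float lanespeed; /* Gbit/s per lane */
    if (speed <= 2)
        lanespeed = 2.5f * speed * 0.8f;                   /* Gen1-2: NRZ, 8/10 */
    else if (speed <= 5)
        lanespeed = 8.0f * (1 << (speed - 3)) * 128 / 130; /* Gen3-5: NRZ, 128/130 */
    else
        lanespeed = 8.0f * (1 << (speed - 3)) * 242 / 256; /* Gen6+: PAM, FLIT */
    return lanespeed * width / 8; /* Gbit/s -> GB/s across all lanes */
}

int main(void)
{
    printf("Gen4 x16: %.1f GB/s\n", pci_link_speed(4, 16)); /* ~31.5 */
    return 0;
}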


@ -23,6 +23,7 @@ struct hwloc_shmem_header {
uint32_t header_length; /* where the actual topology starts in the file/mapping */
uint64_t mmap_address; /* virtual address to pass to mmap */
uint64_t mmap_length; /* length to pass to mmap (includes the header) */
/* we will pad the end to a multiple of pointer size so that the topology is well aligned */
};
#define HWLOC_SHMEM_MALLOC_ALIGN 8UL
@ -85,6 +86,7 @@ hwloc_shmem_topology_write(hwloc_topology_t topology,
hwloc_topology_t new;
struct hwloc_tma tma;
struct hwloc_shmem_header header;
uint32_t header_length = (sizeof(header) + sizeof(void*) - 1) & ~(sizeof(void*) - 1); /* pad to a multiple of pointer size */
void *mmap_res;
int err;
@ -100,7 +102,7 @@ hwloc_shmem_topology_write(hwloc_topology_t topology,
hwloc_internal_memattrs_refresh(topology);
header.header_version = HWLOC_SHMEM_HEADER_VERSION;
header.header_length = sizeof(header);
header.header_length = header_length;
header.mmap_address = (uintptr_t) mmap_address;
header.mmap_length = length;
@ -127,7 +129,7 @@ hwloc_shmem_topology_write(hwloc_topology_t topology,
tma.malloc = tma_shmem_malloc;
tma.dontfree = 1;
tma.data = (char *)mmap_res + sizeof(header);
tma.data = (char *)mmap_res + header_length;
err = hwloc__topology_dup(&new, topology, &tma);
if (err < 0)
return err;
@ -154,6 +156,7 @@ hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,
{
hwloc_topology_t new, old;
struct hwloc_shmem_header header;
uint32_t header_length = (sizeof(header) + sizeof(void*) - 1) & ~(sizeof(void*) - 1); /* pad to a multiple of pointer size */
void *mmap_res;
int err;
@ -171,7 +174,7 @@ hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,
return -1;
if (header.header_version != HWLOC_SHMEM_HEADER_VERSION
|| header.header_length != sizeof(header)
|| header.header_length != header_length
|| header.mmap_address != (uintptr_t) mmap_address
|| header.mmap_length != length) {
errno = EINVAL;
@ -186,7 +189,7 @@ hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,
goto out_with_mmap;
}
old = (hwloc_topology_t)((char*)mmap_address + sizeof(header));
old = (hwloc_topology_t)((char*)mmap_address + header_length);
if (hwloc_topology_abi_check(old) < 0) {
errno = EINVAL;
goto out_with_mmap;
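The header_length expression used twice above is the standard align-up idiom: (x + a - 1) & ~(a - 1) rounds x up to the next multiple of a when a is a power of two, so the adopted topology starts pointer-aligned. Generic sketch (the numbers are illustrative, not the actual header size):

#include <assert.h>
#include <stddef.h>

static size_t align_up(size_t x, size_t a) /* a must be a power of two */
{
    assert(a && (a & (a - 1)) == 0);
    return (x + a - 1) & ~(a - 1);
}
/* align_up(28, 8) == 32, align_up(32, 8) == 32 */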


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2023 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -23,6 +23,7 @@ struct hwloc_synthetic_attr_s {
unsigned depth; /* For caches/groups */
hwloc_obj_cache_type_t cachetype; /* For caches */
hwloc_uint64_t memorysize; /* For caches/memory */
hwloc_uint64_t memorysidecachesize; /* Single level of memory-side cache in front of a NUMA node */
};
struct hwloc_synthetic_indexes_s {
@ -380,6 +381,9 @@ hwloc_synthetic_parse_attrs(const char *attrs, const char **next_posp,
} else if (!iscache && !strncmp("memory=", attrs, 7)) {
memorysize = hwloc_synthetic_parse_memory_attr(attrs+7, &attrs);
} else if (!strncmp("memorysidecachesize=", attrs, 20)) {
sattr->memorysidecachesize = hwloc_synthetic_parse_memory_attr(attrs+20, &attrs);
} else if (!strncmp("indexes=", attrs, 8)) {
index_string = attrs+8;
attrs += 8;
@ -387,10 +391,9 @@ hwloc_synthetic_parse_attrs(const char *attrs, const char **next_posp,
attrs += index_string_length;
} else {
if (verbose)
fprintf(stderr, "Unknown attribute at '%s'\n", attrs);
errno = EINVAL;
return -1;
size_t length = strcspn(attrs, " )");
fprintf(stderr, "hwloc/synthetic: Ignoring unknown attribute at '%s'\n", attrs);
attrs += length;
}
if (' ' == *attrs)
@ -416,6 +419,32 @@ hwloc_synthetic_parse_attrs(const char *attrs, const char **next_posp,
return 0;
}
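A hedged usage sketch for the new memorysidecachesize attribute: it puts one level of memory-side cache in front of each NUMA node described by a synthetic string (the description below is an illustration, not taken from the patch; error handling elided):

#include <hwloc.h>

static hwloc_topology_t make_topo(void)
{
    hwloc_topology_t topology;
    hwloc_topology_init(&topology);
    /* 2 packages, each with 1 NUMA node (4GB RAM behind a 1GB memory-side cache),
     * 8 cores with 2 PUs each */
    hwloc_topology_set_synthetic(topology,
        "pack:2 numanode:1(memory=4GB memorysidecachesize=1GB) core:8 pu:2");
    hwloc_topology_load(topology);
    return topology;
}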
static void
hwloc_synthetic_set_default_attrs(struct hwloc_synthetic_attr_s *sattr,
int *type_count)
{
hwloc_obj_type_t type = sattr->type;
if (type == HWLOC_OBJ_GROUP) {
if (sattr->depth == (unsigned)-1)
sattr->depth = type_count[HWLOC_OBJ_GROUP]--;
} else if (hwloc__obj_type_is_cache(type)) {
if (!sattr->memorysize) {
if (1 == sattr->depth)
/* 32KiB in L1 */
sattr->memorysize = 32*1024;
else
/* *4 at each level, starting from 1MiB for L2, unified */
sattr->memorysize = 256ULL*1024 << (2*sattr->depth);
}
} else if (type == HWLOC_OBJ_NUMANODE && !sattr->memorysize) {
/* 1GiB in memory nodes. */
sattr->memorysize = 1024*1024*1024;
}
}
/* frees level until arity = 0 */
static void
hwloc_synthetic_free_levels(struct hwloc_synthetic_backend_data_s *data)
@ -465,6 +494,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
data->level[0].indexes.string = NULL;
data->level[0].indexes.array = NULL;
data->level[0].attr.memorysize = 0;
data->level[0].attr.memorysidecachesize = 0;
data->level[0].attached = NULL;
type_count[HWLOC_OBJ_MACHINE] = 1;
if (*description == '(') {
@ -514,6 +544,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
if (attached) {
attached->attr.type = type;
attached->attr.memorysize = 0;
attached->attr.memorysidecachesize = 0;
/* attached->attr.depth and .cachetype unused */
attached->next = NULL;
pprev = &data->level[count-1].attached;
@ -601,7 +632,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
}
if (!item) {
if (verbose)
fprintf(stderr,"Synthetic string with disallow 0 number of objects at '%s'\n", pos);
fprintf(stderr,"Synthetic string with disallowed 0 number of objects at '%s'\n", pos);
errno = EINVAL;
goto error;
}
@ -611,6 +642,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
data->level[count].indexes.string = NULL;
data->level[count].indexes.array = NULL;
data->level[count].attr.memorysize = 0;
data->level[count].attr.memorysidecachesize = 0;
if (*next_pos == '(') {
err = hwloc_synthetic_parse_attrs(next_pos+1, &next_pos, &data->level[count].attr, &data->level[count].indexes, verbose);
if (err < 0)
@ -796,6 +828,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
data->level[1].indexes.string = NULL;
data->level[1].indexes.array = NULL;
data->level[1].attr.memorysize = 0;
data->level[1].attr.memorysidecachesize = 0;
data->level[1].totalwidth = data->level[0].totalwidth;
/* update arity to insert a single NUMA node per parent */
data->level[1].arity = data->level[0].arity;
@ -803,30 +836,14 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
count++;
}
/* set default attributes that depend on the depth/hierarchy of levels */
for (i=0; i<count; i++) {
struct hwloc_synthetic_attached_s *attached;
struct hwloc_synthetic_level_data_s *curlevel = &data->level[i];
hwloc_obj_type_t type = curlevel->attr.type;
if (type == HWLOC_OBJ_GROUP) {
if (curlevel->attr.depth == (unsigned)-1)
curlevel->attr.depth = type_count[HWLOC_OBJ_GROUP]--;
} else if (hwloc__obj_type_is_cache(type)) {
if (!curlevel->attr.memorysize) {
if (1 == curlevel->attr.depth)
/* 32KiB in L1 */
curlevel->attr.memorysize = 32*1024;
else
/* *4 at each level, starting from 1MiB for L2, unified */
curlevel->attr.memorysize = 256ULL*1024 << (2*curlevel->attr.depth);
}
} else if (type == HWLOC_OBJ_NUMANODE && !curlevel->attr.memorysize) {
/* 1GiB in memory nodes. */
curlevel->attr.memorysize = 1024*1024*1024;
}
hwloc_synthetic_process_indexes(data, &data->level[i].indexes, data->level[i].totalwidth, verbose);
hwloc_synthetic_set_default_attrs(&curlevel->attr, type_count);
for(attached = curlevel->attached; attached != NULL; attached = attached->next)
hwloc_synthetic_set_default_attrs(&attached->attr, type_count);
hwloc_synthetic_process_indexes(data, &curlevel->indexes, curlevel->totalwidth, verbose);
}
hwloc_synthetic_process_indexes(data, &data->numa_attached_indexes, data->numa_attached_nr, verbose);
@ -859,6 +876,12 @@ hwloc_synthetic_set_attr(struct hwloc_synthetic_attr_s *sattr,
obj->attr->numanode.page_types[0].size = 4096;
obj->attr->numanode.page_types[0].count = sattr->memorysize / 4096;
break;
case HWLOC_OBJ_MEMCACHE:
obj->attr->cache.depth = 1;
obj->attr->cache.linesize = 64;
obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
obj->attr->cache.size = sattr->memorysidecachesize;
break;
case HWLOC_OBJ_PACKAGE:
case HWLOC_OBJ_DIE:
break;
@ -926,6 +949,14 @@ hwloc_synthetic_insert_attached(struct hwloc_topology *topology,
hwloc__insert_object_by_cpuset(topology, NULL, child, "synthetic:attached");
if (attached->attr.memorysidecachesize) {
hwloc_obj_t mscachechild = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MEMCACHE, HWLOC_UNKNOWN_INDEX);
mscachechild->cpuset = hwloc_bitmap_dup(set);
mscachechild->nodeset = hwloc_bitmap_dup(child->nodeset);
hwloc_synthetic_set_attr(&attached->attr, mscachechild);
hwloc__insert_object_by_cpuset(topology, NULL, mscachechild, "synthetic:attached:mscache");
}
hwloc_synthetic_insert_attached(topology, data, attached->next, set);
}
@ -977,6 +1008,14 @@ hwloc__look_synthetic(struct hwloc_topology *topology,
hwloc_synthetic_set_attr(&curlevel->attr, obj);
hwloc__insert_object_by_cpuset(topology, NULL, obj, "synthetic");
if (type == HWLOC_OBJ_NUMANODE && curlevel->attr.memorysidecachesize) {
hwloc_obj_t mscachechild = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MEMCACHE, HWLOC_UNKNOWN_INDEX);
mscachechild->cpuset = hwloc_bitmap_dup(set);
mscachechild->nodeset = hwloc_bitmap_dup(obj->nodeset);
hwloc_synthetic_set_attr(&curlevel->attr, mscachechild);
hwloc__insert_object_by_cpuset(topology, NULL, mscachechild, "synthetic:mscache");
}
}
hwloc_synthetic_insert_attached(topology, data, curlevel->attached, set);
@ -1217,6 +1256,7 @@ hwloc__export_synthetic_indexes(hwloc_obj_t *level, unsigned total,
static int
hwloc__export_synthetic_obj_attr(struct hwloc_topology * topology,
unsigned long flags,
hwloc_obj_t obj,
char *buffer, size_t buflen)
{
@ -1224,6 +1264,7 @@ hwloc__export_synthetic_obj_attr(struct hwloc_topology * topology,
const char * prefix = "(";
char cachesize[64] = "";
char memsize[64] = "";
char memorysidecachesize[64] = "";
int needindexes = 0;
if (hwloc__obj_type_is_cache(obj->type) && obj->attr->cache.size) {
@ -1236,6 +1277,19 @@ hwloc__export_synthetic_obj_attr(struct hwloc_topology * topology,
prefix, (unsigned long long) obj->attr->numanode.local_memory);
prefix = separator;
}
if (obj->type == HWLOC_OBJ_NUMANODE && !(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1)) {
hwloc_obj_t memorysidecache = obj->parent;
hwloc_uint64_t size = 0;
while (memorysidecache && memorysidecache->type == HWLOC_OBJ_MEMCACHE) {
size += memorysidecache->attr->cache.size;
memorysidecache = memorysidecache->parent;
}
if (size) {
snprintf(memorysidecachesize, sizeof(memorysidecachesize), "%smemorysidecachesize=%llu",
prefix, (unsigned long long) size);
prefix = separator;
}
}
if (!obj->logical_index /* only display indexes once per level (not for non-first NUMA children, etc.) */
&& (obj->type == HWLOC_OBJ_PU || obj->type == HWLOC_OBJ_NUMANODE)) {
hwloc_obj_t cur = obj;
@ -1247,12 +1301,12 @@ hwloc__export_synthetic_obj_attr(struct hwloc_topology * topology,
cur = cur->next_cousin;
}
}
if (*cachesize || *memsize || needindexes) {
if (*cachesize || *memsize || *memorysidecachesize || needindexes) {
ssize_t tmplen = buflen;
char *tmp = buffer;
int res, ret = 0;
res = hwloc_snprintf(tmp, tmplen, "%s%s%s", cachesize, memsize, needindexes ? "" : ")");
res = hwloc_snprintf(tmp, tmplen, "%s%s%s%s", cachesize, memsize, memorysidecachesize, needindexes ? "" : ")");
if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0)
return -1;
@ -1326,7 +1380,7 @@ hwloc__export_synthetic_obj(struct hwloc_topology * topology, unsigned long flag
if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS)) {
/* obj attributes */
res = hwloc__export_synthetic_obj_attr(topology, obj, tmp, tmplen);
res = hwloc__export_synthetic_obj_attr(topology, flags, obj, tmp, tmplen);
if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0)
return -1;
}
@ -1351,7 +1405,7 @@ hwloc__export_synthetic_memory_children(struct hwloc_topology * topology, unsign
if (flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1) {
/* v1: export a single NUMA child */
if (parent->memory_arity > 1 || mchild->type != HWLOC_OBJ_NUMANODE) {
if (parent->memory_arity > 1) {
/* not supported */
if (verbose)
fprintf(stderr, "Cannot export to synthetic v1 if multiple memory children are attached to the same location.\n");
@ -1362,6 +1416,9 @@ hwloc__export_synthetic_memory_children(struct hwloc_topology * topology, unsign
if (needprefix)
hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, ' ');
/* ignore memcaches and export the NUMA node */
while (mchild->type != HWLOC_OBJ_NUMANODE)
mchild = mchild->memory_first_child;
res = hwloc__export_synthetic_obj(topology, flags, mchild, 1, tmp, tmplen);
if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0)
return -1;
@ -1369,16 +1426,25 @@ hwloc__export_synthetic_memory_children(struct hwloc_topology * topology, unsign
}
while (mchild) {
/* FIXME: really recurse to export memcaches and numanode,
/* The core doesn't support shared memcache for now (because ACPI and Linux don't).
* So, for each mchild here, recurse only in the first children at each level.
*
* FIXME: whenever supported by the core, really recurse to export memcaches and numanode,
* but it requires clever parsing of [ memcache [numa] [numa] ] during import,
* better attaching of things to describe the hierarchy.
*/
hwloc_obj_t numanode = mchild;
/* only export the first NUMA node leaf of each memory child
* FIXME: This assumes mscache aren't shared between nodes, that's true in current platforms
/* Only export the first NUMA node leaf of each memory child.
* Memcaches are ignored here, they will be summed and exported as a single attribute
* of the NUMA node in hwloc__export_synthetic_obj().
*/
while (numanode && numanode->type != HWLOC_OBJ_NUMANODE) {
assert(numanode->arity == 1);
if (verbose && numanode->memory_arity > 1) {
static int warned = 0;
if (!warned)
fprintf(stderr, "Ignoring non-first memory children at non-first level of memory hierarchy.\n");
warned = 1;
}
numanode = numanode->memory_first_child;
}
assert(numanode); /* there's always a numanode at the bottom of the memory tree */
@ -1511,17 +1577,21 @@ hwloc_topology_export_synthetic(struct hwloc_topology * topology,
if (flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1) {
/* v1 requires all NUMA at the same level */
hwloc_obj_t node;
hwloc_obj_t node, parent;
signed pdepth;
node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
assert(node);
assert(hwloc__obj_type_is_normal(node->parent->type)); /* only depth-1 memory children for now */
pdepth = node->parent->depth;
parent = node->parent;
while (!hwloc__obj_type_is_normal(parent->type))
parent = parent->parent;
pdepth = parent->depth;
while ((node = node->next_cousin) != NULL) {
assert(hwloc__obj_type_is_normal(node->parent->type)); /* only depth-1 memory children for now */
if (node->parent->depth != pdepth) {
parent = node->parent;
while (!hwloc__obj_type_is_normal(parent->type))
parent = parent->parent;
if (parent->depth != pdepth) {
if (verbose)
fprintf(stderr, "Cannot export to synthetic v1 if memory is attached to parents at different depths.\n");
errno = EINVAL;
@ -1534,7 +1604,7 @@ hwloc_topology_export_synthetic(struct hwloc_topology * topology,
if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS)) {
/* obj attributes */
res = hwloc__export_synthetic_obj_attr(topology, obj, tmp, tmplen);
res = hwloc__export_synthetic_obj_attr(topology, flags, obj, tmp, tmplen);
if (res > 0)
needprefix = 1;
if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0)


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2025 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -56,6 +56,9 @@ typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP {
RelationCache,
RelationProcessorPackage,
RelationGroup,
RelationProcessorDie,
RelationNumaNodeEx, /* only used to *request* extended numa info, but included in RelationAll, never returned on output */
RelationProcessorModule,
RelationAll = 0xffff
} LOGICAL_PROCESSOR_RELATIONSHIP;
#else /* HAVE_LOGICAL_PROCESSOR_RELATIONSHIP */
@ -64,6 +67,11 @@ typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP {
# define RelationGroup 4
# define RelationAll 0xffff
# endif /* HAVE_RELATIONPROCESSORPACKAGE */
# ifndef HAVE_RELATIONPROCESSORDIE
# define RelationProcessorDie 5
# define RelationNumaNodeEx 6
# define RelationProcessorModule 7
# endif
#endif /* HAVE_LOGICAL_PROCESSOR_RELATIONSHIP */
#ifndef HAVE_GROUP_AFFINITY
@ -220,7 +228,7 @@ static void hwloc_win_get_function_ptrs(void)
#pragma GCC diagnostic ignored "-Wcast-function-type"
#endif
kernel32 = LoadLibrary("kernel32.dll");
kernel32 = LoadLibrary(TEXT("kernel32.dll"));
if (kernel32) {
GetActiveProcessorGroupCountProc =
(PFN_GETACTIVEPROCESSORGROUPCOUNT) GetProcAddress(kernel32, "GetActiveProcessorGroupCount");
@ -249,12 +257,12 @@ static void hwloc_win_get_function_ptrs(void)
}
if (!QueryWorkingSetExProc) {
HMODULE psapi = LoadLibrary("psapi.dll");
HMODULE psapi = LoadLibrary(TEXT("psapi.dll"));
if (psapi)
QueryWorkingSetExProc = (PFN_QUERYWORKINGSETEX) GetProcAddress(psapi, "QueryWorkingSetEx");
}
ntdll = GetModuleHandle("ntdll");
ntdll = GetModuleHandle(TEXT("ntdll"));
RtlGetVersionProc = (PFN_RTLGETVERSION) GetProcAddress(ntdll, "RtlGetVersion");
#if HWLOC_HAVE_GCC_W_CAST_FUNCTION_TYPE
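Background on the TEXT() changes above: when a Windows build defines UNICODE, LoadLibrary and GetModuleHandle expand to their W variants taking wide strings, so bare narrow literals no longer compile; TEXT("...") expands to L"..." or "..." to match. Roughly (a sketch, not hwloc code):

#include <windows.h>

static HMODULE load_kernel32(void)
{
#ifdef UNICODE
    /* LoadLibrary == LoadLibraryW(LPCWSTR); TEXT("x") == L"x" */
    return LoadLibraryW(L"kernel32.dll");
#else
    /* LoadLibrary == LoadLibraryA(LPCSTR); TEXT("x") == "x" */
    return LoadLibraryA("kernel32.dll");
#endif
}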
@ -366,8 +374,8 @@ hwloc_win_get_processor_groups(void)
hwloc_debug("found %lu windows processor groups\n", nr_processor_groups);
if (nr_processor_groups > 1 && SIZEOF_VOID_P == 4) {
if (HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc: multiple processor groups found on 32bits Windows, topology may be invalid/incomplete.\n");
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc/windows: multiple processor groups found on 32bits Windows, topology may be invalid/incomplete.\n");
}
length = 0;
@ -987,7 +995,11 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
OSVERSIONINFOEX osvi;
char versionstr[20];
char hostname[122] = "";
unsigned hostname_size = sizeof(hostname);
#if !defined(__CYGWIN__)
DWORD hostname_size = sizeof(hostname);
#else
size_t hostname_size = sizeof(hostname);
#endif
int has_efficiencyclass = 0;
struct hwloc_win_efficiency_classes eclasses;
char *env = getenv("HWLOC_WINDOWS_PROCESSOR_GROUP_OBJS");
@ -1051,15 +1063,20 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
unsigned efficiency_class = 0;
GROUP_AFFINITY *GroupMask;
/* Ignore unknown caches */
if (procInfo->Relationship == RelationCache
&& procInfo->Cache.Type != CacheUnified
&& procInfo->Cache.Type != CacheData
&& procInfo->Cache.Type != CacheInstruction)
continue;
if (procInfo->Relationship == RelationCache) {
if (!topology->want_some_cpu_caches)
/* TODO: check if RelationAll&~RelationCache works? */
continue;
if (procInfo->Cache.Type != CacheUnified
&& procInfo->Cache.Type != CacheData
&& procInfo->Cache.Type != CacheInstruction)
/* Ignore unknown caches */
continue;
}
id = HWLOC_UNKNOWN_INDEX;
switch (procInfo->Relationship) {
case RelationNumaNodeEx: /* only used on input anyway */
case RelationNumaNode:
type = HWLOC_OBJ_NUMANODE;
/* Starting with Windows 11 and Server 2022, the GroupCount field is valid and >=1
@ -1079,9 +1096,19 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
break;
case RelationProcessorPackage:
type = HWLOC_OBJ_PACKAGE;
num = procInfo->Processor.GroupCount;
GroupMask = procInfo->Processor.GroupMask;
break;
case RelationProcessorDie:
type = HWLOC_OBJ_DIE;
num = procInfo->Processor.GroupCount;
GroupMask = procInfo->Processor.GroupMask;
break;
case RelationProcessorModule:
type = HWLOC_OBJ_GROUP;
num = procInfo->Processor.GroupCount;
GroupMask = procInfo->Processor.GroupMask;
break;
case RelationCache:
type = (procInfo->Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo->Cache.Level - 1;
/* GroupCount added approximately with NumaNode.GroupCount above */
@ -1203,6 +1230,19 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
continue;
}
break;
case HWLOC_OBJ_GROUP:
switch (procInfo->Relationship) {
case RelationGroup:
obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP;
break;
case RelationProcessorModule:
obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_MODULE;
obj->subtype = strdup("Module");
break;
default:
obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN;
}
break;
default:
break;
}


@ -1,11 +1,11 @@
/*
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2010-2025 Inria. All rights reserved.
* Copyright © 2010-2013 Université Bordeaux
* Copyright © 2010-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*
*
* This backend is only used when the operating system does not export
* This backend is mostly used when the operating system does not export
* the necessary hardware topology information to user-space applications.
* Currently, FreeBSD and NetBSD only add PUs and then fall back to this
* backend for CPU/Cache discovery.
@ -15,6 +15,7 @@
* on various architectures, without having to use this x86-specific code.
* But this backend is still used after them to annotate some objects with
* additional details (CPU info in Package, Inclusiveness in Caches).
* It may also be enabled manually to work around bugs in native OS discovery.
*/
#include "private/autogen/config.h"
@ -38,6 +39,12 @@ struct hwloc_x86_backend_data_s {
int apicid_unique;
char *src_cpuiddump_path;
int is_knl;
int is_hybrid;
int found_die_ids;
int found_complex_ids;
int found_unit_ids;
int found_module_ids;
int found_tile_ids;
};
/************************************
@ -80,7 +87,7 @@ cpuiddump_read(const char *dirpath, unsigned idx)
cpuiddump = malloc(sizeof(*cpuiddump));
if (!cpuiddump) {
fprintf(stderr, "Failed to allocate cpuiddump for PU #%u, ignoring cpuiddump.\n", idx);
fprintf(stderr, "hwloc/x86: Failed to allocate cpuiddump for PU #%u, ignoring cpuiddump.\n", idx);
goto out;
}
@ -91,7 +98,7 @@ cpuiddump_read(const char *dirpath, unsigned idx)
snprintf(filename, filenamelen, "%s/pu%u", dirpath, idx);
file = fopen(filename, "r");
if (!file) {
fprintf(stderr, "Could not read dumped cpuid file %s, ignoring cpuiddump.\n", filename);
fprintf(stderr, "hwloc/x86: Could not read dumped cpuid file %s, ignoring cpuiddump.\n", filename);
goto out_with_filename;
}
@ -100,7 +107,7 @@ cpuiddump_read(const char *dirpath, unsigned idx)
nr++;
cpuiddump->entries = malloc(nr * sizeof(struct cpuiddump_entry));
if (!cpuiddump->entries) {
fprintf(stderr, "Failed to allocate %u cpuiddump entries for PU #%u, ignoring cpuiddump.\n", nr, idx);
fprintf(stderr, "hwloc/x86: Failed to allocate %u cpuiddump entries for PU #%u, ignoring cpuiddump.\n", nr, idx);
goto out_with_file;
}
@ -156,7 +163,7 @@ cpuiddump_find_by_input(unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *e
return;
}
fprintf(stderr, "Couldn't find %x,%x,%x,%x in dumped cpuid, returning 0s.\n",
fprintf(stderr, "hwloc/x86: Couldn't find %x,%x,%x,%x in dumped cpuid, returning 0s.\n",
*eax, *ebx, *ecx, *edx);
*eax = 0;
*ebx = 0;
@ -210,7 +217,8 @@ struct procinfo {
#define TILE 4
#define MODULE 5
#define DIE 6
#define HWLOC_X86_PROCINFO_ID_NR 7
#define COMPLEX 7
#define HWLOC_X86_PROCINFO_ID_NR 8
unsigned ids[HWLOC_X86_PROCINFO_ID_NR];
unsigned *otherids;
unsigned levels;
@ -314,7 +322,7 @@ static void read_amd_caches_topoext(struct procinfo *infos, struct cpuiddump *sr
/* the code below doesn't want any other cache yet */
assert(!infos->numcaches);
for (cachenum = 0; ; cachenum++) {
for (cachenum = 0; cachenum<16 /* guard */; cachenum++) {
eax = 0x8000001d;
ecx = cachenum;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
@ -325,7 +333,7 @@ static void read_amd_caches_topoext(struct procinfo *infos, struct cpuiddump *sr
cache = infos->cache = malloc(infos->numcaches * sizeof(*infos->cache));
if (cache) {
for (cachenum = 0; ; cachenum++) {
for (cachenum = 0; cachenum<16 /* guard */; cachenum++) {
unsigned long linesize, linepart, ways, sets;
eax = 0x8000001d;
ecx = cachenum;
@ -378,7 +386,7 @@ static void read_intel_caches(struct hwloc_x86_backend_data_s *data, struct proc
unsigned cachenum;
struct cacheinfo *cache;
for (cachenum = 0; ; cachenum++) {
for (cachenum = 0; cachenum<16 /* guard */; cachenum++) {
eax = 0x04;
ecx = cachenum;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
@ -400,7 +408,7 @@ static void read_intel_caches(struct hwloc_x86_backend_data_s *data, struct proc
infos->cache = tmpcaches;
cache = &infos->cache[oldnumcaches];
for (cachenum = 0; ; cachenum++) {
for (cachenum = 0; cachenum<16 /* guard */; cachenum++) {
unsigned long linesize, linepart, ways, sets;
eax = 0x04;
ecx = cachenum;
@ -480,7 +488,7 @@ static void read_amd_cores_legacy(struct procinfo *infos, struct cpuiddump *src_
}
/* AMD unit/node from CPUID 0x8000001e leaf (topoext) */
static void read_amd_cores_topoext(struct procinfo *infos, unsigned long flags, struct cpuiddump *src_cpuiddump)
static void read_amd_cores_topoext(struct hwloc_x86_backend_data_s *data, struct procinfo *infos, unsigned long flags __hwloc_attribute_unused, struct cpuiddump *src_cpuiddump)
{
unsigned apic_id, nodes_per_proc = 0;
unsigned eax, ebx, ecx, edx;
@ -489,7 +497,6 @@ static void read_amd_cores_topoext(struct procinfo *infos, unsigned long flags,
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
infos->apicid = apic_id = eax;
if (flags & HWLOC_X86_DISC_FLAG_TOPOEXT_NUMANODES) {
if (infos->cpufamilynumber == 0x16) {
/* ecx is reserved */
infos->ids[NODE] = 0;
@ -504,12 +511,12 @@ static void read_amd_cores_topoext(struct procinfo *infos, unsigned long flags,
|| (infos->cpufamilynumber == 0x19 && nodes_per_proc > 1)) {
hwloc_debug("warning: undefined nodes_per_proc value %u, assuming it means %u\n", nodes_per_proc, nodes_per_proc);
}
}
if (infos->cpufamilynumber <= 0x16) { /* topoext appeared in 0x15 and compute-units were only used in 0x15 and 0x16 */
unsigned cores_per_unit;
/* coreid was obtained from read_amd_cores_legacy() earlier */
infos->ids[UNIT] = ebx & 0xff;
data->found_unit_ids = 1;
cores_per_unit = ((ebx >> 8) & 0xff) + 1;
hwloc_debug("topoext %08x, %u nodes, node %u, %u cores in unit %u\n", apic_id, nodes_per_proc, infos->ids[NODE], cores_per_unit, infos->ids[UNIT]);
/* coreid and unitid are package-wide (core 0-15 and unit 0-7 on 16-core 2-NUMAnode processor).
@ -524,19 +531,29 @@ static void read_amd_cores_topoext(struct procinfo *infos, unsigned long flags,
}
}
/* Intel core/thread or even die/module/tile from CPUID 0x0b or 0x1f leaves (v1 and v2 extended topology enumeration) */
static void read_intel_cores_exttopoenum(struct procinfo *infos, unsigned leaf, struct cpuiddump *src_cpuiddump)
/* Intel core/thread or even die/module/tile from CPUID 0x0b or 0x1f leaves (v1 and v2 extended topology enumeration)
* or AMD core/thread or even complex/ccd from CPUID 0x0b or 0x80000026 (extended CPU topology)
*/
static void read_extended_topo(struct hwloc_x86_backend_data_s *data, struct procinfo *infos, unsigned leaf, enum cpuid_type cpuid_type __hwloc_attribute_unused, struct cpuiddump *src_cpuiddump)
{
unsigned level, apic_nextshift, apic_number, apic_type, apic_id = 0, apic_shift = 0, id;
unsigned level, apic_nextshift, apic_type, apic_id = 0, apic_shift = 0, id;
unsigned threadid __hwloc_attribute_unused = 0; /* shut-up compiler */
unsigned eax, ebx, ecx = 0, edx;
int apic_packageshift = 0;
for (level = 0; ; level++) {
for (level = 0; level<32 /* guard */; level++) {
ecx = level;
eax = leaf;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
if (!eax && !ebx)
/* Intel specifies that the 0x0b/0x1f loop should stop when we get "invalid domain" (0 in ecx[8:15])
* (if so, we also get 0 in eax/ebx for invalid subleaves). Zhaoxin implements this too.
* However AMD rather says that the 0x80000026/0x0b loop should stop when we get "no thread at this level" (0 in ebx[0:15]).
*
* Linux kernel <= 6.8 used "invalid domain" for both Intel and AMD (in detect_extended_topology())
* but x86 discovery revamp in 6.9 now properly checks both Intel and AMD conditions (in topo_subleaf()).
* So let's assume we are allowed to break out once one of the Intel+AMD conditions is met.
*/
if (!(ebx & 0xffff) || !(ecx & 0xff00))
break;
apic_packageshift = eax & 0x1f;
}
@ -545,47 +562,68 @@ static void read_intel_cores_exttopoenum(struct procinfo *infos, unsigned leaf,
infos->otherids = malloc(level * sizeof(*infos->otherids));
if (infos->otherids) {
infos->levels = level;
for (level = 0; ; level++) {
for (level = 0; level<32 /* guard */; level++) {
ecx = level;
eax = leaf;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
if (!eax && !ebx)
break;
if (!(ebx & 0xffff) || !(ecx & 0xff00))
break;
apic_nextshift = eax & 0x1f;
apic_number = ebx & 0xffff;
apic_type = (ecx & 0xff00) >> 8;
apic_id = edx;
id = (apic_id >> apic_shift) & ((1 << (apic_packageshift - apic_shift)) - 1);
hwloc_debug("x2APIC %08x %u: nextshift %u num %2u type %u id %2u\n", apic_id, level, apic_nextshift, apic_number, apic_type, id);
hwloc_debug("x2APIC %08x %u: nextshift %u nextnumber %2u type %u id %2u\n",
apic_id,
level,
apic_nextshift,
ebx & 0xffff /* number of threads in next level */,
apic_type,
id);
infos->apicid = apic_id;
infos->otherids[level] = UINT_MAX;
switch (apic_type) {
case 1:
threadid = id;
/* apic_number is the actual number of threads per core */
break;
case 2:
infos->ids[CORE] = id;
/* apic_number is the actual number of threads per die */
break;
case 3:
infos->ids[MODULE] = id;
/* apic_number is the actual number of threads per tile */
break;
case 4:
infos->ids[TILE] = id;
/* apic_number is the actual number of threads per die */
break;
case 5:
infos->ids[DIE] = id;
/* apic_number is the actual number of threads per package */
break;
default:
hwloc_debug("x2APIC %u: unknown type %u\n", level, apic_type);
infos->otherids[level] = apic_id >> apic_shift;
break;
}
apic_shift = apic_nextshift;
switch (apic_type) {
case 1:
threadid = id;
break;
case 2:
infos->ids[CORE] = id;
break;
case 3:
if (leaf == 0x80000026) {
data->found_complex_ids = 1;
infos->ids[COMPLEX] = id;
} else {
data->found_module_ids = 1;
infos->ids[MODULE] = id;
}
break;
case 4:
if (leaf == 0x80000026) {
data->found_die_ids = 1;
infos->ids[DIE] = id;
} else {
data->found_tile_ids = 1;
infos->ids[TILE] = id;
}
break;
case 5:
if (leaf == 0x80000026) {
goto unknown_type;
} else {
data->found_die_ids = 1;
infos->ids[DIE] = id;
}
break;
case 6:
/* TODO: "DieGrp" on Intel */
/* fallthrough */
default:
unknown_type:
hwloc_debug("x2APIC %u: unknown type %u\n", level, apic_type);
infos->otherids[level] = apic_id >> apic_shift;
break;
}
apic_shift = apic_nextshift;
}
infos->apicid = apic_id;
infos->ids[PKG] = apic_id >> apic_shift;
@ -615,7 +653,13 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
infos->apicid = ebx >> 24;
if (edx & (1 << 28)) {
legacy_max_log_proc = 1 << hwloc_flsl(((ebx >> 16) & 0xff) - 1);
unsigned ebx_16_23 = (ebx >> 16) & 0xff;
if (ebx_16_23) {
legacy_max_log_proc = 1 << hwloc_flsl(ebx_16_23 - 1);
} else {
hwloc_debug("HTT bit set in CPUID 0x01.edx, but legacy_max_proc = 0 in ebx, assuming legacy_max_log_proc = 1\n");
legacy_max_log_proc = 1;
}
} else {
hwloc_debug("HTT bit not set in CPUID 0x01.edx, assuming legacy_max_log_proc = 1\n");
legacy_max_log_proc = 1;
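The expression 1 << hwloc_flsl(n - 1) rounds n up to the next power of two (hwloc_flsl returns the 1-based index of the highest set bit): n=8 gives 1<<flsl(7) = 1<<3 = 8, n=12 gives 1<<flsl(11) = 1<<4 = 16. The new guard matters because ebx[23:16] == 0 would make the argument wrap around and the shift overflow. Equivalent sketch with a portable fls (fls_ul and round_up_pow2 are local illustrations):

#include <stdio.h>

static int fls_ul(unsigned long x) /* 1-based index of highest set bit, 0 if x == 0 */
{
    int i = 0;
    while (x) { i++; x >>= 1; }
    return i;
}

static unsigned long round_up_pow2(unsigned long n) /* n >= 1 */
{
    return 1UL << fls_ul(n - 1);
}

int main(void)
{
    printf("%lu %lu %lu\n", round_up_pow2(8), round_up_pow2(12), round_up_pow2(1));
    /* prints: 8 16 1 */
    return 0;
}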
@ -704,12 +748,13 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
}
if (highest_cpuid >= 0x1a && has_hybrid(features)) {
/* Get hybrid cpu information from cpuid 0x1a */
/* Get hybrid cpu information from cpuid 0x1a on Intel */
eax = 0x1a;
ecx = 0;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
infos->hybridcoretype = eax >> 24;
infos->hybridnativemodel = eax & 0xffffff;
data->is_hybrid = 1;
}
/*********************************************************************************
@ -731,23 +776,30 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
*
* Only needed when x2apic supported if NUMA nodes are needed.
*/
read_amd_cores_topoext(infos, flags, src_cpuiddump);
read_amd_cores_topoext(data, infos, flags, src_cpuiddump);
}
if ((cpuid_type == intel) && highest_cpuid >= 0x1f) {
if ((cpuid_type == amd) && highest_ext_cpuid >= 0x80000026) {
/* Get socket/die/complex/core/thread information from cpuid 0x80000026
* (AMD Extended CPU Topology)
*/
read_extended_topo(data, infos, 0x80000026, cpuid_type, src_cpuiddump);
} else if ((cpuid_type == intel || cpuid_type == zhaoxin) && highest_cpuid >= 0x1f) {
/* Get package/die/module/tile/core/thread information from cpuid 0x1f
* (Intel v2 Extended Topology Enumeration)
*/
read_intel_cores_exttopoenum(infos, 0x1f, src_cpuiddump);
read_extended_topo(data, infos, 0x1f, cpuid_type, src_cpuiddump);
} else if ((cpuid_type == intel || cpuid_type == amd || cpuid_type == zhaoxin)
&& highest_cpuid >= 0x0b && has_x2apic(features)) {
/* Get package/core/thread information from cpuid 0x0b
* (Intel v1 Extended Topology Enumeration)
*/
read_intel_cores_exttopoenum(infos, 0x0b, src_cpuiddump);
read_extended_topo(data, infos, 0x0b, cpuid_type, src_cpuiddump);
}
if (backend->topology->want_some_cpu_caches) {
/**************************************
* Get caches from CPU-specific leaves
*/
@ -845,6 +897,7 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
}
}
}
}
if (hwloc_bitmap_isset(data->apicid_set, infos->apicid))
data->apicid_unique = 0;
@ -1046,21 +1099,34 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) {
if (fulldiscovery) {
/* Look for AMD Compute units inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
UNIT, "Compute Unit",
HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT, 0);
/* Look for Intel Modules inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
MODULE, "Module",
HWLOC_GROUP_KIND_INTEL_MODULE, 0);
/* Look for Intel Tiles inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
TILE, "Tile",
HWLOC_GROUP_KIND_INTEL_TILE, 0);
if (data->found_complex_ids) {
/* Look for AMD Complex inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
COMPLEX, "Complex",
HWLOC_GROUP_KIND_AMD_COMPLEX, 0);
}
if (data->found_unit_ids) {
/* Look for AMD Compute units inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
UNIT, "Compute Unit",
HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT, 0);
}
if (data->found_module_ids) {
/* Look for Intel Modules inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
MODULE, "Module",
HWLOC_GROUP_KIND_INTEL_MODULE, 0);
}
if (data->found_tile_ids) {
/* Look for Intel Tiles inside packages */
hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
hwloc_x86_add_groups(topology, infos, nbprocs, remaining_cpuset,
TILE, "Tile",
HWLOC_GROUP_KIND_INTEL_TILE, 0);
}
/* Look for unknown objects */
if (infos[one].otherids) {
@ -1094,7 +1160,8 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
}
}
if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_DIE)) {
if (data->found_die_ids
&& hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_DIE)) {
/* Look for Intel Dies inside packages */
if (fulldiscovery) {
hwloc_bitmap_t die_cpuset;
@ -1349,40 +1416,45 @@ look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long
if (data->apicid_unique) {
summarize(backend, infos, flags);
if (has_hybrid(features) && !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
if (data->is_hybrid
&& !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
/* use hybrid info for cpukinds */
hwloc_bitmap_t atomset = hwloc_bitmap_alloc();
hwloc_bitmap_t coreset = hwloc_bitmap_alloc();
for(i=0; i<nbprocs; i++) {
if (infos[i].hybridcoretype == 0x20)
hwloc_bitmap_set(atomset, i);
else if (infos[i].hybridcoretype == 0x40)
hwloc_bitmap_set(coreset, i);
}
/* register IntelAtom set if any */
if (!hwloc_bitmap_iszero(atomset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelAtom";
hwloc_internal_cpukinds_register(topology, atomset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(atomset);
}
/* register IntelCore set if any */
if (!hwloc_bitmap_iszero(coreset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelCore";
hwloc_internal_cpukinds_register(topology, coreset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(coreset);
if (cpuid_type == intel) {
/* Hybrid Intel */
hwloc_bitmap_t atomset = hwloc_bitmap_alloc();
hwloc_bitmap_t coreset = hwloc_bitmap_alloc();
for(i=0; i<nbprocs; i++) {
if (infos[i].hybridcoretype == 0x20)
hwloc_bitmap_set(atomset, i);
else if (infos[i].hybridcoretype == 0x40)
hwloc_bitmap_set(coreset, i);
}
/* register IntelAtom set if any */
if (!hwloc_bitmap_iszero(atomset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelAtom";
hwloc_internal_cpukinds_register(topology, atomset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(atomset);
}
/* register IntelCore set if any */
if (!hwloc_bitmap_iszero(coreset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelCore";
hwloc_internal_cpukinds_register(topology, coreset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(coreset);
}
}
}
} else {
hwloc_debug("x86 APIC IDs aren't unique, x86 discovery ignored.\n");
/* do nothing and return success, so that the caller does nothing either */
}
/* if !data->apicid_unique, do nothing and return success, so that the caller does nothing either */
return 0;
}
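Once the hybrid sets are registered above, applications can read them back through the public cpukinds API; a minimal hedged sketch (print_cpukinds is a hypothetical helper):

#include <hwloc.h>
#include <stdio.h>
#include <string.h>

static void print_cpukinds(hwloc_topology_t topology)
{
    hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
    int i, nr = hwloc_cpukinds_get_nr(topology, 0);
    for (i = 0; i < nr; i++) {
        struct hwloc_info_s *infos;
        unsigned nr_infos, j;
        int efficiency;
        if (hwloc_cpukinds_get_info(topology, i, cpuset, &efficiency,
                                    &nr_infos, &infos, 0) < 0)
            continue;
        for (j = 0; j < nr_infos; j++)
            if (!strcmp(infos[j].name, "CoreType"))
                printf("kind %d (efficiency %d): CoreType=%s\n",
                       i, efficiency, infos[j].value);
    }
    hwloc_bitmap_free(cpuset);
}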
@ -1459,7 +1531,15 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
unsigned i;
unsigned highest_cpuid;
unsigned highest_ext_cpuid;
/* This stores cpuid features with the same indexing as Linux */
/* This stores cpuid features with the same indexing as Linux:
* [0] = 0x1 edx
* [1] = 0x80000001 edx
* [4] = 0x1 ecx
* [6] = 0x80000001 ecx
* [9] = 0x7/0 ebx
* [16] = 0x7/0 ecx
* [18] = 0x7/0 edx
*/
unsigned features[19] = { 0 };
struct procinfo *infos = NULL;
enum cpuid_type cpuid_type = unknown;
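With that layout, individual feature bits are tested the way the Linux kernel does it, word*32+bit. For instance x2APIC is CPUID 0x1 ecx bit 21 and the hybrid flag is 0x7/0 edx bit 15, which matches the has_* macros used earlier in this file:

/* features[4] holds CPUID leaf 0x1 ecx, features[18] holds leaf 0x7/0 edx */
#define has_x2apic(features) ((features)[4] & (1 << 21))
#define has_hybrid(features) ((features)[18] & (1 << 15))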
@ -1579,6 +1659,7 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
ecx = 0;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
features[9] = ebx;
features[16] = ecx;
features[18] = edx;
}
@ -1667,7 +1748,7 @@ hwloc_x86_discover(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
if (topology->levels[0][0]->cpuset) {
/* somebody else discovered things, reconnect levels so that we can look at them */
hwloc_topology_reconnect(topology, 0);
hwloc__reconnect(topology, 0);
if (topology->nb_levels == 2 && topology->level_nbobjects[1] == data->nbprocs) {
/* only PUs were discovered, as much as we would, complete the topology with everything else */
alreadypus = 1;
@ -1730,17 +1811,17 @@ hwloc_x86_check_cpuiddump_input(const char *src_cpuiddump_path, hwloc_bitmap_t s
sprintf(path, "%s/hwloc-cpuid-info", src_cpuiddump_path);
file = fopen(path, "r");
if (!file) {
fprintf(stderr, "Couldn't open dumped cpuid summary %s\n", path);
fprintf(stderr, "hwloc/x86: Couldn't open dumped cpuid summary %s\n", path);
goto out_with_path;
}
if (!fgets(line, sizeof(line), file)) {
fprintf(stderr, "hwloc/x86: Failed to read dumped cpuid summary in %s\n", path);
fprintf(stderr, "hwloc/x86: Found read dumped cpuid summary in %s\n", path);
fclose(file);
goto out_with_path;
}
fclose(file);
if (strcmp(line, "Architecture: x86\n")) {
fprintf(stderr, "Found non-x86 dumped cpuid summary in %s: %s\n", path, line);
if (strncmp(line, "Architecture: x86", 17)) {
fprintf(stderr, "hwloc/x86: Found non-x86 dumped cpuid summary in %s: %s\n", path, line);
goto out_with_path;
}
free(path);
@ -1752,19 +1833,19 @@ hwloc_x86_check_cpuiddump_input(const char *src_cpuiddump_path, hwloc_bitmap_t s
if (!*end)
hwloc_bitmap_set(set, idx);
else
fprintf(stderr, "Ignoring invalid dirent `%s' in dumped cpuid directory `%s'\n",
fprintf(stderr, "hwloc/x86: Ignoring invalid dirent `%s' in dumped cpuid directory `%s'\n",
dirent->d_name, src_cpuiddump_path);
}
}
closedir(dir);
if (hwloc_bitmap_iszero(set)) {
fprintf(stderr, "Did not find any valid pu%%u entry in dumped cpuid directory `%s'\n",
fprintf(stderr, "hwloc/x86: Did not find any valid pu%%u entry in dumped cpuid directory `%s'\n",
src_cpuiddump_path);
return -1;
} else if (hwloc_bitmap_last(set) != hwloc_bitmap_weight(set) - 1) {
/* The x86 backend enforces a contiguous set of PUs starting at 0 so far */
fprintf(stderr, "Found non-contiguous pu%%u range in dumped cpuid directory `%s'\n",
fprintf(stderr, "hwloc/x86: Found non-contiguous pu%%u range in dumped cpuid directory `%s'\n",
src_cpuiddump_path);
return -1;
}
@ -1816,9 +1897,15 @@ hwloc_x86_component_instantiate(struct hwloc_topology *topology,
/* default values */
data->is_knl = 0;
data->is_hybrid = 0;
data->apicid_set = hwloc_bitmap_alloc();
data->apicid_unique = 1;
data->src_cpuiddump_path = NULL;
data->found_die_ids = 0;
data->found_complex_ids = 0;
data->found_unit_ids = 0;
data->found_module_ids = 0;
data->found_tile_ids = 0;
src_cpuiddump_path = getenv("HWLOC_CPUID_PATH");
if (src_cpuiddump_path) {
@ -1829,7 +1916,7 @@ hwloc_x86_component_instantiate(struct hwloc_topology *topology,
assert(!hwloc_bitmap_iszero(set)); /* enforced by hwloc_x86_check_cpuiddump_input() */
data->nbprocs = hwloc_bitmap_weight(set);
} else {
fprintf(stderr, "Ignoring dumped cpuid directory.\n");
fprintf(stderr, "hwloc/x86: Ignoring dumped cpuid directory.\n");
}
hwloc_bitmap_free(set);
}


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2024 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -41,7 +41,7 @@ typedef struct hwloc__nolibxml_import_state_data_s {
static char *
hwloc__nolibxml_import_ignore_spaces(char *buffer)
{
return buffer + strspn(buffer, " \t\n");
return buffer + strspn(buffer, " \t\n\r");
}
static int
@ -411,12 +411,12 @@ hwloc_nolibxml_backend_init(struct hwloc_xml_backend_data_s *bdata,
bdata->data = nbdata;
if (xmlbuffer) {
nbdata->buffer = malloc(xmlbuflen+1);
nbdata->buffer = malloc(xmlbuflen);
if (!nbdata->buffer)
goto out_with_nbdata;
nbdata->buflen = xmlbuflen+1;
nbdata->buflen = xmlbuflen;
memcpy(nbdata->buffer, xmlbuffer, xmlbuflen);
nbdata->buffer[xmlbuflen] = '\0';
nbdata->buffer[xmlbuflen-1] = '\0'; /* make sure it's there as requested in the API */
} else {
int err = hwloc_nolibxml_read_file(xmlpath, &nbdata->buffer, &nbdata->buflen);
@ -453,8 +453,9 @@ hwloc_nolibxml_import_diff(struct hwloc__xml_import_state_s *state,
buffer = malloc(xmlbuflen);
if (!buffer)
goto out;
memcpy(buffer, xmlbuffer, xmlbuflen);
buflen = xmlbuflen;
memcpy(buffer, xmlbuffer, xmlbuflen);
buffer[xmlbuflen-1] = '\0'; /* make sure it's there as requested in the API */
} else {
ret = hwloc_nolibxml_read_file(xmlpath, &buffer, &buflen);
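The buflen change above matches the documented API contract: hwloc_topology_set_xmlbuffer() takes the buffer size including the terminating \0, so the importer now asserts the terminator at xmlbuflen-1 instead of appending its own. Caller-side sketch (load_from_xml is a hypothetical helper):

#include <hwloc.h>
#include <string.h>

static int load_from_xml(hwloc_topology_t topology, const char *xml)
{
    /* size must include the ending \0 */
    return hwloc_topology_set_xmlbuffer(topology, xml, (int)strlen(xml) + 1);
}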


@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2025 Inria. All rights reserved.
* Copyright © 2009-2011, 2020 Université Bordeaux
* Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@ -415,6 +415,20 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
}
}
else if (!strcmp(name, "numanode_type")) {
switch (obj->type) {
case HWLOC_OBJ_NUMANODE: {
/* ignored for now, here for possible forward compat */
break;
}
default:
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring numanode_type attribute for non-NUMA object\n",
state->global->msgprefix);
break;
}
}
else if (data->version_major < 2) {
/************************
* deprecated from 1.x
@ -562,7 +576,13 @@ hwloc__xml_import_pagetype(hwloc_topology_t topology __hwloc_attribute_unused, s
char *attrname, *attrvalue;
if (state->global->next_attr(state, &attrname, &attrvalue) < 0)
break;
if (!strcmp(attrname, "size"))
if (!strcmp(attrname, "info")) {
char *infoname, *infovalue;
int ret = hwloc___xml_import_info(&infoname, &infovalue, state);
if (ret < 0)
return -1;
/* ignored */
} else if (!strcmp(attrname, "size"))
size = strtoull(attrvalue, NULL, 10);
else if (!strcmp(attrname, "count"))
count = strtoull(attrvalue, NULL, 10);
@ -866,14 +886,23 @@ hwloc__xml_import_object(hwloc_topology_t topology,
/* deal with possible future type */
obj->type = HWLOC_OBJ_GROUP;
obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_MODULE;
} else if (!strcasecmp(attrvalue, "MemCache")) {
} else if (!strcasecmp(attrvalue, "Cluster")) {
/* deal with possible future type */
obj->type = HWLOC_OBJ_GROUP;
obj->attr->group.kind = HWLOC_GROUP_KIND_LINUX_CLUSTER;
}
#if 0
/* reenable if there's ever a future type that should be ignored without being an error */
else if (!strcasecmp(attrvalue, "MemCache")) {
/* ignore possible future type */
obj->type = _HWLOC_OBJ_FUTURE;
ignored = 1;
if (hwloc__xml_verbose())
fprintf(stderr, "%s: %s object not-supported, will be ignored\n",
state->global->msgprefix, attrvalue);
} else {
}
#endif
else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: unrecognized object type string %s\n",
state->global->msgprefix, attrvalue);
@ -948,22 +977,22 @@ hwloc__xml_import_object(hwloc_topology_t topology,
if (hwloc__obj_type_is_normal(obj->type)) {
if (!hwloc__obj_type_is_normal(parent->type)) {
if (hwloc__xml_verbose())
fprintf(stderr, "normal object %s cannot be child of non-normal parent %s\n",
hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
fprintf(stderr, "%s: normal object %s cannot be child of non-normal parent %s\n",
state->global->msgprefix, hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
goto error_with_object;
}
} else if (hwloc__obj_type_is_memory(obj->type)) {
if (hwloc__obj_type_is_io(parent->type) || HWLOC_OBJ_MISC == parent->type) {
if (hwloc__xml_verbose())
fprintf(stderr, "Memory object %s cannot be child of non-normal-or-memory parent %s\n",
hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
fprintf(stderr, "%s: Memory object %s cannot be child of non-normal-or-memory parent %s\n",
state->global->msgprefix, hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
goto error_with_object;
}
} else if (hwloc__obj_type_is_io(obj->type)) {
if (hwloc__obj_type_is_memory(parent->type) || HWLOC_OBJ_MISC == parent->type) {
if (hwloc__xml_verbose())
fprintf(stderr, "I/O object %s cannot be child of non-normal-or-I/O parent %s\n",
hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
fprintf(stderr, "%s: I/O object %s cannot be child of non-normal-or-I/O parent %s\n",
state->global->msgprefix, hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type));
goto error_with_object;
}
}
@ -1160,6 +1189,48 @@ hwloc__xml_import_object(hwloc_topology_t topology,
data->last_numanode = obj;
}
/* 3.0 forward compatibility */
if (data->version_major >= 3 && obj->type == HWLOC_OBJ_OS_DEVICE) {
/* osdev.type changed into a bitmask in 3.0 */
if (obj->attr->osdev.type & 3 /* STORAGE|MEMORY for BLOCK */) {
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_BLOCK;
} else if (obj->attr->osdev.type & 8 /* COPROC for COPROC and rsmi/nvml GPUs */) {
if (obj->subtype && (!strcmp(obj->subtype, "RSMI") || !strcmp(obj->subtype, "NVML")))
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_GPU;
else
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_COPROC;
} else if (obj->attr->osdev.type & 4 /* GPU for non-COPROC GPUs */) {
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_GPU;
} else if (obj->attr->osdev.type & 32 /* OFED */) {
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_OPENFABRICS;
} else if (obj->attr->osdev.type & 16 /* NET for NET and BXI v2-fake-OFED */) {
if (obj->subtype && !strcmp(obj->subtype, "BXI"))
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_OPENFABRICS;
else
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_NETWORK;
} else if (obj->attr->osdev.type & 64 /* DMA */) {
obj->attr->osdev.type = HWLOC_OBJ_OSDEV_DMA;
} else { /* none or unknown */
obj->attr->osdev.type = (hwloc_obj_osdev_type_t) -1;
}
/* Backend info only in root */
if (obj->subtype && !hwloc_obj_get_info_by_name(obj, "Backend")) {
if (!strcmp(obj->subtype, "CUDA")) {
hwloc_obj_add_info(obj, "Backend", "CUDA");
} else if (!strcmp(obj->subtype, "NVML")) {
hwloc_obj_add_info(obj, "Backend", "NVML");
} else if (!strcmp(obj->subtype, "OpenCL")) {
hwloc_obj_add_info(obj, "Backend", "OpenCL");
} else if (!strcmp(obj->subtype, "RSMI")) {
hwloc_obj_add_info(obj, "Backend", "RSMI");
} else if (!strcmp(obj->subtype, "LevelZero")) {
hwloc_obj_add_info(obj, "Backend", "LevelZero");
} else if (!strcmp(obj->subtype, "Display")) {
hwloc_obj_add_info(obj, "Backend", "GL");
}
}
}
if (!hwloc_filter_check_keep_object(topology, obj)) {
/* Ignore this object instead of inserting it.
*
@ -1296,7 +1367,7 @@ hwloc__xml_v2import_support(hwloc_topology_t topology,
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_support) == 4*sizeof(void*));
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_discovery_support) == 6);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_cpubind_support) == 11);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 15);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 16);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_misc_support) == 1);
#endif
@ -1330,6 +1401,7 @@ hwloc__xml_v2import_support(hwloc_topology_t topology,
else DO(membind,firsttouch_membind);
else DO(membind,bind_membind);
else DO(membind,interleave_membind);
else DO(membind,weighted_interleave_membind);
else DO(membind,nexttouch_membind);
else DO(membind,migrate_membind);
else DO(membind,get_area_memlocation);
@ -1388,6 +1460,10 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
}
else if (!strcmp(attrname, "kind")) {
kind = strtoul(attrvalue, NULL, 10);
/* forward compat with "HOPS" kind in v3 */
if (kind & (1UL<<5))
/* hops becomes latency */
kind = (kind & ~(1UL<<5)) | HWLOC_DISTANCES_KIND_MEANS_LATENCY;
}
else if (!strcmp(attrname, "name")) {
name = attrvalue;
@ -1433,7 +1509,14 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
if (ret <= 0)
break;
if (!strcmp(tag, "indexes"))
if (!strcmp(tag, "info")) {
char *infoname, *infovalue;
ret = hwloc___xml_import_info(&infoname, &infovalue, state);
if (ret < 0)
goto out_with_arrays;
/* ignored */
continue;
} else if (!strcmp(tag, "indexes"))
is_index = 1;
else if (!strcmp(tag, "u64values"))
is_u64values = 1;
@ -1766,6 +1849,10 @@ hwloc__xml_import_memattr(hwloc_topology_t topology,
if (!strcmp(tag, "memattr_value")) {
ret = hwloc__xml_import_memattr_value(topology, id, flags, &childstate);
} else if (!strcmp(tag, "info")) {
char *infoname, *infovalue;
ret = hwloc___xml_import_info(&infoname, &infovalue, &childstate);
/* ignored */
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: memattr with unrecognized child %s\n",
@ -2094,9 +2181,10 @@ hwloc_look_xml(struct hwloc_backend *backend, struct hwloc_disc_status *dstatus)
if (ret < 0)
goto failed;
if (data->version_major > 2) {
if (data->version_major > 3
|| (data->version_major == 3 && data->version_minor > 0)) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: cannot import XML version %u.%u > 2\n",
fprintf(stderr, "%s: cannot import XML version %u.%u > 3.0\n",
data->msgprefix, data->version_major, data->version_minor);
goto err;
}
@ -2144,6 +2232,13 @@ hwloc_look_xml(struct hwloc_backend *backend, struct hwloc_disc_status *dstatus)
ret = hwloc__xml_import_cpukind(topology, &childstate);
if (ret < 0)
goto failed;
} else if (!strcmp(tag, "info")) {
char *infoname, *infovalue;
ret = hwloc___xml_import_info(&infoname, &infovalue, &childstate);
if (ret < 0)
goto failed;
/* move 3.x topology info back to the root object */
hwloc_obj_add_info(topology->levels[0][0], infoname, infovalue);
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown tag `%s' after root object.\n",
@ -3020,7 +3115,7 @@ hwloc__xml_v2export_support(hwloc__xml_export_state_t parentstate, hwloc_topolog
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_support) == 4*sizeof(void*));
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_discovery_support) == 6);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_cpubind_support) == 11);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 15);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 16);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_misc_support) == 1);
#endif
@ -3065,6 +3160,7 @@ hwloc__xml_v2export_support(hwloc__xml_export_state_t parentstate, hwloc_topolog
DO(membind,firsttouch_membind);
DO(membind,bind_membind);
DO(membind,interleave_membind);
DO(membind,weighted_interleave_membind);
DO(membind,nexttouch_membind);
DO(membind,migrate_membind);
DO(membind,get_area_memlocation);

View file

@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2025 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2022 IBM Corporation. All rights reserved.
@ -54,56 +54,6 @@
#endif
#ifdef HWLOC_HAVE_LEVELZERO
/*
* Define ZES_ENABLE_SYSMAN=1 early so that the LevelZero backend gets Sysman enabled.
*
* Only do this if levelzero was enabled in this build, so that we don't enable sysman
* for external levelzero users when hwloc doesn't need it. If somebody ever loads
* an external levelzero plugin in a hwloc library built without levelzero (unlikely),
* they may have to manually set ZES_ENABLE_SYSMAN=1.
*
* Use the constructor if supported and/or the Windows DllMain callback.
* Do it in the main hwloc library instead of the levelzero component because
* the latter could be loaded later as a plugin.
*
* L0 seems to be using getenv() to check this variable on Windows
* (at least in the Intel Compute-Runtime of March 2021),
* but setenv() doesn't seem to exist on Windows, hence use putenv() to set the variable.
*
* For the record, Get/SetEnvironmentVariable() is not exactly the same as getenv/putenv():
* - getenv() doesn't see what was set with SetEnvironmentVariable()
* - GetEnvironmentVariable() doesn't see putenv() in cygwin (while it does in MSVC and MinGW).
* Hence, if L0 ever switches from getenv() to GetEnvironmentVariable(),
* it will break in cygwin, we'll have to use both putenv() and SetEnvironmentVariable().
* Hopefully L0 will provide a way to enable Sysman without env vars before it happens.
*/
#if HWLOC_HAVE_ATTRIBUTE_CONSTRUCTOR
static void hwloc_constructor(void) __attribute__((constructor));
static void hwloc_constructor(void)
{
if (!getenv("ZES_ENABLE_SYSMAN"))
#ifdef HWLOC_WIN_SYS
putenv("ZES_ENABLE_SYSMAN=1");
#else
setenv("ZES_ENABLE_SYSMAN", "1", 1);
#endif
}
#endif
#ifdef HWLOC_WIN_SYS
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved)
{
if (fdwReason == DLL_PROCESS_ATTACH) {
if (!getenv("ZES_ENABLE_SYSMAN"))
/* Windows does not have a setenv, so use putenv. */
putenv((char *) "ZES_ENABLE_SYSMAN=1");
}
return TRUE;
}
#endif
#endif /* HWLOC_HAVE_LEVELZERO */
unsigned hwloc_get_api_version(void)
{
return HWLOC_API_VERSION;
@ -146,21 +96,24 @@ report_insert_error_format_obj(char *buf, size_t buflen, hwloc_obj_t obj)
char typestr[64];
char *cpusetstr;
char *nodesetstr = NULL;
char indexstr[64] = "";
char groupstr[64] = "";
hwloc_obj_type_snprintf(typestr, sizeof(typestr), obj, 0);
hwloc_bitmap_asprintf(&cpusetstr, obj->cpuset);
if (obj->os_index != HWLOC_UNKNOWN_INDEX)
snprintf(indexstr, sizeof(indexstr), "P#%u ", obj->os_index);
if (obj->type == HWLOC_OBJ_GROUP)
snprintf(groupstr, sizeof(groupstr), "groupkind %u-%u ", obj->attr->group.kind, obj->attr->group.subkind);
if (obj->nodeset) /* may be missing during insert */
hwloc_bitmap_asprintf(&nodesetstr, obj->nodeset);
if (obj->os_index != HWLOC_UNKNOWN_INDEX)
snprintf(buf, buflen, "%s (P#%u cpuset %s%s%s)",
typestr, obj->os_index, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
else
snprintf(buf, buflen, "%s (cpuset %s%s%s)",
typestr, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
snprintf(buf, buflen, "%s (%s%s%s%s%scpuset %s%s%s)",
typestr,
indexstr,
obj->subtype ? "subtype " : "", obj->subtype ? obj->subtype : "", obj->subtype ? " " : "",
groupstr,
cpusetstr,
nodesetstr ? " nodeset " : "", nodesetstr ? nodesetstr : "");
free(cpusetstr);
free(nodesetstr);
}
@ -176,10 +129,11 @@ static void report_insert_error(hwloc_obj_t new, hwloc_obj_t old, const char *ms
report_insert_error_format_obj(oldstr, sizeof(oldstr), old);
fprintf(stderr, "****************************************************************************\n");
fprintf(stderr, "* hwloc %s received invalid information from the operating system.\n", HWLOC_VERSION);
fprintf(stderr, "* hwloc %s received invalid information.\n", HWLOC_VERSION);
fprintf(stderr, "*\n");
fprintf(stderr, "* Failed with: %s\n", msg);
fprintf(stderr, "* while inserting %s at %s\n", newstr, oldstr);
fprintf(stderr, "* Failed with error: %s\n", msg);
fprintf(stderr, "* while inserting %s\n", newstr);
fprintf(stderr, "* at %s\n", oldstr);
fprintf(stderr, "* coming from: %s\n", reason);
fprintf(stderr, "*\n");
fprintf(stderr, "* The following FAQ entry in the hwloc documentation may help:\n");
@ -461,6 +415,20 @@ hwloc_debug_print_objects(int indent __hwloc_attribute_unused, hwloc_obj_t obj)
#define hwloc_debug_print_objects(indent, obj) do { /* nothing */ } while (0)
#endif /* !HWLOC_DEBUG */
int hwloc_obj_set_subtype(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, const char *subtype)
{
char *new = NULL;
if (subtype) {
new = strdup(subtype);
if (!new)
return -1;
}
if (obj->subtype)
free(obj->subtype);
obj->subtype = new;
return 0;
}
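The new hwloc_obj_set_subtype() setter replaces direct strdup/free of obj->subtype. A hedged usage sketch (the device name is illustrative, not part of hwloc):

#include <stdio.h>
#include <hwloc.h>

/* Tag an object with an illustrative subtype; NULL clears it, and on
 * strdup failure the setter returns -1 and leaves the old value intact. */
static int tag_device(hwloc_topology_t topology, hwloc_obj_t obj) {
  if (hwloc_obj_set_subtype(topology, obj, "MyAccel") < 0) {
    fprintf(stderr, "hwloc_obj_set_subtype: out of memory\n");
    return -1;
  }
  return 0;
}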
void hwloc__free_infos(struct hwloc_info_s *infos, unsigned count)
{
unsigned i;
@ -679,7 +647,8 @@ unlink_and_free_object_and_children(hwloc_obj_t *pobj)
void
hwloc_free_object_and_children(hwloc_obj_t obj)
{
unlink_and_free_object_and_children(&obj);
if (obj)
unlink_and_free_object_and_children(&obj);
}
/* Free an object, its next siblings and their children without unlinking from parent.
@ -1925,17 +1894,78 @@ hwloc_topology_alloc_group_object(struct hwloc_topology *topology)
return hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX);
}
int
hwloc_topology_free_group_object(struct hwloc_topology *topology, hwloc_obj_t obj)
{
if (!topology->is_loaded) {
/* this could actually work, see insert() below */
errno = EINVAL;
return -1;
}
if (topology->adopted_shmem_addr) {
errno = EPERM;
return -1;
}
hwloc_free_unlinked_object(obj);
return 0;
}
static void hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root);
static void propagate_total_memory(hwloc_obj_t obj);
static void hwloc_set_group_depth(hwloc_topology_t topology);
static void hwloc_connect_children(hwloc_obj_t parent);
static int hwloc_connect_levels(hwloc_topology_t topology);
static int hwloc_connect_special_levels(hwloc_topology_t topology);
static int hwloc_filter_levels_keep_structure(hwloc_topology_t topology);
/* reconnect children and levels,
* and optionally merge identical levels while keeping structure.
*/
int
hwloc__reconnect(struct hwloc_topology *topology, unsigned long flags)
{
int merged_levels = 0;
if (topology->modified) {
hwloc_connect_children(topology->levels[0][0]);
if (hwloc_connect_levels(topology) < 0)
return -1;
}
if (flags & _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE) {
merged_levels = hwloc_filter_levels_keep_structure(topology);
/* If > 0, we merged some levels,
* some child+parent special children lists may have been merged,
* hence special levels might need reordering,
* so reconnect special levels only here at the end.
*/
}
if (topology->modified || merged_levels) {
if (hwloc_connect_special_levels(topology) < 0)
return -1;
}
topology->modified = 0;
return 0;
}
int
hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags)
{
if (flags) {
errno = EINVAL;
return -1;
}
return hwloc__reconnect(topology, 0);
}
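Usage note: the public entry point keeps its strict contract while the actual reconnect logic moved into hwloc__reconnect(). A hedged call sketch, assuming the declaration hwloc exposes to backends/plugins; any nonzero flag still fails with EINVAL:

#include <stdio.h>
#include <hwloc.h>

/* Only flags == 0 is accepted here; _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE is
 * reserved for internal callers going through hwloc__reconnect(). */
static int reconnect_topology(hwloc_topology_t topo) {
  if (hwloc_topology_reconnect(topo, 0) < 0) {
    perror("hwloc_topology_reconnect");
    return -1;
  }
  return 0;
}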
hwloc_obj_t
hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t obj)
{
hwloc_obj_t res, root;
hwloc_obj_t res, root, child;
int cmp;
if (!topology->is_loaded) {
@ -1945,6 +1975,7 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
return NULL;
}
if (topology->adopted_shmem_addr) {
hwloc_free_unlinked_object(obj);
errno = EPERM;
return NULL;
}
@ -1998,6 +2029,7 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
res = hwloc__insert_object_by_cpuset(topology, NULL, obj, NULL /* do not show errors on stdout */);
} else {
/* just merge root */
hwloc_free_unlinked_object(obj);
res = root;
}
@ -2021,9 +2053,19 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
/* properly inserted */
hwloc_obj_add_children_sets(res);
if (hwloc_topology_reconnect(topology, 0) < 0)
/* reconnect levels.
* no need to filter levels keep_structure because groups are either auto-merged
* or have the dont_merge attribute */
if (hwloc__reconnect(topology, 0) < 0)
return NULL;
/* Compute group total_memory. */
res->total_memory = 0;
for_each_child(child, res)
res->total_memory += child->total_memory;
for_each_memory_child(child, res)
res->total_memory += child->total_memory;
hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
hwloc_set_group_depth(topology);
@ -2254,11 +2296,13 @@ fixup_sets(hwloc_obj_t obj)
int
hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src)
{
#define ADD_OTHER_OBJ_SET(_dst, _src, _set) \
if ((_src)->_set) { \
if (!(_dst)->_set) \
(_dst)->_set = hwloc_bitmap_alloc(); \
hwloc_bitmap_or((_dst)->_set, (_dst)->_set, (_src)->_set); \
#define ADD_OTHER_OBJ_SET(_dst, _src, _set) \
if ((_src)->_set) { \
if (!(_dst)->_set) \
(_dst)->_set = hwloc_bitmap_alloc(); \
if (!(_dst)->_set \
|| hwloc_bitmap_or((_dst)->_set, (_dst)->_set, (_src)->_set) < 0) \
return -1; \
}
ADD_OTHER_OBJ_SET(dst, src, cpuset);
ADD_OTHER_OBJ_SET(dst, src, complete_cpuset);
@ -2504,26 +2548,13 @@ hwloc_compare_levels_structure(hwloc_topology_t topology, unsigned i)
return 0;
}
/* return > 0 if any level was removed.
* performs its own reconnect internally if needed
*/
/* return > 0 if any level was removed. */
static int
hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
{
unsigned i, j;
int res = 0;
if (topology->modified) {
/* WARNING: hwloc_topology_reconnect() is duplicated partially here
* and at the end of this function:
* - we need normal levels before merging.
* - and we'll need to update special levels after merging.
*/
hwloc_connect_children(topology->levels[0][0]);
if (hwloc_connect_levels(topology) < 0)
return -1;
}
/* start from the bottom since we'll remove intermediate levels */
for(i=topology->nb_levels-1; i>0; i--) {
int replacechild = 0, replaceparent = 0;
@ -2545,9 +2576,15 @@ hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
if (type1 == HWLOC_OBJ_GROUP && hwloc_dont_merge_group_level(topology, i))
replacechild = 0;
}
if (!replacechild && !replaceparent)
if (!replacechild && !replaceparent) {
/* always merge Die into Package when levels are identical */
if (type1 == HWLOC_OBJ_PACKAGE && type2 == HWLOC_OBJ_DIE)
replacechild = 1;
}
if (!replacechild && !replaceparent) {
/* no ignoring */
continue;
}
/* Decide which one to actually replace */
if (replaceparent && replacechild) {
/* If both may be replaced, look at obj_type_priority */
@ -2690,20 +2727,6 @@ hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
}
}
if (res > 0 || topology->modified) {
/* WARNING: hwloc_topology_reconnect() is duplicated partially here
* and at the beginning of this function.
* If we merged some levels, some child+parent special children lists
* may have been merged, hence special levels might need reordering,
* so reconnect special levels only here at the end
* (it's not needed at the beginning of this function).
*/
if (hwloc_connect_special_levels(topology) < 0)
return -1;
topology->modified = 0;
}
return 0;
}
@ -3232,33 +3255,6 @@ hwloc_connect_levels(hwloc_topology_t topology)
return 0;
}
int
hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags)
{
/* WARNING: when updating this function, the replicated code must
* also be updated inside hwloc_filter_levels_keep_structure()
*/
if (flags) {
errno = EINVAL;
return -1;
}
if (!topology->modified)
return 0;
hwloc_connect_children(topology->levels[0][0]);
if (hwloc_connect_levels(topology) < 0)
return -1;
if (hwloc_connect_special_levels(topology) < 0)
return -1;
topology->modified = 0;
return 0;
}
/* for regression testing, make sure the order of io devices
* doesn't change with the dentry order in the filesystem
*
@ -3515,32 +3511,13 @@ hwloc_discover(struct hwloc_topology *topology,
hwloc_debug_print_objects(0, topology->levels[0][0]);
}
/* see if we should ignore the root now that we know how many children it has */
if (!hwloc_filter_check_keep_object(topology, topology->levels[0][0])
&& topology->levels[0][0]->first_child && !topology->levels[0][0]->first_child->next_sibling) {
hwloc_obj_t oldroot = topology->levels[0][0];
hwloc_obj_t newroot = oldroot->first_child;
/* switch to the new root */
newroot->parent = NULL;
topology->levels[0][0] = newroot;
/* move oldroot memory/io/misc children before newroot children */
if (oldroot->memory_first_child)
prepend_siblings_list(&newroot->memory_first_child, oldroot->memory_first_child, newroot);
if (oldroot->io_first_child)
prepend_siblings_list(&newroot->io_first_child, oldroot->io_first_child, newroot);
if (oldroot->misc_first_child)
prepend_siblings_list(&newroot->misc_first_child, oldroot->misc_first_child, newroot);
/* destroy oldroot and use the new one */
hwloc_free_unlinked_object(oldroot);
}
/*
* All object cpusets and nodesets are properly set now.
*/
/* Now connect handy pointers to make remaining discovery easier. */
hwloc_debug("%s", "\nOk, finished tweaking, now connect\n");
if (hwloc_topology_reconnect(topology, 0) < 0)
if (hwloc__reconnect(topology, 0) < 0)
return -1;
hwloc_debug_print_objects(0, topology->levels[0][0]);
@ -3596,12 +3573,12 @@ hwloc_discover(struct hwloc_topology *topology,
}
hwloc_debug_print_objects(0, topology->levels[0][0]);
/* reconnect all (new groups might have appeared, IO added, etc),
* and (now that everything was added) remove identical levels while keeping structure
*/
hwloc_debug("%s", "\nRemoving levels with HWLOC_TYPE_FILTER_KEEP_STRUCTURE\n");
if (hwloc_filter_levels_keep_structure(topology) < 0)
if (hwloc__reconnect(topology, _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE) < 0)
return -1;
/* takes care of reconnecting children/levels internally,
* because it needs normal levels,
* and it's often needed below because of Groups inserted for I/Os anyway */
hwloc_debug_print_objects(0, topology->levels[0][0]);
/* accumulate children memory in total_memory fields (only once parent is set) */
@ -3730,6 +3707,7 @@ hwloc__topology_init (struct hwloc_topology **topologyp,
hwloc__topology_filter_init(topology);
/* always initialize these since the flags that would disable them are not known yet */
hwloc_internal_distances_init(topology);
hwloc_internal_memattrs_init(topology);
hwloc_internal_cpukinds_init(topology);
@ -3942,8 +3920,12 @@ int
hwloc_topology_set_cache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
{
unsigned i;
for(i=HWLOC_OBJ_L1CACHE; i<HWLOC_OBJ_L3ICACHE; i++)
hwloc_topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
if (topology->is_loaded) {
errno = EBUSY;
return -1;
}
for(i=HWLOC_OBJ_L1CACHE; i<=HWLOC_OBJ_L3ICACHE; i++)
hwloc__topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
return 0;
}
@ -3951,17 +3933,25 @@ int
hwloc_topology_set_icache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
{
unsigned i;
for(i=HWLOC_OBJ_L1ICACHE; i<HWLOC_OBJ_L3ICACHE; i++)
hwloc_topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
if (topology->is_loaded) {
errno = EBUSY;
return -1;
}
for(i=HWLOC_OBJ_L1ICACHE; i<=HWLOC_OBJ_L3ICACHE; i++)
hwloc__topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
return 0;
}
int
hwloc_topology_set_io_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
{
hwloc_topology_set_type_filter(topology, HWLOC_OBJ_BRIDGE, filter);
hwloc_topology_set_type_filter(topology, HWLOC_OBJ_PCI_DEVICE, filter);
hwloc_topology_set_type_filter(topology, HWLOC_OBJ_OS_DEVICE, filter);
if (topology->is_loaded) {
errno = EBUSY;
return -1;
}
hwloc__topology_set_type_filter(topology, HWLOC_OBJ_BRIDGE, filter);
hwloc__topology_set_type_filter(topology, HWLOC_OBJ_PCI_DEVICE, filter);
hwloc__topology_set_type_filter(topology, HWLOC_OBJ_OS_DEVICE, filter);
return 0;
}
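Since these setters now fail with EBUSY on a loaded topology (and the loops cover the full L1..L3i range inclusively), cache filtering must happen between init() and load(). A hedged sketch:

#include <hwloc.h>

/* Build a topology with all CPU caches filtered out; the filter call must
 * precede load() now that it returns -1/EBUSY afterwards. */
static int load_without_caches(hwloc_topology_t *out) {
  hwloc_topology_t topo;
  if (hwloc_topology_init(&topo) < 0)
    return -1;
  if (hwloc_topology_set_cache_types_filter(topo, HWLOC_TYPE_FILTER_KEEP_NONE) < 0
      || hwloc_topology_load(topo) < 0) {
    hwloc_topology_destroy(topo);
    return -1;
  }
  *out = topo;
  return 0;
}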
@ -3982,9 +3972,12 @@ hwloc_topology_clear (struct hwloc_topology *topology)
{
/* no need to set to NULL after free() since callers will call setup_defaults() or just destroy the rest of the topology */
unsigned l;
/* always destroy cpukinds/distances/memattrs since they are always initialized during init() */
hwloc_internal_cpukinds_destroy(topology);
hwloc_internal_distances_destroy(topology);
hwloc_internal_memattrs_destroy(topology);
hwloc_free_object_and_children(topology->levels[0][0]);
hwloc_bitmap_free(topology->allowed_cpuset);
hwloc_bitmap_free(topology->allowed_nodeset);
@ -4024,6 +4017,7 @@ hwloc_topology_load (struct hwloc_topology *topology)
{
struct hwloc_disc_status dstatus;
const char *env;
unsigned i;
int err;
if (topology->is_loaded) {
@ -4032,8 +4026,18 @@ hwloc_topology_load (struct hwloc_topology *topology)
}
/* initialize envvar-related things */
hwloc_internal_distances_prepare(topology);
hwloc_internal_memattrs_prepare(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES))
hwloc_internal_distances_prepare(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS))
hwloc_internal_memattrs_prepare(topology);
/* check if any cpu cache filter is not NONE */
topology->want_some_cpu_caches = 0;
for(i=HWLOC_OBJ_L1CACHE; i<=HWLOC_OBJ_L3ICACHE; i++)
if (topology->type_filter[i] != HWLOC_TYPE_FILTER_KEEP_NONE) {
topology->want_some_cpu_caches = 1;
break;
}
if (getenv("HWLOC_XML_USERDATA_NOT_DECODED"))
topology->userdata_not_decoded = 1;
@ -4110,23 +4114,32 @@ hwloc_topology_load (struct hwloc_topology *topology)
#endif
hwloc_topology_check(topology);
/* Rank cpukinds */
hwloc_internal_cpukinds_rank(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
/* Rank cpukinds */
hwloc_internal_cpukinds_rank(topology);
}
/* Mark distances objs arrays as invalid since we may have removed objects
* from the topology after adding the distances (remove_empty, etc).
* It would be hard to actually verify whether it's needed.
*/
hwloc_internal_distances_invalidate_cached_objs(topology);
/* And refresh distances so that multithreaded concurrent distances_get()
* don't refresh() concurrently (disallowed).
*/
hwloc_internal_distances_refresh(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES)) {
/* Mark distances objs arrays as invalid since we may have removed objects
* from the topology after adding the distances (remove_empty, etc).
* It would be hard to actually verify whether it's needed.
*/
hwloc_internal_distances_invalidate_cached_objs(topology);
/* And refresh distances so that multithreaded concurrent distances_get()
* don't refresh() concurrently (disallowed).
*/
hwloc_internal_distances_refresh(topology);
}
/* Same for memattrs */
hwloc_internal_memattrs_need_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
hwloc_internal_memattrs_guess_memory_tiers(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS)) {
int force_memtiers = (getenv("HWLOC_MEMTIERS_REFRESH") != NULL);
/* Same for memattrs */
hwloc_internal_memattrs_need_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
/* update memtiers unless XML */
if (force_memtiers || strcmp(topology->backends->component->name, "xml"))
hwloc_internal_memattrs_guess_memory_tiers(topology, force_memtiers);
}
topology->is_loaded = 1;
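The new guards skip distances/memattrs/cpukinds bookkeeping when the corresponding topology flag is set. A hedged consumer-side sketch, assuming a hwloc release that defines the HWLOC_TOPOLOGY_FLAG_NO_* flags:

#include <hwloc.h>

/* Load a topology with the optional subsystems disabled, matching the
 * flag checks added above in hwloc_topology_load(). */
static int load_minimal(hwloc_topology_t *out) {
  hwloc_topology_t topo;
  if (hwloc_topology_init(&topo) < 0)
    return -1;
  hwloc_topology_set_flags(topo, HWLOC_TOPOLOGY_FLAG_NO_DISTANCES |
                                 HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS |
                                 HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS);
  if (hwloc_topology_load(topo) < 0) {
    hwloc_topology_destroy(topo);
    return -1;
  }
  *out = topo;
  return 0;
}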
@ -4185,20 +4198,11 @@ restrict_object_by_cpuset(hwloc_topology_t topology, unsigned long flags, hwloc_
hwloc_bitmap_andnot(obj->cpuset, obj->cpuset, droppedcpuset);
hwloc_bitmap_andnot(obj->complete_cpuset, obj->complete_cpuset, droppedcpuset);
modified = 1;
} else {
if ((flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS)
&& hwloc_bitmap_iszero(obj->complete_cpuset)) {
/* we're empty, there's a NUMAnode below us, it'll be removed this time */
modified = 1;
}
/* nodeset cannot intersect unless cpuset intersects or is empty */
if (droppednodeset)
assert(!hwloc_bitmap_intersects(obj->complete_nodeset, droppednodeset)
|| hwloc_bitmap_iszero(obj->complete_cpuset));
}
if (droppednodeset) {
if (droppednodeset && hwloc_bitmap_intersects(obj->complete_nodeset, droppednodeset)) {
hwloc_bitmap_andnot(obj->nodeset, obj->nodeset, droppednodeset);
hwloc_bitmap_andnot(obj->complete_nodeset, obj->complete_nodeset, droppednodeset);
modified = 1;
}
if (modified) {
@ -4251,20 +4255,11 @@ restrict_object_by_nodeset(hwloc_topology_t topology, unsigned long flags, hwloc
hwloc_bitmap_andnot(obj->nodeset, obj->nodeset, droppednodeset);
hwloc_bitmap_andnot(obj->complete_nodeset, obj->complete_nodeset, droppednodeset);
modified = 1;
} else {
if ((flags & HWLOC_RESTRICT_FLAG_REMOVE_MEMLESS)
&& hwloc_bitmap_iszero(obj->complete_nodeset)) {
/* we're empty, there's a PU below us, it'll be removed this time */
modified = 1;
}
/* cpuset cannot intersect unless nodeset intersects or is empty */
if (droppedcpuset)
assert(!hwloc_bitmap_intersects(obj->complete_cpuset, droppedcpuset)
|| hwloc_bitmap_iszero(obj->complete_nodeset));
}
if (droppedcpuset) {
if (droppedcpuset && hwloc_bitmap_intersects(obj->complete_cpuset, droppedcpuset)) {
hwloc_bitmap_andnot(obj->cpuset, obj->cpuset, droppedcpuset);
hwloc_bitmap_andnot(obj->complete_cpuset, obj->complete_cpuset, droppedcpuset);
modified = 1;
}
if (modified) {
@ -4430,16 +4425,21 @@ hwloc_topology_restrict(struct hwloc_topology *topology, hwloc_const_bitmap_t se
hwloc_bitmap_free(droppedcpuset);
hwloc_bitmap_free(droppednodeset);
if (hwloc_filter_levels_keep_structure(topology) < 0) /* takes care of reconnecting internally */
if (hwloc__reconnect(topology, _HWLOC_RECONNECT_FLAG_KEEPSTRUCTURE) < 0)
goto out;
/* some objects may have disappeared, we need to update distances objs arrays */
hwloc_internal_distances_invalidate_cached_objs(topology);
hwloc_internal_memattrs_need_refresh(topology);
/* some objects may have disappeared and sets were modified,
* we need to update distances, etc */
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES))
hwloc_internal_distances_invalidate_cached_objs(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS))
hwloc_internal_memattrs_need_refresh(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS))
hwloc_internal_cpukinds_restrict(topology);
hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
propagate_total_memory(topology->levels[0][0]);
hwloc_internal_cpukinds_restrict(topology);
#ifndef HWLOC_DEBUG
if (getenv("HWLOC_DEBUG_CHECK"))
@ -4527,9 +4527,12 @@ hwloc_topology_allow(struct hwloc_topology *topology,
int
hwloc_topology_refresh(struct hwloc_topology *topology)
{
hwloc_internal_cpukinds_rank(topology);
hwloc_internal_distances_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS))
hwloc_internal_cpukinds_rank(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES))
hwloc_internal_distances_refresh(topology);
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS))
hwloc_internal_memattrs_refresh(topology);
return 0;
}
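The restrict/refresh paths above now invalidate distances, memattrs and cpukinds only when the matching NO_* flag is unset. A hedged sketch of the public API these paths serve (the 4-PU range is illustrative):

#include <stdio.h>
#include <hwloc.h>

/* Restrict a loaded topology to PUs 0-3 and drop NUMA nodes that become
 * CPU-less; the flag-gated invalidation above then runs as needed. */
static int restrict_to_four_pus(hwloc_topology_t topo) {
  hwloc_bitmap_t set = hwloc_bitmap_alloc();
  if (!set)
    return -1;
  hwloc_bitmap_set_range(set, 0, 3);
  int err = hwloc_topology_restrict(topo, set, HWLOC_RESTRICT_FLAG_REMOVE_CPULESS);
  if (err < 0)
    perror("hwloc_topology_restrict");
  hwloc_bitmap_free(set);
  return err;
}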
@ -5044,6 +5047,8 @@ hwloc_topology_check(struct hwloc_topology *topology)
unsigned i;
int j, depth;
assert(!topology->modified);
/* make sure we can use ranges to check types */
/* hwloc__obj_type_is_{,d,i}cache() want cache types to be ordered like this */
@ -5081,6 +5086,9 @@ hwloc_topology_check(struct hwloc_topology *topology)
for(i=HWLOC_OBJ_TYPE_MIN; i<HWLOC_OBJ_TYPE_MAX; i++)
assert(obj_type_order[obj_order_type[i]] == i);
if (!topology->is_loaded)
return;
depth = hwloc_topology_get_depth(topology);
assert(!topology->modified);

View file

@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.1)
cmake_minimum_required(VERSION 3.10)
project (ethash C)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os")

View file

@ -4,7 +4,7 @@
#include "llhttp.h"
#define CALLBACK_MAYBE(PARSER, NAME, ...) \
#define CALLBACK_MAYBE(PARSER, NAME) \
do { \
const llhttp_settings_t* settings; \
settings = (const llhttp_settings_t*) (PARSER)->settings; \
@ -12,7 +12,22 @@
err = 0; \
break; \
} \
err = settings->NAME(__VA_ARGS__); \
err = settings->NAME((PARSER)); \
} while (0)
#define SPAN_CALLBACK_MAYBE(PARSER, NAME, START, LEN) \
do { \
const llhttp_settings_t* settings; \
settings = (const llhttp_settings_t*) (PARSER)->settings; \
if (settings == NULL || settings->NAME == NULL) { \
err = 0; \
break; \
} \
err = settings->NAME((PARSER), (START), (LEN)); \
if (err == -1) { \
err = HPE_USER; \
llhttp_set_error_reason((PARSER), "Span callback error in " #NAME); \
} \
} while (0)
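The contract of SPAN_CALLBACK_MAYBE above: a data callback returning -1 is converted to HPE_USER with a generated reason string. A hedged sketch of a user span callback exercising that path (the callback name and the 2048-byte cap are hypothetical):

#include <stddef.h>
#include "llhttp.h"

/* Returning -1 here makes SPAN_CALLBACK_MAYBE report HPE_USER with the
 * reason "Span callback error in on_url". */
static int my_on_url(llhttp_t *parser, const char *at, size_t length) {
  (void) parser; (void) at;
  return length > 2048 ? -1 : 0;  /* reject oversized request targets */
}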
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
@ -31,21 +46,25 @@ extern int wasm_on_url(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_status(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_field(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_value(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_headers_complete(llhttp_t * p);
extern int wasm_on_headers_complete(llhttp_t * p, int status_code,
uint8_t upgrade, int should_keep_alive);
extern int wasm_on_body(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_message_complete(llhttp_t * p);
static int wasm_on_headers_complete_wrap(llhttp_t* p) {
return wasm_on_headers_complete(p, p->status_code, p->upgrade,
llhttp_should_keep_alive(p));
}
const llhttp_settings_t wasm_settings = {
wasm_on_message_begin,
wasm_on_url,
wasm_on_status,
wasm_on_header_field,
wasm_on_header_value,
wasm_on_headers_complete,
wasm_on_body,
wasm_on_message_complete,
NULL,
NULL,
.on_message_begin = wasm_on_message_begin,
.on_url = wasm_on_url,
.on_status = wasm_on_status,
.on_header_field = wasm_on_header_field,
.on_header_value = wasm_on_header_value,
.on_headers_complete = wasm_on_headers_complete_wrap,
.on_body = wasm_on_body,
.on_message_complete = wasm_on_message_complete,
};
@ -59,6 +78,8 @@ void llhttp_free(llhttp_t* parser) {
free(parser);
}
#endif // defined(__wasm__)
/* Some getters required to get stuff from the parser */
uint8_t llhttp_get_type(llhttp_t* parser) {
@ -85,14 +106,12 @@ uint8_t llhttp_get_upgrade(llhttp_t* parser) {
return parser->upgrade;
}
#endif // defined(__wasm__)
void llhttp_reset(llhttp_t* parser) {
llhttp_type_t type = parser->type;
const llhttp_settings_t* settings = parser->settings;
void* data = parser->data;
uint8_t lenient_flags = parser->lenient_flags;
uint16_t lenient_flags = parser->lenient_flags;
llhttp__internal_init(parser);
@ -123,7 +142,7 @@ llhttp_errno_t llhttp_finish(llhttp_t* parser) {
switch (parser->finish) {
case HTTP_FINISH_SAFE_WITH_CB:
CALLBACK_MAYBE(parser, on_message_complete, parser);
CALLBACK_MAYBE(parser, on_message_complete);
if (err != HPE_OK) return err;
/* FALLTHROUGH */
@ -199,12 +218,21 @@ const char* llhttp_errno_name(llhttp_errno_t err) {
const char* llhttp_method_name(llhttp_method_t method) {
#define HTTP_METHOD_GEN(NUM, NAME, STRING) case HTTP_##NAME: return #STRING;
switch (method) {
HTTP_METHOD_MAP(HTTP_METHOD_GEN)
HTTP_ALL_METHOD_MAP(HTTP_METHOD_GEN)
default: abort();
}
#undef HTTP_METHOD_GEN
}
const char* llhttp_status_name(llhttp_status_t status) {
#define HTTP_STATUS_GEN(NUM, NAME, STRING) case HTTP_STATUS_##NAME: return #STRING;
switch (status) {
HTTP_STATUS_MAP(HTTP_STATUS_GEN)
default: abort();
}
#undef HTTP_STATUS_GEN
}
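llhttp_status_name() relies on the same X-macro pattern as llhttp_method_name(): one list macro expanded into case labels. A minimal self-contained illustration of the pattern (the COLOR_* names are hypothetical, not part of llhttp):

#include <stdio.h>

#define COLOR_MAP(XX) \
  XX(0, RED, red)     \
  XX(1, GREEN, green)

enum color {
#define COLOR_ENUM(NUM, NAME, STR) COLOR_##NAME = NUM,
  COLOR_MAP(COLOR_ENUM)
#undef COLOR_ENUM
};

static const char *color_name(enum color c) {
#define COLOR_CASE(NUM, NAME, STR) case COLOR_##NAME: return #STR;
  switch (c) {
    COLOR_MAP(COLOR_CASE)
    default: return "?";
  }
#undef COLOR_CASE
}

int main(void) { printf("%s\n", color_name(COLOR_GREEN)); return 0; }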
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled) {
if (enabled) {
@ -232,103 +260,236 @@ void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled) {
}
}
void llhttp_set_lenient_transfer_encoding(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_TRANSFER_ENCODING;
} else {
parser->lenient_flags &= ~LENIENT_TRANSFER_ENCODING;
}
}
void llhttp_set_lenient_version(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_VERSION;
} else {
parser->lenient_flags &= ~LENIENT_VERSION;
}
}
void llhttp_set_lenient_data_after_close(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_DATA_AFTER_CLOSE;
} else {
parser->lenient_flags &= ~LENIENT_DATA_AFTER_CLOSE;
}
}
void llhttp_set_lenient_optional_lf_after_cr(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_OPTIONAL_LF_AFTER_CR;
} else {
parser->lenient_flags &= ~LENIENT_OPTIONAL_LF_AFTER_CR;
}
}
void llhttp_set_lenient_optional_crlf_after_chunk(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_OPTIONAL_CRLF_AFTER_CHUNK;
} else {
parser->lenient_flags &= ~LENIENT_OPTIONAL_CRLF_AFTER_CHUNK;
}
}
void llhttp_set_lenient_optional_cr_before_lf(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_OPTIONAL_CR_BEFORE_LF;
} else {
parser->lenient_flags &= ~LENIENT_OPTIONAL_CR_BEFORE_LF;
}
}
void llhttp_set_lenient_spaces_after_chunk_size(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_SPACES_AFTER_CHUNK_SIZE;
} else {
parser->lenient_flags &= ~LENIENT_SPACES_AFTER_CHUNK_SIZE;
}
}
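These setters now cover ten lenient bits (up to LENIENT_SPACES_AFTER_CHUNK_SIZE = 0x200), which is why lenient_flags widened from uint8_t to uint16_t elsewhere in this diff. A hedged opt-in sketch (use at your own risk, per the llhttp docs):

#include "llhttp.h"

/* Relax chunked-encoding parsing on an already-initialized parser. */
static void relax_parser(llhttp_t *parser) {
  llhttp_set_lenient_optional_crlf_after_chunk(parser, 1);
  llhttp_set_lenient_spaces_after_chunk_size(parser, 1);
}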
/* Callbacks */
int llhttp__on_message_begin(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_begin, s);
CALLBACK_MAYBE(s, on_message_begin);
return err;
}
int llhttp__on_protocol(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_protocol, p, endp - p);
return err;
}
int llhttp__on_protocol_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_protocol_complete);
return err;
}
int llhttp__on_url(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_url, s, p, endp - p);
SPAN_CALLBACK_MAYBE(s, on_url, p, endp - p);
return err;
}
int llhttp__on_url_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_url_complete, s);
CALLBACK_MAYBE(s, on_url_complete);
return err;
}
int llhttp__on_status(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_status, s, p, endp - p);
SPAN_CALLBACK_MAYBE(s, on_status, p, endp - p);
return err;
}
int llhttp__on_status_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_status_complete, s);
CALLBACK_MAYBE(s, on_status_complete);
return err;
}
int llhttp__on_method(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_method, p, endp - p);
return err;
}
int llhttp__on_method_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_method_complete);
return err;
}
int llhttp__on_version(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_version, p, endp - p);
return err;
}
int llhttp__on_version_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_version_complete);
return err;
}
int llhttp__on_header_field(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_field, s, p, endp - p);
SPAN_CALLBACK_MAYBE(s, on_header_field, p, endp - p);
return err;
}
int llhttp__on_header_field_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_field_complete, s);
CALLBACK_MAYBE(s, on_header_field_complete);
return err;
}
int llhttp__on_header_value(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_value, s, p, endp - p);
SPAN_CALLBACK_MAYBE(s, on_header_value, p, endp - p);
return err;
}
int llhttp__on_header_value_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_value_complete, s);
CALLBACK_MAYBE(s, on_header_value_complete);
return err;
}
int llhttp__on_headers_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_headers_complete, s);
CALLBACK_MAYBE(s, on_headers_complete);
return err;
}
int llhttp__on_message_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_complete, s);
CALLBACK_MAYBE(s, on_message_complete);
return err;
}
int llhttp__on_body(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_body, s, p, endp - p);
SPAN_CALLBACK_MAYBE(s, on_body, p, endp - p);
return err;
}
int llhttp__on_chunk_header(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_header, s);
CALLBACK_MAYBE(s, on_chunk_header);
return err;
}
int llhttp__on_chunk_extension_name(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_chunk_extension_name, p, endp - p);
return err;
}
int llhttp__on_chunk_extension_name_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_extension_name_complete);
return err;
}
int llhttp__on_chunk_extension_value(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_chunk_extension_value, p, endp - p);
return err;
}
int llhttp__on_chunk_extension_value_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_extension_value_complete);
return err;
}
int llhttp__on_chunk_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_complete, s);
CALLBACK_MAYBE(s, on_chunk_complete);
return err;
}
int llhttp__on_reset(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_reset);
return err;
}

View file

@ -1,253 +0,0 @@
#ifndef INCLUDE_LLHTTP_API_H_
#define INCLUDE_LLHTTP_API_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#if defined(__wasm__)
#define LLHTTP_EXPORT __attribute__((visibility("default")))
#else
#define LLHTTP_EXPORT
#endif
typedef llhttp__internal_t llhttp_t;
typedef struct llhttp_settings_s llhttp_settings_t;
typedef int (*llhttp_data_cb)(llhttp_t*, const char *at, size_t length);
typedef int (*llhttp_cb)(llhttp_t*);
struct llhttp_settings_s {
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_begin;
llhttp_data_cb on_url;
llhttp_data_cb on_status;
llhttp_data_cb on_header_field;
llhttp_data_cb on_header_value;
/* Possible return values:
* 0 - Proceed normally
* 1 - Assume that request/response has no body, and proceed to parsing the
* next message
* 2 - Assume absence of body (as above) and make `llhttp_execute()` return
* `HPE_PAUSED_UPGRADE`
* -1 - Error
* `HPE_PAUSED`
*/
llhttp_cb on_headers_complete;
llhttp_data_cb on_body;
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
* Possible return values 0, -1, `HPE_PAUSED`
*/
llhttp_cb on_chunk_header;
llhttp_cb on_chunk_complete;
llhttp_cb on_url_complete;
llhttp_cb on_status_complete;
llhttp_cb on_header_field_complete;
llhttp_cb on_header_value_complete;
};
/* Initialize the parser with specific type and user settings.
*
* NOTE: lifetime of `settings` has to be at least the same as the lifetime of
* the `parser` here. In practice, `settings` has to be either a static
* variable or be allocated with `malloc`, `new`, etc.
*/
LLHTTP_EXPORT
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings);
#if defined(__wasm__)
LLHTTP_EXPORT
llhttp_t* llhttp_alloc(llhttp_type_t type);
LLHTTP_EXPORT
void llhttp_free(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_type(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_major(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_minor(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_method(llhttp_t* parser);
LLHTTP_EXPORT
int llhttp_get_status_code(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_upgrade(llhttp_t* parser);
#endif // defined(__wasm__)
/* Reset an already initialized parser back to the start state, preserving the
* existing parser type, callback settings, user data, and lenient flags.
*/
LLHTTP_EXPORT
void llhttp_reset(llhttp_t* parser);
/* Initialize the settings object */
LLHTTP_EXPORT
void llhttp_settings_init(llhttp_settings_t* settings);
/* Parse full or partial request/response, invoking user callbacks along the
* way.
*
* If any `llhttp_data_cb` returns an errno not equal to `HPE_OK`, parsing
* is interrupted and that errno is returned from `llhttp_execute()`. If
* `HPE_PAUSED` was used as the errno, execution can be resumed with a
* `llhttp_resume()` call.
*
* In the special case of a CONNECT/Upgrade request/response, `HPE_PAUSED_UPGRADE`
* is returned after fully parsing the request/response. If the user wishes to
* continue parsing, they need to invoke `llhttp_resume_after_upgrade()`.
*
* NOTE: if this function ever returns a non-pause type error, it will continue
* to return the same error upon each successive call up until `llhttp_init()`
* is called.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len);
/* This method should be called when the other side has no further bytes to
* send (e.g. shutdown of readable side of the TCP connection.)
*
* Requests without `Content-Length` and other messages might require treating
* all incoming bytes as part of the body, up to the last byte of the
* connection. This method will invoke the `on_message_complete()` callback if the
* request was terminated safely. Otherwise an error code is returned.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_finish(llhttp_t* parser);
/* Returns `1` if the incoming message is parsed until the last byte, and has
* to be completed by calling `llhttp_finish()` on EOF
*/
LLHTTP_EXPORT
int llhttp_message_needs_eof(const llhttp_t* parser);
/* Returns `1` if there might be any other messages following the last that was
* successfully parsed.
*/
LLHTTP_EXPORT
int llhttp_should_keep_alive(const llhttp_t* parser);
/* Make further calls of `llhttp_execute()` return `HPE_PAUSED` and set
* appropriate error reason.
*
* Important: do not call this from user callbacks! User callbacks must return
* `HPE_PAUSED` if pausing is required.
*/
LLHTTP_EXPORT
void llhttp_pause(llhttp_t* parser);
/* Might be called to resume the execution after the pause in user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED`.
*/
LLHTTP_EXPORT
void llhttp_resume(llhttp_t* parser);
/* Might be called to resume the execution after the pause in user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED_UPGRADE`
*/
LLHTTP_EXPORT
void llhttp_resume_after_upgrade(llhttp_t* parser);
/* Returns the latest return error */
LLHTTP_EXPORT
llhttp_errno_t llhttp_get_errno(const llhttp_t* parser);
/* Returns the verbal explanation of the latest returned error.
*
* Note: User callback should set error reason when returning the error. See
* `llhttp_set_error_reason()` for details.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_reason(const llhttp_t* parser);
/* Assign verbal description to the returned error. Must be called in user
* callbacks right before returning the errno.
*
* Note: `HPE_USER` error code might be useful in user callbacks.
*/
LLHTTP_EXPORT
void llhttp_set_error_reason(llhttp_t* parser, const char* reason);
/* Returns the pointer to the last parsed byte before the returned error. The
* pointer is relative to the `data` argument of `llhttp_execute()`.
*
* Note: this method might be useful for counting the number of parsed bytes.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_pos(const llhttp_t* parser);
/* Returns textual name of error code */
LLHTTP_EXPORT
const char* llhttp_errno_name(llhttp_errno_t err);
/* Returns textual name of HTTP method */
LLHTTP_EXPORT
const char* llhttp_method_name(llhttp_method_t method);
/* Enables/disables lenient header value parsing (disabled by default).
*
* Lenient parsing disables header value token checks, extending llhttp's
* protocol support to highly non-compliant clients/servers. No
* `HPE_INVALID_HEADER_TOKEN` will be raised for incorrect header values when
* lenient parsing is "on".
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of conflicting `Transfer-Encoding` and
* `Content-Length` headers (disabled by default).
*
* Normally `llhttp` would error when `Transfer-Encoding` is present in
* conjunction with `Content-Length`. This error is important to prevent HTTP
* request smuggling, but may be less desirable for a small number of cases
* involving legacy servers.
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of `Connection: close` and HTTP/1.0
* requests/responses.
*
* Normally `llhttp` would error on (in strict mode) or discard (in loose mode)
* the HTTP request/response after the request/response with `Connection: close`
* and `Content-Length`. This is important to prevent cache poisoning attacks,
* but might interact badly with outdated and insecure clients. With this flag
* the extra request/response will be parsed normally.
*
* **(USE AT YOUR OWN RISK)**
*/
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INCLUDE_LLHTTP_API_H_ */
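The header removed above documents the core parse loop; its declarations now live in llhttp.h. A hedged end-to-end sketch of that loop (the buffer handling is illustrative):

#include <stddef.h>
#include "llhttp.h"

static int on_message_complete(llhttp_t *parser) {
  (void) parser;
  return 0;
}

/* Parse one buffer; HPE_PAUSED is resumable, anything else is fatal until
 * llhttp_init() is called again. */
static int parse_buffer(const char *data, size_t len) {
  llhttp_t parser;
  llhttp_settings_t settings;
  llhttp_settings_init(&settings);               /* zero every callback */
  settings.on_message_complete = on_message_complete;
  llhttp_init(&parser, HTTP_REQUEST, &settings);
  llhttp_errno_t err = llhttp_execute(&parser, data, len);
  if (err == HPE_PAUSED) {
    llhttp_resume(&parser);                      /* then re-run execute() */
    return 0;
  }
  return err == HPE_OK ? 0 : -1;
}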

View file

@ -39,20 +39,41 @@ int llhttp__after_headers_complete(llhttp_t* parser, const char* p,
int hasBody;
hasBody = parser->flags & F_CHUNKED || parser->content_length > 0;
if (parser->upgrade && (parser->method == HTTP_CONNECT ||
(parser->flags & F_SKIPBODY) || !hasBody)) {
if (
(parser->upgrade && (parser->method == HTTP_CONNECT ||
(parser->flags & F_SKIPBODY) || !hasBody)) ||
/* See RFC 2616 section 4.4 - 1xx e.g. Continue */
(parser->type == HTTP_RESPONSE && parser->status_code == 101)
) {
/* Exit, the rest of the message is in a different protocol. */
return 1;
}
if (parser->flags & F_SKIPBODY) {
if (parser->type == HTTP_RESPONSE && parser->status_code == 100) {
/* No body, restart as the message is complete */
return 0;
}
/* See RFC 2616 section 4.4 */
if (
parser->flags & F_SKIPBODY || /* response to a HEAD request */
(
parser->type == HTTP_RESPONSE && (
parser->status_code == 102 || /* Processing */
parser->status_code == 103 || /* Early Hints */
parser->status_code == 204 || /* No Content */
parser->status_code == 304 /* Not Modified */
)
)
) {
return 0;
} else if (parser->flags & F_CHUNKED) {
/* chunked encoding - ignore Content-Length header, prepare for a chunk */
return 2;
} else if (parser->flags & F_TRANSFER_ENCODING) {
if (parser->type == HTTP_REQUEST &&
(parser->lenient_flags & LENIENT_CHUNKED_LENGTH) == 0) {
(parser->lenient_flags & LENIENT_CHUNKED_LENGTH) == 0 &&
(parser->lenient_flags & LENIENT_TRANSFER_ENCODING) == 0) {
/* RFC 7230 3.3.3 */
/* If a Transfer-Encoding header field

File diff suppressed because it is too large

View file

@ -1,14 +1,11 @@
#ifndef INCLUDE_LLHTTP_H_
#define INCLUDE_LLHTTP_H_
#define LLHTTP_VERSION_MAJOR 5
#define LLHTTP_VERSION_MINOR 1
#define LLHTTP_VERSION_MAJOR 9
#define LLHTTP_VERSION_MINOR 3
#define LLHTTP_VERSION_PATCH 0
#ifndef LLHTTP_STRICT_MODE
# define LLHTTP_STRICT_MODE 0
#endif
#ifndef INCLUDE_LLHTTP_ITSELF_H_
#define INCLUDE_LLHTTP_ITSELF_H_
#ifdef __cplusplus
@ -33,11 +30,12 @@ struct llhttp__internal_s {
uint8_t http_major;
uint8_t http_minor;
uint8_t header_state;
uint8_t lenient_flags;
uint16_t lenient_flags;
uint8_t upgrade;
uint8_t finish;
uint16_t flags;
uint16_t status_code;
uint8_t initial_message_completed;
void* settings;
};
@ -49,6 +47,7 @@ int llhttp__internal_execute(llhttp__internal_t* s, const char* p, const char* e
#endif
#endif /* INCLUDE_LLHTTP_ITSELF_H_ */
#ifndef LLLLHTTP_C_HEADERS_
#define LLLLHTTP_C_HEADERS_
#ifdef __cplusplus
@ -59,8 +58,10 @@ enum llhttp_errno {
HPE_OK = 0,
HPE_INTERNAL = 1,
HPE_STRICT = 2,
HPE_CR_EXPECTED = 25,
HPE_LF_EXPECTED = 3,
HPE_UNEXPECTED_CONTENT_LENGTH = 4,
HPE_UNEXPECTED_SPACE = 30,
HPE_CLOSED_CONNECTION = 5,
HPE_INVALID_METHOD = 6,
HPE_INVALID_URL = 7,
@ -80,7 +81,17 @@ enum llhttp_errno {
HPE_PAUSED = 21,
HPE_PAUSED_UPGRADE = 22,
HPE_PAUSED_H2_UPGRADE = 23,
HPE_USER = 24
HPE_USER = 24,
HPE_CB_URL_COMPLETE = 26,
HPE_CB_STATUS_COMPLETE = 27,
HPE_CB_METHOD_COMPLETE = 32,
HPE_CB_VERSION_COMPLETE = 33,
HPE_CB_HEADER_FIELD_COMPLETE = 28,
HPE_CB_HEADER_VALUE_COMPLETE = 29,
HPE_CB_CHUNK_EXTENSION_NAME_COMPLETE = 34,
HPE_CB_CHUNK_EXTENSION_VALUE_COMPLETE = 35,
HPE_CB_RESET = 31,
HPE_CB_PROTOCOL_COMPLETE = 38
};
typedef enum llhttp_errno llhttp_errno_t;
@ -100,7 +111,14 @@ typedef enum llhttp_flags llhttp_flags_t;
enum llhttp_lenient_flags {
LENIENT_HEADERS = 0x1,
LENIENT_CHUNKED_LENGTH = 0x2,
LENIENT_KEEP_ALIVE = 0x4
LENIENT_KEEP_ALIVE = 0x4,
LENIENT_TRANSFER_ENCODING = 0x8,
LENIENT_VERSION = 0x10,
LENIENT_DATA_AFTER_CLOSE = 0x20,
LENIENT_OPTIONAL_LF_AFTER_CR = 0x40,
LENIENT_OPTIONAL_CRLF_AFTER_CHUNK = 0x80,
LENIENT_OPTIONAL_CR_BEFORE_LF = 0x100,
LENIENT_SPACES_AFTER_CHUNK_SIZE = 0x200
};
typedef enum llhttp_lenient_flags llhttp_lenient_flags_t;
@ -164,16 +182,122 @@ enum llhttp_method {
HTTP_SET_PARAMETER = 42,
HTTP_REDIRECT = 43,
HTTP_RECORD = 44,
HTTP_FLUSH = 45
HTTP_FLUSH = 45,
HTTP_QUERY = 46
};
typedef enum llhttp_method llhttp_method_t;
enum llhttp_status {
HTTP_STATUS_CONTINUE = 100,
HTTP_STATUS_SWITCHING_PROTOCOLS = 101,
HTTP_STATUS_PROCESSING = 102,
HTTP_STATUS_EARLY_HINTS = 103,
HTTP_STATUS_RESPONSE_IS_STALE = 110,
HTTP_STATUS_REVALIDATION_FAILED = 111,
HTTP_STATUS_DISCONNECTED_OPERATION = 112,
HTTP_STATUS_HEURISTIC_EXPIRATION = 113,
HTTP_STATUS_MISCELLANEOUS_WARNING = 199,
HTTP_STATUS_OK = 200,
HTTP_STATUS_CREATED = 201,
HTTP_STATUS_ACCEPTED = 202,
HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION = 203,
HTTP_STATUS_NO_CONTENT = 204,
HTTP_STATUS_RESET_CONTENT = 205,
HTTP_STATUS_PARTIAL_CONTENT = 206,
HTTP_STATUS_MULTI_STATUS = 207,
HTTP_STATUS_ALREADY_REPORTED = 208,
HTTP_STATUS_TRANSFORMATION_APPLIED = 214,
HTTP_STATUS_IM_USED = 226,
HTTP_STATUS_MISCELLANEOUS_PERSISTENT_WARNING = 299,
HTTP_STATUS_MULTIPLE_CHOICES = 300,
HTTP_STATUS_MOVED_PERMANENTLY = 301,
HTTP_STATUS_FOUND = 302,
HTTP_STATUS_SEE_OTHER = 303,
HTTP_STATUS_NOT_MODIFIED = 304,
HTTP_STATUS_USE_PROXY = 305,
HTTP_STATUS_SWITCH_PROXY = 306,
HTTP_STATUS_TEMPORARY_REDIRECT = 307,
HTTP_STATUS_PERMANENT_REDIRECT = 308,
HTTP_STATUS_BAD_REQUEST = 400,
HTTP_STATUS_UNAUTHORIZED = 401,
HTTP_STATUS_PAYMENT_REQUIRED = 402,
HTTP_STATUS_FORBIDDEN = 403,
HTTP_STATUS_NOT_FOUND = 404,
HTTP_STATUS_METHOD_NOT_ALLOWED = 405,
HTTP_STATUS_NOT_ACCEPTABLE = 406,
HTTP_STATUS_PROXY_AUTHENTICATION_REQUIRED = 407,
HTTP_STATUS_REQUEST_TIMEOUT = 408,
HTTP_STATUS_CONFLICT = 409,
HTTP_STATUS_GONE = 410,
HTTP_STATUS_LENGTH_REQUIRED = 411,
HTTP_STATUS_PRECONDITION_FAILED = 412,
HTTP_STATUS_PAYLOAD_TOO_LARGE = 413,
HTTP_STATUS_URI_TOO_LONG = 414,
HTTP_STATUS_UNSUPPORTED_MEDIA_TYPE = 415,
HTTP_STATUS_RANGE_NOT_SATISFIABLE = 416,
HTTP_STATUS_EXPECTATION_FAILED = 417,
HTTP_STATUS_IM_A_TEAPOT = 418,
HTTP_STATUS_PAGE_EXPIRED = 419,
HTTP_STATUS_ENHANCE_YOUR_CALM = 420,
HTTP_STATUS_MISDIRECTED_REQUEST = 421,
HTTP_STATUS_UNPROCESSABLE_ENTITY = 422,
HTTP_STATUS_LOCKED = 423,
HTTP_STATUS_FAILED_DEPENDENCY = 424,
HTTP_STATUS_TOO_EARLY = 425,
HTTP_STATUS_UPGRADE_REQUIRED = 426,
HTTP_STATUS_PRECONDITION_REQUIRED = 428,
HTTP_STATUS_TOO_MANY_REQUESTS = 429,
HTTP_STATUS_REQUEST_HEADER_FIELDS_TOO_LARGE_UNOFFICIAL = 430,
HTTP_STATUS_REQUEST_HEADER_FIELDS_TOO_LARGE = 431,
HTTP_STATUS_LOGIN_TIMEOUT = 440,
HTTP_STATUS_NO_RESPONSE = 444,
HTTP_STATUS_RETRY_WITH = 449,
HTTP_STATUS_BLOCKED_BY_PARENTAL_CONTROL = 450,
HTTP_STATUS_UNAVAILABLE_FOR_LEGAL_REASONS = 451,
HTTP_STATUS_CLIENT_CLOSED_LOAD_BALANCED_REQUEST = 460,
HTTP_STATUS_INVALID_X_FORWARDED_FOR = 463,
HTTP_STATUS_REQUEST_HEADER_TOO_LARGE = 494,
HTTP_STATUS_SSL_CERTIFICATE_ERROR = 495,
HTTP_STATUS_SSL_CERTIFICATE_REQUIRED = 496,
HTTP_STATUS_HTTP_REQUEST_SENT_TO_HTTPS_PORT = 497,
HTTP_STATUS_INVALID_TOKEN = 498,
HTTP_STATUS_CLIENT_CLOSED_REQUEST = 499,
HTTP_STATUS_INTERNAL_SERVER_ERROR = 500,
HTTP_STATUS_NOT_IMPLEMENTED = 501,
HTTP_STATUS_BAD_GATEWAY = 502,
HTTP_STATUS_SERVICE_UNAVAILABLE = 503,
HTTP_STATUS_GATEWAY_TIMEOUT = 504,
HTTP_STATUS_HTTP_VERSION_NOT_SUPPORTED = 505,
HTTP_STATUS_VARIANT_ALSO_NEGOTIATES = 506,
HTTP_STATUS_INSUFFICIENT_STORAGE = 507,
HTTP_STATUS_LOOP_DETECTED = 508,
HTTP_STATUS_BANDWIDTH_LIMIT_EXCEEDED = 509,
HTTP_STATUS_NOT_EXTENDED = 510,
HTTP_STATUS_NETWORK_AUTHENTICATION_REQUIRED = 511,
HTTP_STATUS_WEB_SERVER_UNKNOWN_ERROR = 520,
HTTP_STATUS_WEB_SERVER_IS_DOWN = 521,
HTTP_STATUS_CONNECTION_TIMEOUT = 522,
HTTP_STATUS_ORIGIN_IS_UNREACHABLE = 523,
HTTP_STATUS_TIMEOUT_OCCURED = 524,
HTTP_STATUS_SSL_HANDSHAKE_FAILED = 525,
HTTP_STATUS_INVALID_SSL_CERTIFICATE = 526,
HTTP_STATUS_RAILGUN_ERROR = 527,
HTTP_STATUS_SITE_IS_OVERLOADED = 529,
HTTP_STATUS_SITE_IS_FROZEN = 530,
HTTP_STATUS_IDENTITY_PROVIDER_AUTHENTICATION_ERROR = 561,
HTTP_STATUS_NETWORK_READ_TIMEOUT = 598,
HTTP_STATUS_NETWORK_CONNECT_TIMEOUT = 599
};
typedef enum llhttp_status llhttp_status_t;
#define HTTP_ERRNO_MAP(XX) \
XX(0, OK, OK) \
XX(1, INTERNAL, INTERNAL) \
XX(2, STRICT, STRICT) \
XX(25, CR_EXPECTED, CR_EXPECTED) \
XX(3, LF_EXPECTED, LF_EXPECTED) \
XX(4, UNEXPECTED_CONTENT_LENGTH, UNEXPECTED_CONTENT_LENGTH) \
XX(30, UNEXPECTED_SPACE, UNEXPECTED_SPACE) \
XX(5, CLOSED_CONNECTION, CLOSED_CONNECTION) \
XX(6, INVALID_METHOD, INVALID_METHOD) \
XX(7, INVALID_URL, INVALID_URL) \
@ -194,9 +318,74 @@ typedef enum llhttp_method llhttp_method_t;
XX(22, PAUSED_UPGRADE, PAUSED_UPGRADE) \
XX(23, PAUSED_H2_UPGRADE, PAUSED_H2_UPGRADE) \
XX(24, USER, USER) \
XX(26, CB_URL_COMPLETE, CB_URL_COMPLETE) \
XX(27, CB_STATUS_COMPLETE, CB_STATUS_COMPLETE) \
XX(32, CB_METHOD_COMPLETE, CB_METHOD_COMPLETE) \
XX(33, CB_VERSION_COMPLETE, CB_VERSION_COMPLETE) \
XX(28, CB_HEADER_FIELD_COMPLETE, CB_HEADER_FIELD_COMPLETE) \
XX(29, CB_HEADER_VALUE_COMPLETE, CB_HEADER_VALUE_COMPLETE) \
XX(34, CB_CHUNK_EXTENSION_NAME_COMPLETE, CB_CHUNK_EXTENSION_NAME_COMPLETE) \
XX(35, CB_CHUNK_EXTENSION_VALUE_COMPLETE, CB_CHUNK_EXTENSION_VALUE_COMPLETE) \
XX(31, CB_RESET, CB_RESET) \
XX(38, CB_PROTOCOL_COMPLETE, CB_PROTOCOL_COMPLETE) \
#define HTTP_METHOD_MAP(XX) \
XX(0, DELETE, DELETE) \
XX(1, GET, GET) \
XX(2, HEAD, HEAD) \
XX(3, POST, POST) \
XX(4, PUT, PUT) \
XX(5, CONNECT, CONNECT) \
XX(6, OPTIONS, OPTIONS) \
XX(7, TRACE, TRACE) \
XX(8, COPY, COPY) \
XX(9, LOCK, LOCK) \
XX(10, MKCOL, MKCOL) \
XX(11, MOVE, MOVE) \
XX(12, PROPFIND, PROPFIND) \
XX(13, PROPPATCH, PROPPATCH) \
XX(14, SEARCH, SEARCH) \
XX(15, UNLOCK, UNLOCK) \
XX(16, BIND, BIND) \
XX(17, REBIND, REBIND) \
XX(18, UNBIND, UNBIND) \
XX(19, ACL, ACL) \
XX(20, REPORT, REPORT) \
XX(21, MKACTIVITY, MKACTIVITY) \
XX(22, CHECKOUT, CHECKOUT) \
XX(23, MERGE, MERGE) \
XX(24, MSEARCH, M-SEARCH) \
XX(25, NOTIFY, NOTIFY) \
XX(26, SUBSCRIBE, SUBSCRIBE) \
XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \
XX(28, PATCH, PATCH) \
XX(29, PURGE, PURGE) \
XX(30, MKCALENDAR, MKCALENDAR) \
XX(31, LINK, LINK) \
XX(32, UNLINK, UNLINK) \
XX(33, SOURCE, SOURCE) \
XX(46, QUERY, QUERY) \
#define RTSP_METHOD_MAP(XX) \
XX(1, GET, GET) \
XX(3, POST, POST) \
XX(6, OPTIONS, OPTIONS) \
XX(35, DESCRIBE, DESCRIBE) \
XX(36, ANNOUNCE, ANNOUNCE) \
XX(37, SETUP, SETUP) \
XX(38, PLAY, PLAY) \
XX(39, PAUSE, PAUSE) \
XX(40, TEARDOWN, TEARDOWN) \
XX(41, GET_PARAMETER, GET_PARAMETER) \
XX(42, SET_PARAMETER, SET_PARAMETER) \
XX(43, REDIRECT, REDIRECT) \
XX(44, RECORD, RECORD) \
XX(45, FLUSH, FLUSH) \
#define HTTP_ALL_METHOD_MAP(XX) \
XX(0, DELETE, DELETE) \
XX(1, GET, GET) \
XX(2, HEAD, HEAD) \
@ -243,14 +432,117 @@ typedef enum llhttp_method llhttp_method_t;
XX(43, REDIRECT, REDIRECT) \
XX(44, RECORD, RECORD) \
XX(45, FLUSH, FLUSH) \
XX(46, QUERY, QUERY) \
#define HTTP_STATUS_MAP(XX) \
XX(100, CONTINUE, CONTINUE) \
XX(101, SWITCHING_PROTOCOLS, SWITCHING_PROTOCOLS) \
XX(102, PROCESSING, PROCESSING) \
XX(103, EARLY_HINTS, EARLY_HINTS) \
XX(110, RESPONSE_IS_STALE, RESPONSE_IS_STALE) \
XX(111, REVALIDATION_FAILED, REVALIDATION_FAILED) \
XX(112, DISCONNECTED_OPERATION, DISCONNECTED_OPERATION) \
XX(113, HEURISTIC_EXPIRATION, HEURISTIC_EXPIRATION) \
XX(199, MISCELLANEOUS_WARNING, MISCELLANEOUS_WARNING) \
XX(200, OK, OK) \
XX(201, CREATED, CREATED) \
XX(202, ACCEPTED, ACCEPTED) \
XX(203, NON_AUTHORITATIVE_INFORMATION, NON_AUTHORITATIVE_INFORMATION) \
XX(204, NO_CONTENT, NO_CONTENT) \
XX(205, RESET_CONTENT, RESET_CONTENT) \
XX(206, PARTIAL_CONTENT, PARTIAL_CONTENT) \
XX(207, MULTI_STATUS, MULTI_STATUS) \
XX(208, ALREADY_REPORTED, ALREADY_REPORTED) \
XX(214, TRANSFORMATION_APPLIED, TRANSFORMATION_APPLIED) \
XX(226, IM_USED, IM_USED) \
XX(299, MISCELLANEOUS_PERSISTENT_WARNING, MISCELLANEOUS_PERSISTENT_WARNING) \
XX(300, MULTIPLE_CHOICES, MULTIPLE_CHOICES) \
XX(301, MOVED_PERMANENTLY, MOVED_PERMANENTLY) \
XX(302, FOUND, FOUND) \
XX(303, SEE_OTHER, SEE_OTHER) \
XX(304, NOT_MODIFIED, NOT_MODIFIED) \
XX(305, USE_PROXY, USE_PROXY) \
XX(306, SWITCH_PROXY, SWITCH_PROXY) \
XX(307, TEMPORARY_REDIRECT, TEMPORARY_REDIRECT) \
XX(308, PERMANENT_REDIRECT, PERMANENT_REDIRECT) \
XX(400, BAD_REQUEST, BAD_REQUEST) \
XX(401, UNAUTHORIZED, UNAUTHORIZED) \
XX(402, PAYMENT_REQUIRED, PAYMENT_REQUIRED) \
XX(403, FORBIDDEN, FORBIDDEN) \
XX(404, NOT_FOUND, NOT_FOUND) \
XX(405, METHOD_NOT_ALLOWED, METHOD_NOT_ALLOWED) \
XX(406, NOT_ACCEPTABLE, NOT_ACCEPTABLE) \
XX(407, PROXY_AUTHENTICATION_REQUIRED, PROXY_AUTHENTICATION_REQUIRED) \
XX(408, REQUEST_TIMEOUT, REQUEST_TIMEOUT) \
XX(409, CONFLICT, CONFLICT) \
XX(410, GONE, GONE) \
XX(411, LENGTH_REQUIRED, LENGTH_REQUIRED) \
XX(412, PRECONDITION_FAILED, PRECONDITION_FAILED) \
XX(413, PAYLOAD_TOO_LARGE, PAYLOAD_TOO_LARGE) \
XX(414, URI_TOO_LONG, URI_TOO_LONG) \
XX(415, UNSUPPORTED_MEDIA_TYPE, UNSUPPORTED_MEDIA_TYPE) \
XX(416, RANGE_NOT_SATISFIABLE, RANGE_NOT_SATISFIABLE) \
XX(417, EXPECTATION_FAILED, EXPECTATION_FAILED) \
XX(418, IM_A_TEAPOT, IM_A_TEAPOT) \
XX(419, PAGE_EXPIRED, PAGE_EXPIRED) \
XX(420, ENHANCE_YOUR_CALM, ENHANCE_YOUR_CALM) \
XX(421, MISDIRECTED_REQUEST, MISDIRECTED_REQUEST) \
XX(422, UNPROCESSABLE_ENTITY, UNPROCESSABLE_ENTITY) \
XX(423, LOCKED, LOCKED) \
XX(424, FAILED_DEPENDENCY, FAILED_DEPENDENCY) \
XX(425, TOO_EARLY, TOO_EARLY) \
XX(426, UPGRADE_REQUIRED, UPGRADE_REQUIRED) \
XX(428, PRECONDITION_REQUIRED, PRECONDITION_REQUIRED) \
XX(429, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS) \
XX(430, REQUEST_HEADER_FIELDS_TOO_LARGE_UNOFFICIAL, REQUEST_HEADER_FIELDS_TOO_LARGE_UNOFFICIAL) \
XX(431, REQUEST_HEADER_FIELDS_TOO_LARGE, REQUEST_HEADER_FIELDS_TOO_LARGE) \
XX(440, LOGIN_TIMEOUT, LOGIN_TIMEOUT) \
XX(444, NO_RESPONSE, NO_RESPONSE) \
XX(449, RETRY_WITH, RETRY_WITH) \
XX(450, BLOCKED_BY_PARENTAL_CONTROL, BLOCKED_BY_PARENTAL_CONTROL) \
XX(451, UNAVAILABLE_FOR_LEGAL_REASONS, UNAVAILABLE_FOR_LEGAL_REASONS) \
XX(460, CLIENT_CLOSED_LOAD_BALANCED_REQUEST, CLIENT_CLOSED_LOAD_BALANCED_REQUEST) \
XX(463, INVALID_X_FORWARDED_FOR, INVALID_X_FORWARDED_FOR) \
XX(494, REQUEST_HEADER_TOO_LARGE, REQUEST_HEADER_TOO_LARGE) \
XX(495, SSL_CERTIFICATE_ERROR, SSL_CERTIFICATE_ERROR) \
XX(496, SSL_CERTIFICATE_REQUIRED, SSL_CERTIFICATE_REQUIRED) \
XX(497, HTTP_REQUEST_SENT_TO_HTTPS_PORT, HTTP_REQUEST_SENT_TO_HTTPS_PORT) \
XX(498, INVALID_TOKEN, INVALID_TOKEN) \
XX(499, CLIENT_CLOSED_REQUEST, CLIENT_CLOSED_REQUEST) \
XX(500, INTERNAL_SERVER_ERROR, INTERNAL_SERVER_ERROR) \
XX(501, NOT_IMPLEMENTED, NOT_IMPLEMENTED) \
XX(502, BAD_GATEWAY, BAD_GATEWAY) \
XX(503, SERVICE_UNAVAILABLE, SERVICE_UNAVAILABLE) \
XX(504, GATEWAY_TIMEOUT, GATEWAY_TIMEOUT) \
XX(505, HTTP_VERSION_NOT_SUPPORTED, HTTP_VERSION_NOT_SUPPORTED) \
XX(506, VARIANT_ALSO_NEGOTIATES, VARIANT_ALSO_NEGOTIATES) \
XX(507, INSUFFICIENT_STORAGE, INSUFFICIENT_STORAGE) \
XX(508, LOOP_DETECTED, LOOP_DETECTED) \
XX(509, BANDWIDTH_LIMIT_EXCEEDED, BANDWIDTH_LIMIT_EXCEEDED) \
XX(510, NOT_EXTENDED, NOT_EXTENDED) \
XX(511, NETWORK_AUTHENTICATION_REQUIRED, NETWORK_AUTHENTICATION_REQUIRED) \
XX(520, WEB_SERVER_UNKNOWN_ERROR, WEB_SERVER_UNKNOWN_ERROR) \
XX(521, WEB_SERVER_IS_DOWN, WEB_SERVER_IS_DOWN) \
XX(522, CONNECTION_TIMEOUT, CONNECTION_TIMEOUT) \
XX(523, ORIGIN_IS_UNREACHABLE, ORIGIN_IS_UNREACHABLE) \
XX(524, TIMEOUT_OCCURED, TIMEOUT_OCCURED) \
XX(525, SSL_HANDSHAKE_FAILED, SSL_HANDSHAKE_FAILED) \
XX(526, INVALID_SSL_CERTIFICATE, INVALID_SSL_CERTIFICATE) \
XX(527, RAILGUN_ERROR, RAILGUN_ERROR) \
XX(529, SITE_IS_OVERLOADED, SITE_IS_OVERLOADED) \
XX(530, SITE_IS_FROZEN, SITE_IS_FROZEN) \
XX(561, IDENTITY_PROVIDER_AUTHENTICATION_ERROR, IDENTITY_PROVIDER_AUTHENTICATION_ERROR) \
XX(598, NETWORK_READ_TIMEOUT, NETWORK_READ_TIMEOUT) \
XX(599, NETWORK_CONNECT_TIMEOUT, NETWORK_CONNECT_TIMEOUT) \
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* LLLLHTTP_C_HEADERS_ */
#ifndef INCLUDE_LLHTTP_API_H_
#define INCLUDE_LLHTTP_API_H_
#ifdef __cplusplus
@ -274,10 +566,16 @@ struct llhttp_settings_s {
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_begin;
/* Possible return values 0, -1, HPE_USER */
llhttp_data_cb on_protocol;
llhttp_data_cb on_url;
llhttp_data_cb on_status;
llhttp_data_cb on_method;
llhttp_data_cb on_version;
llhttp_data_cb on_header_field;
llhttp_data_cb on_header_value;
llhttp_data_cb on_chunk_extension_name;
llhttp_data_cb on_chunk_extension_value;
/* Possible return values:
* 0 - Proceed normally
@ -290,10 +588,20 @@ struct llhttp_settings_s {
*/
llhttp_cb on_headers_complete;
/* Possible return values 0, -1, HPE_USER */
llhttp_data_cb on_body;
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_complete;
llhttp_cb on_protocol_complete;
llhttp_cb on_url_complete;
llhttp_cb on_status_complete;
llhttp_cb on_method_complete;
llhttp_cb on_version_complete;
llhttp_cb on_header_field_complete;
llhttp_cb on_header_value_complete;
llhttp_cb on_chunk_extension_name_complete;
llhttp_cb on_chunk_extension_value_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
@ -301,11 +609,7 @@ struct llhttp_settings_s {
*/
llhttp_cb on_chunk_header;
llhttp_cb on_chunk_complete;
llhttp_cb on_url_complete;
llhttp_cb on_status_complete;
llhttp_cb on_header_field_complete;
llhttp_cb on_header_value_complete;
llhttp_cb on_reset;
};
/* Initialize the parser with specific type and user settings.
@ -318,8 +622,6 @@ LLHTTP_EXPORT
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings);
#if defined(__wasm__)
LLHTTP_EXPORT
llhttp_t* llhttp_alloc(llhttp_type_t type);
@ -344,8 +646,6 @@ int llhttp_get_status_code(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_upgrade(llhttp_t* parser);
#endif // defined(__wasm__)
/* Reset an already initialized parser back to the start state, preserving the
* existing parser type, callback settings, user data, and lenient flags.
*/
@ -459,6 +759,9 @@ const char* llhttp_errno_name(llhttp_errno_t err);
LLHTTP_EXPORT
const char* llhttp_method_name(llhttp_method_t method);
/* Returns textual name of HTTP status */
LLHTTP_EXPORT
const char* llhttp_status_name(llhttp_status_t status);
/* Enables/disables lenient header value parsing (disabled by default).
*
@ -467,7 +770,8 @@ const char* llhttp_method_name(llhttp_method_t method);
* `HPE_INVALID_HEADER_TOKEN` will be raised for incorrect header values when
* lenient parsing is "on".
*
* **(USE AT YOUR OWN RISK)**
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled);
@ -481,7 +785,8 @@ void llhttp_set_lenient_headers(llhttp_t* parser, int enabled);
* request smuggling, but may be less desirable for a small number of cases
* involving legacy servers.
*
* **(USE AT YOUR OWN RISK)**
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled);
@ -496,13 +801,105 @@ void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled);
* but might interact badly with outdated and insecure clients. With this flag
* the extra request/response will be parsed normally.
*
* **(USE AT YOUR OWN RISK)**
* **Enabling this flag can pose a security issue since you will be exposed to
* poisoning attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of `Transfer-Encoding` header.
*
* Normally `llhttp` would error when a `Transfer-Encoding` has `chunked` value
* and another value after it (either in a single header or in multiple
* headers whose values are internally joined using `, `).
* This is mandated by the spec to reliably determine request body size and thus
* avoid request smuggling.
* With this flag the extra value will be parsed normally.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_transfer_encoding(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of HTTP version.
*
* Normally `llhttp` would error when the HTTP version in the request or status line
* is not `0.9`, `1.0`, `1.1` or `2.0`.
* With this flag the invalid value will be parsed normally.
*
* **Enabling this flag can pose a security issue since you will allow unsupported
* HTTP versions. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_version(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of additional data received after a message ends
* and keep-alive is disabled.
*
* Normally `llhttp` would error when additional unexpected data is received if the message
* contains the `Connection` header with `close` value.
* With this flag the extra data will be discarded without throwing an error.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* poisoning attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_data_after_close(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of incomplete CRLF sequences.
*
* Normally `llhttp` would error when a CR is not followed by LF when terminating the
* request line, the status line, the headers or a chunk header.
* With this flag only a CR is required to terminate such sections.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_optional_lf_after_cr(llhttp_t* parser, int enabled);
/*
* Enables/disables lenient handling of line separators.
*
* Normally `llhttp` would error when a LF is not preceded by CR when terminating the
* request line, the status line, the headers, a chunk header or chunk data.
* With this flag only a LF is required to terminate such sections.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_optional_cr_before_lf(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of chunks not separated via CRLF.
*
* Normally `llhttp` would error when the CRLF is missing after chunk data and
* before the next chunk starts.
* With this flag the new chunk can start immediately after the previous one.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_optional_crlf_after_chunk(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of spaces after chunk size.
*
* Normally `llhttp` would error when a chunk size is followed by one or more
* spaces instead of a CRLF or `;`.
* With this flag this check is disabled.
*
* **Enabling this flag can pose a security issue since you will be exposed to
* request smuggling attacks. USE WITH CAUTION!**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_spaces_after_chunk_size(llhttp_t* parser, int enabled);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INCLUDE_LLHTTP_API_H_ */
#endif /* INCLUDE_LLHTTP_H_ */
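For orientation, a minimal sketch of how the API above is typically wired up; this example is not part of the diff, and it assumes llhttp's usual llhttp_settings_init() and llhttp_execute() entry points (declared in llhttp.h but not shown in this excerpt):

#include <cstdio>
#include <cstring>
#include "llhttp.h"  // include path assumed

static int on_message_complete(llhttp_t *parser) {
    std::printf("parsed a %s request\n",
                llhttp_method_name(static_cast<llhttp_method_t>(parser->method)));
    return 0; // HPE_OK: continue parsing
}

int main() {
    llhttp_settings_t settings;
    llhttp_settings_init(&settings);  // zero every callback first
    settings.on_message_complete = on_message_complete;

    llhttp_t parser;
    llhttp_init(&parser, HTTP_REQUEST, &settings);

    const char req[] = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
    const llhttp_errno_t err = llhttp_execute(&parser, req, std::strlen(req));
    if (err != HPE_OK) {
        std::printf("parse error: %s (%s)\n", llhttp_errno_name(err), parser.reason);
    }
    return 0;
}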


@ -19,6 +19,7 @@
#include "internal/meta.h"
#include <memory>
#include <limits>
#if RAPIDJSON_HAS_CXX11
#include <type_traits>
@ -433,7 +434,7 @@ namespace internal {
template<typename T, typename A>
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
{
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= SIZE_MAX / sizeof(T) && new_n <= SIZE_MAX / sizeof(T));
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= (std::numeric_limits<size_t>::max)() / sizeof(T) && new_n <= (std::numeric_limits<size_t>::max)() / sizeof(T));
return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
}
@ -496,9 +497,9 @@ public:
#endif
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
baseAllocator_(baseAllocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT


@ -75,7 +75,7 @@ class GenericDocument;
User can define this to use CrtAllocator or MemoryPoolAllocator.
*/
#ifndef RAPIDJSON_DEFAULT_ALLOCATOR
#define RAPIDJSON_DEFAULT_ALLOCATOR MemoryPoolAllocator<CrtAllocator>
#define RAPIDJSON_DEFAULT_ALLOCATOR ::RAPIDJSON_NAMESPACE::MemoryPoolAllocator<::RAPIDJSON_NAMESPACE::CrtAllocator>
#endif
/*! \def RAPIDJSON_DEFAULT_STACK_ALLOCATOR
@ -85,7 +85,7 @@ class GenericDocument;
User can define this to use CrtAllocator or MemoryPoolAllocator.
*/
#ifndef RAPIDJSON_DEFAULT_STACK_ALLOCATOR
#define RAPIDJSON_DEFAULT_STACK_ALLOCATOR CrtAllocator
#define RAPIDJSON_DEFAULT_STACK_ALLOCATOR ::RAPIDJSON_NAMESPACE::CrtAllocator
#endif
/*! \def RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY
@ -1033,7 +1033,7 @@ public:
return false;
for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) {
typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name);
if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value)
if (rhsMemberItr == rhs.MemberEnd() || (!(lhsMemberItr->value == rhsMemberItr->value)))
return false;
}
return true;
@ -1042,7 +1042,7 @@ public:
if (data_.a.size != rhs.data_.a.size)
return false;
for (SizeType i = 0; i < data_.a.size; i++)
if ((*this)[i] != rhs[i])
if (!((*this)[i] == rhs[i]))
return false;
return true;
@ -1078,6 +1078,7 @@ public:
*/
template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>,internal::IsGenericValue<T> >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); }
#ifndef __cpp_impl_three_way_comparison
//! Not-equal-to operator
/*! \return !(*this == rhs)
*/
@ -1092,7 +1093,6 @@ public:
*/
template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); }
#ifndef __cpp_lib_three_way_comparison
//! Equal-to operator with arbitrary types (symmetric version)
/*! \return (rhs == lhs)
*/
@ -1230,13 +1230,28 @@ public:
else {
RAPIDJSON_ASSERT(false); // see above note
// This will generate -Wexit-time-destructors in clang
// static GenericValue NullValue;
// return NullValue;
// Use static buffer and placement-new to prevent destruction
static char buffer[sizeof(GenericValue)];
#if RAPIDJSON_HAS_CXX11
// Use thread-local storage to prevent races between threads.
// Use static buffer and placement-new to prevent destruction, with
// alignas() to ensure proper alignment.
alignas(GenericValue) thread_local static char buffer[sizeof(GenericValue)];
return *new (buffer) GenericValue();
#elif defined(_MSC_VER) && _MSC_VER < 1900
// There's no way to solve both thread locality and proper alignment
// simultaneously.
__declspec(thread) static char buffer[sizeof(GenericValue)];
return *new (buffer) GenericValue();
#elif defined(__GNUC__) || defined(__clang__)
// This will generate -Wexit-time-destructors in clang, but that's
// better than having under-alignment.
__thread static GenericValue buffer;
return buffer;
#else
// Don't know what compiler this is, so don't know how to ensure
// thread-locality.
static GenericValue buffer;
return buffer;
#endif
}
}
template <typename SourceAllocator>
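As a standalone illustration of the C++11 branch above (hypothetical type name, not part of the diff): placement-new into an alignas-ed thread_local byte buffer yields a per-thread dummy object that is never destroyed, avoiding both cross-thread races and clang's -Wexit-time-destructors:

#include <new>

struct Dummy { int value = 0; };  // stand-in for GenericValue

Dummy &errorDummy() {
    // One buffer per thread, aligned for Dummy; the object is constructed in
    // place and deliberately never destroyed. It is re-constructed on every
    // call, which is acceptable for an assert-path dummy like the one above.
    alignas(Dummy) thread_local static char buffer[sizeof(Dummy)];
    return *new (buffer) Dummy();
}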
@ -2430,13 +2445,14 @@ private:
data_.f.flags = kShortStringFlag;
data_.ss.SetLength(s.length);
str = data_.ss.str;
std::memmove(str, s, s.length * sizeof(Ch));
} else {
data_.f.flags = kCopyStringFlag;
data_.s.length = s.length;
str = static_cast<Ch *>(allocator.Malloc((s.length + 1) * sizeof(Ch)));
SetStringPointer(str);
std::memcpy(str, s, s.length * sizeof(Ch));
}
std::memcpy(str, s, s.length * sizeof(Ch));
str[s.length] = '\0';
}
@ -2486,6 +2502,7 @@ public:
typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding.
typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of the document.
typedef Allocator AllocatorType; //!< Allocator type from template parameter.
typedef StackAllocator StackAllocatorType; //!< StackAllocator type from template parameter.
//! Constructor
/*! Creates an empty document of specified type.


@ -177,10 +177,10 @@ struct UTF8 {
template <typename InputStream, typename OutputStream>
static bool Validate(InputStream& is, OutputStream& os) {
#define RAPIDJSON_COPY() os.Put(c = is.Take())
#define RAPIDJSON_COPY() if (c != '\0') os.Put(c = is.Take())
#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70)
Ch c;
Ch c = static_cast<Ch>(-1);
RAPIDJSON_COPY();
if (!(c & 0x80))
return true;


@ -104,15 +104,69 @@ inline const RAPIDJSON_ERROR_CHARTYPE* GetValidateError_En(ValidateErrorCode val
case kValidateErrorType: return RAPIDJSON_ERROR_STRING("Property has a type '%actual' that is not in the following list: '%expected'.");
case kValidateErrorOneOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'oneOf', refer to following errors.");
case kValidateErrorOneOfMatch: return RAPIDJSON_ERROR_STRING("Property matched more than one of the sub-schemas specified by 'oneOf'.");
case kValidateErrorOneOfMatch: return RAPIDJSON_ERROR_STRING("Property matched more than one of the sub-schemas specified by 'oneOf', indices '%matches'.");
case kValidateErrorAllOf: return RAPIDJSON_ERROR_STRING("Property did not match all of the sub-schemas specified by 'allOf', refer to following errors.");
case kValidateErrorAnyOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'anyOf', refer to following errors.");
case kValidateErrorNot: return RAPIDJSON_ERROR_STRING("Property matched the sub-schema specified by 'not'.");
case kValidateErrorReadOnly: return RAPIDJSON_ERROR_STRING("Property is read-only but has been provided when validation is for writing.");
case kValidateErrorWriteOnly: return RAPIDJSON_ERROR_STRING("Property is write-only but has been provided when validation is for reading.");
default: return RAPIDJSON_ERROR_STRING("Unknown error.");
}
}
//! Maps error code of schema document compilation into error message.
/*!
\ingroup RAPIDJSON_ERRORS
\param schemaErrorCode Error code obtained from compiling the schema document.
\return the error message.
\note User can make a copy of this function for localization.
Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetSchemaError_En(SchemaErrorCode schemaErrorCode) {
switch (schemaErrorCode) {
case kSchemaErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
case kSchemaErrorStartUnknown: return RAPIDJSON_ERROR_STRING("Pointer '%value' to start of schema does not resolve to a location in the document.");
case kSchemaErrorRefPlainName: return RAPIDJSON_ERROR_STRING("$ref fragment '%value' must be a JSON pointer.");
case kSchemaErrorRefInvalid: return RAPIDJSON_ERROR_STRING("$ref must not be an empty string.");
case kSchemaErrorRefPointerInvalid: return RAPIDJSON_ERROR_STRING("$ref fragment '%value' is not a valid JSON pointer at offset '%offset'.");
case kSchemaErrorRefUnknown: return RAPIDJSON_ERROR_STRING("$ref '%value' does not resolve to a location in the target document.");
case kSchemaErrorRefCyclical: return RAPIDJSON_ERROR_STRING("$ref '%value' is cyclical.");
case kSchemaErrorRefNoRemoteProvider: return RAPIDJSON_ERROR_STRING("$ref is remote but there is no remote provider.");
case kSchemaErrorRefNoRemoteSchema: return RAPIDJSON_ERROR_STRING("$ref '%value' is remote but the remote provider did not return a schema.");
case kSchemaErrorRegexInvalid: return RAPIDJSON_ERROR_STRING("Invalid regular expression '%value' in 'pattern' or 'patternProperties'.");
case kSchemaErrorSpecUnknown: return RAPIDJSON_ERROR_STRING("JSON schema draft or OpenAPI version is not recognized.");
case kSchemaErrorSpecUnsupported: return RAPIDJSON_ERROR_STRING("JSON schema draft or OpenAPI version is not supported.");
case kSchemaErrorSpecIllegal: return RAPIDJSON_ERROR_STRING("Both JSON schema draft and OpenAPI version found in document.");
case kSchemaErrorReadOnlyAndWriteOnly: return RAPIDJSON_ERROR_STRING("Property must not be both 'readOnly' and 'writeOnly'.");
default: return RAPIDJSON_ERROR_STRING("Unknown error.");
}
}
//! Maps error code of pointer parse into error message.
/*!
\ingroup RAPIDJSON_ERRORS
\param pointerParseErrorCode Error code obtained from pointer parse.
\return the error message.
\note User can make a copy of this function for localization.
Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetPointerParseError_En(PointerParseErrorCode pointerParseErrorCode) {
switch (pointerParseErrorCode) {
case kPointerParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
case kPointerParseErrorTokenMustBeginWithSolidus: return RAPIDJSON_ERROR_STRING("A token must begin with a '/'.");
case kPointerParseErrorInvalidEscape: return RAPIDJSON_ERROR_STRING("Invalid escape.");
case kPointerParseErrorInvalidPercentEncoding: return RAPIDJSON_ERROR_STRING("Invalid percent encoding in URI fragment.");
case kPointerParseErrorCharacterMustPercentEncode: return RAPIDJSON_ERROR_STRING("A character must be percent encoded in a URI fragment.");
default: return RAPIDJSON_ERROR_STRING("Unknown error.");
}
}
RAPIDJSON_NAMESPACE_END
#ifdef __clang__


@ -42,7 +42,7 @@ RAPIDJSON_DIAG_OFF(padded)
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_STRING
//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[].
//! Macro for converting string literal to \ref RAPIDJSON_ERROR_CHARTYPE[].
/*! \ingroup RAPIDJSON_ERRORS
By default this conversion macro does nothing.
On Windows, user can define this macro as \c _T(x) for supporting both
@ -185,14 +185,17 @@ enum ValidateErrorCode {
kValidateErrorPatternProperties, //!< See other errors.
kValidateErrorDependencies, //!< Object has missing property or schema dependencies.
kValidateErrorEnum, //!< Property has a value that is not one of its allowed enumerated values
kValidateErrorType, //!< Property has a type that is not allowed by the schema..
kValidateErrorEnum, //!< Property has a value that is not one of its allowed enumerated values.
kValidateErrorType, //!< Property has a type that is not allowed by the schema.
kValidateErrorOneOf, //!< Property did not match any of the sub-schemas specified by 'oneOf'.
kValidateErrorOneOfMatch, //!< Property matched more than one of the sub-schemas specified by 'oneOf'.
kValidateErrorAllOf, //!< Property did not match all of the sub-schemas specified by 'allOf'.
kValidateErrorAnyOf, //!< Property did not match any of the sub-schemas specified by 'anyOf'.
kValidateErrorNot //!< Property matched the sub-schema specified by 'not'.
kValidateErrorNot, //!< Property matched the sub-schema specified by 'not'.
kValidateErrorReadOnly, //!< Property is read-only but has been provided when validation is for writing
kValidateErrorWriteOnly //!< Property is write-only but has been provided when validation is for reading
};
//! Function pointer type of GetValidateError().
@ -207,6 +210,72 @@ enum ValidateErrorCode {
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetValidateErrorFunc)(ValidateErrorCode);
///////////////////////////////////////////////////////////////////////////////
// SchemaErrorCode
//! Error codes when validating.
/*! \ingroup RAPIDJSON_ERRORS
\see GenericSchemaValidator
*/
enum SchemaErrorCode {
kSchemaErrorNone = 0, //!< No error.
kSchemaErrorStartUnknown, //!< Pointer to start of schema does not resolve to a location in the document
kSchemaErrorRefPlainName, //!< $ref fragment must be a JSON pointer
kSchemaErrorRefInvalid, //!< $ref must not be an empty string
kSchemaErrorRefPointerInvalid, //!< $ref fragment is not a valid JSON pointer at offset
kSchemaErrorRefUnknown, //!< $ref does not resolve to a location in the target document
kSchemaErrorRefCyclical, //!< $ref is cyclical
kSchemaErrorRefNoRemoteProvider, //!< $ref is remote but there is no remote provider
kSchemaErrorRefNoRemoteSchema, //!< $ref is remote but the remote provider did not return a schema
kSchemaErrorRegexInvalid, //!< Invalid regular expression in 'pattern' or 'patternProperties'
kSchemaErrorSpecUnknown, //!< JSON schema draft or OpenAPI version is not recognized
kSchemaErrorSpecUnsupported, //!< JSON schema draft or OpenAPI version is not supported
kSchemaErrorSpecIllegal, //!< Both JSON schema draft and OpenAPI version found in document
kSchemaErrorReadOnlyAndWriteOnly //!< Property must not be both 'readOnly' and 'writeOnly'
};
//! Function pointer type of GetSchemaError().
/*! \ingroup RAPIDJSON_ERRORS
This is the prototype for \c GetSchemaError_X(), where \c X is a locale.
User can dynamically change locale in runtime, e.g.:
\code
GetSchemaErrorFunc GetSchemaError = GetSchemaError_En; // or whatever
const RAPIDJSON_ERROR_CHARTYPE* s = GetSchemaError(validator.GetInvalidSchemaCode());
\endcode
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetSchemaErrorFunc)(SchemaErrorCode);
///////////////////////////////////////////////////////////////////////////////
// PointerParseErrorCode
//! Error code of JSON pointer parsing.
/*! \ingroup RAPIDJSON_ERRORS
\see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
*/
enum PointerParseErrorCode {
kPointerParseErrorNone = 0, //!< The parse is successful
kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/'
kPointerParseErrorInvalidEscape, //!< Invalid escape
kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment
kPointerParseErrorCharacterMustPercentEncode //!< A character must be percent encoded in URI fragment
};
//! Function pointer type of GetPointerParseError().
/*! \ingroup RAPIDJSON_ERRORS
This is the prototype for \c GetPointerParseError_X(), where \c X is a locale.
User can dynamically change locale in runtime, e.g.:
\code
GetPointerParseErrorFunc GetPointerParseError = GetPointerParseError_En; // or whatever
const RAPIDJSON_ERROR_CHARTYPE* s = GetPointerParseError(pointer.GetParseErrorCode());
\endcode
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetPointerParseErrorFunc)(PointerParseErrorCode);
RAPIDJSON_NAMESPACE_END
#ifdef __clang__


@ -19,7 +19,11 @@
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_M_AMD64)
#include <intrin.h> // for _umul128
#if !defined(_ARM64EC_)
#pragma intrinsic(_umul128)
#else
#pragma comment(lib,"softintrin")
#endif
#endif
RAPIDJSON_NAMESPACE_BEGIN
@ -255,7 +259,7 @@ private:
if (low < k)
(*outHigh)++;
return low;
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
__extension__ typedef unsigned __int128 uint128;
uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
p += k;


@ -25,7 +25,11 @@
#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER)
#include <intrin.h>
#if !defined(_ARM64EC_)
#pragma intrinsic(_umul128)
#else
#pragma comment(lib,"softintrin")
#endif
#endif
RAPIDJSON_NAMESPACE_BEGIN
@ -75,7 +79,7 @@ struct DiyFp {
if (l & (uint64_t(1) << 63)) // rounding
h++;
return DiyFp(h, e + rhs.e + 64);
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
__extension__ typedef unsigned __int128 uint128;
uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
uint64_t h = static_cast<uint64_t>(p >> 64);


@ -58,11 +58,11 @@ inline int CountDecimalDigit32(uint32_t n) {
}
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
static const uint64_t kPow10[] = { 1U, 10U, 100U, 1000U, 10000U, 100000U, 1000000U, 10000000U, 100000000U,
1000000000U, 10000000000U, 100000000000U, 1000000000000U,
10000000000000U, 100000000000000U, 1000000000000000U,
10000000000000000U, 100000000000000000U, 1000000000000000000U,
10000000000000000000U };
static const uint64_t kPow10[] = { 1ULL, 10ULL, 100ULL, 1000ULL, 10000ULL, 100000ULL, 1000000ULL, 10000000ULL, 100000000ULL,
1000000000ULL, 10000000000ULL, 100000000000ULL, 1000000000000ULL,
10000000000000ULL, 100000000000000ULL, 1000000000000000ULL,
10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL,
10000000000000000000ULL };
const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
const DiyFp wp_w = Mp - W;
uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);


@ -615,7 +615,7 @@ public:
RAPIDJSON_ASSERT(regex_.IsValid());
if (!allocator_)
ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
stateSet_ = static_cast<unsigned*>(allocator_->Malloc(GetStateSetSize()));
stateSet_ = static_cast<uint32_t*>(allocator_->Malloc(GetStateSetSize()));
state0_.template Reserve<SizeType>(regex_.stateCount_);
state1_.template Reserve<SizeType>(regex_.stateCount_);
}


@ -134,7 +134,7 @@ inline bool StrtodDiyFp(const Ch* decimals, int dLen, int dExp, double* result)
int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > Ch('5')))
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] >= Ch('5')))
break;
significand = significand * 10u + static_cast<unsigned>(decimals[i] - Ch('0'));
}


@ -18,6 +18,7 @@
#include "document.h"
#include "uri.h"
#include "internal/itoa.h"
#include "error/error.h" // PointerParseErrorCode
#ifdef __clang__
RAPIDJSON_DIAG_PUSH
@ -27,23 +28,16 @@ RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
#endif
#if defined(RAPIDJSON_CPLUSPLUS) && RAPIDJSON_CPLUSPLUS >= 201703L
#define RAPIDJSON_IF_CONSTEXPR if constexpr
#else
#define RAPIDJSON_IF_CONSTEXPR if
#endif
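Presumably the point of this macro: in C++17, `if constexpr` on a dependent condition such as `sizeof(Ch) == 1` discards the untaken branch at template instantiation time, so the `reinterpret_cast<Ch*>(buffer)` branch used below is only compiled when it is actually valid; pre-C++17 compilers fall back to a plain runtime `if`.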
RAPIDJSON_NAMESPACE_BEGIN
static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token
//! Error code of parsing.
/*! \ingroup RAPIDJSON_ERRORS
\see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
*/
enum PointerParseErrorCode {
kPointerParseErrorNone = 0, //!< The parse is successful
kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/'
kPointerParseErrorInvalidEscape, //!< Invalid escape
kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment
kPointerParseErrorCharacterMustPercentEncode //!< A character must be percent encoded in URI fragment
};
///////////////////////////////////////////////////////////////////////////////
// GenericPointer
@ -84,7 +78,7 @@ public:
typedef GenericUri<ValueType, Allocator> UriType;
//! A token is the basic unit of internal representation.
/*!
A JSON pointer string representation "/foo/123" is parsed to two tokens:
"foo" and 123. 123 will be represented in both numeric form and string form.
@ -303,7 +297,7 @@ public:
SizeType length = static_cast<SizeType>(end - buffer);
buffer[length] = '\0';
if (sizeof(Ch) == 1) {
RAPIDJSON_IF_CONSTEXPR (sizeof(Ch) == 1) {
Token token = { reinterpret_cast<Ch*>(buffer), length, index };
return Append(token, allocator);
}
@ -902,10 +896,16 @@ private:
std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch));
}
// Adjust pointers to name buffer
std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_;
for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t)
t->name += diff;
// The names of each token point to a string in the nameBuffer_. The
// previous memcpy copied over string pointers into the rhs.nameBuffer_,
// but they should point to the strings in the new nameBuffer_.
for (size_t i = 0; i < rhs.tokenCount_; ++i) {
// The offset between the string address and the name buffer should
// still be constant, so we can just get this offset and set each new
// token name according to the new buffer start + the known offset.
std::ptrdiff_t name_offset = rhs.tokens_[i].name - rhs.nameBuffer_;
tokens_[i].name = nameBuffer_ + name_offset;
}
return nameBuffer_ + nameBufferSize;
}
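The pointer-rebasing idiom in that loop, reduced to its essentials (hypothetical names, not part of the diff):

#include <cstddef>

// After copying a string pool from 'src' to 'dst', interior pointers must be
// recomputed: each string's offset from the pool start is invariant, so
// new pointer = new base + old offset.
void rebase(char *dst, const char *src, char **ptrs, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        const std::ptrdiff_t offset = ptrs[i] - src;  // offset into old pool
        ptrs[i] = dst + offset;                       // same offset, new pool
    }
}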


@ -195,7 +195,7 @@
*/
#ifndef RAPIDJSON_NO_INT64DEFINE
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
#include "msinttypes/stdint.h"
#include "msinttypes/inttypes.h"
#else
@ -268,7 +268,7 @@
# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
// Detect with architecture macros
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN


@ -1433,7 +1433,7 @@ private:
class NumberStream<InputStream, StackCharacter, true, false> : public NumberStream<InputStream, StackCharacter, false, false> {
typedef NumberStream<InputStream, StackCharacter, false, false> Base;
public:
NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {}
NumberStream(GenericReader& reader, InputStream& s) : Base(reader, s), stackStream(reader.stack_) {}
RAPIDJSON_FORCEINLINE Ch TakePush() {
stackStream.Put(static_cast<StackCharacter>(Base::is.Peek()));
@ -1459,7 +1459,7 @@ private:
class NumberStream<InputStream, StackCharacter, true, true> : public NumberStream<InputStream, StackCharacter, true, false> {
typedef NumberStream<InputStream, StackCharacter, true, false> Base;
public:
NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {}
NumberStream(GenericReader& reader, InputStream& s) : Base(reader, s) {}
RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); }
};
@ -1584,7 +1584,7 @@ private:
// Parse frac = decimal-point 1*DIGIT
int expFrac = 0;
size_t decimalPosition;
if (Consume(s, '.')) {
if (!useNanOrInf && Consume(s, '.')) {
decimalPosition = s.Length();
if (RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9')))
@ -1631,7 +1631,7 @@ private:
// Parse exp = e [ minus / plus ] 1*DIGIT
int exp = 0;
if (Consume(s, 'e') || Consume(s, 'E')) {
if (!useNanOrInf && (Consume(s, 'e') || Consume(s, 'E'))) {
if (!useDouble) {
d = static_cast<double>(use64bit ? i64 : i);
useDouble = true;
@ -1694,7 +1694,7 @@ private:
}
else {
SizeType numCharsToCopy = static_cast<SizeType>(s.Length());
GenericStringStream<UTF8<NumberCharacter>> srcStream(s.Pop());
GenericStringStream<UTF8<NumberCharacter> > srcStream(s.Pop());
StackStream<typename TargetEncoding::Ch> dstStream(stack_);
while (numCharsToCopy--) {
Transcoder<UTF8<typename TargetEncoding::Ch>, TargetEncoding>::Transcode(srcStream, dstStream);

File diff suppressed because it is too large.


@ -238,20 +238,27 @@ private:
// Allocate one block containing each part of the URI (5) plus base plus full URI, all null terminated.
// Order: scheme, auth, path, query, frag, base, uri
// Note need to set, increment, assign in 3 stages to avoid compiler warning bug.
size_t total = (3 * len + 7) * sizeof(Ch);
scheme_ = static_cast<Ch*>(allocator_->Malloc(total));
*scheme_ = '\0';
auth_ = scheme_ + 1;
auth_ = scheme_;
auth_++;
*auth_ = '\0';
path_ = auth_ + 1;
path_ = auth_;
path_++;
*path_ = '\0';
query_ = path_ + 1;
query_ = path_;
query_++;
*query_ = '\0';
frag_ = query_ + 1;
frag_ = query_;
frag_++;
*frag_ = '\0';
base_ = frag_ + 1;
base_ = frag_;
base_++;
*base_ = '\0';
uri_ = base_ + 1;
uri_ = base_;
uri_++;
*uri_ = '\0';
return total;
}
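A plausible accounting for the `(3 * len + 7)` factor above: the five components (scheme, auth, path, query, frag) are disjoint pieces of the input and together need at most `len` characters, the base needs at most `len`, the full URI exactly `len`, and each of the seven strings gets a null terminator, hence `len + len + len + 7`.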
@ -293,7 +300,9 @@ private:
}
}
// Look for auth (//([^/?#]*))?
auth_ = scheme_ + GetSchemeStringLength() + 1;
// Note need to set, increment, assign in 3 stages to avoid compiler warning bug.
auth_ = scheme_ + GetSchemeStringLength();
auth_++;
*auth_ = '\0';
if (start < len - 1 && uri[start] == '/' && uri[start + 1] == '/') {
pos2 = start + 2;
@ -308,7 +317,9 @@ private:
start = pos2;
}
// Look for path ([^?#]*)
path_ = auth_ + GetAuthStringLength() + 1;
// Note need to set, increment, assign in 3 stages to avoid compiler warning bug.
path_ = auth_ + GetAuthStringLength();
path_++;
*path_ = '\0';
if (start < len) {
pos2 = start;
@ -326,7 +337,9 @@ private:
}
}
// Look for query (\?([^#]*))?
query_ = path_ + GetPathStringLength() + 1;
// Note need to set, increment, assign in 3 stages to avoid compiler warning bug.
query_ = path_ + GetPathStringLength();
query_++;
*query_ = '\0';
if (start < len && uri[start] == '?') {
pos2 = start + 1;
@ -341,7 +354,9 @@ private:
}
}
// Look for fragment (#(.*))?
frag_ = query_ + GetQueryStringLength() + 1;
// Note need to set, increment, assign in 3 stages to avoid compiler warning bug.
frag_ = query_ + GetQueryStringLength();
frag_++;
*frag_ = '\0';
if (start < len && uri[start] == '#') {
std::memcpy(frag_, &uri[start], (len - start) * sizeof(Ch));


@ -67,6 +67,7 @@ enum WriteFlag {
kWriteNoFlags = 0, //!< No flags are set.
kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings.
kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN.
kWriteNanAndInfNullFlag = 4, //!< Allow writing of Infinity, -Infinity and NaN as null.
kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS
};
@ -349,8 +350,13 @@ protected:
bool WriteDouble(double d) {
if (internal::Double(d).IsNanOrInf()) {
if (!(writeFlags & kWriteNanAndInfFlag))
if (!(writeFlags & kWriteNanAndInfFlag) && !(writeFlags & kWriteNanAndInfNullFlag))
return false;
if (writeFlags & kWriteNanAndInfNullFlag) {
PutReserve(*os_, 4);
PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l');
return true;
}
if (internal::Double(d).IsNan()) {
PutReserve(*os_, 3);
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
@ -549,6 +555,11 @@ inline bool Writer<StringBuffer>::WriteDouble(double d) {
// Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag).
if (!(kWriteDefaultFlags & kWriteNanAndInfFlag))
return false;
if (kWriteDefaultFlags & kWriteNanAndInfNullFlag) {
PutReserve(*os_, 4);
PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l');
return true;
}
if (internal::Double(d).IsNan()) {
PutReserve(*os_, 3);
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
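A minimal sketch of opting into the new behavior; the flag goes in the Writer's `writeFlags` template parameter (standard rapidjson usage, not part of the diff):

#include <cmath>
#include <cstdio>
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

int main() {
    using namespace rapidjson;
    StringBuffer sb;
    // kWriteNanAndInfNullFlag is the fifth (writeFlags) template parameter.
    Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfNullFlag> writer(sb);
    writer.StartArray();
    writer.Double(std::nan(""));  // emitted as null instead of failing the write
    writer.Double(1.5);
    writer.EndArray();
    std::printf("%s\n", sb.GetString());  // [null,1.5]
    return 0;
}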


@ -6,8 +6,8 @@
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2024 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2024 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -23,7 +23,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cstdlib>
#include <uv.h>
@ -61,13 +60,13 @@ int xmrig::App::exec()
return 2;
}
m_signals = std::make_shared<Signals>(this);
int rc = 0;
if (background(rc)) {
return rc;
}
m_signals = std::make_shared<Signals>(this);
rc = m_controller->init();
if (rc != 0) {
return rc;


@ -5,8 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2024 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2024 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -22,7 +22,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cstdlib>
#include <csignal>
#include <cerrno>
@ -53,16 +52,9 @@ bool xmrig::App::background(int &rc)
return true;
}
i = setsid();
if (i < 0) {
if (setsid() < 0) {
LOG_ERR("setsid() failed (errno = %d)", errno);
}
i = chdir("/");
if (i < 0) {
LOG_ERR("chdir() failed (errno = %d)", errno);
}
return false;
}


@ -30,10 +30,10 @@
#include "base/tools/Handle.h"
inline static const char *format(double h, char *buf, size_t size)
inline static const char *format(std::pair<bool, double> h, char *buf, size_t size)
{
if (std::isnormal(h)) {
snprintf(buf, size, (h < 100.0) ? "%04.2f" : "%03.1f", h);
if (h.first) {
snprintf(buf, size, (h.second < 100.0) ? "%04.2f" : "%03.1f", h.second);
return buf;
}
@ -80,15 +80,16 @@ double xmrig::Hashrate::average() const
}
const char *xmrig::Hashrate::format(double h, char *buf, size_t size)
const char *xmrig::Hashrate::format(std::pair<bool, double> h, char *buf, size_t size)
{
return ::format(h, buf, size);
}
rapidjson::Value xmrig::Hashrate::normalize(double d)
rapidjson::Value xmrig::Hashrate::normalize(std::pair<bool, double> d)
{
return Json::normalize(d, false);
using namespace rapidjson;
return d.first ? Value(floor(d.second * 100.0) / 100.0) : Value(kNullType);
}
@ -122,11 +123,11 @@ rapidjson::Value xmrig::Hashrate::toJSON(size_t threadId, rapidjson::Document &d
#endif
double xmrig::Hashrate::hashrate(size_t index, size_t ms) const
std::pair<bool, double> xmrig::Hashrate::hashrate(size_t index, size_t ms) const
{
assert(index < m_threads);
if (index >= m_threads) {
return nan("");
return { false, 0.0 };
}
uint64_t earliestHashCount = 0;
@ -157,17 +158,27 @@ double xmrig::Hashrate::hashrate(size_t index, size_t ms) const
} while (idx != idx_start);
if (!haveFullSet || earliestStamp == 0 || lastestStamp == 0) {
return nan("");
return { false, 0.0 };
}
if (lastestStamp - earliestStamp == 0) {
return nan("");
if (lastestHashCnt == earliestHashCount) {
return { true, 0.0 };
}
if (lastestStamp == earliestStamp) {
return { false, 0.0 };
}
const auto hashes = static_cast<double>(lastestHashCnt - earliestHashCount);
const auto time = static_cast<double>(lastestStamp - earliestStamp) / 1000.0;
const auto time = static_cast<double>(lastestStamp - earliestStamp);
return hashes / time;
const auto hr = hashes * 1000.0 / time;
if (!std::isnormal(hr)) {
return { false, 0.0 };
}
return { true, hr };
}
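As a worked example of the final computation: 6 000 000 hashes counted across a 60 000 ms window give `hashes * 1000.0 / time` = 6 000 000 * 1000 / 60 000 = 100 000 H/s. The `{bool, double}` pair also lets callers distinguish "no data yet" (`{false, 0.0}`) from a genuine zero rate (`{true, 0.0}`), something the old NaN convention could not express because `std::isnormal(0.0)` is false.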


@ -47,16 +47,16 @@ public:
Hashrate(size_t threads);
~Hashrate();
inline double calc(size_t ms) const { const double data = hashrate(0U, ms); return std::isnormal(data) ? data : 0.0; }
inline double calc(size_t threadId, size_t ms) const { return hashrate(threadId + 1, ms); }
inline std::pair<bool, double> calc(size_t ms) const { return hashrate(0U, ms); }
inline std::pair<bool, double> calc(size_t threadId, size_t ms) const { return hashrate(threadId + 1, ms); }
inline size_t threads() const { return m_threads > 0U ? m_threads - 1U : 0U; }
inline void add(size_t threadId, uint64_t count, uint64_t timestamp) { addData(threadId + 1U, count, timestamp); }
inline void add(uint64_t count, uint64_t timestamp) { addData(0U, count, timestamp); }
double average() const;
static const char *format(double h, char *buf, size_t size);
static rapidjson::Value normalize(double d);
static const char *format(std::pair<bool, double> h, char *buf, size_t size);
static rapidjson::Value normalize(std::pair<bool, double> d);
# ifdef XMRIG_FEATURE_API
rapidjson::Value toJSON(rapidjson::Document &doc) const;
@ -64,7 +64,7 @@ public:
# endif
private:
double hashrate(size_t index, size_t ms) const;
std::pair<bool, double> hashrate(size_t index, size_t ms) const;
void addData(size_t index, uint64_t count, uint64_t timestamp);
constexpr static size_t kBucketSize = 2 << 11;
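A hypothetical caller against the new pair-based contract (the header include path is assumed):

#include <cstdio>
#include <utility>
#include "backend/common/Hashrate.h"  // path assumed

void printRate(const xmrig::Hashrate &hashrate) {
    char buf[16];
    const std::pair<bool, double> hr = hashrate.calc(10000);  // 10-second window
    if (hr.first) {
        std::printf("hashrate: %s H/s\n", xmrig::Hashrate::format(hr, buf, sizeof(buf)));
    }
}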


@ -1,6 +1,6 @@
/* XMRig
* Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2024 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2024 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -19,10 +19,8 @@
#ifndef XMRIG_PCITOPOLOGY_H
#define XMRIG_PCITOPOLOGY_H
#include <cstdio>
#include "base/tools/String.h"
@ -33,7 +31,14 @@ class PciTopology
{
public:
PciTopology() = default;
PciTopology(uint32_t bus, uint32_t device, uint32_t function) : m_valid(true), m_bus(bus), m_device(device), m_function(function) {}
template<typename T>
inline PciTopology(T bus, T device, T function)
: m_valid(true),
m_bus(static_cast<uint8_t>(bus)),
m_device(static_cast<uint8_t>(device)),
m_function(static_cast<uint8_t>(function))
{}
inline bool isEqual(const PciTopology &other) const { return m_valid == other.m_valid && toUint32() == other.toUint32(); }
inline bool isValid() const { return m_valid; }
@ -70,4 +75,4 @@ private:
} // namespace xmrig
#endif /* XMRIG_PCITOPOLOGY_H */
#endif // XMRIG_PCITOPOLOGY_H


@ -1,6 +1,6 @@
/* XMRig
* Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2024 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2024 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -122,17 +122,6 @@ size_t inline generate<Algorithm::RANDOM_X>(Threads<CpuThreads> &threads, uint32
}
}
if (!threads.isExist(Algorithm::RX_KEVA)) {
auto keva = cpuInfo->threads(Algorithm::RX_KEVA, limit);
if (keva == wow) {
threads.setAlias(Algorithm::RX_KEVA, Algorithm::kRX_WOW);
++count;
}
else {
count += threads.move(Algorithm::kRX_KEVA, std::move(keva));
}
}
if (!threads.isExist(Algorithm::RX_WOW)) {
count += threads.move(Algorithm::kRX_WOW, std::move(wow));
}


@ -359,7 +359,9 @@ void xmrig::CpuWorker<N>::start()
}
}
consumeJob();
if (!Nonce::isPaused()) {
consumeJob();
}
}
}


@ -49,8 +49,15 @@ endif()
if (XMRIG_ARM)
list(APPEND SOURCES_BACKEND_CPU src/backend/cpu/platform/BasicCpuInfo_arm.cpp)
if (XMRIG_OS_UNIX)
list(APPEND SOURCES_BACKEND_CPU src/backend/cpu/platform/lscpu_arm.cpp)
if (XMRIG_OS_WIN)
list(APPEND SOURCES_BACKEND_CPU src/backend/cpu/platform/BasicCpuInfo_arm_win.cpp)
elseif(XMRIG_OS_APPLE)
list(APPEND SOURCES_BACKEND_CPU src/backend/cpu/platform/BasicCpuInfo_arm_mac.cpp)
else()
list(APPEND SOURCES_BACKEND_CPU
src/backend/cpu/platform/lscpu_arm.cpp
src/backend/cpu/platform/BasicCpuInfo_arm_unix.cpp
)
endif()
else()
list(APPEND SOURCES_BACKEND_CPU src/backend/cpu/platform/BasicCpuInfo.cpp)


@ -52,7 +52,8 @@ public:
ARCH_ZEN_PLUS,
ARCH_ZEN2,
ARCH_ZEN3,
ARCH_ZEN4
ARCH_ZEN4,
ARCH_ZEN5
};
enum MsrMod : uint32_t {
@ -60,12 +61,13 @@ public:
MSR_MOD_RYZEN_17H,
MSR_MOD_RYZEN_19H,
MSR_MOD_RYZEN_19H_ZEN4,
MSR_MOD_RYZEN_1AH_ZEN5,
MSR_MOD_INTEL,
MSR_MOD_CUSTOM,
MSR_MOD_MAX
};
# define MSR_NAMES_LIST "none", "ryzen_17h", "ryzen_19h", "ryzen_19h_zen4", "intel", "custom"
# define MSR_NAMES_LIST "none", "ryzen_17h", "ryzen_19h", "ryzen_19h_zen4", "ryzen_1Ah_zen5", "intel", "custom"
enum Flag : uint32_t {
FLAG_AES,


@ -64,7 +64,7 @@ static_assert(kCpuFlagsSize == ICpuInfo::FLAG_MAX, "kCpuFlagsSize and FLAG_MAX m
#ifdef XMRIG_FEATURE_MSR
constexpr size_t kMsrArraySize = 6;
constexpr size_t kMsrArraySize = 7;
static const std::array<const char *, kMsrArraySize> msrNames = { MSR_NAMES_LIST };
static_assert(kMsrArraySize == ICpuInfo::MSR_MOD_MAX, "kMsrArraySize and MSR_MOD_MAX mismatch");
#endif
@ -260,6 +260,11 @@ xmrig::BasicCpuInfo::BasicCpuInfo() :
}
break;
case 0x1a:
m_arch = ARCH_ZEN5;
m_msrMod = MSR_MOD_RYZEN_1AH_ZEN5;
break;
default:
m_msrMod = MSR_MOD_NONE;
break;
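For context (not part of the diff): on AMD CPUs the family value switched on here is conventionally computed from CPUID leaf 1 EAX as base family plus extended family, so Zen 5 parts (base 0xF, extended 0x0B) report 0x1a. A sketch:

#include <cstdint>

// Decode the AMD display family from CPUID leaf 1 EAX.
inline uint32_t amd_family(uint32_t eax) {
    const uint32_t base = (eax >> 8) & 0xF;    // base family
    const uint32_t ext  = (eax >> 20) & 0xFF;  // extended family
    return base == 0xF ? base + ext : base;    // 0xF + 0x0B = 0x1a for Zen 5
}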


@ -1,7 +1,7 @@
/* XMRig
* Copyright (c) 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2023 XMRig <support@xmrig.com>
* Copyright (c) 2018-2025 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2025 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -85,6 +85,8 @@ private:
uint32_t m_family = 0;
uint32_t m_model = 0;
uint32_t m_stepping = 0;
# else
void init_arm();
# endif
Assembly m_assembly = Assembly::NONE;

Some files were not shown because too many files have changed in this diff.