diff --git a/CHANGELOG.md b/CHANGELOG.md
index b380d75b..feba3b05 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+# v4.5.0-beta
+- Added NVIDIA CUDA support via external [CUDA plugin](https://github.com/xmrig/xmrig-cuda). XMRig is now a unified 3-in-1 miner.
+
# v4.4.0-beta
- [#1068](https://github.com/xmrig/xmrig/pull/1068) Added support for `self-select` stratum protocol extension.
- [#1240](https://github.com/xmrig/xmrig/pull/1240) Sync with the latest RandomX code.
@@ -6,7 +9,6 @@
- [#1247](https://github.com/xmrig/xmrig/pull/1247) Fixed ARM64 RandomX code alignment.
- [#1248](https://github.com/xmrig/xmrig/pull/1248) Fixed RandomX code cache cleanup on iOS/Darwin.
-
# v4.3.1-beta
- Fixed regression in v4.3.0, miner didn't create `cn` mining profile with default config example.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 90abfd42..849c1257 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,6 +15,8 @@ option(WITH_TLS "Enable OpenSSL support" ON)
option(WITH_ASM "Enable ASM PoW implementations" ON)
option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF)
option(WITH_OPENCL "Enable OpenCL backend" ON)
+option(WITH_CUDA "Enable CUDA backend" ON)
+option(WITH_NVML "Enable NVML (NVIDIA Management Library) support (only if CUDA backend enabled)" ON)
option(WITH_STRICT_CACHE "Enable strict checks for OpenCL cache" ON)
option(WITH_INTERLEAVE_DEBUG_LOG "Enable debug log for threads interleave" OFF)
diff --git a/README.md b/README.md
index e520a505..ac978393 100644
--- a/README.md
+++ b/README.md
@@ -9,17 +9,15 @@
[](https://github.com/xmrig/xmrig/stargazers)
[](https://github.com/xmrig/xmrig/network)
-XMRig High performance, open source, cross platform RandomX, CryptoNight and Argon2 CPU/GPU miner, with official support for Windows
+XMRig is a high performance, open source, cross platform RandomX, CryptoNight and Argon2 CPU/GPU miner, with official support for Windows.
+
+## Mining backends
+- **CPU** (x64/x86/ARM)
+- **OpenCL** for AMD GPUs.
+- **CUDA** for NVIDIA GPUs via external [CUDA plugin](https://github.com/xmrig/xmrig-cuda).
-#### Table of contents
-* [Download](#download)
-* [Usage](#usage)
-* [Build](https://github.com/xmrig/xmrig/wiki/Build)
-* [Donations](#donations)
-* [Contacts](#contacts)
-
## Download
* Binary releases: https://github.com/xmrig/xmrig/releases
* Git tree: https://github.com/xmrig/xmrig.git
@@ -29,6 +27,7 @@ XMRig High performance, open source, cross platform RandomX, CryptoNight and Arg
The preferred way to configure the miner is the [JSON config file](src/config.json) as it is more flexible and human friendly. The command line interface does not cover all features, such as mining profiles for different algorithms. Important options can be changed during runtime without miner restart by editing the config file or executing API calls.
* **[xmrig.com/wizard](https://xmrig.com/wizard)** helps you create initial configuration for the miner.
+* **[workers.xmrig.info](http://workers.xmrig.info)** helps you manage your miners via the HTTP API.
### Command line options
```
@@ -77,13 +76,20 @@ OpenCL backend:
--opencl enable OpenCL mining backend
--opencl-devices=N list of OpenCL devices to use
--opencl-platform=N OpenCL platform index or name
- --opencl-loader=N path to OpenCL-ICD-Loader (OpenCL.dll or libOpenCL.so)
+ --opencl-loader=PATH path to OpenCL-ICD-Loader (OpenCL.dll or libOpenCL.so)
--opencl-no-cache disable OpenCL cache
--print-platforms print available OpenCL platforms and exit
+CUDA backend:
+ --cuda enable CUDA mining backend
+ --cuda-loader=PATH path to CUDA plugin (xmrig-cuda.dll or libxmrig-cuda.so)
+ --no-nvml disable NVML (NVIDIA Management Library) support
+
Logging:
+ -S, --syslog use system log for output messages
-l, --log-file=FILE log all output to a file
--print-time=N print hashrate report every N seconds
+ --health-print-time=N print health report every N seconds
--no-color disable colored output
Misc:
diff --git a/doc/build/CMAKE_OPTIONS.md b/doc/build/CMAKE_OPTIONS.md
index 8cb66eec..fad15f50 100644
--- a/doc/build/CMAKE_OPTIONS.md
+++ b/doc/build/CMAKE_OPTIONS.md
@@ -21,6 +21,8 @@ This feature add external dependency to libhwloc (1.10.0+) (except MSVC builds).
* **`-DWITH_TLS=OFF`** disable SSL/TLS support (secure connections to pool). This feature add external dependency to OpenSSL.
* **`-DWITH_ASM=OFF`** disable assembly optimizations for modern CryptoNight algorithms.
* **`-DWITH_EMBEDDED_CONFIG=ON`** Enable [embedded](https://github.com/xmrig/xmrig/issues/957) config support.
+* **`-DWITH_OPENCL=OFF`** Disable OpenCL backend.
+* **`-DWITH_CUDA=OFF`** Disable CUDA backend.
## Debug options
diff --git a/src/App.cpp b/src/App.cpp
index 7db2ace2..04b05451 100644
--- a/src/App.cpp
+++ b/src/App.cpp
@@ -98,29 +98,12 @@ int xmrig::App::exec()
void xmrig::App::onConsoleCommand(char command)
{
- switch (command) {
- case 'h':
- case 'H':
- m_controller->miner()->printHashrate(true);
- break;
-
- case 'p':
- case 'P':
- m_controller->miner()->setEnabled(false);
- break;
-
- case 'r':
- case 'R':
- m_controller->miner()->setEnabled(true);
- break;
-
- case 3:
+ if (command == 3) {
LOG_WARN("Ctrl+C received, exiting");
close();
- break;
-
- default:
- break;
+ }
+ else {
+ m_controller->miner()->execCommand(command);
}
}
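
With this change, only Ctrl+C is handled in `App` itself; every other key is forwarded to `Miner::execCommand()`, which presumably lets the miner and each backend react to its own hotkeys through the new `IBackend::execCommand()` (for example `e`/`E` triggers the NVML health report in `CudaBackend`). A minimal self-contained sketch of that broadcast-dispatch idea, using hypothetical class names rather than XMRig's real ones:

```cpp
// Sketch only: route single-character console commands to a set of backends
// through a common execCommand() hook. Names are hypothetical.
#include <cctype>
#include <cstdio>
#include <memory>
#include <vector>

struct IBackendLike {
    virtual ~IBackendLike() = default;
    virtual void execCommand(char command) = 0;   // each backend picks the keys it cares about
};

struct GpuBackendLike : IBackendLike {
    void execCommand(char command) override {
        if (std::tolower(static_cast<unsigned char>(command)) == 'e') {
            std::puts("health report requested");  // e.g. what 'e' could trigger for NVML
        }
    }
};

struct MinerLike {
    std::vector<std::unique_ptr<IBackendLike>> backends;

    void execCommand(char command) {
        for (auto &backend : backends) {            // broadcast; backends ignore unknown keys
            backend->execCommand(command);
        }
    }
};

int main() {
    MinerLike miner;
    miner.backends.push_back(std::make_unique<GpuBackendLike>());
    miner.execCommand('e');                         // prints "health report requested"
}
```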
diff --git a/src/Summary.cpp b/src/Summary.cpp
index 2b8939a7..2055e972 100644
--- a/src/Summary.cpp
+++ b/src/Summary.cpp
@@ -126,9 +126,9 @@ static void print_threads(Config *config)
static void print_commands(Config *)
{
if (Log::colors) {
- Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BOLD("h") WHITE_BOLD("ashrate, ")
- MAGENTA_BOLD("p") WHITE_BOLD("ause, ")
- MAGENTA_BOLD("r") WHITE_BOLD("esume"));
+ Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BG(WHITE_BOLD_S "h") WHITE_BOLD("ashrate, ")
+ MAGENTA_BG(WHITE_BOLD_S "p") WHITE_BOLD("ause, ")
+ MAGENTA_BG(WHITE_BOLD_S "r") WHITE_BOLD("esume"));
}
else {
Log::print(" * COMMANDS 'h' hashrate, 'p' pause, 'r' resume");
diff --git a/src/backend/backend.cmake b/src/backend/backend.cmake
index 14f0ab9c..6bf6c3b2 100644
--- a/src/backend/backend.cmake
+++ b/src/backend/backend.cmake
@@ -1,5 +1,6 @@
include (src/backend/cpu/cpu.cmake)
include (src/backend/opencl/opencl.cmake)
+include (src/backend/cuda/cuda.cmake)
include (src/backend/common/common.cmake)
@@ -7,10 +8,12 @@ set(HEADERS_BACKEND
"${HEADERS_BACKEND_COMMON}"
"${HEADERS_BACKEND_CPU}"
"${HEADERS_BACKEND_OPENCL}"
+ "${HEADERS_BACKEND_CUDA}"
)
set(SOURCES_BACKEND
"${SOURCES_BACKEND_COMMON}"
"${SOURCES_BACKEND_CPU}"
"${SOURCES_BACKEND_OPENCL}"
+ "${SOURCES_BACKEND_CUDA}"
)
diff --git a/src/backend/common/Tags.h b/src/backend/common/Tags.h
index 16022e33..9141eb72 100644
--- a/src/backend/common/Tags.h
+++ b/src/backend/common/Tags.h
@@ -27,10 +27,15 @@
#define XMRIG_TAGS_H
+#include <cstdint>
+
+
namespace xmrig {
+const char *backend_tag(uint32_t backend);
const char *cpu_tag();
+const char *net_tag();
#ifdef XMRIG_FEATURE_OPENCL
@@ -38,6 +43,12 @@ const char *ocl_tag();
#endif
+#ifdef XMRIG_FEATURE_CUDA
+const char *cuda_tag();
+#endif
+
+
+
#ifdef XMRIG_ALGO_RANDOMX
const char *rx_tag();
#endif
diff --git a/src/backend/common/Threads.cpp b/src/backend/common/Threads.cpp
index bb9e440f..f85e18f3 100644
--- a/src/backend/common/Threads.cpp
+++ b/src/backend/common/Threads.cpp
@@ -34,6 +34,11 @@
#endif
+#ifdef XMRIG_FEATURE_CUDA
+# include "backend/cuda/CudaThreads.h"
+#endif
+
+
namespace xmrig {
@@ -167,4 +172,8 @@ template class Threads;
template class Threads<OclThreads>;
#endif
+#ifdef XMRIG_FEATURE_CUDA
+template class Threads<CudaThreads>;
+#endif
+
} // namespace xmrig
diff --git a/src/backend/common/WorkerJob.h b/src/backend/common/WorkerJob.h
index 6e31a701..2ea41476 100644
--- a/src/backend/common/WorkerJob.h
+++ b/src/backend/common/WorkerJob.h
@@ -26,7 +26,7 @@
#define XMRIG_WORKERJOB_H
-#include
+#include
#include "base/net/stratum/Job.h"
@@ -47,9 +47,9 @@ public:
inline uint8_t index() const { return m_index; }
- inline void add(const Job &job, uint64_t sequence, uint32_t reserveCount)
+ inline void add(const Job &job, uint32_t reserveCount, Nonce::Backend backend)
{
- m_sequence = sequence;
+ m_sequence = Nonce::sequence(backend);
if (currentJob() == job) {
return;
@@ -60,7 +60,7 @@ public:
return;
}
- save(job, reserveCount);
+ save(job, reserveCount, backend);
}
@@ -82,13 +82,15 @@ public:
private:
- inline void save(const Job &job, uint32_t reserveCount)
+ inline void save(const Job &job, uint32_t reserveCount, Nonce::Backend backend)
{
m_index = job.index();
const size_t size = job.size();
m_jobs[index()] = job;
m_rounds[index()] = 0;
+ m_jobs[index()].setBackend(backend);
+
for (size_t i = 0; i < N; ++i) {
memcpy(m_blobs[index()] + (i * size), job.blob(), size);
*nonce(i) = Nonce::next(index(), *nonce(i), reserveCount, job.isNicehash());
@@ -96,7 +98,7 @@ private:
}
- alignas(16) uint8_t m_blobs[2][Job::kMaxBlobSize * N];
+ alignas(16) uint8_t m_blobs[2][Job::kMaxBlobSize * N]{};
Job m_jobs[2];
uint32_t m_rounds[2] = { 0, 0 };
uint64_t m_sequence = 0;
@@ -126,12 +128,14 @@ inline void xmrig::WorkerJob<1>::nextRound(uint32_t rounds, uint32_t roundSize)
template<>
-inline void xmrig::WorkerJob<1>::save(const Job &job, uint32_t reserveCount)
+inline void xmrig::WorkerJob<1>::save(const Job &job, uint32_t reserveCount, Nonce::Backend backend)
{
m_index = job.index();
m_jobs[index()] = job;
m_rounds[index()] = 0;
+ m_jobs[index()].setBackend(backend);
+
memcpy(blob(), job.blob(), job.size());
*nonce() = Nonce::next(index(), *nonce(), reserveCount, currentJob().isNicehash());
}
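
`WorkerJob::add()` now takes the owning backend and looks up the job sequence itself via `Nonce::sequence(backend)`, so staleness is tracked per backend (CPU, OpenCL, CUDA) rather than passed in by the caller. A rough standalone sketch of that per-backend sequence idea (illustrative names, not XMRig's actual `Nonce` API):

```cpp
// Illustrative sketch: a worker captures the backend's sequence counter when it
// takes a job and treats the job as outdated once the counter moves on.
#include <atomic>
#include <cstdint>
#include <cstdio>

// One counter like this would exist per backend (CPU, OPENCL, CUDA).
static std::atomic<uint64_t> cudaSequence{1};

static uint64_t sequence()                { return cudaSequence.load(); }
static void onNewJob()                    { ++cudaSequence; }
static bool isOutdated(uint64_t captured) { return sequence() != captured; }

int main() {
    const uint64_t captured = sequence();   // what WorkerJob::add() would store for its backend
    std::printf("outdated: %d\n", isOutdated(captured)); // 0
    onNewJob();                             // the pool sent new work for this backend
    std::printf("outdated: %d\n", isOutdated(captured)); // 1
}
```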
diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp
index 9fb6859e..319f2804 100644
--- a/src/backend/common/Workers.cpp
+++ b/src/backend/common/Workers.cpp
@@ -37,6 +37,11 @@
#endif
+#ifdef XMRIG_FEATURE_CUDA
+# include "backend/cuda/CudaWorker.h"
+#endif
+
+
namespace xmrig {
@@ -217,4 +222,16 @@ template class Workers;
#endif
+#ifdef XMRIG_FEATURE_CUDA
+template<>
+xmrig::IWorker *xmrig::Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle)
+{
+ return new CudaWorker(handle->id(), handle->config());
+}
+
+
+template class Workers<CudaLaunchData>;
+#endif
+
+
} // namespace xmrig
diff --git a/src/backend/common/Workers.h b/src/backend/common/Workers.h
index 2688a1e3..637a33c9 100644
--- a/src/backend/common/Workers.h
+++ b/src/backend/common/Workers.h
@@ -37,6 +37,11 @@
#endif
+#ifdef XMRIG_FEATURE_CUDA
+# include "backend/cuda/CudaLaunchData.h"
+#endif
+
+
namespace xmrig {
@@ -80,6 +85,13 @@ extern template class Workers;
#endif
+#ifdef XMRIG_FEATURE_CUDA
+template<>
+IWorker *Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle);
+extern template class Workers<CudaLaunchData>;
+#endif
+
+
} // namespace xmrig
diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h
index f9073229..c6e05bcf 100644
--- a/src/backend/common/interfaces/IBackend.h
+++ b/src/backend/common/interfaces/IBackend.h
@@ -53,6 +53,7 @@ public:
virtual const Hashrate *hashrate() const = 0;
virtual const String &profileName() const = 0;
virtual const String &type() const = 0;
+ virtual void execCommand(char command) = 0;
virtual void prepare(const Job &nextJob) = 0;
virtual void printHashrate(bool details) = 0;
virtual void setJob(const Job &job) = 0;
diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp
index 78b71e25..ddee6768 100644
--- a/src/backend/cpu/CpuBackend.cpp
+++ b/src/backend/cpu/CpuBackend.cpp
@@ -146,10 +146,11 @@ public:
inline void start()
{
- LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"),
+ LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" thread%s)") " scratchpad " CYAN_BOLD("%zu KB"),
tag,
profileName.data(),
threads.size(),
+ threads.size() > 1 ? "s" : "",
algo.l3() / 1024
);
@@ -210,6 +211,24 @@ public:
} // namespace xmrig
+const char *xmrig::backend_tag(uint32_t backend)
+{
+# ifdef XMRIG_FEATURE_OPENCL
+ if (backend == Nonce::OPENCL) {
+ return ocl_tag();
+ }
+# endif
+
+# ifdef XMRIG_FEATURE_CUDA
+ if (backend == Nonce::CUDA) {
+ return cuda_tag();
+ }
+# endif
+
+ return tag;
+}
+
+
const char *xmrig::cpu_tag()
{
return tag;
diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h
index d5d9fbf6..d0e2267a 100644
--- a/src/backend/cpu/CpuBackend.h
+++ b/src/backend/cpu/CpuBackend.h
@@ -50,6 +50,8 @@ public:
~CpuBackend() override;
protected:
+ inline void execCommand(char) override {}
+
bool isEnabled() const override;
bool isEnabled(const Algorithm &algorithm) const override;
const Hashrate *hashrate() const override;
diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp
index d6e917fb..f64882ba 100644
--- a/src/backend/cpu/CpuWorker.cpp
+++ b/src/backend/cpu/CpuWorker.cpp
@@ -303,7 +303,7 @@ void xmrig::CpuWorker::consumeJob()
return;
}
- m_job.add(m_miner->job(), Nonce::sequence(Nonce::CPU), kReserveCount);
+ m_job.add(m_miner->job(), kReserveCount, Nonce::CPU);
# ifdef XMRIG_ALGO_RANDOMX
if (m_job.currentJob().algorithm().family() == Algorithm::RANDOM_X) {
diff --git a/src/backend/cuda/CudaBackend.cpp b/src/backend/cuda/CudaBackend.cpp
new file mode 100644
index 00000000..feb1dd93
--- /dev/null
+++ b/src/backend/cuda/CudaBackend.cpp
@@ -0,0 +1,519 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include
+#include
+
+
+#include "backend/cuda/CudaBackend.h"
+#include "backend/common/Hashrate.h"
+#include "backend/common/interfaces/IWorker.h"
+#include "backend/common/Tags.h"
+#include "backend/common/Workers.h"
+#include "backend/cuda/CudaConfig.h"
+#include "backend/cuda/CudaThreads.h"
+#include "backend/cuda/CudaWorker.h"
+#include "backend/cuda/wrappers/CudaDevice.h"
+#include "backend/cuda/wrappers/CudaLib.h"
+#include "base/io/log/Log.h"
+#include "base/net/stratum/Job.h"
+#include "base/tools/Chrono.h"
+#include "base/tools/String.h"
+#include "core/config/Config.h"
+#include "core/Controller.h"
+#include "rapidjson/document.h"
+
+
+#ifdef XMRIG_FEATURE_API
+# include "base/api/interfaces/IApiRequest.h"
+#endif
+
+
+#ifdef XMRIG_FEATURE_NVML
+#include "backend/cuda/wrappers/NvmlLib.h"
+
+namespace xmrig { static const char *kNvmlLabel = "NVML"; }
+#endif
+
+
+namespace xmrig {
+
+
+extern template class Threads<CudaThreads>;
+
+
+constexpr const size_t oneMiB = 1024u * 1024u;
+static const char *kLabel = "CUDA";
+static const char *tag = GREEN_BG_BOLD(WHITE_BOLD_S " nv ");
+static const String kType = "cuda";
+static std::mutex mutex;
+
+
+
+static void printDisabled(const char *label, const char *reason)
+{
+ Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") RED_BOLD("disabled") "%s", label, reason);
+}
+
+
+struct CudaLaunchStatus
+{
+public:
+ inline size_t threads() const { return m_threads; }
+
+ inline bool started(bool ready)
+ {
+ ready ? m_started++ : m_errors++;
+
+ return (m_started + m_errors) == m_threads;
+ }
+
+ inline void start(size_t threads)
+ {
+ m_started = 0;
+ m_errors = 0;
+ m_threads = threads;
+ m_ts = Chrono::steadyMSecs();
+ CudaWorker::ready = false;
+ }
+
+ inline void print() const
+ {
+ if (m_started == 0) {
+ LOG_ERR("%s " RED_BOLD("disabled") YELLOW(" (failed to start threads)"), tag);
+
+ return;
+ }
+
+ LOG_INFO("%s" GREEN_BOLD(" READY") " threads " "%s%zu/%zu" BLACK_BOLD(" (%" PRIu64 " ms)"),
+ tag,
+ m_errors == 0 ? CYAN_BOLD_S : YELLOW_BOLD_S,
+ m_started,
+ m_threads,
+ Chrono::steadyMSecs() - m_ts
+ );
+ }
+
+private:
+ size_t m_errors = 0;
+ size_t m_started = 0;
+ size_t m_threads = 0;
+ uint64_t m_ts = 0;
+};
+
+
+class CudaBackendPrivate
+{
+public:
+ inline CudaBackendPrivate(Controller *controller) :
+ controller(controller)
+ {
+ init(controller->config()->cuda());
+ }
+
+
+ void init(const CudaConfig &cuda)
+ {
+ if (!cuda.isEnabled()) {
+ return printDisabled(kLabel, "");
+ }
+
+ if (!CudaLib::init(cuda.loader())) {
+ return printDisabled(kLabel, RED_S " (failed to load CUDA plugin)");
+ }
+
+ runtimeVersion = CudaLib::runtimeVersion();
+ driverVersion = CudaLib::driverVersion();
+
+ if (!runtimeVersion || !driverVersion || !CudaLib::deviceCount()) {
+ return printDisabled(kLabel, RED_S " (no devices)");
+ }
+
+ if (!devices.empty()) {
+ return;
+ }
+
+ Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s") "/" WHITE_BOLD("%s") BLACK_BOLD("/%s"), kLabel,
+ CudaLib::version(runtimeVersion).c_str(), CudaLib::version(driverVersion).c_str(), CudaLib::pluginVersion());
+
+ devices = CudaLib::devices(cuda.bfactor(), cuda.bsleep());
+
+# ifdef XMRIG_FEATURE_NVML
+ if (cuda.isNvmlEnabled()) {
+ if (NvmlLib::init(cuda.nvmlLoader())) {
+ NvmlLib::assign(devices);
+
+ Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s") "/" GREEN_BOLD("%s") " press " MAGENTA_BG(WHITE_BOLD_S "e") " for health report",
+ kNvmlLabel,
+ NvmlLib::version(),
+ NvmlLib::driverVersion()
+ );
+ }
+ else {
+ printDisabled(kLabel, RED_S " (failed to load NVML)");
+ }
+ }
+ else {
+ printDisabled(kNvmlLabel, "");
+ }
+# endif
+
+ for (const CudaDevice &device : devices) {
+ Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("#%zu") YELLOW(" %s") GREEN_BOLD(" %s ") WHITE_BOLD("%u/%u MHz") " smx:" WHITE_BOLD("%u") " arch:" WHITE_BOLD("%u%u") " mem:" CYAN("%zu/%zu") " MB",
+ "CUDA GPU",
+ device.index(),
+ device.topology().toString().data(),
+ device.name().data(),
+ device.clock(),
+ device.memoryClock(),
+ device.smx(),
+ device.computeCapability(true),
+ device.computeCapability(false),
+ device.freeMemSize() / oneMiB,
+ device.globalMemSize() / oneMiB);
+ }
+ }
+
+
+ inline void start(const Job &)
+ {
+ LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" thread%s)") " scratchpad " CYAN_BOLD("%zu KB"),
+ tag,
+ profileName.data(),
+ threads.size(),
+ threads.size() > 1 ? "s" : "",
+ algo.l3() / 1024
+ );
+
+ Log::print(WHITE_BOLD("| # | GPU | BUS ID | I | T | B | BF | BS | MEM | NAME"));
+
+ size_t i = 0;
+ for (const auto &data : threads) {
+ Log::print("|" CYAN_BOLD("%3zu") " |" CYAN_BOLD("%4u") " |" YELLOW(" %7s") " |" CYAN_BOLD("%5d") " |" CYAN_BOLD("%4d") " |"
+ CYAN_BOLD("%4d") " |" CYAN_BOLD("%3d") " |" CYAN_BOLD("%4d") " |" CYAN("%5zu") " | " GREEN("%s"),
+ i,
+ data.thread.index(),
+ data.device.topology().toString().data(),
+ data.thread.threads() * data.thread.blocks(),
+ data.thread.threads(),
+ data.thread.blocks(),
+ data.thread.bfactor(),
+ data.thread.bsleep(),
+ (data.thread.threads() * data.thread.blocks()) * algo.l3() / oneMiB,
+ data.device.name().data()
+ );
+
+ i++;
+ }
+
+ status.start(threads.size());
+ workers.start(threads);
+ }
+
+
+# ifdef XMRIG_FEATURE_NVML
+ void printHealth()
+ {
+ for (const auto &device : devices) {
+ const auto health = NvmlLib::health(device.nvmlDevice());
+
+ std::string clocks;
+ if (health.clock && health.memClock) {
+ clocks += " " + std::to_string(health.clock) + "/" + std::to_string(health.memClock) + " MHz";
+ }
+
+ std::string fans;
+ if (!health.fanSpeed.empty()) {
+ for (uint32_t i = 0; i < health.fanSpeed.size(); ++i) {
+ fans += " fan" + std::to_string(i) + ":" CYAN_BOLD_S + std::to_string(health.fanSpeed[i]) + "%" CLEAR;
+ }
+ }
+
+ LOG_INFO(CYAN_BOLD("#%u") YELLOW(" %s") MAGENTA_BOLD("%4uW") CSI "1;%um %2uC" CLEAR WHITE_BOLD("%s") "%s",
+ device.index(),
+ device.topology().toString().data(),
+ health.power,
+ health.temperature < 60 ? 32 : (health.temperature > 85 ? 31 : 33),
+ health.temperature,
+ clocks.c_str(),
+ fans.c_str()
+ );
+ }
+ }
+# endif
+
+
+ Algorithm algo;
+ Controller *controller;
+ CudaLaunchStatus status;
+ std::vector<CudaDevice> devices;
+ std::vector<CudaLaunchData> threads;
+ String profileName;
+ uint32_t driverVersion = 0;
+ uint32_t runtimeVersion = 0;
+ Workers<CudaLaunchData> workers;
+};
+
+
+} // namespace xmrig
+
+
+const char *xmrig::cuda_tag()
+{
+ return tag;
+}
+
+
+xmrig::CudaBackend::CudaBackend(Controller *controller) :
+ d_ptr(new CudaBackendPrivate(controller))
+{
+ d_ptr->workers.setBackend(this);
+}
+
+
+xmrig::CudaBackend::~CudaBackend()
+{
+ delete d_ptr;
+
+ CudaLib::close();
+
+# ifdef XMRIG_FEATURE_NVML
+ NvmlLib::close();
+# endif
+}
+
+
+bool xmrig::CudaBackend::isEnabled() const
+{
+ return d_ptr->controller->config()->cuda().isEnabled() && CudaLib::isInitialized() && !d_ptr->devices.empty();
+}
+
+
+bool xmrig::CudaBackend::isEnabled(const Algorithm &algorithm) const
+{
+ return !d_ptr->controller->config()->cuda().threads().get(algorithm).isEmpty();
+}
+
+
+const xmrig::Hashrate *xmrig::CudaBackend::hashrate() const
+{
+ return d_ptr->workers.hashrate();
+}
+
+
+const xmrig::String &xmrig::CudaBackend::profileName() const
+{
+ return d_ptr->profileName;
+}
+
+
+const xmrig::String &xmrig::CudaBackend::type() const
+{
+ return kType;
+}
+
+
+void xmrig::CudaBackend::execCommand(char command)
+{
+# ifdef XMRIG_FEATURE_NVML
+ if (command == 'e' || command == 'E') {
+ d_ptr->printHealth();
+ }
+# endif
+}
+
+
+void xmrig::CudaBackend::prepare(const Job &)
+{
+}
+
+
+void xmrig::CudaBackend::printHashrate(bool details)
+{
+ if (!details || !hashrate()) {
+ return;
+ }
+
+ char num[8 * 3] = { 0 };
+
+ Log::print(WHITE_BOLD_S "| CUDA # | AFFINITY | 10s H/s | 60s H/s | 15m H/s |");
+
+ size_t i = 0;
+ for (const auto &data : d_ptr->threads) {
+ Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") GREEN(" %s"),
+ i,
+ data.thread.affinity(),
+ Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3),
+ Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3),
+ Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3),
+ data.device.index(),
+ data.device.topology().toString().data(),
+ data.device.name().data()
+ );
+
+ i++;
+ }
+
+ Log::print(WHITE_BOLD_S "| - | - | %7s | %7s | %7s |",
+ Hashrate::format(hashrate()->calc(Hashrate::ShortInterval), num, sizeof num / 3),
+ Hashrate::format(hashrate()->calc(Hashrate::MediumInterval), num + 8, sizeof num / 3),
+ Hashrate::format(hashrate()->calc(Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3)
+ );
+}
+
+
+void xmrig::CudaBackend::setJob(const Job &job)
+{
+ const auto &cuda = d_ptr->controller->config()->cuda();
+ if (cuda.isEnabled()) {
+ d_ptr->init(cuda);
+ }
+
+ if (!isEnabled()) {
+ return stop();
+ }
+
+ auto threads = cuda.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->devices);
+ if (!d_ptr->threads.empty() && d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), d_ptr->threads.end(), threads.begin())) {
+ return;
+ }
+
+ d_ptr->algo = job.algorithm();
+ d_ptr->profileName = cuda.threads().profileName(job.algorithm());
+
+ if (d_ptr->profileName.isNull() || threads.empty()) {
+ LOG_WARN("%s " RED_BOLD("disabled") YELLOW(" (no suitable configuration found)"), tag);
+
+ return stop();
+ }
+
+ stop();
+
+ d_ptr->threads = std::move(threads);
+ d_ptr->start(job);
+}
+
+
+void xmrig::CudaBackend::start(IWorker *worker, bool ready)
+{
+ mutex.lock();
+
+ if (d_ptr->status.started(ready)) {
+ d_ptr->status.print();
+
+ CudaWorker::ready = true;
+ }
+
+ mutex.unlock();
+
+ if (ready) {
+ worker->start();
+ }
+}
+
+
+void xmrig::CudaBackend::stop()
+{
+ if (d_ptr->threads.empty()) {
+ return;
+ }
+
+ const uint64_t ts = Chrono::steadyMSecs();
+
+ d_ptr->workers.stop();
+ d_ptr->threads.clear();
+
+ LOG_INFO("%s" YELLOW(" stopped") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts);
+}
+
+
+void xmrig::CudaBackend::tick(uint64_t ticks)
+{
+ d_ptr->workers.tick(ticks);
+
+# ifdef XMRIG_FEATURE_NVML
+ auto seconds = d_ptr->controller->config()->healthPrintTime();
+ if (seconds && ticks && (ticks % (seconds * 2)) == 0) {
+ d_ptr->printHealth();
+ }
+# endif
+}
+
+
+#ifdef XMRIG_FEATURE_API
+rapidjson::Value xmrig::CudaBackend::toJSON(rapidjson::Document &doc) const
+{
+ using namespace rapidjson;
+ auto &allocator = doc.GetAllocator();
+
+ Value out(kObjectType);
+ out.AddMember("type", type().toJSON(), allocator);
+ out.AddMember("enabled", isEnabled(), allocator);
+ out.AddMember("algo", d_ptr->algo.toJSON(), allocator);
+ out.AddMember("profile", profileName().toJSON(), allocator);
+
+ Value versions(kObjectType);
+ versions.AddMember("cuda-runtime", Value(CudaLib::version(d_ptr->runtimeVersion).c_str(), allocator), allocator);
+ versions.AddMember("cuda-driver", Value(CudaLib::version(d_ptr->driverVersion).c_str(), allocator), allocator);
+ versions.AddMember("plugin", String(CudaLib::pluginVersion()).toJSON(doc), allocator);
+
+# ifdef XMRIG_FEATURE_NVML
+ if (NvmlLib::isReady()) {
+ versions.AddMember("nvml", StringRef(NvmlLib::version()), allocator);
+ versions.AddMember("driver", StringRef(NvmlLib::driverVersion()), allocator);
+ }
+# endif
+
+ out.AddMember("versions", versions, allocator);
+
+ if (d_ptr->threads.empty() || !hashrate()) {
+ return out;
+ }
+
+ out.AddMember("hashrate", hashrate()->toJSON(doc), allocator);
+
+ Value threads(kArrayType);
+
+ size_t i = 0;
+ for (const auto &data : d_ptr->threads) {
+ Value thread = data.thread.toJSON(doc);
+ thread.AddMember("hashrate", hashrate()->toJSON(i, doc), allocator);
+
+ data.device.toJSON(thread, doc);
+
+ i++;
+ threads.PushBack(thread, allocator);
+ }
+
+ out.AddMember("threads", threads, allocator);
+
+ return out;
+}
+
+
+void xmrig::CudaBackend::handleRequest(IApiRequest &)
+{
+}
+#endif
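
`CudaLaunchStatus` above counts how many CUDA workers have reported back (successfully or not), and the READY line is printed exactly once, by whichever worker reports last; `CudaBackend::start()` serializes those reports with a mutex. A compact self-contained sketch of that rendezvous pattern (names and output are illustrative):

```cpp
// Sketch of the "print READY once every worker has reported" pattern used by
// CudaLaunchStatus and CudaBackend::start(). Not XMRig's real classes.
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class LaunchStatus {
public:
    void start(size_t threads) { m_threads = threads; m_started = 0; m_errors = 0; }

    // Returns true when the last worker (ok or failed) has reported.
    bool started(bool ok) {
        ok ? ++m_started : ++m_errors;
        return (m_started + m_errors) == m_threads;
    }

    void print() const { std::printf("READY threads %zu/%zu\n", m_started, m_threads); }

private:
    size_t m_errors  = 0;
    size_t m_started = 0;
    size_t m_threads = 0;
};

int main() {
    LaunchStatus status;
    std::mutex mutex;
    status.start(4);

    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i) {
        workers.emplace_back([&, i] {
            const bool ok = (i != 3);                  // pretend one worker failed to init
            std::lock_guard<std::mutex> lock(mutex);   // serialize reports, as the backend does
            if (status.started(ok)) {
                status.print();                        // printed exactly once, by the last reporter
            }
        });
    }
    for (auto &w : workers) { w.join(); }
}
```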
diff --git a/src/backend/cuda/CudaBackend.h b/src/backend/cuda/CudaBackend.h
new file mode 100644
index 00000000..cf0bb621
--- /dev/null
+++ b/src/backend/cuda/CudaBackend.h
@@ -0,0 +1,80 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDABACKEND_H
+#define XMRIG_CUDABACKEND_H
+
+
+#include
+
+
+#include "backend/common/interfaces/IBackend.h"
+#include "base/tools/Object.h"
+
+
+namespace xmrig {
+
+
+class Controller;
+class CudaBackendPrivate;
+class Miner;
+
+
+class CudaBackend : public IBackend
+{
+public:
+ XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaBackend)
+
+ CudaBackend(Controller *controller);
+
+ ~CudaBackend() override;
+
+protected:
+ bool isEnabled() const override;
+ bool isEnabled(const Algorithm &algorithm) const override;
+ const Hashrate *hashrate() const override;
+ const String &profileName() const override;
+ const String &type() const override;
+ void execCommand(char command) override;
+ void prepare(const Job &nextJob) override;
+ void printHashrate(bool details) override;
+ void setJob(const Job &job) override;
+ void start(IWorker *worker, bool ready) override;
+ void stop() override;
+ void tick(uint64_t ticks) override;
+
+# ifdef XMRIG_FEATURE_API
+ rapidjson::Value toJSON(rapidjson::Document &doc) const override;
+ void handleRequest(IApiRequest &request) override;
+# endif
+
+private:
+ CudaBackendPrivate *d_ptr;
+};
+
+
+} /* namespace xmrig */
+
+
+#endif /* XMRIG_CUDABACKEND_H */
diff --git a/src/backend/cuda/CudaConfig.cpp b/src/backend/cuda/CudaConfig.cpp
new file mode 100644
index 00000000..49a28d11
--- /dev/null
+++ b/src/backend/cuda/CudaConfig.cpp
@@ -0,0 +1,186 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include "backend/cuda/CudaConfig.h"
+#include "backend/common/Tags.h"
+#include "backend/cuda/CudaConfig_gen.h"
+#include "backend/cuda/wrappers/CudaLib.h"
+#include "base/io/json/Json.h"
+#include "base/io/log/Log.h"
+#include "rapidjson/document.h"
+
+
+namespace xmrig {
+
+
+static bool generated = false;
+static const char *kDevicesHint = "devices-hint";
+static const char *kEnabled = "enabled";
+static const char *kLoader = "loader";
+
+#ifdef XMRIG_FEATURE_NVML
+static const char *kNvml = "nvml";
+#endif
+
+
+extern template class Threads<CudaThreads>;
+
+
+}
+
+
+rapidjson::Value xmrig::CudaConfig::toJSON(rapidjson::Document &doc) const
+{
+ using namespace rapidjson;
+ auto &allocator = doc.GetAllocator();
+
+ Value obj(kObjectType);
+
+ obj.AddMember(StringRef(kEnabled), m_enabled, allocator);
+ obj.AddMember(StringRef(kLoader), m_loader.toJSON(), allocator);
+
+# ifdef XMRIG_FEATURE_NVML
+ if (m_nvmlLoader.isNull()) {
+ obj.AddMember(StringRef(kNvml), m_nvml, allocator);
+ }
+ else {
+ obj.AddMember(StringRef(kNvml), m_nvmlLoader.toJSON(), allocator);
+ }
+# endif
+
+ m_threads.toJSON(obj, doc);
+
+ return obj;
+}
+
+
+std::vector<xmrig::CudaLaunchData> xmrig::CudaConfig::get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const
+{
+ std::vector<CudaLaunchData> out;
+ const auto &threads = m_threads.get(algorithm);
+
+ if (threads.isEmpty()) {
+ return out;
+ }
+
+ out.reserve(threads.count() * 2);
+
+ for (const auto &thread : threads.data()) {
+ if (thread.index() >= devices.size()) {
+ LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), cuda_tag(), thread.index());
+ continue;
+ }
+
+ out.emplace_back(miner, algorithm, thread, devices[thread.index()]);
+ }
+
+ return out;
+}
+
+
+void xmrig::CudaConfig::read(const rapidjson::Value &value)
+{
+ if (value.IsObject()) {
+ m_enabled = Json::getBool(value, kEnabled, m_enabled);
+ m_loader = Json::getString(value, kLoader);
+
+ setDevicesHint(Json::getString(value, kDevicesHint));
+
+# ifdef XMRIG_FEATURE_NVML
+ auto &nvml = Json::getValue(value, kNvml);
+ if (nvml.IsString()) {
+ m_nvmlLoader = nvml.GetString();
+ }
+ else if (nvml.IsBool()) {
+ m_nvml = nvml.GetBool();
+ }
+# endif
+
+ m_threads.read(value);
+
+ generate();
+ }
+ else if (value.IsBool()) {
+ m_enabled = value.GetBool();
+
+ generate();
+ }
+ else {
+ m_shouldSave = true;
+
+ generate();
+ }
+}
+
+
+void xmrig::CudaConfig::generate()
+{
+ if (generated) {
+ return;
+ }
+
+ if (!isEnabled() || m_threads.has("*")) {
+ return;
+ }
+
+ if (!CudaLib::init(loader())) {
+ return;
+ }
+
+ if (!CudaLib::runtimeVersion() || !CudaLib::driverVersion() || !CudaLib::deviceCount()) {
+ return;
+ }
+
+ const auto devices = CudaLib::devices(bfactor(), bsleep());
+ if (devices.empty()) {
+ return;
+ }
+
+ size_t count = 0;
+
+ count += xmrig::generate<Algorithm::CN>(m_threads, devices);
+ count += xmrig::generate<Algorithm::CN_LITE>(m_threads, devices);
+ count += xmrig::generate<Algorithm::CN_HEAVY>(m_threads, devices);
+ count += xmrig::generate<Algorithm::CN_PICO>(m_threads, devices);
+ count += xmrig::generate<Algorithm::RANDOM_X>(m_threads, devices);
+
+ generated = true;
+ m_shouldSave = count > 0;
+}
+
+
+void xmrig::CudaConfig::setDevicesHint(const char *devicesHint)
+{
+ if (devicesHint == nullptr) {
+ return;
+ }
+
+ const auto indexes = String(devicesHint).split(',');
+ m_devicesHint.reserve(indexes.size());
+
+ for (const auto &index : indexes) {
+ m_devicesHint.push_back(strtoul(index, nullptr, 10));
+ }
+}
diff --git a/src/backend/cuda/CudaConfig.h b/src/backend/cuda/CudaConfig.h
new file mode 100644
index 00000000..77be3dd4
--- /dev/null
+++ b/src/backend/cuda/CudaConfig.h
@@ -0,0 +1,86 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDACONFIG_H
+#define XMRIG_CUDACONFIG_H
+
+
+#include "backend/cuda/CudaLaunchData.h"
+#include "backend/common/Threads.h"
+#include "backend/cuda/CudaThreads.h"
+
+
+namespace xmrig {
+
+
+class CudaConfig
+{
+public:
+ CudaConfig() = default;
+
+ rapidjson::Value toJSON(rapidjson::Document &doc) const;
+ std::vector<CudaLaunchData> get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const;
+ void read(const rapidjson::Value &value);
+
+ inline bool isEnabled() const { return m_enabled; }
+ inline bool isShouldSave() const { return m_shouldSave; }
+ inline const String &loader() const { return m_loader; }
+ inline const Threads<CudaThreads> &threads() const { return m_threads; }
+ inline int32_t bfactor() const { return m_bfactor; }
+ inline int32_t bsleep() const { return m_bsleep; }
+
+# ifdef XMRIG_FEATURE_NVML
+ inline bool isNvmlEnabled() const { return m_nvml; }
+ inline const String &nvmlLoader() const { return m_nvmlLoader; }
+# endif
+
+private:
+ void generate();
+ void setDevicesHint(const char *devicesHint);
+
+ bool m_enabled = false;
+ bool m_shouldSave = false;
+ std::vector<uint32_t> m_devicesHint;
+ String m_loader;
+ Threads<CudaThreads> m_threads;
+
+# ifdef _WIN32
+ int32_t m_bfactor = 6;
+ int32_t m_bsleep = 25;
+# else
+ int32_t m_bfactor = 0;
+ int32_t m_bsleep = 0;
+# endif
+
+# ifdef XMRIG_FEATURE_NVML
+ bool m_nvml = true;
+ String m_nvmlLoader;
+# endif
+};
+
+
+} /* namespace xmrig */
+
+
+#endif /* XMRIG_CUDACONFIG_H */
diff --git a/src/backend/cuda/CudaConfig_gen.h b/src/backend/cuda/CudaConfig_gen.h
new file mode 100644
index 00000000..87e35dc4
--- /dev/null
+++ b/src/backend/cuda/CudaConfig_gen.h
@@ -0,0 +1,137 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDACONFIG_GEN_H
+#define XMRIG_CUDACONFIG_GEN_H
+
+
+#include "backend/common/Threads.h"
+#include "backend/cuda/CudaThreads.h"
+#include "backend/cuda/wrappers/CudaDevice.h"
+
+
+#include
+
+
+namespace xmrig {
+
+
+static inline size_t generate(const char *key, Threads<CudaThreads> &threads, const Algorithm &algorithm, const std::vector<CudaDevice> &devices)
+{
+ if (threads.isExist(algorithm) || threads.has(key)) {
+ return 0;
+ }
+
+ return threads.move(key, CudaThreads(devices, algorithm));
+}
+
+
+template<Algorithm::Family FAMILY>
+static inline size_t generate(Threads<CudaThreads> &, const std::vector<CudaDevice> &) { return 0; }
+
+
+template<>
+size_t inline generate<Algorithm::CN>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
+{
+ size_t count = 0;
+
+ count += generate("cn", threads, Algorithm::CN_1, devices);
+ count += generate("cn/2", threads, Algorithm::CN_2, devices);
+
+ if (!threads.isExist(Algorithm::CN_0)) {
+ threads.disable(Algorithm::CN_0);
+ count++;
+ }
+
+# ifdef XMRIG_ALGO_CN_GPU
+ count += generate("cn/gpu", threads, Algorithm::CN_GPU, devices);
+# endif
+
+ return count;
+}
+
+
+#ifdef XMRIG_ALGO_CN_LITE
+template<>
+size_t inline generate<Algorithm::CN_LITE>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
+{
+ size_t count = generate("cn-lite", threads, Algorithm::CN_LITE_1, devices);
+
+ if (!threads.isExist(Algorithm::CN_LITE_0)) {
+ threads.disable(Algorithm::CN_LITE_0);
+ ++count;
+ }
+
+ return count;
+}
+#endif
+
+
+#ifdef XMRIG_ALGO_CN_HEAVY
+template<>
+size_t inline generate<Algorithm::CN_HEAVY>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
+{
+ return generate("cn-heavy", threads, Algorithm::CN_HEAVY_0, devices);
+}
+#endif
+
+
+#ifdef XMRIG_ALGO_CN_PICO
+template<>
+size_t inline generate<Algorithm::CN_PICO>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
+{
+ return generate("cn-pico", threads, Algorithm::CN_PICO_0, devices);
+}
+#endif
+
+
+#ifdef XMRIG_ALGO_RANDOMX
+template<>
+size_t inline generate<Algorithm::RANDOM_X>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
+{
+ size_t count = 0;
+
+ auto rx = CudaThreads(devices, Algorithm::RX_0);
+ auto wow = CudaThreads(devices, Algorithm::RX_WOW);
+ auto arq = CudaThreads(devices, Algorithm::RX_ARQ);
+
+ if (!threads.isExist(Algorithm::RX_WOW) && wow != rx) {
+ count += threads.move("rx/wow", std::move(wow));
+ }
+
+ if (!threads.isExist(Algorithm::RX_ARQ) && arq != rx) {
+ count += threads.move("rx/arq", std::move(arq));
+ }
+
+ count += threads.move("rx", std::move(rx));
+
+ return count;
+}
+#endif
+
+
+} /* namespace xmrig */
+
+
+#endif /* XMRIG_CUDACONFIG_GEN_H */
diff --git a/src/backend/cuda/CudaLaunchData.cpp b/src/backend/cuda/CudaLaunchData.cpp
new file mode 100644
index 00000000..11cf70c8
--- /dev/null
+++ b/src/backend/cuda/CudaLaunchData.cpp
@@ -0,0 +1,51 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018 Lee Clagett
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include "backend/cuda/CudaLaunchData.h"
+#include "backend/common/Tags.h"
+
+
+xmrig::CudaLaunchData::CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device) :
+ algorithm(algorithm),
+ miner(miner),
+ device(device),
+ thread(thread)
+{
+}
+
+
+bool xmrig::CudaLaunchData::isEqual(const CudaLaunchData &other) const
+{
+ return (other.algorithm.family() == algorithm.family() &&
+ other.algorithm.l3() == algorithm.l3() &&
+ other.thread == thread);
+}
+
+
+const char *xmrig::CudaLaunchData::tag()
+{
+ return cuda_tag();
+}
diff --git a/src/backend/cuda/CudaLaunchData.h b/src/backend/cuda/CudaLaunchData.h
new file mode 100644
index 00000000..33173ffb
--- /dev/null
+++ b/src/backend/cuda/CudaLaunchData.h
@@ -0,0 +1,66 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018 Lee Clagett
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDALAUNCHDATA_H
+#define XMRIG_CUDALAUNCHDATA_H
+
+
+#include "backend/cuda/CudaThread.h"
+#include "crypto/common/Algorithm.h"
+#include "crypto/common/Nonce.h"
+
+
+namespace xmrig {
+
+
+class CudaDevice;
+class Miner;
+
+
+class CudaLaunchData
+{
+public:
+ CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device);
+
+ bool isEqual(const CudaLaunchData &other) const;
+
+ inline constexpr static Nonce::Backend backend() { return Nonce::CUDA; }
+
+ inline bool operator!=(const CudaLaunchData &other) const { return !isEqual(other); }
+ inline bool operator==(const CudaLaunchData &other) const { return isEqual(other); }
+
+ static const char *tag();
+
+ const Algorithm algorithm;
+ const Miner *miner;
+ const CudaDevice &device;
+ const CudaThread thread;
+};
+
+
+} // namespace xmrig
+
+
+#endif /* XMRIG_CUDALAUNCHDATA_H */
diff --git a/src/backend/cuda/CudaThread.cpp b/src/backend/cuda/CudaThread.cpp
new file mode 100644
index 00000000..3100b662
--- /dev/null
+++ b/src/backend/cuda/CudaThread.cpp
@@ -0,0 +1,99 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include "backend/cuda/CudaThread.h"
+#include "backend/cuda/wrappers/CudaLib.h"
+#include "base/io/json/Json.h"
+#include "rapidjson/document.h"
+
+
+#include <algorithm>
+
+
+namespace xmrig {
+
+static const char *kAffinity = "affinity";
+static const char *kBFactor = "bfactor";
+static const char *kBlocks = "blocks";
+static const char *kBSleep = "bsleep";
+static const char *kIndex = "index";
+static const char *kThreads = "threads";
+
+} // namespace xmrig
+
+
+xmrig::CudaThread::CudaThread(const rapidjson::Value &value)
+{
+ if (!value.IsObject()) {
+ return;
+ }
+
+ m_index = Json::getUint(value, kIndex);
+ m_threads = Json::getInt(value, kThreads);
+ m_blocks = Json::getInt(value, kBlocks);
+ m_bfactor = std::min(Json::getUint(value, kBFactor, m_bfactor), 12u);
+ m_bsleep = Json::getUint(value, kBSleep, m_bsleep);
+ m_affinity = Json::getUint64(value, kAffinity, m_affinity);
+}
+
+
+xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
+ m_blocks(CudaLib::deviceInt(ctx, CudaLib::DeviceBlocks)),
+ m_threads(CudaLib::deviceInt(ctx, CudaLib::DeviceThreads)),
+ m_index(index),
+ m_bfactor(CudaLib::deviceUint(ctx, CudaLib::DeviceBFactor)),
+ m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep))
+{
+
+}
+
+
+bool xmrig::CudaThread::isEqual(const CudaThread &other) const
+{
+ return m_blocks == other.m_blocks &&
+ m_threads == other.m_threads &&
+ m_affinity == other.m_affinity &&
+ m_index == other.m_index &&
+ m_bfactor == other.m_bfactor &&
+ m_bsleep == other.m_bsleep;
+}
+
+
+rapidjson::Value xmrig::CudaThread::toJSON(rapidjson::Document &doc) const
+{
+ using namespace rapidjson;
+ auto &allocator = doc.GetAllocator();
+
+ Value out(kObjectType);
+
+ out.AddMember(StringRef(kIndex), index(), allocator);
+ out.AddMember(StringRef(kThreads), threads(), allocator);
+ out.AddMember(StringRef(kBlocks), blocks(), allocator);
+ out.AddMember(StringRef(kBFactor), bfactor(), allocator);
+ out.AddMember(StringRef(kBSleep), bsleep(), allocator);
+ out.AddMember(StringRef(kAffinity), affinity(), allocator);
+
+ return out;
+}
diff --git a/src/backend/cuda/CudaThread.h b/src/backend/cuda/CudaThread.h
new file mode 100644
index 00000000..f6523d95
--- /dev/null
+++ b/src/backend/cuda/CudaThread.h
@@ -0,0 +1,79 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDATHREAD_H
+#define XMRIG_CUDATHREAD_H
+
+
+using nvid_ctx = struct nvid_ctx;
+
+
+#include "crypto/common/Algorithm.h"
+#include "rapidjson/fwd.h"
+
+
+namespace xmrig {
+
+
+class CudaThread
+{
+public:
+ CudaThread() = delete;
+ CudaThread(const rapidjson::Value &value);
+ CudaThread(uint32_t index, nvid_ctx *ctx);
+
+ inline bool isValid() const { return m_blocks > 0 && m_threads > 0; }
+ inline int32_t bfactor() const { return static_cast<int32_t>(m_bfactor); }
+ inline int32_t blocks() const { return m_blocks; }
+ inline int32_t bsleep() const { return static_cast<int32_t>(m_bsleep); }
+ inline int32_t threads() const { return m_threads; }
+ inline int64_t affinity() const { return m_affinity; }
+ inline uint32_t index() const { return m_index; }
+
+ inline bool operator!=(const CudaThread &other) const { return !isEqual(other); }
+ inline bool operator==(const CudaThread &other) const { return isEqual(other); }
+
+ bool isEqual(const CudaThread &other) const;
+ rapidjson::Value toJSON(rapidjson::Document &doc) const;
+
+private:
+ int32_t m_blocks = 0;
+ int32_t m_threads = 0;
+ int64_t m_affinity = -1;
+ uint32_t m_index = 0;
+
+# ifdef _WIN32
+ uint32_t m_bfactor = 6;
+ uint32_t m_bsleep = 25;
+# else
+ uint32_t m_bfactor = 0;
+ uint32_t m_bsleep = 0;
+# endif
+};
+
+
+} /* namespace xmrig */
+
+
+#endif /* XMRIG_CUDATHREAD_H */
diff --git a/src/backend/cuda/CudaThreads.cpp b/src/backend/cuda/CudaThreads.cpp
new file mode 100644
index 00000000..5ff4cb24
--- /dev/null
+++ b/src/backend/cuda/CudaThreads.cpp
@@ -0,0 +1,79 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include "backend/cuda/CudaThreads.h"
+#include "base/io/json/Json.h"
+#include "rapidjson/document.h"
+
+
+#include <algorithm>
+
+
+xmrig::CudaThreads::CudaThreads(const rapidjson::Value &value)
+{
+ if (value.IsArray()) {
+ for (auto &v : value.GetArray()) {
+ CudaThread thread(v);
+ if (thread.isValid()) {
+ add(std::move(thread));
+ }
+ }
+ }
+}
+
+
+xmrig::CudaThreads::CudaThreads(const std::vector<CudaDevice> &devices, const Algorithm &algorithm)
+{
+ for (const auto &device : devices) {
+ device.generate(algorithm, *this);
+ }
+}
+
+
+bool xmrig::CudaThreads::isEqual(const CudaThreads &other) const
+{
+ if (isEmpty() && other.isEmpty()) {
+ return true;
+ }
+
+ return count() == other.count() && std::equal(m_data.begin(), m_data.end(), other.m_data.begin());
+}
+
+
+rapidjson::Value xmrig::CudaThreads::toJSON(rapidjson::Document &doc) const
+{
+ using namespace rapidjson;
+ auto &allocator = doc.GetAllocator();
+
+ Value out(kArrayType);
+
+ out.SetArray();
+
+ for (const CudaThread &thread : m_data) {
+ out.PushBack(thread.toJSON(doc), allocator);
+ }
+
+ return out;
+}
diff --git a/src/backend/cuda/CudaThreads.h b/src/backend/cuda/CudaThreads.h
new file mode 100644
index 00000000..5f174d8e
--- /dev/null
+++ b/src/backend/cuda/CudaThreads.h
@@ -0,0 +1,66 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef XMRIG_CUDATHREADS_H
+#define XMRIG_CUDATHREADS_H
+
+
+#include <vector>
+
+
+#include "backend/cuda/CudaThread.h"
+#include "backend/cuda/wrappers/CudaDevice.h"
+
+
+namespace xmrig {
+
+
+class CudaThreads
+{
+public:
+ CudaThreads() = default;
+ CudaThreads(const rapidjson::Value &value);
+ CudaThreads(const std::vector<CudaDevice> &devices, const Algorithm &algorithm);
+
+ inline bool isEmpty() const { return m_data.empty(); }
+ inline const std::vector<CudaThread> &data() const { return m_data; }
+ inline size_t count() const { return m_data.size(); }
+ inline void add(CudaThread &&thread) { m_data.push_back(thread); }
+ inline void reserve(size_t capacity) { m_data.reserve(capacity); }
+
+ inline bool operator!=(const CudaThreads &other) const { return !isEqual(other); }
+ inline bool operator==(const CudaThreads &other) const { return isEqual(other); }
+
+ bool isEqual(const CudaThreads &other) const;
+ rapidjson::Value toJSON(rapidjson::Document &doc) const;
+
+private:
+ std::vector<CudaThread> m_data;
+};
+
+
+} /* namespace xmrig */
+
+
+#endif /* XMRIG_CUDATHREADS_H */
diff --git a/src/backend/cuda/CudaWorker.cpp b/src/backend/cuda/CudaWorker.cpp
new file mode 100644
index 00000000..b280e294
--- /dev/null
+++ b/src/backend/cuda/CudaWorker.cpp
@@ -0,0 +1,171 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018 Lee Clagett
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+
+#include "backend/cuda/CudaWorker.h"
+#include "backend/common/Tags.h"
+#include "backend/cuda/runners/CudaCnRunner.h"
+#include "base/io/log/Log.h"
+#include "base/tools/Chrono.h"
+#include "core/Miner.h"
+#include "crypto/common/Nonce.h"
+#include "net/JobResults.h"
+
+
+#ifdef XMRIG_ALGO_RANDOMX
+# include "backend/cuda/runners/CudaRxRunner.h"
+#endif
+
+
+#include <chrono>
+#include <thread>
+
+
+namespace xmrig {
+
+
+static constexpr uint32_t kReserveCount = 32768;
+std::atomic<bool> CudaWorker::ready;
+
+
+static inline bool isReady() { return !Nonce::isPaused() && CudaWorker::ready; }
+static inline uint32_t roundSize(uint32_t intensity) { return kReserveCount / intensity + 1; }
+
+
+} // namespace xmrig
+
+
+
+xmrig::CudaWorker::CudaWorker(size_t id, const CudaLaunchData &data) :
+ Worker(id, data.thread.affinity(), -1),
+ m_algorithm(data.algorithm),
+ m_miner(data.miner)
+{
+ switch (m_algorithm.family()) {
+ case Algorithm::RANDOM_X:
+# ifdef XMRIG_ALGO_RANDOMX
+ m_runner = new CudaRxRunner(id, data);
+# endif
+ break;
+
+ case Algorithm::ARGON2:
+ break;
+
+ default:
+ m_runner = new CudaCnRunner(id, data);
+ break;
+ }
+
+ if (!m_runner || !m_runner->init()) {
+ return;
+ }
+}
+
+
+xmrig::CudaWorker::~CudaWorker()
+{
+ delete m_runner;
+}
+
+
+bool xmrig::CudaWorker::selfTest()
+{
+ return m_runner != nullptr;
+}
+
+
+size_t xmrig::CudaWorker::intensity() const
+{
+ return m_runner ? m_runner->intensity() : 0;
+}
+
+
+void xmrig::CudaWorker::start()
+{
+ while (Nonce::sequence(Nonce::CUDA) > 0) {
+ if (!isReady()) {
+ do {
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+ }
+ while (!isReady() && Nonce::sequence(Nonce::CUDA) > 0);
+
+ if (Nonce::sequence(Nonce::CUDA) == 0) {
+ break;
+ }
+
+ if (!consumeJob()) {
+ return;
+ }
+ }
+
+ while (!Nonce::isOutdated(Nonce::CUDA, m_job.sequence())) {
+ uint32_t foundNonce[10] = { 0 };
+ uint32_t foundCount = 0;
+
+ if (!m_runner->run(*m_job.nonce(), &foundCount, foundNonce)) {
+ return;
+ }
+
+ if (foundCount) {
+ JobResults::submit(m_job.currentJob(), foundNonce, foundCount);
+ }
+
+ const size_t batch_size = intensity();
+ m_job.nextRound(roundSize(batch_size), batch_size);
+
+ storeStats();
+ std::this_thread::yield();
+ }
+
+ if (!consumeJob()) {
+ return;
+ }
+ }
+}
+
+
+bool xmrig::CudaWorker::consumeJob()
+{
+ if (Nonce::sequence(Nonce::CUDA) == 0) {
+ return false;
+ }
+
+ const size_t batch_size = intensity();
+ m_job.add(m_miner->job(), roundSize(batch_size) * batch_size, Nonce::CUDA);
+
+ return m_runner->set(m_job.currentJob(), m_job.blob());
+}
+
+
+void xmrig::CudaWorker::storeStats()
+{
+ if (!isReady()) {
+ return;
+ }
+
+ m_count += intensity();
+
+ Worker::storeStats();
+}
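
The worker derives its nonce reservation from the configured intensity (`threads * blocks`): `roundSize(intensity) * intensity` reserves a whole number of intensity-sized batches, slightly more than `kReserveCount`. A tiny standalone example of that arithmetic (the intensity value is made up):

```cpp
// Worked example of the nonce-reservation arithmetic in CudaWorker; the
// intensity used here is illustrative only.
#include <cstdint>
#include <cstdio>

static constexpr uint32_t kReserveCount = 32768;

static uint32_t roundSize(uint32_t intensity) { return kReserveCount / intensity + 1; }

int main() {
    const uint32_t intensity = 40 * 64;               // hypothetical threads * blocks = 2560
    const uint32_t rounds    = roundSize(intensity);  // 32768 / 2560 + 1 = 13
    std::printf("rounds=%u nonces reserved per job=%u\n", rounds, rounds * intensity); // 13, 33280
}
```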
diff --git a/src/backend/cuda/CudaWorker.h b/src/backend/cuda/CudaWorker.h
new file mode 100644
index 00000000..f717ca50
--- /dev/null
+++ b/src/backend/cuda/CudaWorker.h
@@ -0,0 +1,73 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018 Lee Clagett
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XMRIG_CUDAWORKER_H
+#define XMRIG_CUDAWORKER_H
+
+
+#include "backend/common/Worker.h"
+#include "backend/common/WorkerJob.h"
+#include "backend/cuda/CudaLaunchData.h"
+#include "base/tools/Object.h"
+#include "net/JobResult.h"
+
+
+namespace xmrig {
+
+
+class ICudaRunner;
+
+
+class CudaWorker : public Worker
+{
+public:
+ XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaWorker)
+
+ CudaWorker(size_t id, const CudaLaunchData &data);
+
+ ~CudaWorker() override;
+
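+ // Global readiness flag checked by all CUDA workers before hashing.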
+ static std::atomic<bool> ready;
+
+protected:
+ bool selfTest() override;
+ size_t intensity() const override;
+ void start() override;
+
+private:
+ bool consumeJob();
+ void storeStats();
+
+ const Algorithm m_algorithm;
+ const Miner *m_miner;
+ ICudaRunner *m_runner = nullptr;
+ WorkerJob<1> m_job;
+};
+
+
+} // namespace xmrig
+
+
+#endif /* XMRIG_CUDAWORKER_H */
diff --git a/src/backend/cuda/cuda.cmake b/src/backend/cuda/cuda.cmake
new file mode 100644
index 00000000..58ba3f5a
--- /dev/null
+++ b/src/backend/cuda/cuda.cmake
@@ -0,0 +1,53 @@
+if (WITH_CUDA)
+ add_definitions(/DXMRIG_FEATURE_CUDA)
+
+ set(HEADERS_BACKEND_CUDA
+ src/backend/cuda/CudaBackend.h
+ src/backend/cuda/CudaConfig_gen.h
+ src/backend/cuda/CudaConfig.h
+ src/backend/cuda/CudaLaunchData.h
+ src/backend/cuda/CudaThread.h
+ src/backend/cuda/CudaThreads.h
+ src/backend/cuda/CudaWorker.h
+ src/backend/cuda/interfaces/ICudaRunner.h
+ src/backend/cuda/runners/CudaBaseRunner.h
+ src/backend/cuda/runners/CudaCnRunner.h
+ src/backend/cuda/runners/CudaRxRunner.h
+ src/backend/cuda/wrappers/CudaDevice.h
+ src/backend/cuda/wrappers/CudaLib.h
+ )
+
+ set(SOURCES_BACKEND_CUDA
+ src/backend/cuda/CudaBackend.cpp
+ src/backend/cuda/CudaConfig.cpp
+ src/backend/cuda/CudaLaunchData.cpp
+ src/backend/cuda/CudaThread.cpp
+ src/backend/cuda/CudaThreads.cpp
+ src/backend/cuda/CudaWorker.cpp
+ src/backend/cuda/runners/CudaBaseRunner.cpp
+ src/backend/cuda/runners/CudaCnRunner.cpp
+ src/backend/cuda/runners/CudaRxRunner.cpp
+ src/backend/cuda/wrappers/CudaDevice.cpp
+ src/backend/cuda/wrappers/CudaLib.cpp
+ )
+
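+ # NVML health monitoring is only built together with the CUDA backend and is
+ # not supported on Apple platforms.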
+ if (WITH_NVML AND NOT APPLE)
+ add_definitions(/DXMRIG_FEATURE_NVML)
+
+ list(APPEND HEADERS_BACKEND_CUDA
+ src/backend/cuda/wrappers/nvml_lite.h
+ src/backend/cuda/wrappers/NvmlHealth.h
+ src/backend/cuda/wrappers/NvmlLib.h
+ )
+
+ list(APPEND SOURCES_BACKEND_CUDA src/backend/cuda/wrappers/NvmlLib.cpp)
+ else()
+ remove_definitions(/DXMRIG_FEATURE_NVML)
+ endif()
+else()
+ remove_definitions(/DXMRIG_FEATURE_CUDA)
+ remove_definitions(/DXMRIG_FEATURE_NVML)
+
+ set(HEADERS_BACKEND_CUDA "")
+ set(SOURCES_BACKEND_CUDA "")
+endif()
diff --git a/src/backend/cuda/interfaces/ICudaRunner.h b/src/backend/cuda/interfaces/ICudaRunner.h
new file mode 100644
index 00000000..b5772c89
--- /dev/null
+++ b/src/backend/cuda/interfaces/ICudaRunner.h
@@ -0,0 +1,71 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XMRIG_ICUDARUNNER_H
+#define XMRIG_ICUDARUNNER_H
+
+
+#include "base/tools/Object.h"
+
+
+#include <cstddef>
+#include <cstdint>
+
+
+namespace xmrig {
+
+
+class Job;
+
+
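+// Minimal interface implemented by all CUDA runners: report kernel intensity,
+// initialize device state, upload a job blob and run one batch of hashes.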
+class ICudaRunner
+{
+public:
+ XMRIG_DISABLE_COPY_MOVE(ICudaRunner)
+
+ ICudaRunner() = default;
+ virtual ~ICudaRunner() = default;
+
+ virtual size_t intensity() const = 0;
+ virtual bool init() = 0;
+ virtual bool run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce) = 0;
+ virtual bool set(const Job &job, uint8_t *blob) = 0;
+
+};
+
+
+} /* namespace xmrig */
+
+
+#endif // XMRIG_ICUDARUNNER_H
diff --git a/src/backend/cuda/runners/CudaBaseRunner.cpp b/src/backend/cuda/runners/CudaBaseRunner.cpp
new file mode 100644
index 00000000..032d50c9
--- /dev/null
+++ b/src/backend/cuda/runners/CudaBaseRunner.cpp
@@ -0,0 +1,83 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "backend/cuda/runners/CudaBaseRunner.h"
+#include "backend/cuda/wrappers/CudaLib.h"
+#include "backend/cuda/CudaLaunchData.h"
+#include "backend/common/Tags.h"
+#include "base/io/log/Log.h"
+#include "base/net/stratum/Job.h"
+
+
+xmrig::CudaBaseRunner::CudaBaseRunner(size_t id, const CudaLaunchData &data) :
+ m_data(data),
+ m_threadId(id)
+{
+}
+
+
+xmrig::CudaBaseRunner::~CudaBaseRunner()
+{
+ CudaLib::release(m_ctx);
+}
+
+
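+// Allocate a plugin context for the configured device, pass the launch
+// parameters (blocks/threads) to the plugin and initialize the device.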
+bool xmrig::CudaBaseRunner::init()
+{
+ m_ctx = CudaLib::alloc(m_data.thread.index(), m_data.thread.bfactor(), m_data.thread.bsleep());
+ if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm) != 0) {
+ return false;
+ }
+
+ return callWrapper(CudaLib::deviceInit(m_ctx));
+}
+
+
+bool xmrig::CudaBaseRunner::set(const Job &job, uint8_t *blob)
+{
+ m_height = job.height();
+ m_target = job.target();
+
+ return callWrapper(CudaLib::setJob(m_ctx, blob, job.size(), job.algorithm()));
+}
+
+
+size_t xmrig::CudaBaseRunner::intensity() const
+{
+ return m_data.thread.threads() * m_data.thread.blocks();
+}
+
+
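+// Common error path for plugin calls: on failure, log the last error reported
+// by the CUDA plugin for this context and pass the result through unchanged.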
+bool xmrig::CudaBaseRunner::callWrapper(bool result) const
+{
+ if (!result) {
+ const char *error = CudaLib::lastError(m_ctx);
+ if (error) {
+ LOG_ERR("%s" RED_S " thread " RED_BOLD("#%zu") RED_S " failed with error " RED_BOLD("%s"), cuda_tag(), m_threadId, error);
+ }
+ }
+
+ return result;
+}
diff --git a/src/backend/cuda/runners/CudaBaseRunner.h b/src/backend/cuda/runners/CudaBaseRunner.h
new file mode 100644
index 00000000..c0e1aef0
--- /dev/null
+++ b/src/backend/cuda/runners/CudaBaseRunner.h
@@ -0,0 +1,68 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XMRIG_CUDABASERUNNER_H
+#define XMRIG_CUDABASERUNNER_H
+
+
+#include "backend/cuda/interfaces/ICudaRunner.h"
+
+
+using nvid_ctx = struct nvid_ctx;
+
+
+namespace xmrig {
+
+
+class CudaLaunchData;
+
+
+class CudaBaseRunner : public ICudaRunner
+{
+public:
+ XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaBaseRunner)
+
+ CudaBaseRunner(size_t id, const CudaLaunchData &data);
+ ~CudaBaseRunner() override;
+
+protected:
+ bool init() override;
+ bool set(const Job &job, uint8_t *blob) override;
+ size_t intensity() const override;
+
+protected:
+ bool callWrapper(bool result) const;
+
+ const CudaLaunchData &m_data;
+ const size_t m_threadId;
+ nvid_ctx *m_ctx = nullptr;
+ uint64_t m_height = 0;
+ uint64_t m_target = 0;
+};
+
+
+} /* namespace xmrig */
+
+
+#endif // XMRIG_CUDABASERUNNER_H
diff --git a/src/backend/cuda/runners/CudaCnRunner.cpp b/src/backend/cuda/runners/CudaCnRunner.cpp
new file mode 100644
index 00000000..4d79efe3
--- /dev/null
+++ b/src/backend/cuda/runners/CudaCnRunner.cpp
@@ -0,0 +1,38 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "backend/cuda/runners/CudaCnRunner.h"
+#include "backend/cuda/wrappers/CudaLib.h"
+
+
+xmrig::CudaCnRunner::CudaCnRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data)
+{
+}
+
+
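+// Run one CryptoNight batch on the device; the plugin reports the count and
+// values of any nonces that meet the current target.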
+bool xmrig::CudaCnRunner::run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce)
+{
+ return callWrapper(CudaLib::cnHash(m_ctx, startNonce, m_height, m_target, rescount, resnonce));
+}
diff --git a/src/backend/cuda/runners/CudaCnRunner.h b/src/backend/cuda/runners/CudaCnRunner.h
new file mode 100644
index 00000000..e563435b
--- /dev/null
+++ b/src/backend/cuda/runners/CudaCnRunner.h
@@ -0,0 +1,48 @@
+/* XMRig
+ * Copyright 2010 Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014 Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016 Jay D Dee
+ * Copyright 2017-2018 XMR-Stak ,
+ * Copyright 2018-2019 SChernykh
+ * Copyright 2016-2019 XMRig ,