From 2c9b034c0808e98868e45322fb8495aacf35b73a Mon Sep 17 00:00:00 2001
From: SChernykh
Date: Tue, 5 Nov 2019 22:24:48 +0100
Subject: [PATCH 1/4] RandomX: added support for dataset on host

---
 src/backend/cuda/CudaThread.cpp             | 15 +++++++++++++--
 src/backend/cuda/CudaThread.h               |  2 ++
 src/backend/cuda/runners/CudaBaseRunner.cpp |  2 +-
 src/backend/cuda/runners/CudaRxRunner.cpp   |  4 +++-
 src/backend/cuda/runners/CudaRxRunner.h     |  1 +
 src/backend/cuda/wrappers/CudaDevice.cpp    |  4 ++--
 src/backend/cuda/wrappers/CudaLib.cpp       | 12 ++++++------
 src/backend/cuda/wrappers/CudaLib.h         |  7 ++++---
 8 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/src/backend/cuda/CudaThread.cpp b/src/backend/cuda/CudaThread.cpp
index 3100b662..36b44936 100644
--- a/src/backend/cuda/CudaThread.cpp
+++ b/src/backend/cuda/CudaThread.cpp
@@ -40,6 +40,7 @@ static const char *kBlocks = "blocks";
 static const char *kBSleep = "bsleep";
 static const char *kIndex = "index";
 static const char *kThreads = "threads";
+static const char *kDatasetHost = "dataset_host";
 
 
 } // namespace xmrig
@@ -56,6 +57,13 @@ xmrig::CudaThread::CudaThread(const rapidjson::Value &value)
     m_bfactor = std::min(Json::getUint(value, kBFactor, m_bfactor), 12u);
     m_bsleep = Json::getUint(value, kBSleep, m_bsleep);
     m_affinity = Json::getUint64(value, kAffinity, m_affinity);
+
+    if (Json::getValue(value, kDatasetHost).IsInt()) {
+        m_dataset_host = Json::getInt(value, kDatasetHost) != 0;
+    }
+    else {
+        m_dataset_host = Json::getBool(value, kDatasetHost);
+    }
 }
 
 
@@ -64,7 +72,8 @@ xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
     m_threads(CudaLib::deviceInt(ctx, CudaLib::DeviceThreads)),
     m_index(index),
     m_bfactor(CudaLib::deviceUint(ctx, CudaLib::DeviceBFactor)),
-    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep))
+    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep)),
+    m_dataset_host(CudaLib::deviceInt(ctx, CudaLib::DeviceDatasetHost) != 0)
 {
 }
 
@@ -77,7 +86,8 @@ bool xmrig::CudaThread::isEqual(const CudaThread &other) const
            m_affinity == other.m_affinity &&
            m_index == other.m_index &&
            m_bfactor == other.m_bfactor &&
-           m_bsleep == other.m_bsleep;
+           m_bsleep == other.m_bsleep &&
+           m_dataset_host == other.m_dataset_host;
 }
 
 
@@ -94,6 +104,7 @@ rapidjson::Value xmrig::CudaThread::toJSON(rapidjson::Document &doc) const
     out.AddMember(StringRef(kBFactor), bfactor(), allocator);
     out.AddMember(StringRef(kBSleep), bsleep(), allocator);
     out.AddMember(StringRef(kAffinity), affinity(), allocator);
+    out.AddMember(StringRef(kDatasetHost), dataset_host(), allocator);
 
     return out;
 }
diff --git a/src/backend/cuda/CudaThread.h b/src/backend/cuda/CudaThread.h
index f6523d95..afba36bd 100644
--- a/src/backend/cuda/CudaThread.h
+++ b/src/backend/cuda/CudaThread.h
@@ -50,6 +50,7 @@ public:
     inline int32_t threads() const { return m_threads; }
     inline int64_t affinity() const { return m_affinity; }
     inline uint32_t index() const { return m_index; }
+    inline uint32_t dataset_host() const { return m_dataset_host; }
 
     inline bool operator!=(const CudaThread &other) const { return !isEqual(other); }
     inline bool operator==(const CudaThread &other) const { return isEqual(other); }
@@ -62,6 +63,7 @@ private:
     int32_t m_threads = 0;
     int64_t m_affinity = -1;
     uint32_t m_index = 0;
+    bool m_dataset_host = false;
 
 # ifdef _WIN32
     uint32_t m_bfactor = 6;
diff --git a/src/backend/cuda/runners/CudaBaseRunner.cpp b/src/backend/cuda/runners/CudaBaseRunner.cpp
index 032d50c9..62534cb4 100644
--- a/src/backend/cuda/runners/CudaBaseRunner.cpp
+++ b/src/backend/cuda/runners/CudaBaseRunner.cpp
@@ -47,7 +47,7 @@ xmrig::CudaBaseRunner::~CudaBaseRunner()
 bool xmrig::CudaBaseRunner::init()
 {
     m_ctx = CudaLib::alloc(m_data.thread.index(), m_data.thread.bfactor(), m_data.thread.bsleep());
-    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm, m_data.thread.dataset_host() ? 1 : 0) != 0) {
         return false;
     }
diff --git a/src/backend/cuda/runners/CudaRxRunner.cpp b/src/backend/cuda/runners/CudaRxRunner.cpp
index 83bf21ff..066e87ef 100644
--- a/src/backend/cuda/runners/CudaRxRunner.cpp
+++ b/src/backend/cuda/runners/CudaRxRunner.cpp
@@ -42,6 +42,8 @@ xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : Cu
     }
 
     m_intensity -= m_intensity % 32;
+
+    m_dataset_host = m_data.thread.dataset_host();
 }
 
 
@@ -59,7 +61,7 @@ bool xmrig::CudaRxRunner::set(const Job &job, uint8_t *blob)
     }
 
     auto dataset = Rx::dataset(job, 0);
-    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_intensity));
+    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_dataset_host, m_intensity));
 
     return m_ready;
 }
diff --git a/src/backend/cuda/runners/CudaRxRunner.h b/src/backend/cuda/runners/CudaRxRunner.h
index 06ed1b90..af882fb8 100644
--- a/src/backend/cuda/runners/CudaRxRunner.h
+++ b/src/backend/cuda/runners/CudaRxRunner.h
@@ -46,6 +46,7 @@ protected:
 private:
     bool m_ready = false;
     size_t m_intensity = 0;
+    bool m_dataset_host = false;
 };
diff --git a/src/backend/cuda/wrappers/CudaDevice.cpp b/src/backend/cuda/wrappers/CudaDevice.cpp
index 740a063c..7e27b058 100644
--- a/src/backend/cuda/wrappers/CudaDevice.cpp
+++ b/src/backend/cuda/wrappers/CudaDevice.cpp
@@ -38,7 +38,7 @@ xmrig::CudaDevice::CudaDevice(uint32_t index, int32_t bfactor, int32_t bsleep) :
     m_index(index)
 {
     auto ctx = CudaLib::alloc(index, bfactor, bsleep);
-    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID) != 0) {
+    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID, 0) != 0) {
         CudaLib::release(ctx);
 
         return;
@@ -104,7 +104,7 @@ uint32_t xmrig::CudaDevice::smx() const
 
 void xmrig::CudaDevice::generate(const Algorithm &algorithm, CudaThreads &threads) const
 {
-    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm, 0) != 0) {
         return;
     }
diff --git a/src/backend/cuda/wrappers/CudaLib.cpp b/src/backend/cuda/wrappers/CudaLib.cpp
index 5f3018d3..a659dddb 100644
--- a/src/backend/cuda/wrappers/CudaLib.cpp
+++ b/src/backend/cuda/wrappers/CudaLib.cpp
@@ -68,7 +68,7 @@ static const char *kVersion = "version";
 using alloc_t = nvid_ctx * (*)(uint32_t, int32_t, int32_t);
 using cnHash_t = bool (*)(nvid_ctx *, uint32_t, uint64_t, uint64_t, uint32_t *, uint32_t *);
 using deviceCount_t = uint32_t (*)();
-using deviceInfo_t = int32_t (*)(nvid_ctx *, int32_t, int32_t, int32_t);
+using deviceInfo_t = int32_t (*)(nvid_ctx *, int32_t, int32_t, int32_t, int32_t);
 using deviceInit_t = bool (*)(nvid_ctx *);
 using deviceInt_t = int32_t (*)(nvid_ctx *, CudaLib::DeviceProperty);
 using deviceName_t = const char * (*)(nvid_ctx *);
@@ -79,7 +79,7 @@ using lastError_t = const char * (*)(nvid_
 using pluginVersion_t = const char * (*)();
 using release_t = void (*)(nvid_ctx *);
 using rxHash_t = bool (*)(nvid_ctx *, uint32_t, uint64_t, uint32_t *, uint32_t *);
-using rxPrepare_t = bool (*)(nvid_ctx *, const void *, size_t, uint32_t);
+using rxPrepare_t = bool (*)(nvid_ctx *, const void *, size_t, bool, uint32_t);
 using setJob_t = bool (*)(nvid_ctx *, const void *, size_t, int32_t);
 using version_t = uint32_t (*)(Version);
@@ -156,9 +156,9 @@ bool xmrig::CudaLib::rxHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t target,
 }
 
 
-bool xmrig::CudaLib::rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, uint32_t batchSize) noexcept
+bool xmrig::CudaLib::rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, bool dataset_host, uint32_t batchSize) noexcept
 {
-    return pRxPrepare(ctx, dataset, datasetSize, batchSize);
+    return pRxPrepare(ctx, dataset, datasetSize, dataset_host, batchSize);
 }
 
 
@@ -186,9 +186,9 @@ const char *xmrig::CudaLib::pluginVersion() noexcept
 }
 
 
-int xmrig::CudaLib::deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm) noexcept
+int xmrig::CudaLib::deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host) noexcept
 {
-    return pDeviceInfo(ctx, blocks, threads, algorithm);
+    return pDeviceInfo(ctx, blocks, threads, algorithm, dataset_host);
 }
 
 
diff --git a/src/backend/cuda/wrappers/CudaLib.h b/src/backend/cuda/wrappers/CudaLib.h
index 7fb1c1eb..c4ce14fc 100644
--- a/src/backend/cuda/wrappers/CudaLib.h
+++ b/src/backend/cuda/wrappers/CudaLib.h
@@ -61,7 +61,8 @@ public:
         DeviceMemoryFree,
         DevicePciBusID,
         DevicePciDeviceID,
-        DevicePciDomainID
+        DevicePciDomainID,
+        DeviceDatasetHost,
     };
 
     static bool init(const char *fileName = nullptr);
@@ -74,12 +75,12 @@ public:
     static bool cnHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t height, uint64_t target, uint32_t *rescount, uint32_t *resnonce);
     static bool deviceInit(nvid_ctx *ctx) noexcept;
     static bool rxHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t target, uint32_t *rescount, uint32_t *resnonce) noexcept;
-    static bool rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, uint32_t batchSize) noexcept;
+    static bool rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, bool dataset_host, uint32_t batchSize) noexcept;
     static bool setJob(nvid_ctx *ctx, const void *data, size_t size, const Algorithm &algorithm) noexcept;
     static const char *deviceName(nvid_ctx *ctx) noexcept;
     static const char *lastError(nvid_ctx *ctx) noexcept;
     static const char *pluginVersion() noexcept;
-    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm) noexcept;
+    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host) noexcept;
     static int32_t deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept;
     static nvid_ctx *alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept;
     static std::string version(uint32_t version);

From 0013e610d528cf92be7cc8ad4cf99ea70c59e456 Mon Sep 17 00:00:00 2001
From: SChernykh
Date: Tue, 5 Nov 2019 23:27:15 +0100
Subject: [PATCH 2/4] Updated required API version

---
 src/backend/cuda/wrappers/CudaLib.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/backend/cuda/wrappers/CudaLib.cpp b/src/backend/cuda/wrappers/CudaLib.cpp
index a659dddb..fe564aae 100644
--- a/src/backend/cuda/wrappers/CudaLib.cpp
+++ b/src/backend/cuda/wrappers/CudaLib.cpp
@@ -273,7 +273,7 @@ bool xmrig::CudaLib::load()
         return false;
     }
 
-    if (pVersion(ApiVersion) != 1u) {
+    if (pVersion(ApiVersion) != 2u) {
         return false;
     }
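Note: the two patches above wire `dataset_host` through as a plain boolean and bump the required plugin API version from 1 to 2, since both exported entry points (`deviceInfo`, `rxPrepare`) gain a parameter. The patch below reworks the option into a tri-state `int32_t`: `-1` means "not set in config" (and becomes the default argument of `CudaLib::deviceInfo()`), `0` keeps the RandomX dataset in device memory, and a positive value keeps it in host memory. Both JSON spellings stay accepted per thread, e.g. `"dataset_host": true` or `"dataset_host": 1` (illustrative values, not defaults).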
From c23514512188da6aaec177deb85758d86e752194 Mon Sep 17 00:00:00 2001
From: XMRig
Date: Wed, 6 Nov 2019 19:00:50 +0700
Subject: [PATCH 3/4] Make option "dataset_host" available only for RandomX.

---
 src/backend/cuda/CudaThread.cpp             | 27 ++++++++++++---------
 src/backend/cuda/CudaThread.h               | 12 ++++-----
 src/backend/cuda/runners/CudaBaseRunner.cpp |  2 +-
 src/backend/cuda/runners/CudaRxRunner.cpp   |  7 +++---
 src/backend/cuda/runners/CudaRxRunner.h     |  6 ++---
 src/backend/cuda/wrappers/CudaDevice.cpp    |  4 +--
 src/backend/cuda/wrappers/CudaLib.h         |  2 +-
 7 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/src/backend/cuda/CudaThread.cpp b/src/backend/cuda/CudaThread.cpp
index 36b44936..d98989ab 100644
--- a/src/backend/cuda/CudaThread.cpp
+++ b/src/backend/cuda/CudaThread.cpp
@@ -59,21 +59,21 @@ xmrig::CudaThread::CudaThread(const rapidjson::Value &value)
     m_affinity = Json::getUint64(value, kAffinity, m_affinity);
 
     if (Json::getValue(value, kDatasetHost).IsInt()) {
-        m_dataset_host = Json::getInt(value, kDatasetHost) != 0;
+        m_datasetHost = Json::getInt(value, kDatasetHost, m_datasetHost) != 0;
     }
     else {
-        m_dataset_host = Json::getBool(value, kDatasetHost);
+        m_datasetHost = Json::getBool(value, kDatasetHost);
     }
 }
 
 
 xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
     m_blocks(CudaLib::deviceInt(ctx, CudaLib::DeviceBlocks)),
+    m_datasetHost(CudaLib::deviceInt(ctx, CudaLib::DeviceDatasetHost)),
     m_threads(CudaLib::deviceInt(ctx, CudaLib::DeviceThreads)),
     m_index(index),
     m_bfactor(CudaLib::deviceUint(ctx, CudaLib::DeviceBFactor)),
-    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep)),
-    m_dataset_host(CudaLib::deviceInt(ctx, CudaLib::DeviceDatasetHost) != 0)
+    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep))
 {
 }
 
@@ -81,13 +81,13 @@ xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
 
 bool xmrig::CudaThread::isEqual(const CudaThread &other) const
 {
-    return m_blocks == other.m_blocks &&
-           m_threads == other.m_threads &&
-           m_affinity == other.m_affinity &&
-           m_index == other.m_index &&
-           m_bfactor == other.m_bfactor &&
-           m_bsleep == other.m_bsleep &&
-           m_dataset_host == other.m_dataset_host;
+    return m_blocks      == other.m_blocks &&
+           m_threads     == other.m_threads &&
+           m_affinity    == other.m_affinity &&
+           m_index       == other.m_index &&
+           m_bfactor     == other.m_bfactor &&
+           m_bsleep      == other.m_bsleep &&
+           m_datasetHost == other.m_datasetHost;
 }
 
@@ -104,7 +104,10 @@ rapidjson::Value xmrig::CudaThread::toJSON(rapidjson::Document &doc) const
     out.AddMember(StringRef(kBFactor), bfactor(), allocator);
     out.AddMember(StringRef(kBSleep), bsleep(), allocator);
     out.AddMember(StringRef(kAffinity), affinity(), allocator);
-    out.AddMember(StringRef(kDatasetHost), dataset_host(), allocator);
+
+    if (m_datasetHost >= 0) {
+        out.AddMember(StringRef(kDatasetHost), m_datasetHost > 0, allocator);
+    }
 
     return out;
 }
diff --git a/src/backend/cuda/CudaThread.h b/src/backend/cuda/CudaThread.h
index afba36bd..8943ac22 100644
--- a/src/backend/cuda/CudaThread.h
+++ b/src/backend/cuda/CudaThread.h
@@ -47,10 +47,10 @@ public:
     inline int32_t bfactor() const { return static_cast<int32_t>(m_bfactor); }
     inline int32_t blocks() const { return m_blocks; }
     inline int32_t bsleep() const { return static_cast<int32_t>(m_bsleep); }
+    inline int32_t datasetHost() const { return m_datasetHost; }
     inline int32_t threads() const { return m_threads; }
     inline int64_t affinity() const { return m_affinity; }
     inline uint32_t index() const { return m_index; }
-    inline uint32_t dataset_host() const { return m_dataset_host; }
 
     inline bool operator!=(const CudaThread &other) const { return !isEqual(other); }
     inline bool operator==(const CudaThread &other) const { return isEqual(other); }
@@ -59,11 +59,11 @@ public:
     rapidjson::Value toJSON(rapidjson::Document &doc) const;
 
 private:
-    int32_t m_blocks = 0;
-    int32_t m_threads = 0;
-    int64_t m_affinity = -1;
-    uint32_t m_index = 0;
-    bool m_dataset_host = false;
+    int32_t m_blocks      = 0;
+    int32_t m_datasetHost = -1;
+    int32_t m_threads     = 0;
+    int64_t m_affinity    = -1;
+    uint32_t m_index      = 0;
 
 # ifdef _WIN32
     uint32_t m_bfactor = 6;
diff --git a/src/backend/cuda/runners/CudaBaseRunner.cpp b/src/backend/cuda/runners/CudaBaseRunner.cpp
index 62534cb4..757f91de 100644
--- a/src/backend/cuda/runners/CudaBaseRunner.cpp
+++ b/src/backend/cuda/runners/CudaBaseRunner.cpp
@@ -47,7 +47,7 @@ xmrig::CudaBaseRunner::~CudaBaseRunner()
 bool xmrig::CudaBaseRunner::init()
 {
     m_ctx = CudaLib::alloc(m_data.thread.index(), m_data.thread.bfactor(), m_data.thread.bsleep());
-    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm, m_data.thread.dataset_host() ? 1 : 0) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm, m_data.thread.datasetHost()) != 0) {
         return false;
     }
diff --git a/src/backend/cuda/runners/CudaRxRunner.cpp b/src/backend/cuda/runners/CudaRxRunner.cpp
index 066e87ef..20603e76 100644
--- a/src/backend/cuda/runners/CudaRxRunner.cpp
+++ b/src/backend/cuda/runners/CudaRxRunner.cpp
@@ -31,7 +31,8 @@
 #include "crypto/rx/RxDataset.h"
 
 
-xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data)
+xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data),
+    m_datasetHost(data.thread.datasetHost() > 0)
 {
     m_intensity = m_data.thread.threads() * m_data.thread.blocks();
     const size_t scratchpads_size = m_intensity * m_data.algorithm.l3();
@@ -42,8 +43,6 @@ xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : Cu
     }
 
     m_intensity -= m_intensity % 32;
-
-    m_dataset_host = m_data.thread.dataset_host();
 }
 
 
@@ -61,7 +60,7 @@ bool xmrig::CudaRxRunner::set(const Job &job, uint8_t *blob)
     }
 
     auto dataset = Rx::dataset(job, 0);
-    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_dataset_host, m_intensity));
+    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_datasetHost, m_intensity));
 
     return m_ready;
 }
diff --git a/src/backend/cuda/runners/CudaRxRunner.h b/src/backend/cuda/runners/CudaRxRunner.h
index af882fb8..448400bc 100644
--- a/src/backend/cuda/runners/CudaRxRunner.h
+++ b/src/backend/cuda/runners/CudaRxRunner.h
@@ -44,9 +44,9 @@ protected:
     bool set(const Job &job, uint8_t *blob) override;
 
 private:
-    bool m_ready = false;
-    size_t m_intensity = 0;
-    bool m_dataset_host = false;
+    bool m_ready             = false;
+    const bool m_datasetHost = false;
+    size_t m_intensity       = 0;
 };
diff --git a/src/backend/cuda/wrappers/CudaDevice.cpp b/src/backend/cuda/wrappers/CudaDevice.cpp
index 0849acee..efacc800 100644
--- a/src/backend/cuda/wrappers/CudaDevice.cpp
+++ b/src/backend/cuda/wrappers/CudaDevice.cpp
@@ -41,7 +41,7 @@ xmrig::CudaDevice::CudaDevice(uint32_t index, int32_t bfactor, int32_t bsleep) :
     m_index(index)
 {
     auto ctx = CudaLib::alloc(index, bfactor, bsleep);
-    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID, 0) != 0) {
+    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID) != 0) {
         CudaLib::release(ctx);
 
         return;
@@ -107,7 +107,7 @@ uint32_t xmrig::CudaDevice::smx() const
 
 void xmrig::CudaDevice::generate(const Algorithm &algorithm, CudaThreads &threads) const
 {
-    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm, 0) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm) != 0) {
         return;
     }
diff --git a/src/backend/cuda/wrappers/CudaLib.h b/src/backend/cuda/wrappers/CudaLib.h
index c4ce14fc..10ef24fe 100644
--- a/src/backend/cuda/wrappers/CudaLib.h
+++ b/src/backend/cuda/wrappers/CudaLib.h
@@ -80,7 +80,7 @@ public:
     static const char *deviceName(nvid_ctx *ctx) noexcept;
     static const char *lastError(nvid_ctx *ctx) noexcept;
     static const char *pluginVersion() noexcept;
-    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host) noexcept;
+    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host = -1) noexcept;
     static int32_t deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept;
     static nvid_ctx *alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept;
     static std::string version(uint32_t version);

From ed2f2c5a6048dab35b4bbf857b2e29d548d14382 Mon Sep 17 00:00:00 2001
From: xmrig
Date: Sat, 9 Nov 2019 20:36:29 +0700
Subject: [PATCH 4/4] Update CHANGELOG.md

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index feba3b05..5e908ca5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+# v4.6.0-beta
+- [#1263](https://github.com/xmrig/xmrig/pull/1263) Added new option `dataset_host` for NVIDIA GPUs with less than 4 GB memory (RandomX only).
+
 # v4.5.0-beta
 - Added NVIDIA CUDA support via external [CUDA plugin](https://github.com/xmrig/xmrig-cuda). XMRig now is unified 3 in 1 miner.
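For reference, a minimal sketch of a per-thread CUDA config entry after this series. The field names (`index`, `threads`, `blocks`, `bfactor`, `bsleep`, `affinity`, `dataset_host`) match the keys serialized by CudaThread::toJSON() in the patches above; the values and the enclosing "cuda" section layout are illustrative assumptions, not recommended settings:

    {
        "index": 0,
        "threads": 32,
        "blocks": 16,
        "bfactor": 6,
        "bsleep": 25,
        "affinity": -1,
        "dataset_host": true
    }

With `dataset_host` enabled, rxPrepare() receives the flag and the plugin can keep the roughly 2 GB RandomX dataset in host memory instead of uploading it to the device, which is what makes cards with less than 4 GB of VRAM usable for RandomX, presumably at some cost in dataset access speed.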