Added interface IRxStorage and RxBasicStorage class.

This commit is contained in:
XMRig 2019-10-04 08:45:13 +07:00
parent d1aadc2e3b
commit f34031a984
10 changed files with 372 additions and 210 deletions

View file

@@ -28,6 +28,8 @@
#include "crypto/rx/Rx.h"
#include "backend/common/interfaces/IRxListener.h"
#include "backend/common/interfaces/IRxStorage.h"
#include "backend/common/Tags.h"
#include "backend/cpu/Cpu.h"
#include "base/io/log/Log.h"
#include "base/kernel/Platform.h"
@@ -37,18 +39,13 @@
#include "base/tools/Handle.h"
#include "base/tools/Object.h"
#include "crypto/rx/RxAlgo.h"
#include "crypto/rx/RxBasicStorage.h"
#include "crypto/rx/RxCache.h"
#include "crypto/rx/RxConfig.h"
#include "crypto/rx/RxDataset.h"
#include "crypto/rx/RxSeed.h"
#ifdef XMRIG_FEATURE_HWLOC
# include <hwloc.h>
# include "backend/cpu/platform/HwlocCpuInfo.h"
#endif
#include <atomic>
#include <map>
#include <mutex>
@@ -67,58 +64,18 @@ static RxPrivate *d_ptr = nullptr;
static std::mutex mutex;
#ifdef XMRIG_FEATURE_HWLOC
// Binds the calling thread to the given NUMA node: memory allocation policy
// is bound to the node's nodeset (only when the platform supports per-thread
// membind), and CPU affinity is pinned to the first CPU in the node's cpuset.
// When nodeId does not resolve to a NUMA node, nothing is bound and the
// topology is simply freed.
static void bindToNUMANode(uint32_t nodeId)
{
    hwloc_topology_t topology;
    hwloc_topology_init(&topology);
    hwloc_topology_load(topology);

    hwloc_obj_t node = hwloc_get_numanode_obj_by_os_index(topology, nodeId);
    if (node) {
        if (HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) {
            // hwloc 2.x takes a nodeset directly when BYNODESET is passed;
            // hwloc 1.x uses the dedicated *_nodeset entry point.
#           if HWLOC_API_VERSION >= 0x20000
            hwloc_set_membind(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD | HWLOC_MEMBIND_BYNODESET);
#           else
            hwloc_set_membind_nodeset(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD);
#           endif
        }

        Platform::setThreadAffinity(static_cast<uint64_t>(hwloc_bitmap_first(node->cpuset)));
    }

    hwloc_topology_destroy(topology);
}
#else
inline static void bindToNUMANode(uint32_t) {}
#endif
// Private implementation state behind the xmrig::Rx facade (accessed through
// the file-scope d_ptr singleton).
//
// NOTE(review): this span is a rendered diff whose '+'/'-' markers were
// stripped, so pre-refactor lines (datasets map, m_counter/m_last/m_ready,
// allocate()/getOrAllocate()) and post-refactor lines (IRxStorage backend,
// m_pending) are interleaved. It does not compile as shown; reconcile with
// the actual commit before editing. The "old side"/"new side" comments below
// are a best-effort classification — verify against the original diff.
class RxPrivate
{
public:
    XMRIG_DISABLE_COPY_MOVE(RxPrivate)

    // Sets up the libuv async handle used to notify the listener from the
    // event loop once dataset initialization completes.
    // NOTE(review): the initializer list mixes both diff sides (no comma
    // after m_last(0)), and uv_async_init appears twice — the old callback
    // goes through the global d_ptr, the new one recovers `this` from
    // handle->data.
    inline RxPrivate() :
        m_counter(0),
        m_last(0)
        m_pending(0)
    {
        m_async = new uv_async_t;
        m_async->data = this;
        uv_async_init(uv_default_loop(), m_async, [](uv_async_t *) { d_ptr->onReady(); });

#   ifdef XMRIG_FEATURE_HWLOC
        // Old side: pre-create one (initially null) dataset slot per NUMA node.
        if (Cpu::info()->nodes() > 1) {
            for (uint32_t nodeId : HwlocCpuInfo::nodeIndexes()) {
                datasets.insert({ nodeId, nullptr });
            }
        }
        else
#   endif
        {
            datasets.insert({ 0, nullptr });
        }

        uv_async_init(uv_default_loop(), m_async, [](uv_async_t *handle) { static_cast<RxPrivate *>(handle->data)->onReady(); });
    }

@ -126,130 +83,66 @@ public:
    // Destructor body (its signature was elided by the diff hunk above):
    // closes the async handle; old side frees the per-node datasets, new
    // side deletes the storage backend.
    {
        Handle::close(m_async);

        for (auto const &item : datasets) {
            delete item.second;
        }

        datasets.clear();
        delete m_storage;
    }

    inline bool isNUMA() const { return m_numa; }                                                                                  // old side
    inline bool isReady(const Job &job) const { return m_ready == count() && m_seed == job; }                                      // old side
    inline const Algorithm &algorithm() const { return m_seed.algorithm(); }
    inline size_t count() const { return isNUMA() ? datasets.size() : 1; }                                                         // old side
    inline uint64_t counter() { return m_counter.load(std::memory_order_relaxed); }                                                // old side
    inline void asyncSend(uint64_t counter) { m_ready++; if (m_ready == count()) { m_last = counter; uv_async_send(m_async); } }   // old side
    inline bool isReady(const Job &job) const { return pending() == 0 && m_seed == job; }                                          // new side
    inline RxDataset *dataset(const Job &job, uint32_t nodeId) { return m_storage ? m_storage->dataset(job, nodeId) : nullptr; }
    inline std::pair<uint32_t, uint32_t> hugePages() { return m_storage ? m_storage->hugePages() : std::pair<uint32_t, uint32_t>(0u, 0u); }
    inline uint64_t pending() const { return m_pending.load(std::memory_order_relaxed); }
    inline void asyncSend() { --m_pending; if (pending() == 0) { uv_async_send(m_async); } }

    // NOTE(review): old allocate(nodeId) and new storage() were fused here
    // by the diff — two signatures share one merged body.
    static void allocate(uint32_t nodeId)
    inline IRxStorage *storage()
    {
        const uint64_t ts = Chrono::steadyMSecs();

        if (d_ptr->isNUMA()) {
            bindToNUMANode(nodeId);
        // New side: lazily create the default storage backend.
        if (!m_storage) {
            m_storage = new RxBasicStorage();
        }

        LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"),
                 tag,
                 nodeId,
                 (RxDataset::maxSize() + RxCache::maxSize()) / 1024 / 1024,
                 RxDataset::maxSize() / 1024 / 1024,
                 RxCache::maxSize() / 1024 / 1024
                 );

        auto dataset = new RxDataset(d_ptr->m_hugePages);
        d_ptr->datasets[nodeId] = dataset;

        if (dataset->get() != nullptr) {
            const auto hugePages = dataset->hugePages();
            const double percent = hugePages.first == 0 ? 0.0 : static_cast<double>(hugePages.first) / hugePages.second * 100.0;

            LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"),
                     tag,
                     nodeId,
                     (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)),
                     hugePages.first,
                     hugePages.second,
                     percent,
                     dataset->cache()->isJIT() ? GREEN_BOLD_S "+" : RED_BOLD_S "-",
                     Chrono::steadyMSecs() - ts
                     );
        }
        else {
            LOG_WARN(CLEAR "%s" CYAN_BOLD("#%u") YELLOW_BOLD_S " failed to allocate RandomX dataset, switching to slow mode", tag, nodeId);
        }

        return m_storage;
    }

    // NOTE(review): old initDataset(nodeId, threads, counter) and new
    // initDataset(seed, threads, hugePages) were fused here by the diff.
    static void initDataset(uint32_t nodeId, uint32_t threads, uint64_t counter)
    static void initDataset(const RxSeed &seed, uint32_t threads, bool hugePages)
    {
        std::lock_guard<std::mutex> lock(mutex);

        const uint64_t ts = Chrono::steadyMSecs();

        LOG_INFO("%s" MAGENTA_BOLD("init dataset%s") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."),
                 tag,
                 false ? "s" : "", // FIXME
                 seed.algorithm().shortName(),
                 threads,
                 Buffer::toHex(seed.data().data(), 8).data()
                 );

        d_ptr->getOrAllocate(nodeId)->init(d_ptr->m_seed.data(), threads);
        d_ptr->asyncSend(counter);

        LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" init done ") CYAN_BOLD("%zu/%zu") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, nodeId, d_ptr->m_ready, d_ptr->count(), Chrono::steadyMSecs() - ts);
        d_ptr->storage()->init(seed, threads, hugePages);
        d_ptr->asyncSend();
    }

    // NOTE(review): old getOrAllocate(nodeId) and new setState(job, listener)
    // were fused here by the diff.
    inline RxDataset *getOrAllocate(uint32_t nodeId)
    inline void setState(const Job &job, IRxListener *listener)
    {
        RxDataset *dataset = datasets.at(nodeId);

        if (dataset == nullptr) {
#   ifdef XMRIG_FEATURE_HWLOC
            // Old side: allocate on a joined thread so membind/affinity from
            // bindToNUMANode apply to the allocating thread only.
            if (d_ptr->isNUMA()) {
                std::thread thread(allocate, nodeId);
                thread.join();
            } else
#   endif
            {
                allocate(nodeId);
            }

            dataset = datasets.at(nodeId);
        }

        return dataset;
    }

    inline void setState(const Job &job, bool hugePages, bool numa, IRxListener *listener)
    {
        if (m_seed.algorithm() != job.algorithm()) {
            RxAlgo::apply(job.algorithm());
        }

        m_ready = 0;               // old side
        m_numa = numa;             // old side
        m_hugePages = hugePages;   // old side
        m_listener = listener;
        m_seed = job;

        ++m_counter;               // old side
        ++m_pending;               // new side
    }

    std::map<uint32_t, RxDataset *> datasets;   // old side: per-NUMA-node datasets (key = node id)

private:
    // Runs on the uv loop after asyncSend(); notifies the listener.
    // NOTE(review): both diff sides of the guard condition are present.
    inline void onReady()
    {
        if (m_listener && counter() == m_last.load(std::memory_order_relaxed)) {   // old side
        if (m_listener && pending() == 0) {                                        // new side
            m_listener->onDatasetReady();
        }
    }

    bool m_hugePages = true;            // old side
    bool m_numa = true;                 // old side
    RxSeed m_seed;
    IRxListener *m_listener = nullptr;
    size_t m_ready = 0;                 // old side
    std::atomic<uint64_t> m_counter;    // old side
    std::atomic<uint64_t> m_last;       // old side
    IRxStorage *m_storage = nullptr;    // new side
    RxSeed m_seed;
    std::atomic<uint64_t> m_pending;    // new side
    uv_async_t *m_async = nullptr;
};
@@ -257,44 +150,26 @@ private:
} // namespace xmrig
// Returns the log prefix used for RandomX messages ("tag" is a file-scope
// constant defined elsewhere in this file).
const char *xmrig::rx_tag()
{
    return tag;
}
// Kicks off RandomX dataset initialization for the given job.
// Returns true when no work is needed (non-RandomX algorithm, or the dataset
// already matches the job); returns false after spawning a detached init
// thread, in which case `listener` is notified asynchronously when done.
//
// NOTE(review): stripped-diff span — both the old per-NUMA-node spawn loop
// and the new single initDataset(job, ...) spawn are present below, plus two
// setState() calls. Reconcile with the actual commit before editing.
bool xmrig::Rx::init(const Job &job, const RxConfig &config, bool hugePages, IRxListener *listener)
{
    if (job.algorithm().family() != Algorithm::RANDOM_X) {
        return true;
    }

    std::lock_guard<std::mutex> lock(mutex);

    if (d_ptr->isReady(job)) {
        return true;
    }

    d_ptr->setState(job, hugePages, config.isNUMA(), listener);   // old side
    const uint32_t threads = config.threads();                    // old side
    const String buf = Buffer::toHex(job.seed().data(), 8);       // old side
    const uint64_t counter = d_ptr->counter();                    // old side
    d_ptr->setState(job, listener);                               // new side

    // Old side: log once, then spawn one init thread per dataset/node.
    LOG_INFO("%s" MAGENTA_BOLD("init dataset%s") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."),
             tag,
             d_ptr->count() > 1 ? "s" : "",
             job.algorithm().shortName(),
             threads,
             buf.data()
             );

#   ifdef XMRIG_FEATURE_HWLOC
    if (d_ptr->isNUMA()) {
        for (auto const &item : d_ptr->datasets) {
            std::thread thread(RxPrivate::initDataset, item.first, threads, counter);
            thread.detach();
        }
    }
    else
#   endif
    {
        std::thread thread(RxPrivate::initDataset, 0, threads, counter);
        thread.detach();
    }

    // New side: single background thread; the storage backend owns the rest.
    std::thread thread(RxPrivate::initDataset, job, config.threads(), hugePages);
    thread.detach();

    return false;
}
@@ -302,8 +177,6 @@ bool xmrig::Rx::init(const Job &job, const RxConfig &config, bool hugePages, IRx
// Thread-safe query: true when the RandomX dataset is ready for this job.
bool xmrig::Rx::isReady(const Job &job)
{
    const std::lock_guard<std::mutex> guard(mutex);

    return d_ptr->isReady(job);
}
@@ -311,30 +184,16 @@ bool xmrig::Rx::isReady(const Job &job)
// Returns the dataset to use for this job/NUMA node, or nullptr when it is
// not ready.
// NOTE(review): stripped-diff span — the old path (readiness guard + per-node
// map lookup with fallback to the first hwloc node) and the new path
// (delegated to the storage backend) are both present; the second return is
// unreachable as written.
xmrig::RxDataset *xmrig::Rx::dataset(const Job &job, uint32_t nodeId)
{
    std::lock_guard<std::mutex> lock(mutex);

    if (!d_ptr->isReady(job)) {   // old side
        return nullptr;
    }

    return d_ptr->datasets.at(d_ptr->isNUMA() ? (d_ptr->datasets.count(nodeId) ? nodeId : HwlocCpuInfo::nodeIndexes().front()) : 0);   // old side
    return d_ptr->dataset(job, nodeId);   // new side
}
// Aggregated huge-page usage (allocated, total) for RandomX allocations.
// NOTE(review): stripped-diff span — both the old signature (unsigned pair,
// manual accumulation over d_ptr->datasets) and the new signature (uint32_t
// pair, delegated to d_ptr->hugePages()) are present back to back; the final
// two return statements likewise belong to opposite diff sides.
std::pair<unsigned, unsigned> xmrig::Rx::hugePages()
std::pair<uint32_t, uint32_t> xmrig::Rx::hugePages()
{
    std::pair<unsigned, unsigned> pages(0, 0);   // old side
    std::lock_guard<std::mutex> lock(mutex);

    for (auto const &item : d_ptr->datasets) {   // old side
        if (!item.second) {
            continue;
        }

        const auto p = item.second->hugePages();
        pages.first += p.first;
        pages.second += p.second;
    }

    return pages;                // old side
    return d_ptr->hugePages();   // new side
}