Added 1GB hugepages support for Linux

SChernykh 2019-12-05 19:39:47 +01:00
parent caa2da8bb3
commit 1fbbae1e4a
28 changed files with 156 additions and 50 deletions

@@ -34,6 +34,7 @@ namespace xmrig {
 static const char *kEnabled = "enabled";
 static const char *kHugePages = "huge-pages";
+static const char *kOneGbPages = "1gb-pages";
 static const char *kHwAes = "hw-aes";
 static const char *kMaxThreadsHint = "max-threads-hint";
 static const char *kMemoryPool = "memory-pool";
@@ -68,6 +69,7 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const
     obj.AddMember(StringRef(kEnabled), m_enabled, allocator);
     obj.AddMember(StringRef(kHugePages), m_hugePages, allocator);
+    obj.AddMember(StringRef(kOneGbPages), m_oneGbPages, allocator);
     obj.AddMember(StringRef(kHwAes), m_aes == AES_AUTO ? Value(kNullType) : Value(m_aes == AES_HW), allocator);
     obj.AddMember(StringRef(kPriority), priority() != -1 ? Value(priority()) : Value(kNullType), allocator);
     obj.AddMember(StringRef(kMemoryPool), m_memoryPool < 1 ? Value(m_memoryPool < 0) : Value(m_memoryPool), allocator);
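
For reference, a minimal sketch of how the new switch could be enabled in a user's config file; the enclosing "cpu" object name and the values shown are assumptions for illustration, not copied from this commit:

    "cpu": {
        "enabled": true,
        "huge-pages": true,
        "1gb-pages": true
    }

The three keys are exactly the kEnabled, kHugePages and kOneGbPages strings defined above, and toJSON() writes them back out in the same shape.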
@@ -119,10 +121,11 @@ std::vector<xmrig::CpuLaunchData> xmrig::CpuConfig::get(const Miner *miner, cons
 void xmrig::CpuConfig::read(const rapidjson::Value &value)
 {
     if (value.IsObject()) {
-        m_enabled = Json::getBool(value, kEnabled, m_enabled);
-        m_hugePages = Json::getBool(value, kHugePages, m_hugePages);
-        m_limit = Json::getUint(value, kMaxThreadsHint, m_limit);
-        m_yield = Json::getBool(value, kYield, m_yield);
+        m_enabled = Json::getBool(value, kEnabled, m_enabled);
+        m_hugePages = Json::getBool(value, kHugePages, m_hugePages);
+        m_oneGbPages = Json::getBool(value, kOneGbPages, m_oneGbPages);
+        m_limit = Json::getUint(value, kMaxThreadsHint, m_limit);
+        m_yield = Json::getBool(value, kYield, m_yield);
         setAesMode(Json::getValue(value, kHwAes));
         setPriority(Json::getInt(value, kPriority, -1));
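
The actual page allocation lives in the platform memory code among the other changed files, which are not shown here. As a rough, hypothetical sketch only (not the code from this commit): on Linux, 1GB hugepage-backed memory is normally requested through mmap() with the MAP_HUGETLB and MAP_HUGE_1GB flags, which is the kernel facility an option like "1gb-pages" ends up driving.

// Minimal sketch (hypothetical helper, not xmrig's memory code): reserve a
// 1GB hugepage-backed buffer on Linux. Requires 1GB pages to be reserved
// beforehand, e.g. via kernel boot parameters or the
// /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages knob.
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

#ifndef MAP_HUGE_SHIFT
#   define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_1GB
#   define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)   // log2(1 GiB) encoded into the mmap flags
#endif

// Returns a pointer to size bytes backed by 1GB pages, or nullptr on failure.
static void *allocate1GbPages(size_t size)
{
    void *mem = mmap(nullptr, size,
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
                     -1, 0);

    return mem == MAP_FAILED ? nullptr : mem;
}

int main()
{
    constexpr size_t size = 1ULL << 30;   // one 1GB page
    void *mem = allocate1GbPages(size);

    std::printf("1GB page allocation %s\n", mem ? "succeeded" : "failed");

    if (mem) {
        munmap(mem, size);
    }

    return 0;
}

The real implementation presumably also falls back to ordinary hugepages or plain pages when no 1GB pages are available; that logic sits in the files omitted from this view.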