Moved from PerfAlgo to Algo in threads to remove the not really used cn-fast threads setup

MoneroOcean 2018-08-06 10:40:55 +02:00
parent 0f880db56f
commit 67ed81f1ec
5 changed files with 45 additions and 44 deletions

View file

@@ -44,7 +44,7 @@ public:
         m_variant(VARIANT_AUTO)
     {}
-    inline Algorithm(Algo algo, Variant variant) :
+    inline Algorithm(Algo algo, Variant variant = VARIANT_AUTO) :
         m_variant(variant)
     {
         setAlgo(algo);
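
The new default argument lets an Algorithm be constructed from an Algo alone, which is what the Config changes below rely on when they write xmrig::Algorithm(algo).perf_algo(). A minimal sketch of the effect (illustrative only; the include paths are assumptions about the source layout):

// Sketch only, not part of the commit.
#include "common/xmrig.h"            // enum Algo, enum Variant (assumed path)
#include "common/crypto/Algorithm.h" // class Algorithm (assumed path)

void algorithm_ctor_sketch()
{
    xmrig::Algorithm a(xmrig::CRYPTONIGHT);                         // variant now defaults to VARIANT_AUTO
    xmrig::Algorithm b(xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_0); // explicit variant still compiles
    (void) a;
    (void) b;
}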

View file

@@ -34,7 +34,8 @@ enum Algo {
     INVALID_ALGO = -1,
     CRYPTONIGHT,       /* CryptoNight (Monero) */
     CRYPTONIGHT_LITE,  /* CryptoNight-Lite (AEON) */
-    CRYPTONIGHT_HEAVY  /* CryptoNight-Heavy (SUMO) */
+    CRYPTONIGHT_HEAVY, /* CryptoNight-Heavy (SUMO) */
+    ALGO_MAX
 };
 // algorithms that can has different performance
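
The appended ALGO_MAX is a count sentinel (3 here), so per-algorithm state can live in plain arrays indexed by the enum, which is exactly how Config.cpp uses it below. A small illustrative sketch, not taken from the commit:

// Illustrative only: ALGO_MAX sizes per-algorithm arrays and bounds the loop.
void algo_max_sketch()
{
    int perAlgoCount[xmrig::Algo::ALGO_MAX] = {}; // one slot each for CRYPTONIGHT, CRYPTONIGHT_LITE, CRYPTONIGHT_HEAVY

    for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++a) {
        const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
        perAlgoCount[algo] += 1; // touch each per-algo slot
    }
}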

View file

@@ -124,20 +124,20 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const
     // save extended "threads" based on m_threads
     Value threads(kObjectType);
-    for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
-        const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
-        Value key(xmrig::Algorithm::perfAlgoName(pa), allocator);
-        if (threadsMode(pa) == Advanced) {
+    for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
+        const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
+        Value key(xmrig::Algorithm::perfAlgoName(xmrig::Algorithm(algo).perf_algo()), allocator);
+        if (threadsMode(algo) == Advanced) {
             Value threads2(kArrayType);
-            for (const IThread *thread : m_threads[pa].list) {
+            for (const IThread *thread : m_threads[algo].list) {
                 threads2.PushBack(thread->toConfig(doc), allocator);
             }
             threads.AddMember(key, threads2, allocator);
         }
         else {
-            threads.AddMember(key, threadsMode(pa) == Automatic ? Value(kNullType) : Value(threadsCount(pa)), allocator);
+            threads.AddMember(key, threadsMode(algo) == Automatic ? Value(kNullType) : Value(threadsCount(algo)), allocator);
         }
     }
     doc.AddMember("threads", threads, allocator);
@@ -181,33 +181,33 @@ bool xmrig::Config::finalize()
     }
     // auto configure m_threads
-    for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
-        const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
-        if (!m_threads[pa].cpu.empty()) {
-            m_threads[pa].mode = Advanced;
+    for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
+        const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
+        if (!m_threads[algo].cpu.empty()) {
+            m_threads[algo].mode = Advanced;
             const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT;
-            for (size_t i = 0; i < m_threads[pa].cpu.size(); ++i) {
-                m_threads[pa].list.push_back(CpuThread::createFromData(i, xmrig::Algorithm(pa).algo(), m_threads[pa].cpu[i], m_priority, softAES));
+            for (size_t i = 0; i < m_threads[algo].cpu.size(); ++i) {
+                m_threads[algo].list.push_back(CpuThread::createFromData(i, algo, m_threads[algo].cpu[i], m_priority, softAES));
             }
         } else {
             const AlgoVariant av = getAlgoVariant();
-            m_threads[pa].mode = m_threads[pa].count ? Simple : Automatic;
-            const size_t size = CpuThread::multiway(av) * cn_select_memory(xmrig::Algorithm(pa).algo()) / 1024;
+            m_threads[algo].mode = m_threads[algo].count ? Simple : Automatic;
+            const size_t size = CpuThread::multiway(av) * cn_select_memory(algo) / 1024;
 
-            if (!m_threads[pa].count) {
-                m_threads[pa].count = Cpu::optimalThreadsCount(size, m_maxCpuUsage);
+            if (!m_threads[algo].count) {
+                m_threads[algo].count = Cpu::optimalThreadsCount(size, m_maxCpuUsage);
             }
             else if (m_safe) {
                 const size_t count = Cpu::optimalThreadsCount(size, m_maxCpuUsage);
-                if (m_threads[pa].count > count) {
-                    m_threads[pa].count = count;
+                if (m_threads[algo].count > count) {
+                    m_threads[algo].count = count;
                 }
             }
-            for (size_t i = 0; i < m_threads[pa].count; ++i) {
-                m_threads[pa].list.push_back(CpuThread::createFromAV(i, xmrig::Algorithm(pa).algo(), av, m_threads[pa].mask, m_priority));
+            for (size_t i = 0; i < m_threads[algo].count; ++i) {
+                m_threads[algo].list.push_back(CpuThread::createFromAV(i, algo, av, m_threads[algo].mask, m_priority));
             }
         }
     }
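
For the auto-configuration branch, size is the per-thread scratchpad footprint in KB: assuming cn_select_memory() returns the scratchpad size in bytes (2 MB for CryptoNight, 1 MB for CryptoNight-Lite, 4 MB for CryptoNight-Heavy) and CpuThread::multiway() the number of hashes per thread, a double-hash variant on CryptoNight works out as sketched below (constants are assumptions, not part of the commit):

// Worked example of the sizing formula used above (values are assumptions).
const size_t scratchpadBytes = 2 * 1024 * 1024;                          // cn_select_memory(CRYPTONIGHT)
const size_t hashesPerThread = 2;                                        // e.g. CpuThread::multiway(AV_DOUBLE)
const size_t sizeKB          = hashesPerThread * scratchpadBytes / 1024; // = 4096 KB per thread
// Cpu::optimalThreadsCount(sizeKB, m_maxCpuUsage) then fits that footprint into the available cache/CPU budget.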
@@ -263,7 +263,7 @@ bool xmrig::Config::parseString(int key, const char *arg)
     case ThreadsKey: /* --threads */
         if (strncmp(arg, "all", 3) == 0) {
-            m_threads[m_algorithm.perf_algo()].count = Cpu::threads(); // sets default algo threads
+            m_threads[m_algorithm.algo()].count = Cpu::threads(); // sets default algo threads
             return true;
         }
@@ -292,7 +292,7 @@ bool xmrig::Config::parseUint64(int key, uint64_t arg)
     switch (key) {
     case CPUAffinityKey: /* --cpu-affinity */
         if (arg) {
-            m_threads[m_algorithm.perf_algo()].mask = arg; // sets default algo threads
+            m_threads[m_algorithm.algo()].mask = arg; // sets default algo threads
         }
         break;
@@ -305,7 +305,7 @@ bool xmrig::Config::parseUint64(int key, uint64_t arg)
 // parse specific perf algo (or generic) threads config
-void xmrig::Config::parseThreadsJSON(const rapidjson::Value &threads, const xmrig::PerfAlgo pa)
+void xmrig::Config::parseThreadsJSON(const rapidjson::Value &threads, const xmrig::Algo algo)
 {
     for (const rapidjson::Value &value : threads.GetArray()) {
         if (!value.IsObject()) {
@@ -316,7 +316,7 @@ void xmrig::Config::parseThreadsJSON(const rapidjson::Value &threads, const xmri
         auto data = CpuThread::parse(value);
         if (data.valid) {
-            m_threads[pa].cpu.push_back(std::move(data));
+            m_threads[algo].cpu.push_back(std::move(data));
         }
     }
 }
@@ -328,14 +328,14 @@ void xmrig::Config::parseJSON(const rapidjson::Document &doc)
     if (threads.IsArray()) {
         // parse generic (old) threads
-        parseThreadsJSON(threads, m_algorithm.perf_algo());
+        parseThreadsJSON(threads, m_algorithm.algo());
     } else if (threads.IsObject()) {
         // parse new specific perf algo threads
-        for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
-            const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
-            const rapidjson::Value &threads2 = threads[xmrig::Algorithm::perfAlgoName(pa)];
+        for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
+            const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
+            const rapidjson::Value &threads2 = threads[xmrig::Algorithm::perfAlgoName(xmrig::Algorithm(algo).perf_algo())];
             if (threads2.IsArray()) {
-                parseThreadsJSON(threads2, pa);
+                parseThreadsJSON(threads2, algo);
             }
         }
     }
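
With this hunk the "threads" value in config.json may be an object keyed per algorithm; array entries are handed to parseThreadsJSON(), while getJSON() above writes a plain count or null for the Simple/Automatic modes. A hypothetical fragment, assuming perfAlgoName() yields the short names "cn", "cn-lite" and "cn-heavy" and the usual CPU thread fields:

// Hypothetical config fragment for the per-algo branch above (key and field names are assumptions).
#include "rapidjson/document.h"

static const char *kThreadsExample = R"({
    "threads": {
        "cn":       [ { "low_power_mode": 1, "affine_to_cpu": 0 } ],
        "cn-lite":  4,
        "cn-heavy": null
    }
})";

void threads_json_sketch()
{
    rapidjson::Document doc;
    doc.Parse(kThreadsExample);
    // "cn" is an array, so parseThreadsJSON(threads2, CRYPTONIGHT) would consume it;
    // the number / null forms correspond to the Simple / Automatic modes written by getJSON().
}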
@@ -360,7 +360,7 @@ bool xmrig::Config::parseInt(int key, int arg)
     switch (key) {
     case ThreadsKey: /* --threads */
         if (arg >= 0 && arg < 1024) {
-            m_threads[m_algorithm.perf_algo()].count = arg; // sets default algo threads
+            m_threads[m_algorithm.algo()].count = arg; // sets default algo threads
         }
         break;

View file

@@ -83,17 +83,17 @@ public:
     inline int priority() const { return m_priority; }
     // access to m_threads taking into accoun that it is now separated for each perf algo
-    inline const std::vector<IThread *> &threads(const xmrig::PerfAlgo pa = PA_INVALID) const {
-        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].list;
+    inline const std::vector<IThread *> &threads(const xmrig::Algo algo = INVALID_ALGO) const {
+        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].list;
     }
-    inline int threadsCount(const xmrig::PerfAlgo pa = PA_INVALID) const {
-        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].list.size();
+    inline int threadsCount(const xmrig::Algo algo = INVALID_ALGO) const {
+        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].list.size();
     }
-    inline int64_t affinity(const xmrig::PerfAlgo pa = PA_INVALID) const {
-        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].mask;
+    inline int64_t affinity(const xmrig::Algo algo = INVALID_ALGO) const {
+        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].mask;
     }
-    inline ThreadsMode threadsMode(const xmrig::PerfAlgo pa = PA_INVALID) const {
-        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].mode;
+    inline ThreadsMode threadsMode(const xmrig::Algo algo = INVALID_ALGO) const {
+        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].mode;
     }
     // access to perf algo results
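
Since every accessor now takes an xmrig::Algo defaulting to INVALID_ALGO, callers keep the old no-argument behaviour (data for the currently selected algorithm) and can also query a specific one. A short usage sketch, illustrative only:

// Usage sketch for the updated accessors (illustrative only).
void inspect_threads(const xmrig::Config *config)
{
    // default argument: data for the currently selected algorithm
    const std::vector<xmrig::IThread *> &current = config->threads();

    // explicit Algo: per-algorithm thread list and affinity mask
    const std::vector<xmrig::IThread *> &heavy = config->threads(xmrig::CRYPTONIGHT_HEAVY);
    const int64_t mask = config->affinity(xmrig::CRYPTONIGHT_HEAVY);

    (void) current;
    (void) heavy;
    (void) mask;
}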
@@ -109,7 +109,7 @@ protected:
     bool parseUint64(int key, uint64_t arg) override;
     void parseJSON(const rapidjson::Document &doc) override;
     // parse specific perf algo (or generic) threads config
-    void parseThreadsJSON(const rapidjson::Value &threads, xmrig::PerfAlgo);
+    void parseThreadsJSON(const rapidjson::Value &threads, xmrig::Algo);
 private:
     bool parseInt(int key, int arg);
@@ -139,8 +139,8 @@ private:
     bool m_safe;
     int m_maxCpuUsage;
     int m_priority;
-    // threads config for each perf algo
-    Threads m_threads[xmrig::PerfAlgo::PA_MAX];
+    // threads config for each algo
+    Threads m_threads[xmrig::Algo::ALGO_MAX];
     // perf algo hashrate results
     float m_algo_perf[xmrig::PerfAlgo::PA_MAX];
 };

View file

@@ -223,7 +223,7 @@ void Workers::switch_algo(const xmrig::Algorithm& algorithm)
     m_sequence = 1;
     m_paused = 1;
-    const std::vector<xmrig::IThread *> &threads = m_controller->config()->threads(algorithm.perf_algo());
+    const std::vector<xmrig::IThread *> &threads = m_controller->config()->threads(algorithm.algo());
     m_status.algo = algorithm.algo();
     m_status.threads = threads.size();