Added more detailed thread perf

parent ca70cd4d75
commit 9499d03bf1

3 changed files with 42 additions and 42 deletions
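This commit re-keys all per-algorithm thread handling from xmrig::Algo to xmrig::PerfAlgo: Config now stores one Threads slot per performance algorithm class, the JSON "threads" section is read and written per perf algo, and Workers only restarts its worker threads when the perf algo of the new job actually differs. The sketch below is not part of the diff; it only illustrates, under stated assumptions, the PerfAlgo/Algorithm relationship the changed code relies on (only PA_INVALID and PA_MAX appear in the diff; the other enumerators are hypothetical placeholders):

// Illustrative sketch only (not from this commit): assumed shape of the
// PerfAlgo enum and the Algorithm helpers used throughout the diff.
namespace xmrig {

enum PerfAlgo {
    PA_INVALID = -1,
    PA_CN,          // hypothetical: generic CryptoNight class
    PA_CN_LITE,     // hypothetical: CryptoNight-Lite class
    PA_CN_HEAVY,    // hypothetical: CryptoNight-Heavy class
    PA_MAX          // array size for the per-perf-algo tables
};

class Algorithm {
public:
    PerfAlgo perf_algo() const;                    // maps (algo, variant) to its perf class
    static const char *perfAlgoName(PerfAlgo pa);  // stable key used in the JSON config
    // ... remaining members as in the real class
};

} // namespace xmrig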
@@ -157,12 +157,12 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const
     // save extended "threads" based on m_threads
     Value threads(kObjectType);
-    for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
-        const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
-        Value key(xmrig::Algorithm::perfAlgoName(xmrig::Algorithm(algo).perf_algo()), allocator);
-        if (threadsMode(algo) != Simple) {
+    for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
+        const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
+        Value key(xmrig::Algorithm::perfAlgoName(pa), allocator);
+        if (threadsMode(pa) != Simple) {
             Value threads2(kArrayType);
-            for (const IThread *thread : m_threads[algo].list) {
+            for (const IThread *thread : m_threads[pa].list) {
                 threads2.PushBack(thread->toConfig(doc), allocator);
             }
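With this change getJSON() serializes the "threads" section as an object holding one entry per performance algorithm class rather than a single flat list. A rough example of the resulting config fragment is shown below; the key names come from Algorithm::perfAlgoName() and are assumed here, and the per-thread fields are whatever IThread::toConfig() emits (placeholder values):

"threads": {
    "cn": [
        { "low_power_mode": 1, "affine_to_cpu": 0 },
        { "low_power_mode": 1, "affine_to_cpu": 1 }
    ],
    "cn-lite": [
        { "low_power_mode": 2, "affine_to_cpu": 0 }
    ]
}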
@@ -194,40 +194,40 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const

 bool xmrig::Config::finalize()
 {
-    for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
-        const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
-        if (!m_threads[algo].cpu.empty()) {
-            m_threads[algo].mode = Advanced;
+    for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
+        const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
+        if (!m_threads[pa].cpu.empty()) {
+            m_threads[pa].mode = Advanced;
             const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT;

-            for (size_t i = 0; i < m_threads[algo].cpu.size(); ++i) {
-                m_threads[algo].list.push_back(CpuThread::createFromData(i, algo, m_threads[algo].cpu[i], m_priority, softAES));
+            for (size_t i = 0; i < m_threads[pa].cpu.size(); ++i) {
+                m_threads[pa].list.push_back(CpuThread::createFromData(i, xmrig::Algorithm(pa), m_threads[pa].cpu[i], m_priority, softAES));
             }

             continue;
         }

         const AlgoVariant av = getAlgoVariant();
-        m_threads[algo].mode = m_threads[algo].count ? Simple : Automatic;
+        m_threads[pa].mode = m_threads[pa].count ? Simple : Automatic;

         const Variant v = m_algorithm.variant();
-        const size_t size = CpuThread::multiway(av) * cn_select_memory(algo, v) / 1024;
+        const size_t size = CpuThread::multiway(av) * cn_select_memory(xmrig::Algorithm(pa), v) / 1024;

-        if (!m_threads[algo].count) {
-            m_threads[algo].count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage);
+        if (!m_threads[pa].count) {
+            m_threads[pa].count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage);
         }
         else if (m_safe) {
             const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage);
-            if (m_threads[algo].count > count) {
-                m_threads[algo].count = count;
+            if (m_threads[pa].count > count) {
+                m_threads[pa].count = count;
             }
         }

-        for (size_t i = 0; i < m_threads[algo].count; ++i) {
-            m_threads[algo].list.push_back(CpuThread::createFromAV(i, algo, av, m_threads[algo].mask, m_priority, m_assembly));
+        for (size_t i = 0; i < m_threads[pa].count; ++i) {
+            m_threads[pa].list.push_back(CpuThread::createFromAV(i, xmrig::Algorithm(pa), av, m_threads[pa].mask, m_priority, m_assembly));
         }

-        m_shouldSave = m_shouldSave || m_threads[algo].mode == Automatic;
+        m_shouldSave = m_shouldSave || m_threads[pa].mode == Automatic;
     }

     return true;
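finalize() now walks every PerfAlgo slot and fills in a thread list for each one, either from an explicit per-algo "cpu" config (Advanced mode) or automatically from the scratchpad size of that algorithm class. The standalone sketch below restates the automatic sizing rule; it is not the real Cpu::info()->optimalThreadsCount() implementation, just a simplified stand-in with made-up numbers:

// Illustrative sketch (not part of the commit): how the automatic per-perf-algo
// thread count is derived in finalize().
#include <cstddef>

size_t autoThreadCount(size_t multiway, size_t scratchpadBytes,
                       size_t l3CacheKiB, int maxCpuUsagePercent, int cores)
{
    // memory one hashing thread needs, in KiB (same formula as in finalize())
    const size_t perThreadKiB = multiway * scratchpadBytes / 1024;

    // simplified stand-in for Cpu::info()->optimalThreadsCount(): as many
    // threads as fit in L3 cache, capped by cores and the max CPU usage limit
    const size_t byCache = perThreadKiB ? l3CacheKiB / perThreadKiB : cores;
    const size_t byUsage = static_cast<size_t>(cores) * maxCpuUsagePercent / 100;
    const size_t count   = byCache < byUsage ? byCache : byUsage;
    return count > 0 ? count : 1;
}

// e.g. 2 MiB CryptoNight scratchpad, single-way hashing, 8 MiB L3, 4 cores:
// autoThreadCount(1, 2 * 1024 * 1024, 8 * 1024, 100, 4) == 4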
@@ -267,9 +267,9 @@ void xmrig::Config::setPriority(int priority)


 // parse specific perf algo (or generic) threads config
-void xmrig::Config::setThread(const rapidjson::Value &threads, const xmrig::Algo algo)
+void xmrig::Config::setThread(const rapidjson::Value &threads, const xmrig::PerfAlgo pa)
 {
-    m_threads[algo].cpu.clear();
+    m_threads[pa].cpu.clear();

     for (const rapidjson::Value &value : threads.GetArray()) {
         if (!value.IsObject()) {
@@ -280,7 +280,7 @@ void xmrig::Config::setThread(const rapidjson::Value &threads, const xmrig::Algo
         auto data = CpuThread::parse(value);

         if (data.valid) {
-            m_threads[algo].cpu.push_back(std::move(data));
+            m_threads[pa].cpu.push_back(std::move(data));
         }
     }
 }
@@ -290,22 +290,22 @@ void xmrig::Config::setThread(const rapidjson::Value &threads, const xmrig::Algo
 void xmrig::Config::setThreads(const rapidjson::Value &threads)
 {
     if (threads.IsArray()) {
-        setThread(threads, m_algorithm.algo());
+        setThread(threads, m_algorithm.perf_algo());
     }
     else if (threads.IsObject()) {
         // parse new specific perf algo threads
-        for (int a = 0; a != xmrig::Algo::ALGO_MAX; ++ a) {
-            const xmrig::Algo algo = static_cast<xmrig::Algo>(a);
-            const rapidjson::Value &threads2 = threads[xmrig::Algorithm::perfAlgoName(xmrig::Algorithm(algo).perf_algo())];
+        for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++ a) {
+            const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
+            const rapidjson::Value &threads2 = threads[xmrig::Algorithm::perfAlgoName(pa)];
             if (threads2.IsArray()) {
-                setThread(threads2, algo);
+                setThread(threads2, pa);
             }
         }
     }
     else if (threads.IsUint()) {
         const unsigned count = threads.GetUint();
         if (count < 1024) {
-            m_threads[m_algorithm.algo()].count = count;
+            m_threads[m_algorithm.perf_algo()].count = count;
         }
     }
 }
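setThreads() keeps accepting the old forms but interprets them per perf algo: a plain array or a bare integer applies to the perf algo of the currently configured algorithm, while an object carries a separate thread list per perf algo. A hedged example of the three alternative shapes (only one would appear in a real config; the perf-algo key names are assumed, as above):

"threads": 4

"threads": [
    { "low_power_mode": 1, "affine_to_cpu": 0 }
]

"threads": {
    "cn":      [ { "low_power_mode": 1, "affine_to_cpu": 0 } ],
    "cn-lite": [ { "low_power_mode": 2, "affine_to_cpu": 1 } ]
}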
@@ -80,17 +80,17 @@ public:
     inline int priority() const { return m_priority; }

     // access to m_threads taking into accoun that it is now separated for each perf algo
-    inline const std::vector<IThread *> &threads(const xmrig::Algo algo = INVALID_ALGO) const {
-        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].list;
+    inline const std::vector<IThread *> &threads(const xmrig::PerfAlgo pa = PA_INVALID) const {
+        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].list;
     }
-    inline int threadsCount(const xmrig::Algo algo = INVALID_ALGO) const {
-        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].list.size();
+    inline int threadsCount(const xmrig::PerfAlgo pa = PA_INVALID) const {
+        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].list.size();
     }
-    inline int64_t affinity(const xmrig::Algo algo = INVALID_ALGO) const {
-        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].mask;
+    inline int64_t affinity(const xmrig::PerfAlgo pa = PA_INVALID) const {
+        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].mask;
     }
-    inline ThreadsMode threadsMode(const xmrig::Algo algo = INVALID_ALGO) const {
-        return m_threads[algo == INVALID_ALGO ? m_algorithm.algo() : algo].mode;
+    inline ThreadsMode threadsMode(const xmrig::PerfAlgo pa = PA_INVALID) const {
+        return m_threads[pa == PA_INVALID ? m_algorithm.perf_algo() : pa].mode;
     }

     // access to perf algo results
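The Config accessors now take an optional PerfAlgo; calling them without an argument keeps the old behaviour (the perf algo of the currently configured algorithm). A minimal usage sketch, assuming a Controller with the config() getter used elsewhere in the diff (headers and the dumpThreadCounts helper are illustrative, not part of the codebase):

// Illustrative usage of the new per-perf-algo accessors.
#include <cstdio>
// xmrig::Controller, xmrig::Config, xmrig::PerfAlgo, xmrig::Algorithm
// are assumed to be available via the project's own headers.

void dumpThreadCounts(xmrig::Controller *controller)
{
    xmrig::Config *config = controller->config();

    // default argument: threads for the perf algo of the active algorithm
    const size_t active = config->threads().size();

    // explicit argument: inspect every per-perf-algo slot
    for (int a = 0; a != xmrig::PerfAlgo::PA_MAX; ++a) {
        const xmrig::PerfAlgo pa = static_cast<xmrig::PerfAlgo>(a);
        printf("perf algo %s: %d threads (mask 0x%llx)\n",
               xmrig::Algorithm::perfAlgoName(pa),
               config->threadsCount(pa),
               static_cast<unsigned long long>(config->affinity(pa)));
    }
    (void)active;
}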
@@ -104,7 +104,7 @@ private:
     void setMaxCpuUsage(int max);
     void setPriority(int priority);
     // parse specific perf algo (or generic) threads config
-    void setThread(const rapidjson::Value &threads, xmrig::Algo);
+    void setThread(const rapidjson::Value &threads, xmrig::PerfAlgo);
     void setThreads(const rapidjson::Value &threads);

     AlgoVariant getAlgoVariant() const;
@@ -138,7 +138,7 @@ private:
     int m_maxCpuUsage;
     int m_priority;
     // threads config for each algo
-    Threads m_threads[xmrig::Algo::ALGO_MAX];
+    Threads m_threads[xmrig::PerfAlgo::PA_MAX];
     // perf algo hashrate results
     float m_algo_perf[xmrig::PerfAlgo::PA_MAX];
 };
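Both per-algo arrays in Config are now indexed by the same PerfAlgo value, so the thread layout and the benchmarked hashrate for one algorithm class always live in the same slot. The Threads helper struct itself is not shown in the diff; the sketch below is a presumed shape, reconstructed only from the fields the diff touches (m_threads[pa].list / .cpu / .mode / .count / .mask) and not verbatim from the sources:

// Presumed shape of a Threads slot (reconstruction, not from this commit).
struct Threads
{
    ThreadsMode mode = Automatic;        // Automatic, Simple or Advanced
    int64_t mask     = -1L;              // CPU affinity mask
    size_t count     = 0;                // number of threads to spawn
    std::vector<IThread *> list;         // finalized thread objects
    std::vector<CpuThread::Data> cpu;    // raw per-thread entries parsed from the config
};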
@@ -165,7 +165,6 @@ void Workers::setJob(const xmrig::Job &job, bool donate)
         m_paused = 0;
     }

-
 void Workers::start(xmrig::Controller *controller)
 {
 # ifdef APP_DEBUG
@@ -234,15 +233,16 @@ void Workers::soft_stop() // stop current workers leaving uv stuff intact (used
 // setups workers based on specified algorithm (or its basic perf algo more specifically)
 void Workers::switch_algo(const xmrig::Algorithm& algorithm)
 {
-    if (m_status.algo == algorithm.algo()) return;
+    if (xmrig::Algorithm(m_status.algo, m_status.variant).perf_algo() == algorithm.perf_algo()) return;

     soft_stop();

     m_sequence = 1;
     m_paused = 1;

-    const std::vector<xmrig::IThread *> &threads = m_controller->config()->threads(algorithm.algo());
+    const std::vector<xmrig::IThread *> &threads = m_controller->config()->threads(algorithm.perf_algo());
     m_status.algo = algorithm.algo();
+    m_status.variant = algorithm.variant();
     m_status.threads = threads.size();

     // string with multiway thread info
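switch_algo() previously restarted the worker threads whenever the base Algo differed; it now compares performance classes, so a job that only changes the variant within the same perf algo keeps the current threads, and m_status additionally remembers the variant so the comparison stays accurate. A compressed restatement of the new guard is sketched below; "Status" is a stand-in for Workers' internal status fields, and the header path is assumed:

// Illustrative restatement of the new early-return guard in switch_algo().
#include "common/xmrig.h"   // assumed location of xmrig::Algo / xmrig::Variant

struct Status {
    xmrig::Algo    algo;     // stand-in for m_status.algo
    xmrig::Variant variant;  // stand-in for m_status.variant
};

bool needsSwitch(const Status &status, const xmrig::Algorithm &next)
{
    // Rebuild an Algorithm from the stored algo+variant (as the diff does) and
    // compare performance classes: variant-only changes keep the current threads.
    return xmrig::Algorithm(status.algo, status.variant).perf_algo() != next.perf_algo();
}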