Added draft support for multi-backend benchmark
parent c208f8eb8f
commit f131c2e5fb
3 changed files with 44 additions and 16 deletions
@@ -23,6 +23,8 @@
  */
 
 
+#include <type_traits>
+
 #include "backend/common/Threads.h"
 #include "backend/cpu/CpuThreads.h"
 #include "crypto/cn/CnAlgo.h"
@@ -136,7 +138,7 @@ xmrig::String xmrig::Threads<T>::profileName(const Algorithm &algorithm, bool st
         }
     }
 
-    if (name == "defyx" && has("rx")) return "rx";
+    if (std::is_same<T, CpuThreads>::value && name == "defyx" && has("rx")) return "rx";
 
     if (has(kAsterisk)) {
         return kAsterisk;
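The profileName() change above is the reason <type_traits> is now included: mapping the "defyx" profile name onto "rx" only makes sense for CPU thread configs, so the check is guarded by a compile-time test on the template parameter. A minimal sketch of the same pattern, using stand-in CpuThreads/CudaThreads types rather than the project's real classes:

#include <iostream>
#include <string>
#include <type_traits>

struct CpuThreads  {}; // stand-in for the real backend/cpu/CpuThreads.h type
struct CudaThreads {}; // stand-in for a non-CPU thread-config type

template <class T>
std::string profileNameFor(const std::string &name)
{
    // std::is_same is evaluated at compile time, so the "defyx" -> "rx" aliasing
    // only takes effect in the CpuThreads instantiation of the template.
    if (std::is_same<T, CpuThreads>::value && name == "defyx") {
        return "rx";
    }
    return name;
}

int main()
{
    std::cout << profileNameFor<CpuThreads>("defyx")  << "\n"; // prints "rx"
    std::cout << profileNameFor<CudaThreads>("defyx") << "\n"; // prints "defyx"
    return 0;
}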
@@ -48,6 +48,7 @@ void Benchmark::start() {
     LOG_ALERT(">>>>> STARTING ALGO PERFORMANCE CALIBRATION (with %i seconds round)", m_controller->config()->benchAlgoTime());
     // start benchmarking from first PerfAlgo in the list
     start(xmrig::Benchmark::MIN);
+    m_isNewBenchRun = true;
 }
 
 // end of benchmarks, switch to jobs from the pool (network), fill algo_perf
@@ -134,6 +135,13 @@ float Benchmark::get_algo_perf(Algorithm::Id algo) const {
 
 // start performance measurements for specified perf bench_algo
 void Benchmark::start(const BenchAlgo bench_algo) {
+    // calculate number of active miner backends in m_enabled_backend_count
+    m_enabled_backend_count = 0;
+    for (auto backend : m_controller->miner()->backends()) if (backend->isEnabled(Algorithm(ba2a[bench_algo]))) ++ m_enabled_backend_count;
+    if (m_enabled_backend_count == 0) {
+        run_next_bench_algo(m_bench_algo);
+        return;
+    }
     // prepare test job for benchmark runs ("benchmark" client id is to make sure we can detect benchmark jobs)
     Job& job = *m_bench_job[bench_algo];
     job.setId(Algorithm(ba2a[bench_algo]).shortName()); // need to set different id so that workers will see job change
@@ -143,10 +151,22 @@ void Benchmark::start(const BenchAlgo bench_algo) {
     job.setSeedHash("0000000000000000000000000000000000000000000000000000000000000001");
     m_bench_algo = bench_algo; // current perf bench_algo
     m_hash_count = 0; // number of hashes calculated for current perf bench_algo
-    m_time_start = 0; // init time of measurements start (in ms) during the first onJobResult
+    m_time_start = 0; // init time of the first result (in ms) during the first onJobResult
+    m_bench_start = 0; // init time of measurements start (in ms) during the first onJobResult
+    m_backends_started.clear();
     m_controller->miner()->setJob(job, false); // set job for workers to compute
 }
 
+// run next bench algo or finish benchmark for the last one
+void Benchmark::run_next_bench_algo(const BenchAlgo bench_algo) {
+    const BenchAlgo next_bench_algo = static_cast<BenchAlgo>(bench_algo + 1); // compute next perf bench_algo to benchmark
+    if (next_bench_algo != BenchAlgo::MAX) {
+        start(next_bench_algo);
+    } else {
+        finish();
+    }
+}
+
 void Benchmark::onJobResult(const JobResult& result) {
     if (result.clientId != String("benchmark")) { // switch to network pool jobs
         JobResults::setListener(m_controller->network(), m_controller->config()->cpu().isHwAES());
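The new run_next_bench_algo() helper factors out the "advance or finish" step so it can be reused both when a round completes and when an algorithm has no enabled backend at all. A stripped-down sketch of that progression, with a hypothetical three-entry enum standing in for the real BenchAlgo values declared in Benchmark.h:

#include <cstdio>

// Hypothetical stand-in for the real BenchAlgo enum declared in Benchmark.h.
enum BenchAlgo : int { INVALID = -1, MIN = 0, ALGO_A = MIN, ALGO_B, ALGO_C, MAX };

static void start_bench(BenchAlgo algo); // one benchmark round for a single algo

static void finish_bench()
{
    std::puts("all rounds done, switching back to pool jobs");
}

// Mirrors the commit's run_next_bench_algo(): advance by one algo or finish after the last one.
static void run_next(BenchAlgo current)
{
    const BenchAlgo next = static_cast<BenchAlgo>(current + 1);
    if (next != BenchAlgo::MAX) {
        start_bench(next);
    } else {
        finish_bench();
    }
}

static void start_bench(BenchAlgo algo)
{
    std::printf("benchmarking algo %d\n", static_cast<int>(algo));
    run_next(algo); // in the miner this call happens later, from onJobResult()
}

int main()
{
    start_bench(BenchAlgo::MIN);
    return 0;
}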
@@ -155,19 +175,20 @@ void Benchmark::onJobResult(const JobResult& result) {
     }
     // ignore benchmark results for other perf bench_algo
     if (m_bench_algo == BenchAlgo::INVALID || result.jobId != String(Algorithm(ba2a[m_bench_algo]).shortName())) return;
-    ++ m_hash_count;
     const uint64_t now = get_now();
-    if (!m_time_start) m_time_start = now; // time of measurements start (in ms)
-    else if (now - m_time_start > static_cast<unsigned>(m_controller->config()->benchAlgoTime()*1000)) { // end of benchmark round for m_bench_algo
-        const float hashrate = static_cast<float>(m_hash_count) * result.diff / (now - m_time_start) * 1000.0f;
+    if (!m_time_start) m_time_start = now; // time of the first result (in ms)
+    m_backends_started.insert(result.backend);
+    // waiting for all backends to start
+    if (m_backends_started.size() < m_enabled_backend_count && (now - m_time_start < static_cast<unsigned>(3*60*1000))) return;
+    ++ m_hash_count;
+    if (!m_bench_start) {
+        LOG_ALERT(" ===> Starting benchmark of %s algo", Algorithm(ba2a[m_bench_algo]).shortName());
+        m_bench_start = now; // time of measurements start (in ms)
+    } else if (now - m_bench_start > static_cast<unsigned>(m_controller->config()->benchAlgoTime()*1000)) { // end of benchmark round for m_bench_algo
+        const float hashrate = static_cast<float>(m_hash_count) * result.diff / (now - m_bench_start) * 1000.0f;
         m_bench_algo_perf[m_bench_algo] = hashrate; // store hashrate result
         LOG_ALERT(" ===> %s hasrate: %f", Algorithm(ba2a[m_bench_algo]).shortName(), hashrate);
-        const BenchAlgo next_bench_algo = static_cast<BenchAlgo>(m_bench_algo + 1); // compute next perf bench_algo to benchmark
-        if (next_bench_algo != BenchAlgo::MAX) {
-            start(next_bench_algo);
-        } else {
-            finish();
-        }
+        run_next_bench_algo(m_bench_algo);
     }
 }
 
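Two details in the onJobResult() hunk are easy to miss. Measurement only starts once every enabled backend has submitted at least one result (or after a three-minute timeout), so a slow-starting backend does not dilute the average with an idle warm-up period; and the hashrate is estimated from share difficulty, since each accepted result represents roughly result.diff hashes: hash_count * diff / elapsed_ms * 1000. A self-contained sketch with made-up numbers (none of the names below are the project's API):

#include <cstdint>
#include <cstdio>
#include <set>

int main()
{
    // Backend gating: pretend two backends (ids 0 = CPU, 1 = CUDA) are enabled for this algo.
    const unsigned enabled_backend_count = 2;
    std::set<uint32_t> backends_started;

    backends_started.insert(0); // first CPU result arrives
    std::printf("after CPU result:  ready=%d\n", backends_started.size() >= enabled_backend_count ? 1 : 0);

    backends_started.insert(1); // first CUDA result arrives
    backends_started.insert(1); // duplicates are ignored, so a chatty backend is only counted once
    std::printf("after CUDA result: ready=%d\n", backends_started.size() >= enabled_backend_count ? 1 : 0);

    // Hashrate estimate: each accepted result stands for ~diff hashes, so 120 results at
    // difficulty 10000 over a 60000 ms round give 120 * 10000 / 60 s = 20000 H/s.
    const uint64_t hash_count = 120, diff = 10000, elapsed_ms = 60000;
    const float hashrate = static_cast<float>(hash_count) * diff / elapsed_ms * 1000.0f;
    std::printf("estimated hashrate: %.0f H/s\n", hashrate);
    return 0;
}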
@@ -17,6 +17,7 @@
 
 #pragma once
 
+#include <set>
 #include "net/interfaces/IJobResultListener.h"
 #include "crypto/common/Algorithm.h"
 #include "rapidjson/fwd.h"
@@ -62,16 +63,20 @@ class Benchmark : public IJobResultListener {
     float m_bench_algo_perf[BenchAlgo::MAX];
 
     Controller* m_controller; // to get access to config and network
-    bool m_isNewBenchRun;
+    bool m_isNewBenchRun; // true if the benchmark needs to be executed or was executed
     Benchmark::BenchAlgo m_bench_algo; // current perf algo we benchmark
     uint64_t m_hash_count; // number of hashes calculated for current perf algo
-    uint64_t m_time_start; // time of measurements start for current perf algo (in ms)
+    uint64_t m_time_start; // time of the first result for current perf algo (in ms)
+    uint64_t m_bench_start; // time of measurements start for current perf algo (in ms) after all backends are started
+    unsigned m_enabled_backend_count; // number of active miner backends
+    std::set<uint32_t> m_backends_started; // ids of backends started for the benchmark
 
     uint64_t get_now() const; // get current time in ms
     float get_algo_perf(Algorithm::Id algo) const; // get algo perf based on m_bench_algo_perf
     void start(const Benchmark::BenchAlgo); // start benchmark for specified perf algo
     void finish(); // end of benchmarks, switch to jobs from the pool (network), fill algo_perf
     void onJobResult(const JobResult&) override; // onJobResult is called after each computed benchmark hash
+    void run_next_bench_algo(BenchAlgo); // run next bench algo or finish benchmark for the last one
 
 public:
     Benchmark();