From 4524b875e9397a5856444090ff7f9a930f8a690a Mon Sep 17 00:00:00 2001
From: psychocrypt <psychocryptHPC@gmail.com>
Date: Fri, 1 Feb 2019 22:32:55 +0100
Subject: [PATCH] OpenCL: fix work size message

Fix the message reporting the maximum allowed work size when
cryptonight_gpu is used.
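
The rule can be summarized as follows (a minimal sketch; the helper name
is hypothetical and not code from this repository): the user-facing
maximum work size is the hardware limit divided by the thread multiplier
of the selected kernel family.

    #include <cstddef>

    // Hypothetical helper mirroring the divide-by-16 vs. divide-by-8 rule.
    std::size_t userMaxWorkSize(std::size_t hardwareMax, bool useCryptonightGpu)
    {
        // cryptonight_gpu kernels spawn 16 hardware threads per configured
        // thread, all other kernels spawn 8.
        return hardwareMax / (useCryptonightGpu ? 16u : 8u);
    }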
---
 xmrstak/backend/amd/amd_gpu/gpu.cpp | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/xmrstak/backend/amd/amd_gpu/gpu.cpp b/xmrstak/backend/amd/amd_gpu/gpu.cpp
index 857abc1..f80c37a 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.cpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.cpp
@@ -284,10 +284,21 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
 		return ERR_OCL_API;
 	}
 
-	/* Some kernel spawn 8 times more threads than the user is configuring.
-	 * To give the user the correct maximum work size we divide the hardware specific max by 8.
-	 */
-	MaximumWorkSize /= 8;
+	auto neededAlgorithms = ::jconf::inst()->GetCurrentCoinSelection().GetAllAlgorithms();
+	bool useCryptonight_gpu = std::find(neededAlgorithms.begin(), neededAlgorithms.end(), cryptonight_gpu) != neededAlgorithms.end();
+
+	if(useCryptonight_gpu)
+	{
+		// for cryptonight_gpu the kernels spawn 16x more threads than the user configures
+		MaximumWorkSize /= 16;
+	}
+	else
+	{
+		/* Some kernels spawn 8 times more threads than the user configures.
+		 * To give the user the correct maximum work size, we divide the hardware-specific maximum by 8.
+		 */
+		MaximumWorkSize /= 8;
+	}
 	printer::inst()->print_msg(L1,"Device %lu work size %lu / %lu.", ctx->deviceIdx, ctx->workSize, MaximumWorkSize);
 #if defined(CL_VERSION_2_0) && !defined(CONF_ENFORCE_OpenCL_1_2)
 	const cl_queue_properties CommandQueueProperties[] = { 0, 0, 0 };
@@ -316,8 +327,6 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
 		return ERR_OCL_API;
 	}
 
-	auto neededAlgorithms = ::jconf::inst()->GetCurrentCoinSelection().GetAllAlgorithms();
-
 	size_t scratchPadSize = 0;
 	for(const auto algo : neededAlgorithms)
 	{
-- 
GitLab