Mirror of https://github.com/peterosterlund2/droidfish.git, synced 2024-11-23 11:31:33 +01:00
Update to current Stockfish development version
Corresponds to commit 8b8a510fd6a1a17b39b2d4b166f60ac7be0dab23 in the Stockfish repository, dated Wed Sep 16 17:39:11 2020 +0200.
parent ed5ef03dba
commit 1871f1d54a
Binary file not shown.
@@ -164,5 +164,7 @@ vector<string> setup_bench(const Position& current, istream& is) {
++posCounter;
}

list.emplace_back("setoption name Use NNUE value true");

return list;
}
@@ -1015,12 +1015,19 @@ make_v:

Value Eval::evaluate(const Position& pos) {

// Use classical eval if there is a large imbalance
// If there is a moderate imbalance, use classical eval with probability (1/8),
// as derived from the node counter.
bool useClassical = abs(eg_value(pos.psq_score())) * 16 > NNUEThreshold1 * (16 + pos.rule50_count());
bool classical = !Eval::useNNUE
|| abs(eg_value(pos.psq_score())) * 16 > NNUEThreshold1 * (16 + pos.rule50_count());
|| useClassical
|| (abs(eg_value(pos.psq_score())) > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));
Value v = classical ? Evaluation<NO_TRACE>(pos).value()
: NNUE::evaluate(pos) * 5 / 4 + Tempo;

if (classical && Eval::useNNUE && abs(v) * 16 < NNUEThreshold2 * (16 + pos.rule50_count()))
if ( useClassical
&& Eval::useNNUE
&& abs(v) * 16 < NNUEThreshold2 * (16 + pos.rule50_count()))
v = NNUE::evaluate(pos) * 5 / 4 + Tempo;

// Damp down the evaluation linearly when shuffling
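The selection logic in this hunk reads as: prefer NNUE, but fall back to the classical evaluation when the material imbalance is large (or, for a moderate imbalance, on roughly one node in eight via the node counter), and switch back to NNUE whenever the classical score turns out to be small. A minimal standalone sketch of that flow, assuming placeholder thresholds and stub evaluators (classical_eval, nnue_eval, imbalance and all constants below are illustrative, not the engine's actual values):

#include <cstdint>
#include <cstdlib>
#include <iostream>

// Illustrative stand-ins; in the engine the values come from the position and the network.
int classical_eval(int imbalance) { return imbalance / 2; }
int nnue_eval(int /*imbalance*/)  { return 15; }

// Sketch of the hybrid choice: 'imbalance' plays the role of eg_value(psq_score()),
// 'rule50' the role of pos.rule50_count(), 'nodes' the role of the thread node counter.
int hybrid_eval(int imbalance, int rule50, std::uint64_t nodes)
{
    const int NNUEThreshold1 = 682;   // placeholder magnitudes, not the engine's constants
    const int NNUEThreshold2 = 176;
    const int PawnValueMg    = 128;
    const int Tempo          = 28;

    bool largeImbalance    = std::abs(imbalance) * 16 > NNUEThreshold1 * (16 + rule50);
    bool moderateImbalance = std::abs(imbalance) > PawnValueMg / 4 && !(nodes & 0xB); // ~1 node in 8
    bool classical         = largeImbalance || moderateImbalance;

    int v = classical ? classical_eval(imbalance)
                      : nnue_eval(imbalance) * 5 / 4 + Tempo;

    // If the classical score is small after all, trust NNUE instead.
    if (classical && std::abs(v) * 16 < NNUEThreshold2 * (16 + rule50))
        v = nnue_eval(imbalance) * 5 / 4 + Tempo;

    return v;
}

int main()
{
    std::cout << hybrid_eval(900, 0, 1234567) << '\n';   // large imbalance: classical path
    std::cout << hybrid_eval(10, 0, 1234567) << '\n';    // small imbalance: NNUE path
}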
@@ -38,7 +38,7 @@ namespace Eval {
// The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
// for the build process (profile-build and fishtest) to work. Do not change the
// name of the macro, as it is used in the Makefile.
#define EvalFileDefaultName "nn-82215d0fd0df.nnue"
#define EvalFileDefaultName "nn-03744f8d56d8.nnue"

namespace NNUE {
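The comment above pins down a naming rule: the default network file must be called nn-<first 12 hex digits of its SHA-256>.nnue. A small sketch of a format check for that rule, offered only as an illustration (it validates the shape of the name, it does not hash the file; the file name used is simply the one appearing in this diff):

#include <cctype>
#include <iostream>
#include <string>

// Returns true if 'name' has the shape nn-XXXXXXXXXXXX.nnue, where X is a hex digit.
// Verifying the digits against the file's actual SHA-256 would require hashing its contents.
bool hasDefaultNetNameFormat(const std::string& name)
{
    const std::string prefix = "nn-";
    const std::string suffix = ".nnue";
    if (name.size() != prefix.size() + 12 + suffix.size())
        return false;
    if (name.compare(0, prefix.size(), prefix) != 0)
        return false;
    if (name.compare(name.size() - suffix.size(), suffix.size(), suffix) != 0)
        return false;
    for (std::size_t i = prefix.size(); i < prefix.size() + 12; ++i)
        if (!std::isxdigit(static_cast<unsigned char>(name[i])))
            return false;
    return true;
}

int main()
{
    std::cout << hasDefaultNetNameFormat("nn-03744f8d56d8.nnue") << '\n';  // 1
    std::cout << hasDefaultNetNameFormat("network.nnue") << '\n';          // 0
}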
@@ -65,7 +65,7 @@ namespace {

/// Version number. If Version is left empty, then compile date in the format
/// DD-MM-YY and show in engine_info.
const string Version = "12";
const string Version = "";

/// Our fancy logging facility. The trick here is to replace cin.rdbuf() and
/// cout.rdbuf() with two Tie objects that tie cin and cout to a file stream. We
@@ -115,31 +115,16 @@ namespace Eval::NNUE {
return stream && stream.peek() == std::ios::traits_type::eof();
}

// Proceed with the difference calculation if possible
static void UpdateAccumulatorIfPossible(const Position& pos) {

feature_transformer->UpdateAccumulatorIfPossible(pos);
}

// Calculate the evaluation value
static Value ComputeScore(const Position& pos, bool refresh) {

auto& accumulator = pos.state()->accumulator;
if (!refresh && accumulator.computed_score) {
return accumulator.score;
}
// Evaluation function. Perform differential calculation.
Value evaluate(const Position& pos) {

alignas(kCacheLineSize) TransformedFeatureType
transformed_features[FeatureTransformer::kBufferSize];
feature_transformer->Transform(pos, transformed_features, refresh);
feature_transformer->Transform(pos, transformed_features);
alignas(kCacheLineSize) char buffer[Network::kBufferSize];
const auto output = network->Propagate(transformed_features, buffer);

auto score = static_cast<Value>(output[0] / FV_SCALE);

accumulator.score = score;
accumulator.computed_score = true;
return accumulator.score;
return static_cast<Value>(output[0] / FV_SCALE);
}

// Load eval, from a file stream or a memory stream
@@ -150,19 +135,4 @@ namespace Eval::NNUE {
return ReadParameters(stream);
}

// Evaluation function. Perform differential calculation.
Value evaluate(const Position& pos) {
return ComputeScore(pos, false);
}

// Evaluation function. Perform full calculation.
Value compute_eval(const Position& pos) {
return ComputeScore(pos, true);
}

// Proceed with the difference calculation if possible
void update_eval(const Position& pos) {
UpdateAccumulatorIfPossible(pos);
}

} // namespace Eval::NNUE
@@ -29,9 +29,7 @@ namespace Eval::NNUE {
struct alignas(kCacheLineSize) Accumulator {
std::int16_t
accumulation[2][kRefreshTriggers.size()][kTransformedFeatureDimensions];
Value score;
bool computed_accumulation;
bool computed_score;
};

} // namespace Eval::NNUE
@@ -29,6 +29,56 @@

namespace Eval::NNUE {

// If vector instructions are enabled, we update and refresh the
// accumulator tile by tile such that each tile fits in the CPU's
// vector registers.
#define TILING

#ifdef USE_AVX512
typedef __m512i vec_t;
#define vec_load(a) _mm512_loadA_si512(a)
#define vec_store(a,b) _mm512_storeA_si512(a,b)
#define vec_add_16(a,b) _mm512_add_epi16(a,b)
#define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
static constexpr IndexType kNumRegs = 8; // only 8 are needed

#elif USE_AVX2
typedef __m256i vec_t;
#define vec_load(a) _mm256_loadA_si256(a)
#define vec_store(a,b) _mm256_storeA_si256(a,b)
#define vec_add_16(a,b) _mm256_add_epi16(a,b)
#define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
static constexpr IndexType kNumRegs = 16;

#elif USE_SSE2
typedef __m128i vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_epi16(a,b)
#define vec_sub_16(a,b) _mm_sub_epi16(a,b)
static constexpr IndexType kNumRegs = Is64Bit ? 16 : 8;

#elif USE_MMX
typedef __m64 vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_pi16(a,b)
#define vec_sub_16(a,b) _mm_sub_pi16(a,b)
static constexpr IndexType kNumRegs = 8;

#elif USE_NEON
typedef int16x8_t vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) vaddq_s16(a,b)
#define vec_sub_16(a,b) vsubq_s16(a,b)
static constexpr IndexType kNumRegs = 16;

#else
#undef TILING

#endif

// Input feature converter
class FeatureTransformer {
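The block above only defines a thin macro layer (vec_t, vec_load, vec_store, vec_add_16, vec_sub_16, kNumRegs) so the accumulator code can be written once and compiled against whichever SIMD width is available. A hedged, intrinsics-free sketch of that same pattern, using a toy 8-lane integer "register" in place of a real vector type (the names mirror the macros, but everything here is illustrative):

#include <array>
#include <cstdint>
#include <iostream>

// Toy stand-in for vec_t: 8 int16 lanes, like one SSE2 or NEON register.
struct Vec8 { std::array<std::int16_t, 8> lane{}; };

inline Vec8 vec_load(const Vec8* a)           { return *a; }
inline void vec_store(Vec8* a, const Vec8& b) { *a = b; }
inline Vec8 vec_add_16(const Vec8& a, const Vec8& b)
{
    Vec8 r;
    for (int i = 0; i < 8; ++i) r.lane[i] = std::int16_t(a.lane[i] + b.lane[i]);
    return r;
}
inline Vec8 vec_sub_16(const Vec8& a, const Vec8& b)
{
    Vec8 r;
    for (int i = 0; i < 8; ++i) r.lane[i] = std::int16_t(a.lane[i] - b.lane[i]);
    return r;
}

constexpr int kNumRegs = 2;  // one accumulator tile spans kNumRegs "registers"

int main()
{
    Vec8 accTile[kNumRegs];      // one accumulator tile, zero-initialized
    Vec8 column[kNumRegs];       // one weight column covering the same tile
    for (int k = 0; k < kNumRegs; ++k)
        for (int i = 0; i < 8; ++i) column[k].lane[i] = std::int16_t(k * 8 + i);

    // Same shape as the engine's loops: load, add, store, register by register.
    for (int k = 0; k < kNumRegs; ++k)
        vec_store(&accTile[k], vec_add_16(vec_load(&accTile[k]), column[k]));

    std::cout << accTile[1].lane[3] << '\n';  // prints 11
}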
@@ -36,6 +86,11 @@ namespace Eval::NNUE {
// Number of output dimensions for one side
static constexpr IndexType kHalfDimensions = kTransformedFeatureDimensions;

#ifdef TILING
static constexpr IndexType kTileHeight = kNumRegs * sizeof(vec_t) / 2;
static_assert(kHalfDimensions % kTileHeight == 0, "kTileHeight must divide kHalfDimensions");
#endif

public:
// Output type
using OutputType = TransformedFeatureType;
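The tile height above is a unit conversion: kNumRegs registers, each sizeof(vec_t) bytes wide, holding 2-byte int16 accumulator entries. A worked check of that arithmetic for the register counts and widths defined in the previous hunk (the kHalfDimensions value of 256 is an assumption used here purely for illustration; the static_asserts mirror the one in the header):

#include <cstdint>

// Worked example of kTileHeight = kNumRegs * sizeof(vec_t) / 2 for each branch above.
// kHalfDimensions = 256 is assumed for illustration only.
constexpr unsigned kHalfDimensions = 256;

constexpr unsigned tileHeight(unsigned numRegs, unsigned vecBytes)
{
    return numRegs * vecBytes / 2;   // 2 bytes per std::int16_t accumulator entry
}

static_assert(tileHeight(8, 64)  == 256, "AVX-512: 8 regs x 64 bytes -> 256 entries per tile");
static_assert(tileHeight(16, 32) == 256, "AVX2: 16 regs x 32 bytes -> 256 entries per tile");
static_assert(tileHeight(16, 16) == 128, "SSE2 (64-bit): 16 regs x 16 bytes -> 128 entries per tile");
static_assert(tileHeight(8, 8)   == 32,  "MMX: 8 regs x 8 bytes -> 32 entries per tile");
static_assert(tileHeight(16, 16) == 128, "NEON: 16 regs x 16 bytes -> 128 entries per tile");

static_assert(kHalfDimensions % tileHeight(16, 32) == 0, "kTileHeight must divide kHalfDimensions");

int main() { return 0; }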
@@ -50,11 +105,13 @@ namespace Eval::NNUE {

// Hash value embedded in the evaluation file
static constexpr std::uint32_t GetHashValue() {

return RawFeatures::kHashValue ^ kOutputDimensions;
}

// Read network parameters
bool ReadParameters(std::istream& stream) {

for (std::size_t i = 0; i < kHalfDimensions; ++i)
biases_[i] = read_little_endian<BiasType>(stream);
for (std::size_t i = 0; i < kHalfDimensions * kInputDimensions; ++i)
@@ -64,23 +121,26 @@ namespace Eval::NNUE {

// Proceed with the difference calculation if possible
bool UpdateAccumulatorIfPossible(const Position& pos) const {

const auto now = pos.state();
if (now->accumulator.computed_accumulation) {
if (now->accumulator.computed_accumulation)
return true;
}

const auto prev = now->previous;
if (prev && prev->accumulator.computed_accumulation) {
UpdateAccumulator(pos);
return true;
}

return false;
}

// Convert input features
void Transform(const Position& pos, OutputType* output, bool refresh) const {
if (refresh || !UpdateAccumulatorIfPossible(pos)) {
void Transform(const Position& pos, OutputType* output) const {

if (!UpdateAccumulatorIfPossible(pos))
RefreshAccumulator(pos);
}

const auto& accumulation = pos.state()->accumulator.accumulation;

#if defined(USE_AVX2)
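Transform now makes the reuse-or-refresh decision in one place: if the current state's accumulator is already computed, use it; if the previous state's is, update it incrementally; otherwise rebuild from scratch. A hedged sketch of that decision over a simplified state chain (the State and Accumulator types here are cut down to just the fields needed for the example):

#include <iostream>

// Cut-down stand-ins for StateInfo and its accumulator.
struct Accumulator { bool computed_accumulation = false; };
struct State {
    Accumulator accumulator;
    const State* previous = nullptr;
};

enum class Action { Reuse, IncrementalUpdate, FullRefresh };

// Mirrors the shape of UpdateAccumulatorIfPossible() plus the fallback in Transform().
Action chooseAccumulatorAction(const State& now)
{
    if (now.accumulator.computed_accumulation)
        return Action::Reuse;
    if (now.previous && now.previous->accumulator.computed_accumulation)
        return Action::IncrementalUpdate;   // UpdateAccumulator(pos)
    return Action::FullRefresh;             // RefreshAccumulator(pos)
}

int main()
{
    State root;                         // accumulator already computed
    root.accumulator.computed_accumulation = true;

    State child;                        // after a move: not computed, but the parent is
    child.previous = &root;

    State orphan;                       // e.g. set up from FEN: no usable parent

    std::cout << int(chooseAccumulatorAction(root))   << '\n';  // 0 = Reuse
    std::cout << int(chooseAccumulatorAction(child))  << '\n';  // 1 = IncrementalUpdate
    std::cout << int(chooseAccumulatorAction(orphan)) << '\n';  // 2 = FullRefresh
}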
@@ -177,74 +237,58 @@ namespace Eval::NNUE {
private:
// Calculate cumulative value without using difference calculation
void RefreshAccumulator(const Position& pos) const {

auto& accumulator = pos.state()->accumulator;
IndexType i = 0;
Features::IndexList active_indices[2];
RawFeatures::AppendActiveIndices(pos, kRefreshTriggers[i],
active_indices);
for (Color perspective : { WHITE, BLACK }) {
#ifdef TILING
for (unsigned j = 0; j < kHalfDimensions / kTileHeight; ++j) {
auto biasesTile = reinterpret_cast<const vec_t*>(
&biases_[j * kTileHeight]);
auto accTile = reinterpret_cast<vec_t*>(
&accumulator.accumulation[perspective][i][j * kTileHeight]);
vec_t acc[kNumRegs];

for (unsigned k = 0; k < kNumRegs; ++k)
acc[k] = biasesTile[k];

for (const auto index : active_indices[perspective]) {
const IndexType offset = kHalfDimensions * index + j * kTileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);

for (unsigned k = 0; k < kNumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}

for (unsigned k = 0; k < kNumRegs; k++)
vec_store(&accTile[k], acc[k]);
}
#else
std::memcpy(accumulator.accumulation[perspective][i], biases_,
kHalfDimensions * sizeof(BiasType));

for (const auto index : active_indices[perspective]) {
const IndexType offset = kHalfDimensions * index;
#if defined(USE_AVX512)
auto accumulation = reinterpret_cast<__m512i*>(
&accumulator.accumulation[perspective][i][0]);
auto column = reinterpret_cast<const __m512i*>(&weights_[offset]);
constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
for (IndexType j = 0; j < kNumChunks; ++j)
_mm512_storeA_si512(&accumulation[j], _mm512_add_epi16(_mm512_loadA_si512(&accumulation[j]), column[j]));

#elif defined(USE_AVX2)
auto accumulation = reinterpret_cast<__m256i*>(
&accumulator.accumulation[perspective][i][0]);
auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
for (IndexType j = 0; j < kNumChunks; ++j)
_mm256_storeA_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadA_si256(&accumulation[j]), column[j]));

#elif defined(USE_SSE2)
auto accumulation = reinterpret_cast<__m128i*>(
&accumulator.accumulation[perspective][i][0]);
auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
for (IndexType j = 0; j < kNumChunks; ++j)
accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);

#elif defined(USE_MMX)
auto accumulation = reinterpret_cast<__m64*>(
&accumulator.accumulation[perspective][i][0]);
auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
}

#elif defined(USE_NEON)
auto accumulation = reinterpret_cast<int16x8_t*>(
&accumulator.accumulation[perspective][i][0]);
auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
for (IndexType j = 0; j < kNumChunks; ++j)
accumulation[j] = vaddq_s16(accumulation[j], column[j]);

#else
for (IndexType j = 0; j < kHalfDimensions; ++j)
accumulator.accumulation[perspective][i][j] += weights_[offset + j];
}
#endif
}

}
}
#if defined(USE_MMX)
_mm_empty();
#endif

accumulator.computed_accumulation = true;
accumulator.computed_score = false;
}

// Calculate cumulative value using difference calculation
void UpdateAccumulator(const Position& pos) const {

const auto prev_accumulator = pos.state()->previous->accumulator;
auto& accumulator = pos.state()->accumulator;
IndexType i = 0;
@@ -252,29 +296,55 @@ namespace Eval::NNUE {
bool reset[2];
RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
removed_indices, added_indices, reset);

#ifdef TILING
for (IndexType j = 0; j < kHalfDimensions / kTileHeight; ++j) {
for (Color perspective : { WHITE, BLACK }) {
auto accTile = reinterpret_cast<vec_t*>(
&accumulator.accumulation[perspective][i][j * kTileHeight]);
vec_t acc[kNumRegs];

#if defined(USE_AVX2)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
auto accumulation = reinterpret_cast<__m256i*>(
&accumulator.accumulation[perspective][i][0]);
if (reset[perspective]) {
auto biasesTile = reinterpret_cast<const vec_t*>(
&biases_[j * kTileHeight]);
for (unsigned k = 0; k < kNumRegs; ++k)
acc[k] = biasesTile[k];
} else {
auto prevAccTile = reinterpret_cast<const vec_t*>(
&prev_accumulator.accumulation[perspective][i][j * kTileHeight]);
for (IndexType k = 0; k < kNumRegs; ++k)
acc[k] = vec_load(&prevAccTile[k]);

#elif defined(USE_SSE2)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
auto accumulation = reinterpret_cast<__m128i*>(
&accumulator.accumulation[perspective][i][0]);
// Difference calculation for the deactivated features
for (const auto index : removed_indices[perspective]) {
const IndexType offset = kHalfDimensions * index + j * kTileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);

#elif defined(USE_MMX)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
auto accumulation = reinterpret_cast<__m64*>(
&accumulator.accumulation[perspective][i][0]);
for (IndexType k = 0; k < kNumRegs; ++k)
acc[k] = vec_sub_16(acc[k], column[k]);
}
}
{ // Difference calculation for the activated features
for (const auto index : added_indices[perspective]) {
const IndexType offset = kHalfDimensions * index + j * kTileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);

#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
auto accumulation = reinterpret_cast<int16x8_t*>(
&accumulator.accumulation[perspective][i][0]);
for (IndexType k = 0; k < kNumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}
}

for (IndexType k = 0; k < kNumRegs; ++k)
vec_store(&accTile[k], acc[k]);
}
}
#if defined(USE_MMX)
_mm_empty();
#endif

#else
for (Color perspective : { WHITE, BLACK }) {

if (reset[perspective]) {
std::memcpy(accumulator.accumulation[perspective][i], biases_,
kHalfDimensions * sizeof(BiasType));
@@ -286,83 +356,22 @@ namespace Eval::NNUE {
for (const auto index : removed_indices[perspective]) {
const IndexType offset = kHalfDimensions * index;

#if defined(USE_AVX2)
auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm256_sub_epi16(accumulation[j], column[j]);
}

#elif defined(USE_SSE2)
auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_sub_epi16(accumulation[j], column[j]);
}

#elif defined(USE_MMX)
auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_sub_pi16(accumulation[j], column[j]);
}

#elif defined(USE_NEON)
auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = vsubq_s16(accumulation[j], column[j]);
}

#else
for (IndexType j = 0; j < kHalfDimensions; ++j) {
accumulator.accumulation[perspective][i][j] -=
weights_[offset + j];
}
#endif

for (IndexType j = 0; j < kHalfDimensions; ++j)
accumulator.accumulation[perspective][i][j] -= weights_[offset + j];
}
}
{ // Difference calculation for the activated features
for (const auto index : added_indices[perspective]) {
const IndexType offset = kHalfDimensions * index;

#if defined(USE_AVX2)
auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
}

#elif defined(USE_SSE2)
auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
}

#elif defined(USE_MMX)
auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
}

#elif defined(USE_NEON)
auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = vaddq_s16(accumulation[j], column[j]);
}

#else
for (IndexType j = 0; j < kHalfDimensions; ++j) {
accumulator.accumulation[perspective][i][j] +=
weights_[offset + j];
}
#endif

for (IndexType j = 0; j < kHalfDimensions; ++j)
accumulator.accumulation[perspective][i][j] += weights_[offset + j];
}
}
}
#if defined(USE_MMX)
_mm_empty();
#endif

accumulator.computed_accumulation = true;
accumulator.computed_score = false;
}

using BiasType = std::int16_t;
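The refresh and update paths in the hunks above implement the same arithmetic at different vector widths: a full refresh sets the accumulator to the biases plus the weight columns of all active features, and an incremental update takes the previous accumulator, subtracts the columns of removed features and adds the columns of added ones. A plain scalar sketch of that identity, with made-up dimensions and weights, checking that the incremental update equals a fresh rebuild:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Tiny illustrative dimensions; the real network uses kHalfDimensions entries per perspective.
constexpr int kHalfDims = 4;
constexpr int kFeatures = 6;

std::int16_t biases[kHalfDims]             = { 10, 20, 30, 40 };
std::int16_t weights[kFeatures][kHalfDims] = {
    { 1, 2, 3, 4 }, { 5, 6, 7, 8 }, { 9, 1, 2, 3 },
    { 4, 5, 6, 7 }, { 8, 9, 1, 2 }, { 3, 4, 5, 6 } };

// Full rebuild: biases plus the columns of all active features.
std::vector<std::int16_t> refresh(const std::vector<int>& active)
{
    std::vector<std::int16_t> acc(biases, biases + kHalfDims);
    for (int f : active)
        for (int j = 0; j < kHalfDims; ++j)
            acc[j] += weights[f][j];
    return acc;
}

// Incremental step: previous accumulator minus removed columns plus added columns.
std::vector<std::int16_t> update(std::vector<std::int16_t> acc,
                                 const std::vector<int>& removed,
                                 const std::vector<int>& added)
{
    for (int f : removed)
        for (int j = 0; j < kHalfDims; ++j)
            acc[j] -= weights[f][j];
    for (int f : added)
        for (int j = 0; j < kHalfDims; ++j)
            acc[j] += weights[f][j];
    return acc;
}

int main()
{
    auto before = refresh({ 0, 2, 5 });            // features active before the move
    auto after  = update(before, { 2 }, { 3, 4 }); // the move removes one feature, adds two
    assert(after == refresh({ 0, 5, 3, 4 }));      // identical to rebuilding from scratch
    std::cout << "incremental update matches full refresh\n";
}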
@@ -704,7 +704,6 @@ void Position::do_move(Move m, StateInfo& newSt, bool givesCheck) {

// Used by NNUE
st->accumulator.computed_accumulation = false;
st->accumulator.computed_score = false;
auto& dp = st->dirtyPiece;
dp.dirty_num = 1;
@@ -1000,7 +999,6 @@ void Position::do_null_move(StateInfo& newSt) {
if (Eval::useNNUE)
{
std::memcpy(&newSt, st, sizeof(StateInfo));
st->accumulator.computed_score = false;
}
else
std::memcpy(&newSt, st, offsetof(StateInfo, accumulator));
@@ -520,7 +520,7 @@ void Thread::search() {
totBestMoveChanges += th->bestMoveChanges;
th->bestMoveChanges = 0;
}
double bestMoveInstability = 1 + totBestMoveChanges / Threads.size();
double bestMoveInstability = 1 + 2 * totBestMoveChanges / Threads.size();

double totalTime = rootMoves.size() == 1 ? 0 :
Time.optimum() * fallingEval * reduction * bestMoveInstability;
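The only change in this hunk is the weight on best-move changes: instability now scales as 1 + 2 * totBestMoveChanges / Threads.size() instead of 1 + totBestMoveChanges / Threads.size(), so a search whose best move keeps flipping gets proportionally more time. A small worked example with invented inputs (the optimum time, fallingEval and reduction values below are placeholders, not the engine's):

#include <iostream>

int main()
{
    // Invented sample values, for illustration only.
    double totBestMoveChanges = 3.0;   // summed over all threads
    double threads            = 4.0;   // Threads.size()
    double optimumMs          = 2000;  // Time.optimum()
    double fallingEval        = 1.1;   // placeholder
    double reduction          = 0.9;   // placeholder

    double oldInstability = 1 + totBestMoveChanges / threads;        // 1.75
    double newInstability = 1 + 2 * totBestMoveChanges / threads;    // 2.50

    std::cout << "old totalTime: " << optimumMs * fallingEval * reduction * oldInstability << " ms\n";
    std::cout << "new totalTime: " << optimumMs * fallingEval * reduction * newInstability << " ms\n";
}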
@@ -597,7 +597,7 @@ namespace {
Move ttMove, move, excludedMove, bestMove;
Depth extension, newDepth;
Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
bool ttHit, formerPv, givesCheck, improving, didLMR, priorCapture;
bool formerPv, givesCheck, improving, didLMR, priorCapture;
bool captureOrPromotion, doFullDepthSearch, moveCountPruning,
ttCapture, singularQuietLMR;
Piece movedPiece;
@@ -654,9 +654,7 @@ namespace {
// starts with statScore = 0. Later grandchildren start with the last calculated
// statScore of the previous grandchild. This influences the reduction rules in
// LMR which are based on the statScore of parent position.
if (rootNode)
(ss+4)->statScore = 0;
else
if (!rootNode)
(ss+2)->statScore = 0;

// Step 4. Transposition table lookup. We don't want the score of a partial
@@ -664,12 +662,12 @@ namespace {
// position key in case of an excluded move.
excludedMove = ss->excludedMove;
posKey = excludedMove == MOVE_NONE ? pos.key() : pos.key() ^ make_key(excludedMove);
tte = TT.probe(posKey, ttHit);
ttValue = ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
tte = TT.probe(posKey, ss->ttHit);
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
: ttHit ? tte->move() : MOVE_NONE;
: ss->ttHit ? tte->move() : MOVE_NONE;
if (!excludedMove)
ss->ttPv = PvNode || (ttHit && tte->is_pv());
ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
formerPv = ss->ttPv && !PvNode;

if ( ss->ttPv
@@ -681,11 +679,11 @@ namespace {

// thisThread->ttHitAverage can be used to approximate the running average of ttHit
thisThread->ttHitAverage = (TtHitAverageWindow - 1) * thisThread->ttHitAverage / TtHitAverageWindow
+ TtHitAverageResolution * ttHit;
+ TtHitAverageResolution * ss->ttHit;

// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ttHit
&& ss->ttHit
&& tte->depth() >= depth
&& ttValue != VALUE_NONE // Possible in case of TT access race
&& (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
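The ttHitAverage line above is an exponential moving average over roughly the last TtHitAverageWindow probes, kept in fixed point at TtHitAverageResolution units per hit; the only change in this hunk is that the hit flag is now read from ss->ttHit. A hedged sketch of that running average, with the window and resolution treated as assumed constants for illustration (the engine defines its own values in its search code):

#include <cstdint>
#include <iostream>

// Assumed constants for illustration only.
constexpr std::uint64_t TtHitAverageWindow     = 4096;
constexpr std::uint64_t TtHitAverageResolution = 1024;

// One step of the exponential moving average used for ttHitAverage.
std::uint64_t updateTtHitAverage(std::uint64_t avg, bool ttHit)
{
    return (TtHitAverageWindow - 1) * avg / TtHitAverageWindow
         + TtHitAverageResolution * (ttHit ? 1 : 0);
}

int main()
{
    // Start from a 50% hit rate and feed in 1000 consecutive hits.
    std::uint64_t avg = TtHitAverageResolution * TtHitAverageWindow / 2;
    for (int i = 0; i < 1000; ++i)
        avg = updateTtHitAverage(avg, true);

    // The average drifts toward the all-hit level of Resolution * Window.
    std::cout << avg << " / " << TtHitAverageResolution * TtHitAverageWindow << '\n';
}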
@@ -778,7 +776,7 @@ namespace {
improving = false;
goto moves_loop;
}
else if (ttHit)
else if (ss->ttHit)
{
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
@@ -882,14 +880,14 @@ namespace {
// there and in further interactions with transposition table cutoff depth is set to depth - 3
// because probCut search has depth set to depth - 4 but we also do a move before it
// so effective depth is equal to depth - 3
&& !( ttHit
&& !( ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE
&& ttValue < probCutBeta))
{
// if ttMove is a capture and value from transposition table is good enough produce probCut
// cutoff without digging into actual probCut search
if ( ttHit
if ( ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE
&& ttValue >= probCutBeta
@@ -933,7 +931,7 @@ namespace {
if (value >= probCutBeta)
{
// if transposition table doesn't have equal or more deep info write probCut data into it
if ( !(ttHit
if ( !(ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE))
tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
@@ -1058,7 +1056,6 @@ moves_loop: // When in check, search starts from here
if ( !givesCheck
&& lmrDepth < 6
&& !(PvNode && abs(bestValue) < 2)
&& PieceValue[MG][type_of(movedPiece)] >= PieceValue[MG][type_of(pos.piece_on(to_sq(move)))]
&& !ss->inCheck
&& ss->staticEval + 169 + 244 * lmrDepth
+ PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
@@ -1129,11 +1126,6 @@ moves_loop: // When in check, search starts from here
&& pos.non_pawn_material() <= 2 * RookValueMg)
extension = 1;

// Castling extension
if ( type_of(move) == CASTLING
&& popcount(pos.pieces(us) & ~pos.pieces(PAWN) & (to_sq(move) & KingSide ? KingSide : QueenSide)) <= 2)
extension = 1;

// Late irreversible move extension
if ( move == ttMove
&& pos.rule50_count() > 80
@@ -1168,13 +1160,6 @@ moves_loop: // When in check, search starts from here
{
Depth r = reduction(improving, depth, moveCount);

// Decrease reduction at non-check cut nodes for second move at low depths
if ( cutNode
&& depth <= 10
&& moveCount <= 2
&& !ss->inCheck)
r--;

// Decrease reduction if the ttHit running average is large
if (thisThread->ttHitAverage > 509 * TtHitAverageResolution * TtHitAverageWindow / 1024)
r--;
@@ -1196,7 +1181,7 @@ moves_loop: // When in check, search starts from here

// Decrease reduction if ttMove has been singularly extended (~3 Elo)
if (singularQuietLMR)
r -= 1 + formerPv;
r--;

if (!captureOrPromotion)
{
@@ -1430,7 +1415,7 @@ moves_loop: // When in check, search starts from here
Move ttMove, move, bestMove;
Depth ttDepth;
Value bestValue, value, ttValue, futilityValue, futilityBase, oldAlpha;
bool ttHit, pvHit, givesCheck, captureOrPromotion;
bool pvHit, givesCheck, captureOrPromotion;
int moveCount;

if (PvNode)
@@ -1460,13 +1445,13 @@ moves_loop: // When in check, search starts from here
: DEPTH_QS_NO_CHECKS;
// Transposition table lookup
posKey = pos.key();
tte = TT.probe(posKey, ttHit);
ttValue = ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = ttHit ? tte->move() : MOVE_NONE;
pvHit = ttHit && tte->is_pv();
tte = TT.probe(posKey, ss->ttHit);
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = ss->ttHit ? tte->move() : MOVE_NONE;
pvHit = ss->ttHit && tte->is_pv();

if ( !PvNode
&& ttHit
&& ss->ttHit
&& tte->depth() >= ttDepth
&& ttValue != VALUE_NONE // Only in case of TT access race
&& (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
@@ -1481,7 +1466,7 @@ moves_loop: // When in check, search starts from here
}
else
{
if (ttHit)
if (ss->ttHit)
{
// Never assume anything about values stored in TT
if ((ss->staticEval = bestValue = tte->eval()) == VALUE_NONE)
@@ -1500,7 +1485,7 @@ moves_loop: // When in check, search starts from here
// Stand pat. Return immediately if static value is at least beta
if (bestValue >= beta)
{
if (!ttHit)
if (!ss->ttHit)
tte->save(posKey, value_to_tt(bestValue, ss->ply), false, BOUND_LOWER,
DEPTH_NONE, MOVE_NONE, ss->staticEval);
@@ -1564,7 +1549,9 @@ moves_loop: // When in check, search starts from here
}

// Do not search moves with negative SEE values
if (!ss->inCheck && !pos.see_ge(move))
if ( !ss->inCheck
&& !(givesCheck && pos.is_discovery_check_on_king(~pos.side_to_move(), move))
&& !pos.see_ge(move))
continue;

// Speculative prefetch as early as possible
@@ -1716,8 +1703,8 @@ moves_loop: // When in check, search starts from here
else
captureHistory[moved_piece][to_sq(bestMove)][captured] << bonus1;

// Extra penalty for a quiet TT or main killer move in previous ply when it gets refuted
if ( ((ss-1)->moveCount == 1 || ((ss-1)->currentMove == (ss-1)->killers[0]))
// Extra penalty for a quiet early move that was not a TT move or main killer move in previous ply when it gets refuted
if ( ((ss-1)->moveCount == 1 + (ss-1)->ttHit || ((ss-1)->currentMove == (ss-1)->killers[0]))
&& !pos.captured_piece())
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, -bonus1);
@@ -49,6 +49,7 @@ struct Stack {
int moveCount;
bool inCheck;
bool ttPv;
bool ttHit;
};
@@ -170,7 +170,7 @@ namespace {

if (token == "go" || token == "eval")
{
cerr << "\nPosition: " << cnt++ << '/' << num << endl;
cerr << "\nPosition: " << cnt++ << '/' << num << " (" << pos.fen() << ")" << endl;
if (token == "go")
{
go(pos, is, states);
@@ -36,7 +36,7 @@ import org.petero.droidfish.EngineOptions;

/** Stockfish engine running as process, started from assets resource. */
public class InternalStockFish extends ExternalEngine {
private static final String defaultNet = "nn-82215d0fd0df.nnue";
private static final String defaultNet = "nn-03744f8d56d8.nnue";
private static final String netOption = "evalfile";
private File defaultNetFile; // To get the full path of the copied default network file