Mirror of https://github.com/peterosterlund2/droidfish.git (synced 2024-11-23 11:31:33 +01:00)

Update Stockfish to development version from 2020-09-28

parent d4e899b6be
commit 5394ccb165

Binary file not shown.
@@ -60,7 +60,7 @@ namespace Eval {
bool useNNUE;
string eval_file_loaded = "None";

- /// init_NNUE() tries to load a nnue network at startup time, or when the engine
+ /// NNUE::init() tries to load a nnue network at startup time, or when the engine
/// receives a UCI command "setoption name EvalFile value nn-[a-z0-9]{12}.nnue"
/// The name of the nnue network is always retrieved from the EvalFile option.
/// We search the given network in three locations: internally (the default
@@ -68,7 +68,7 @@ namespace Eval {
/// in the engine directory. Distro packagers may define the DEFAULT_NNUE_DIRECTORY
/// variable to have the engine search in a special directory in their distro.

- void init_NNUE() {
+ void NNUE::init() {

useNNUE = Options["Use NNUE"];
if (!useNNUE)
@@ -111,8 +111,8 @@ namespace Eval {
}
}

- /// verify_NNUE() verifies that the last net used was loaded successfully
- void verify_NNUE() {
+ /// NNUE::verify() verifies that the last net used was loaded successfully
+ void NNUE::verify() {

string eval_file = string(Options["EvalFile"]);

@@ -199,7 +199,7 @@ namespace {
// SafeCheck[PieceType][single/multiple] contains safe check bonus by piece type,
// higher if multiple safe checks are possible for that piece type.
constexpr int SafeCheck[][2] = {
- {}, {}, {792, 1283}, {645, 967}, {1084, 1897}, {772, 1119}
+ {}, {}, {803, 1292}, {639, 974}, {1087, 1878}, {759, 1132}
};

#define S(mg, eg) make_score(mg, eg)
@@ -207,19 +207,19 @@ namespace {
// MobilityBonus[PieceType-2][attacked] contains bonuses for middle and end game,
// indexed by piece type and number of attacked squares in the mobility area.
constexpr Score MobilityBonus[][32] = {
- { S(-62,-81), S(-53,-56), S(-12,-31), S( -4,-16), S( 3, 5), S( 13, 11), // Knight
- S( 22, 17), S( 28, 20), S( 33, 25) },
- { S(-48,-59), S(-20,-23), S( 16, -3), S( 26, 13), S( 38, 24), S( 51, 42), // Bishop
- S( 55, 54), S( 63, 57), S( 63, 65), S( 68, 73), S( 81, 78), S( 81, 86),
- S( 91, 88), S( 98, 97) },
- { S(-60,-78), S(-20,-17), S( 2, 23), S( 3, 39), S( 3, 70), S( 11, 99), // Rook
- S( 22,103), S( 31,121), S( 40,134), S( 40,139), S( 41,158), S( 48,164),
- S( 57,168), S( 57,169), S( 62,172) },
- { S(-30,-48), S(-12,-30), S( -8, -7), S( -9, 19), S( 20, 40), S( 23, 55), // Queen
- S( 23, 59), S( 35, 75), S( 38, 78), S( 53, 96), S( 64, 96), S( 65,100),
- S( 65,121), S( 66,127), S( 67,131), S( 67,133), S( 72,136), S( 72,141),
- S( 77,147), S( 79,150), S( 93,151), S(108,168), S(108,168), S(108,171),
- S(110,182), S(114,182), S(114,192), S(116,219) }
+ { S(-62,-79), S(-53,-57), S(-12,-31), S( -3,-17), S( 3, 7), S( 12, 13), // Knight
+ S( 21, 16), S( 28, 21), S( 37, 26) },
+ { S(-47,-59), S(-20,-25), S( 14, -8), S( 29, 12), S( 39, 21), S( 53, 40), // Bishop
+ S( 53, 56), S( 60, 58), S( 62, 65), S( 69, 72), S( 78, 78), S( 83, 87),
+ S( 91, 88), S( 96, 98) },
+ { S(-61,-82), S(-20,-17), S( 2, 23) ,S( 3, 40), S( 4, 72), S( 11,100), // Rook
+ S( 22,104), S( 31,120), S( 39,134), S(40 ,138), S( 41,158), S( 47,163),
+ S( 59,168), S( 60,169), S( 64,173) },
+ { S(-29,-49), S(-16,-29), S( -8, -8), S( -8, 17), S( 18, 39), S( 25, 54), // Queen
+ S( 23, 59), S( 37, 73), S( 41, 76), S( 54, 95), S( 65, 95) ,S( 68,101),
+ S( 69,124), S( 70,128), S( 70,132), S( 70,133) ,S( 71,136), S( 72,140),
+ S( 74,147), S( 76,149), S( 90,153), S(104,169), S(105,171), S(106,171),
+ S(112,178), S(114,185), S(114,187), S(119,221) }
};

// KingProtector[knight/bishop] contains penalty for each distance unit to own king
@@ -1015,20 +1015,36 @@ make_v:

Value Eval::evaluate(const Position& pos) {

- // Use classical eval if there is a large imbalance
- // If there is a moderate imbalance, use classical eval with probability (1/8),
- // as derived from the node counter.
- bool useClassical = abs(eg_value(pos.psq_score())) * 16 > NNUEThreshold1 * (16 + pos.rule50_count());
- bool classical = !Eval::useNNUE
- || useClassical
- || (abs(eg_value(pos.psq_score())) > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));
- Value v = classical ? Evaluation<NO_TRACE>(pos).value()
- : NNUE::evaluate(pos) * 5 / 4 + Tempo;
+ Value v;

- if ( useClassical
- && Eval::useNNUE
- && abs(v) * 16 < NNUEThreshold2 * (16 + pos.rule50_count()))
- v = NNUE::evaluate(pos) * 5 / 4 + Tempo;
+ if (!Eval::useNNUE)
+ v = Evaluation<NO_TRACE>(pos).value();
+ else
+ {
+ // Scale and shift NNUE for compatibility with search and classical evaluation
+ auto adjusted_NNUE = [&](){
+ int mat = pos.non_pawn_material() + PieceValue[MG][PAWN] * pos.count<PAWN>();
+ return NNUE::evaluate(pos) * (720 + mat / 32) / 1024 + Tempo;
+ };

+ // If there is PSQ imbalance use classical eval, with small probability if it is small
+ Value psq = Value(abs(eg_value(pos.psq_score())));
+ int r50 = 16 + pos.rule50_count();
+ bool largePsq = psq * 16 > (NNUEThreshold1 + pos.non_pawn_material() / 64) * r50;
+ bool classical = largePsq || (psq > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));

+ v = classical ? Evaluation<NO_TRACE>(pos).value() : adjusted_NNUE();

+ // If the classical eval is small and imbalance large, use NNUE nevertheless.
+ // For the case of opposite colored bishops, switch to NNUE eval with
+ // small probability if the classical eval is less than the threshold.
+ if ( largePsq
+ && (abs(v) * 16 < NNUEThreshold2 * r50
+ || ( pos.opposite_bishops()
+ && abs(v) * 16 < (NNUEThreshold1 + pos.non_pawn_material() / 64) * r50
+ && !(pos.this_thread()->nodes & 0xB))))
+ v = adjusted_NNUE();
+ }

// Damp down the evaluation linearly when shuffling
v = v * (100 - pos.rule50_count()) / 100;
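Note on the hunk above: the raw NNUE output is no longer multiplied by a fixed 5/4; it is scaled by (720 + mat / 32) / 1024, so the same network output counts for slightly more when more material is on the board. A standalone sketch of that arithmetic (adjusted_nnue and the values used here are illustrative, not engine code or engine units):

#include <cstdio>

int adjusted_nnue(int nnue, int mat, int tempo) {
    // Same shape as the formula in the hunk: more material -> larger scale factor.
    return nnue * (720 + mat / 32) / 1024 + tempo;
}

int main() {
    const int tempo = 28;                        // illustrative tempo bonus
    for (int mat : {2000, 5000, 8000})           // rough total material, internal units
        std::printf("mat=%d -> eval=%d\n", mat, adjusted_nnue(200, mat, tempo));
}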
@@ -32,20 +32,18 @@ namespace Eval {

extern bool useNNUE;
extern std::string eval_file_loaded;
- void init_NNUE();
- void verify_NNUE();

// The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
// for the build process (profile-build and fishtest) to work. Do not change the
// name of the macro, as it is used in the Makefile.
- #define EvalFileDefaultName "nn-03744f8d56d8.nnue"
+ #define EvalFileDefaultName "nn-baeb9ef2d183.nnue"

namespace NNUE {

Value evaluate(const Position& pos);
Value compute_eval(const Position& pos);
void update_eval(const Position& pos);
- bool load_eval(std::string streamName, std::istream& stream);
+ bool load_eval(std::string name, std::istream& stream);
+ void init();
+ void verify();

} // namespace NNUE

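The comment above requires the default net name to embed the first 12 hex digits of its SHA-256. A quick standalone check of that naming convention (illustrative only; this helper is not part of the engine or the build):

#include <cstdio>
#include <regex>
#include <string>

bool looks_like_default_net(const std::string& name) {
    // Matches the documented pattern nn-[a-z0-9]{12}.nnue
    static const std::regex re("nn-[a-z0-9]{12}\\.nnue");
    return std::regex_match(name, re);
}

int main() {
    std::printf("%d\n", looks_like_default_net("nn-baeb9ef2d183.nnue")); // 1
    std::printf("%d\n", looks_like_default_net("mynet.nnue"));           // 0
}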
@@ -45,7 +45,7 @@ int main(int argc, char* argv[]) {
Endgames::init();
Threads.set(size_t(Options["Threads"]));
Search::clear(); // After threads are up
- Eval::init_NNUE();
+ Eval::NNUE::init();

UCI::loop(argc, argv);

@@ -357,27 +357,11 @@ void std_aligned_free(void* ptr) {
#endif
}

- /// aligned_ttmem_alloc() will return suitably aligned memory, if possible using large pages.
- /// The returned pointer is the aligned one, while the mem argument is the one that needs
- /// to be passed to free. With c++17 some of this functionality could be simplified.
+ /// aligned_large_pages_alloc() will return suitably aligned memory, if possible using large pages.

- #if defined(__linux__) && !defined(__ANDROID__)
+ #if defined(_WIN32)

- void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

- constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page sizes
- size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // multiple of alignment
- if (posix_memalign(&mem, alignment, size))
- mem = nullptr;
- #if defined(MADV_HUGEPAGE)
- madvise(mem, allocSize, MADV_HUGEPAGE);
- #endif
- return mem;
- }

- #elif defined(_WIN64)

- static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
+ static void* aligned_large_pages_alloc_win(size_t allocSize) {

HANDLE hProcessToken { };
LUID luid { };
@@ -422,23 +406,10 @@ static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
return mem;
}

- void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

- static bool firstCall = true;
+ void* aligned_large_pages_alloc(size_t allocSize) {

// Try to allocate large pages
- mem = aligned_ttmem_alloc_large_pages(allocSize);

- // Suppress info strings on the first call. The first call occurs before 'uci'
- // is received and in that case this output confuses some GUIs.
- if (!firstCall)
- {
- if (mem)
- sync_cout << "info string Hash table allocation: Windows large pages used." << sync_endl;
- else
- sync_cout << "info string Hash table allocation: Windows large pages not used." << sync_endl;
- }
- firstCall = false;
+ void* mem = aligned_large_pages_alloc_win(allocSize);

// Fall back to regular, page aligned, allocation if necessary
if (!mem)
@@ -449,23 +420,31 @@ void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

#else

- void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+ void* aligned_large_pages_alloc(size_t allocSize) {

- constexpr size_t alignment = 64; // assumed cache line size
- size_t size = allocSize + alignment - 1; // allocate some extra space
- mem = malloc(size);
- void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
- return ret;
+ #if defined(__linux__)
+ constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page size
+ #else
+ constexpr size_t alignment = 4096; // assumed small page size
+ #endif

+ // round up to multiples of alignment
+ size_t size = ((allocSize + alignment - 1) / alignment) * alignment;
+ void *mem = std_aligned_alloc(alignment, size);
+ #if defined(MADV_HUGEPAGE)
+ madvise(mem, size, MADV_HUGEPAGE);
+ #endif
+ return mem;
}

#endif


- /// aligned_ttmem_free() will free the previously allocated ttmem
+ /// aligned_large_pages_free() will free the previously allocated ttmem

- #if defined(_WIN64)
+ #if defined(_WIN32)

- void aligned_ttmem_free(void* mem) {
+ void aligned_large_pages_free(void* mem) {

if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
{
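The generic (non-Windows) path above replaces the old malloc-and-round-the-pointer trick with a properly aligned allocation plus a per-platform alignment and an optional huge-page hint. A condensed standalone sketch of the same rounding and hint, using C++17 std::aligned_alloc and a hypothetical name (pages_alloc is not an engine function):

#include <cstdio>
#include <cstdlib>
#if defined(__linux__)
#include <sys/mman.h>
#endif

void* pages_alloc(std::size_t bytes) {
#if defined(__linux__)
    const std::size_t alignment = 2 * 1024 * 1024;   // assume 2MB huge pages
#else
    const std::size_t alignment = 4096;              // assume small pages
#endif
    // std::aligned_alloc needs the size rounded up to a multiple of the alignment.
    std::size_t size = (bytes + alignment - 1) / alignment * alignment;
    void* mem = std::aligned_alloc(alignment, size);
#if defined(MADV_HUGEPAGE)
    if (mem)
        madvise(mem, size, MADV_HUGEPAGE);           // only a hint; failure is harmless
#endif
    return mem;
}

int main() {
    void* p = pages_alloc(10 * 1000 * 1000);
    std::printf("%p\n", p);
    std::free(p);
}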
@@ -478,8 +457,8 @@ void aligned_ttmem_free(void* mem) {

#else

- void aligned_ttmem_free(void *mem) {
- free(mem);
+ void aligned_large_pages_free(void *mem) {
+ std_aligned_free(mem);
}

#endif
@@ -33,8 +33,8 @@ void prefetch(void* addr);
void start_logger(const std::string& fname);
void* std_aligned_alloc(size_t alignment, size_t size);
void std_aligned_free(void* ptr);
- void* aligned_ttmem_alloc(size_t size, void*& mem);
- void aligned_ttmem_free(void* mem); // nop if mem == nullptr
+ void* aligned_large_pages_alloc(size_t size); // memory aligned by page size, min alignment: 4096 bytes
+ void aligned_large_pages_free(void* mem); // nop if mem == nullptr

void dbg_hit_on(bool b);
void dbg_hit_on(bool c, bool b);
@@ -30,7 +30,7 @@

namespace Eval::NNUE {

- uint32_t kpp_board_index[PIECE_NB][COLOR_NB] = {
+ const uint32_t kpp_board_index[PIECE_NB][COLOR_NB] = {
// convention: W - us, B - them
// viewed from other side, W and B are reversed
{ PS_NONE, PS_NONE },
@@ -52,7 +52,7 @@ namespace Eval::NNUE {
};

// Input feature converter
- AlignedPtr<FeatureTransformer> feature_transformer;
+ LargePagePtr<FeatureTransformer> feature_transformer;

// Evaluation function
AlignedPtr<Network> network;
@@ -70,14 +70,22 @@ namespace Eval::NNUE {
std::memset(pointer.get(), 0, sizeof(T));
}

+ template <typename T>
+ void Initialize(LargePagePtr<T>& pointer) {

+ static_assert(alignof(T) <= 4096, "aligned_large_pages_alloc() may fail for such a big alignment requirement of T");
+ pointer.reset(reinterpret_cast<T*>(aligned_large_pages_alloc(sizeof(T))));
+ std::memset(pointer.get(), 0, sizeof(T));
+ }

// Read evaluation function parameters
template <typename T>
- bool ReadParameters(std::istream& stream, const AlignedPtr<T>& pointer) {
+ bool ReadParameters(std::istream& stream, T& reference) {

std::uint32_t header;
header = read_little_endian<std::uint32_t>(stream);
if (!stream || header != T::GetHashValue()) return false;
- return pointer->ReadParameters(stream);
+ return reference.ReadParameters(stream);
}

} // namespace Detail
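ReadParameters() above guards each parameter block with a 32-bit little-endian hash read from the stream before the payload. A minimal standalone illustration of that header check (the hash value and buffer here are made up, not the real network format):

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::uint32_t read_le32(std::istream& s) {
    unsigned char b[4] = {};
    s.read(reinterpret_cast<char*>(b), 4);
    return b[0] | (b[1] << 8) | (b[2] << 16) | (std::uint32_t(b[3]) << 24);
}

int main() {
    // A fake parameter section: 4-byte little-endian hash, then the payload would follow.
    std::string blob = {'\x78', '\x56', '\x34', '\x12'};
    std::istringstream in(blob);
    const std::uint32_t expected = 0x12345678;     // stand-in for T::GetHashValue()
    std::uint32_t header = read_le32(in);
    std::cout << (in && header == expected ? "hash ok" : "mismatch") << "\n";
}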
@@ -110,8 +118,8 @@ namespace Eval::NNUE {
std::string architecture;
if (!ReadHeader(stream, &hash_value, &architecture)) return false;
if (hash_value != kHashValue) return false;
- if (!Detail::ReadParameters(stream, feature_transformer)) return false;
- if (!Detail::ReadParameters(stream, network)) return false;
+ if (!Detail::ReadParameters(stream, *feature_transformer)) return false;
+ if (!Detail::ReadParameters(stream, *network)) return false;
return stream && stream.peek() == std::ios::traits_type::eof();
}

@@ -128,10 +136,10 @@ namespace Eval::NNUE {
}

// Load eval, from a file stream or a memory stream
- bool load_eval(std::string streamName, std::istream& stream) {
+ bool load_eval(std::string name, std::istream& stream) {

Initialize();
- fileName = streamName;
+ fileName = name;
return ReadParameters(stream);
}

@@ -40,9 +40,20 @@ namespace Eval::NNUE {
}
};

+ template <typename T>
+ struct LargePageDeleter {
+ void operator()(T* ptr) const {
+ ptr->~T();
+ aligned_large_pages_free(ptr);
+ }
+ };

template <typename T>
using AlignedPtr = std::unique_ptr<T, AlignedDeleter<T>>;

+ template <typename T>
+ using LargePagePtr = std::unique_ptr<T, LargePageDeleter<T>>;

} // namespace Eval::NNUE

#endif // #ifndef NNUE_EVALUATE_NNUE_H_INCLUDED
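LargePagePtr ties the lifetime of the large-page allocation to a unique_ptr through a custom deleter that first destroys the object and then releases the raw memory. A self-contained sketch of the same pattern, with plain std::aligned_alloc standing in for aligned_large_pages_alloc() and hypothetical names (PageDeleter, PagePtr, Big):

#include <cstdlib>
#include <memory>
#include <new>

template <typename T>
struct PageDeleter {
    void operator()(T* ptr) const {
        ptr->~T();        // object was constructed with placement new
        std::free(ptr);   // then release the raw aligned block
    }
};

template <typename T>
using PagePtr = std::unique_ptr<T, PageDeleter<T>>;

struct Big { int data[1024]; };

int main() {
    const std::size_t alignment = 4096;
    std::size_t size = (sizeof(Big) + alignment - 1) / alignment * alignment;
    void* raw = std::aligned_alloc(alignment, size);
    if (!raw) return 1;
    PagePtr<Big> p(new (raw) Big{});   // construct in place, owned by PagePtr
    p->data[0] = 42;
    return p->data[0] == 42 ? 0 : 2;
}                                      // ~Big() then free() run automatically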
@@ -61,11 +61,8 @@ namespace Eval::NNUE::Features {
const PositionType& pos, TriggerEvent trigger,
IndexListType removed[2], IndexListType added[2], bool reset[2]) {

- const auto& dp = pos.state()->dirtyPiece;
- if (dp.dirty_num == 0) return;

+ auto collect_for_one = [&](const DirtyPiece& dp) {
for (Color perspective : { WHITE, BLACK }) {
- reset[perspective] = false;
switch (trigger) {
case TriggerEvent::kFriendKingMoved:
reset[perspective] = dp.piece[0] == make_piece(perspective, KING);
@@ -79,10 +76,56 @@ namespace Eval::NNUE::Features {
pos, trigger, perspective, &added[perspective]);
} else {
Derived::CollectChangedIndices(
- pos, trigger, perspective,
+ pos, dp, trigger, perspective,
&removed[perspective], &added[perspective]);
}
}
+ };

+ auto collect_for_two = [&](const DirtyPiece& dp1, const DirtyPiece& dp2) {
+ for (Color perspective : { WHITE, BLACK }) {
+ switch (trigger) {
+ case TriggerEvent::kFriendKingMoved:
+ reset[perspective] = dp1.piece[0] == make_piece(perspective, KING)
+ || dp2.piece[0] == make_piece(perspective, KING);
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ if (reset[perspective]) {
+ Derived::CollectActiveIndices(
+ pos, trigger, perspective, &added[perspective]);
+ } else {
+ Derived::CollectChangedIndices(
+ pos, dp1, trigger, perspective,
+ &removed[perspective], &added[perspective]);
+ Derived::CollectChangedIndices(
+ pos, dp2, trigger, perspective,
+ &removed[perspective], &added[perspective]);
+ }
+ }
+ };

+ if (pos.state()->previous->accumulator.computed_accumulation) {
+ const auto& prev_dp = pos.state()->dirtyPiece;
+ if (prev_dp.dirty_num == 0) return;
+ collect_for_one(prev_dp);
+ } else {
+ const auto& prev_dp = pos.state()->previous->dirtyPiece;
+ if (prev_dp.dirty_num == 0) {
+ const auto& prev2_dp = pos.state()->dirtyPiece;
+ if (prev2_dp.dirty_num == 0) return;
+ collect_for_one(prev2_dp);
+ } else {
+ const auto& prev2_dp = pos.state()->dirtyPiece;
+ if (prev2_dp.dirty_num == 0) {
+ collect_for_one(prev_dp);
+ } else {
+ collect_for_two(prev_dp, prev2_dp);
+ }
+ }
+ }
}
};
@@ -115,11 +158,11 @@ namespace Eval::NNUE::Features {

// Get a list of indices for recently changed features
static void CollectChangedIndices(
- const Position& pos, const TriggerEvent trigger, const Color perspective,
+ const Position& pos, const DirtyPiece& dp, const TriggerEvent trigger, const Color perspective,
IndexList* const removed, IndexList* const added) {

if (FeatureType::kRefreshTrigger == trigger) {
- FeatureType::AppendChangedIndices(pos, perspective, removed, added);
+ FeatureType::AppendChangedIndices(pos, dp, perspective, removed, added);
}
}
@@ -52,11 +52,10 @@ namespace Eval::NNUE::Features {
// Get a list of indices for recently changed features
template <Side AssociatedKing>
void HalfKP<AssociatedKing>::AppendChangedIndices(
- const Position& pos, Color perspective,
+ const Position& pos, const DirtyPiece& dp, Color perspective,
IndexList* removed, IndexList* added) {

Square ksq = orient(perspective, pos.square<KING>(perspective));
- const auto& dp = pos.state()->dirtyPiece;
for (int i = 0; i < dp.dirty_num; ++i) {
Piece pc = dp.piece[i];
if (type_of(pc) == KING) continue;
@@ -50,7 +50,7 @@ namespace Eval::NNUE::Features {
IndexList* active);

// Get a list of indices for recently changed features
- static void AppendChangedIndices(const Position& pos, Color perspective,
+ static void AppendChangedIndices(const Position& pos, const DirtyPiece& dp, Color perspective,
IndexList* removed, IndexList* added);

private:
@@ -113,7 +113,7 @@ namespace Eval::NNUE {
PS_END2 = 12 * SQUARE_NB + 1
};

- extern uint32_t kpp_board_index[PIECE_NB][COLOR_NB];
+ extern const uint32_t kpp_board_index[PIECE_NB][COLOR_NB];

// Type of input feature after conversion
using TransformedFeatureType = std::uint8_t;
@@ -127,9 +127,14 @@ namespace Eval::NNUE {
return true;

const auto prev = now->previous;
- if (prev && prev->accumulator.computed_accumulation) {
+ if (prev) {
+ if (prev->accumulator.computed_accumulation) {
UpdateAccumulator(pos);
return true;
+ } else if (prev->previous && prev->previous->accumulator.computed_accumulation) {
+ UpdateAccumulator(pos);
+ return true;
+ }
}

return false;
@@ -289,11 +294,21 @@ namespace Eval::NNUE {
// Calculate cumulative value using difference calculation
void UpdateAccumulator(const Position& pos) const {

- const auto prev_accumulator = pos.state()->previous->accumulator;
+ Accumulator* prev_accumulator;
+ assert(pos.state()->previous);
+ if (pos.state()->previous->accumulator.computed_accumulation) {
+ prev_accumulator = &pos.state()->previous->accumulator;
+ }
+ else {
+ assert(pos.state()->previous->previous);
+ assert(pos.state()->previous->previous->accumulator.computed_accumulation);
+ prev_accumulator = &pos.state()->previous->previous->accumulator;
+ }

auto& accumulator = pos.state()->accumulator;
IndexType i = 0;
Features::IndexList removed_indices[2], added_indices[2];
- bool reset[2];
+ bool reset[2] = { false, false };
RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
removed_indices, added_indices, reset);
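UpdateAccumulator() can now start from the accumulator one or two plies back instead of requiring the direct parent to be computed. A standalone sketch of that selection logic (the types and names here are illustrative, not the engine's):

#include <cassert>
#include <cstdio>

struct Accumulator { bool computed; int value; };
struct State { State* previous; Accumulator acc; };

const Accumulator* pick_previous(const State* st) {
    assert(st->previous);
    if (st->previous->acc.computed)
        return &st->previous->acc;                 // usual case: parent is ready
    assert(st->previous->previous && st->previous->previous->acc.computed);
    return &st->previous->previous->acc;           // otherwise reach two plies back
}

int main() {
    State grand  {nullptr, {true, 1}};
    State parent {&grand,  {false, 0}};
    State cur    {&parent, {false, 0}};
    std::printf("base value = %d\n", pick_previous(&cur)->value);   // prints 1
}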
@@ -311,7 +326,7 @@ namespace Eval::NNUE {
acc[k] = biasesTile[k];
} else {
auto prevAccTile = reinterpret_cast<const vec_t*>(
- &prev_accumulator.accumulation[perspective][i][j * kTileHeight]);
+ &prev_accumulator->accumulation[perspective][i][j * kTileHeight]);
for (IndexType k = 0; k < kNumRegs; ++k)
acc[k] = vec_load(&prevAccTile[k]);
@@ -350,7 +365,7 @@ namespace Eval::NNUE {
kHalfDimensions * sizeof(BiasType));
} else {
std::memcpy(accumulator.accumulation[perspective][i],
- prev_accumulator.accumulation[perspective][i],
+ prev_accumulator->accumulation[perspective][i],
kHalfDimensions * sizeof(BiasType));
// Difference calculation for the deactivated features
for (const auto index : removed_indices[perspective]) {
@@ -192,7 +192,7 @@ namespace {
void Search::init() {

for (int i = 1; i < MAX_MOVES; ++i)
- Reductions[i] = int((22.0 + std::log(Threads.size())) * std::log(i));
+ Reductions[i] = int((22.0 + 2 * std::log(Threads.size())) * std::log(i + 0.25 * std::log(i)));
}

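For a single search thread (log(1) = 0) the change to the reduction table above amounts to replacing log(i) with log(i + 0.25 * log(i)). A quick numeric comparison, just to see the magnitude of the shift:

#include <cmath>
#include <cstdio>

int main() {
    for (int i : {2, 8, 32, 64}) {
        int oldR = int(22.0 * std::log(i));                         // old formula, 1 thread
        int newR = int(22.0 * std::log(i + 0.25 * std::log(i)));    // new formula, 1 thread
        std::printf("i=%2d  old=%2d  new=%2d\n", i, oldR, newR);
    }
}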
@@ -225,7 +225,7 @@ void MainThread::search() {
Time.init(Limits, us, rootPos.game_ply());
TT.new_search();

- Eval::verify_NNUE();
+ Eval::NNUE::verify();

if (rootMoves.empty())
{
@@ -462,10 +462,7 @@ void Thread::search() {
++failedHighCnt;
}
else
- {
- ++rootMoves[pvIdx].bestMoveCount;
break;
- }

delta += delta / 4 + 5;

@@ -832,7 +829,7 @@ namespace {
assert(eval - beta >= 0);

// Null move dynamic reduction based on depth and value
- Depth R = (817 + 71 * depth) / 213 + std::min(int(eval - beta) / 192, 3);
+ Depth R = (982 + 85 * depth) / 256 + std::min(int(eval - beta) / 192, 3);

ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
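The depth-dependent part of the null-move reduction moves from (817 + 71 * depth) / 213 to (982 + 85 * depth) / 256; a quick check of both for a few depths (the eval - beta term is unchanged and left out):

#include <cstdio>

int main() {
    for (int depth : {4, 8, 12, 16}) {
        int oldR = (817 + 71 * depth) / 213;
        int newR = (982 + 85 * depth) / 256;
        std::printf("depth=%2d  old R=%d  new R=%d\n", depth, oldR, newR);
    }
}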
@@ -1151,7 +1148,7 @@ moves_loop: // When in check, search starts from here
// Step 16. Reduced depth search (LMR, ~200 Elo). If the move fails high it will be
// re-searched at full depth.
if ( depth >= 3
- && moveCount > 1 + 2 * rootNode + 2 * (PvNode && abs(bestValue) < 2)
+ && moveCount > 1 + 2 * rootNode
&& ( !captureOrPromotion
|| moveCountPruning
|| ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha
@@ -1570,6 +1567,7 @@ moves_loop: // When in check, search starts from here
[pos.moved_piece(move)]
[to_sq(move)];

+ // CounterMove based pruning
if ( !captureOrPromotion
&& moveCount
&& (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
@@ -71,7 +71,6 @@ struct RootMove {
Value previousScore = -VALUE_INFINITE;
int selDepth = 0;
int tbRank = 0;
- int bestMoveCount = 0;
Value tbScore;
std::vector<Move> pv;
};
@@ -472,8 +472,6 @@ TBTables TBTables;
// If the corresponding file exists two new objects TBTable<WDL> and TBTable<DTZ>
// are created and added to the lists and hash table. Called at init time.
void TBTables::add(const std::vector<PieceType>& pieces) {
- if (sizeof(char*) < 8 && pieces.size() >= 6)
- return; // Not enough address space to support 6-men TB on 32-bit OS

std::string code;

@@ -62,11 +62,12 @@ void TranspositionTable::resize(size_t mbSize) {

Threads.main()->wait_for_search_finished();

- aligned_ttmem_free(mem);
+ aligned_large_pages_free(table);

clusterCount = mbSize * 1024 * 1024 / sizeof(Cluster);
- table = static_cast<Cluster*>(aligned_ttmem_alloc(clusterCount * sizeof(Cluster), mem));
- if (!mem)
+
+ table = static_cast<Cluster*>(aligned_large_pages_alloc(clusterCount * sizeof(Cluster)));
+ if (!table)
{
std::cerr << "Failed to allocate " << mbSize
<< "MB for transposition table." << std::endl;
@@ -73,7 +73,7 @@ class TranspositionTable {
static_assert(sizeof(Cluster) == 32, "Unexpected Cluster size");

public:
- ~TranspositionTable() { aligned_ttmem_free(mem); }
+ ~TranspositionTable() { aligned_large_pages_free(table); }
void new_search() { generation8 += 8; } // Lower 3 bits are used by PV flag and Bound
TTEntry* probe(const Key key, bool& found) const;
int hashfull() const;
@@ -89,7 +89,6 @@ private:

size_t clusterCount;
Cluster* table;
- void* mem;
uint8_t generation8; // Size must be not bigger than TTEntry::genBound8
};

@@ -85,7 +85,7 @@ namespace {
Position p;
p.set(pos.fen(), Options["UCI_Chess960"], &states->back(), Threads.main());

- Eval::verify_NNUE();
+ Eval::NNUE::verify();

sync_cout << "\n" << Eval::trace(p) << sync_endl;
}
@@ -41,8 +41,8 @@ void on_hash_size(const Option& o) { TT.resize(size_t(o)); }
void on_logger(const Option& o) { start_logger(o); }
void on_threads(const Option& o) { Threads.set(size_t(o)); }
void on_tb_path(const Option& o) { Tablebases::init(o); }
- void on_use_NNUE(const Option& ) { Eval::init_NNUE(); }
- void on_eval_file(const Option& ) { Eval::init_NNUE(); }
+ void on_use_NNUE(const Option& ) { Eval::NNUE::init(); }
+ void on_eval_file(const Option& ) { Eval::NNUE::init(); }

/// Our case insensitive less() function as required by UCI protocol
bool CaseInsensitiveLess::operator() (const string& s1, const string& s2) const {
@@ -36,7 +36,7 @@ import org.petero.droidfish.EngineOptions;

/** Stockfish engine running as process, started from assets resource. */
public class InternalStockFish extends ExternalEngine {
- private static final String defaultNet = "nn-03744f8d56d8.nnue";
+ private static final String defaultNet = "nn-baeb9ef2d183.nnue";
private static final String netOption = "evalfile";
private File defaultNetFile; // To get the full path of the copied default network file
