From ac32902a87147f78d63c931aa8a23dee762660e7 Mon Sep 17 00:00:00 2001
From: Karthik Kumar Viswanathan <195178+guilt@users.noreply.github.com>
Date: Sun, 14 Jan 2024 00:41:44 -0800
Subject: llama : support WinXP build with MinGW 8.1.0 (#3419)

---
 llama.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index 107b0511..51e9bdae 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -987,6 +987,7 @@ struct llama_mmap {
         }
 
         if (prefetch > 0) {
+#if _WIN32_WINNT >= 0x602
             // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
             BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
             HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
@@ -1004,6 +1005,9 @@ struct llama_mmap {
                         llama_format_win_err(GetLastError()).c_str());
                 }
             }
+#else
+            throw std::runtime_error("PrefetchVirtualMemory unavailable");
+#endif
         }
     }
--
cgit v1.2.3
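
For context, the block guarded by the new #if is the runtime-lookup pattern llama.cpp already uses on Windows: rather than calling PrefetchVirtualMemory directly (which would keep the binary from loading where kernel32.dll lacks the export), it resolves the symbol at runtime and only calls it if present; the patch additionally compiles the block out entirely when the target headers predate Windows 8. Below is a minimal sketch of that pattern, assuming a Windows 8+ SDK (_WIN32_WINNT >= 0x0602) so that WIN32_MEMORY_RANGE_ENTRY is declared; the helper name prefetch_region and its parameters are illustrative, not part of llama.cpp.

    // Sketch only: advise the kernel to preload a mapped region, if the OS supports it.
    // Assumes _WIN32_WINNT >= 0x0602 so the PrefetchVirtualMemory types are declared.
    #include <windows.h>
    #include <cstdio>

    static void prefetch_region(void * addr, size_t len) {
        // Function-pointer type matching PrefetchVirtualMemory's signature.
        BOOL (WINAPI *pPrefetchVirtualMemory)(HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);

        HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");

        // Resolve the symbol at runtime; this yields NULL on Windows 7 and older.
        pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)>(
            GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

        if (pPrefetchVirtualMemory) {
            WIN32_MEMORY_RANGE_ENTRY range;
            range.VirtualAddress = addr;
            range.NumberOfBytes  = (SIZE_T) len;
            if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                fprintf(stderr, "warning: PrefetchVirtualMemory failed\n");
            }
        }
    }

With a MinGW 8.1.0 toolchain targeting Windows XP, the headers set _WIN32_WINNT below 0x602 and do not declare these types, so the preprocessor guard replaces the whole block with a runtime error instead of failing to compile.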