Diego Devesa committed
Commit f0a0ac8 · 1 Parent(s): 00a9e2f

llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama.cpp sources (llama/14013)

Files changed (2)
  1. ggml/CMakeLists.txt +1 -1
  2. ggml/src/CMakeLists.txt +0 -1
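
For context on the two hunks below: GGML_WIN_VER is forwarded to the compiler as _WIN32_WINNT, which decides how much of the Win32 API surface the SDK headers declare; the default 0x602 corresponds to Windows 8, the first release with PrefetchVirtualMemory. A minimal sketch of that compile-time gate, written as an illustration rather than llama.cpp source:

// Illustration only (not llama.cpp code): _WIN32_WINNT, here fed from
// GGML_WIN_VER, gates which Windows declarations the headers expose at
// compile time; 0x0602 is the Windows 8 level.
#include <cstdio>

int main() {
#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0602
    // Windows 8+ declarations such as PrefetchVirtualMemory are visible here.
    std::printf("headers target Windows 8 or newer (_WIN32_WINNT=0x%04x)\n", (unsigned) _WIN32_WINNT);
#else
    // Older target or non-Windows build: the Windows 8 API surface is absent.
    std::printf("no Windows 8 API surface at compile time\n");
#endif
    return 0;
}

When ggml's add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER}) is in effect (second hunk below), the first branch is the one that compiles.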
ggml/CMakeLists.txt CHANGED
@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
 
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version")
 endif()
 
ggml/src/CMakeLists.txt CHANGED
@@ -125,7 +125,6 @@ if (NOT MSVC)
     endif()
 
     if (MINGW)
-        # Target Windows 8 for PrefetchVirtualMemory
         add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
     endif()
 
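
The comment dropped above points at the other half of the commit title: mmap no longer hard-requires PrefetchVirtualMemory. One way to make that possible is to resolve the call at run time and skip it when the entry point is missing. A hedged sketch of that pattern follows; it is not the actual llama.cpp implementation, the try_prefetch helper is hypothetical, and it assumes a Windows build with _WIN32_WINNT >= 0x0602 so that WIN32_MEMORY_RANGE_ENTRY is declared:

// Sketch under the assumptions above; try_prefetch is a hypothetical helper.
#include <windows.h>
#include <cstdio>

static void try_prefetch(void * addr, SIZE_T len) {
    // Signature of PrefetchVirtualMemory as documented for Windows 8+.
    using prefetch_fn_t = BOOL (WINAPI *)(HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);

    // Resolve the symbol at run time instead of linking against it, so the
    // binary still loads on systems where kernel32.dll does not export it.
    HMODULE kernel32 = GetModuleHandleW(L"kernel32.dll");
    prefetch_fn_t prefetch = kernel32
        ? reinterpret_cast<prefetch_fn_t>(GetProcAddress(kernel32, "PrefetchVirtualMemory"))
        : nullptr;

    if (prefetch == nullptr) {
        // Pre-Windows-8 system: skip the prefetch hint, the mapping stays usable.
        std::fprintf(stderr, "PrefetchVirtualMemory not available, skipping prefetch\n");
        return;
    }

    WIN32_MEMORY_RANGE_ENTRY range;
    range.VirtualAddress = addr;
    range.NumberOfBytes  = len;
    if (!prefetch(GetCurrentProcess(), 1, &range, 0)) {
        std::fprintf(stderr, "PrefetchVirtualMemory failed: %lu\n", (unsigned long) GetLastError());
    }
}

int main() {
    // Usage example on a dummy buffer; a real caller would pass a mapped file region.
    static char buffer[4096];
    try_prefetch(buffer, sizeof(buffer));
    return 0;
}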