Arrcttacsrks committed on
Commit
f864c0a
·
verified ·
1 Parent(s): 0a8fb7d

Upload llama.cpp/CMakeLists.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. llama.cpp/CMakeLists.txt +216 -0
llama.cpp/CMakeLists.txt ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
project("llama.cpp" C CXX)
include(CheckIncludeFileCXX)

#set(CMAKE_WARN_DEPRECATED YES)
set(CMAKE_WARN_UNUSED_CLI YES)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Default to a Release build when the user did not pick a build type, but
# only for single-config generators: multi-config generators (Visual
# Studio, Xcode, "Ninja Multi-Config") ignore CMAKE_BUILD_TYPE entirely.
# Checking GENERATOR_IS_MULTI_CONFIG covers all of them, unlike the
# XCODE/MSVC heuristic (which missed Ninja Multi-Config and wrongly
# skipped single-config MSVC toolchains such as clang-cl with Ninja).
get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if (NOT is_multi_config AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif()

# Make the helper modules under cmake/ resolvable through include().
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

# Collect every built executable under <build>/bin.
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")

# Detect whether llama.cpp is the top-level project or was pulled in by a
# parent build via add_subdirectory().
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(LLAMA_STANDALONE ON)

    # git commit/branch variables, only meaningful for a standalone checkout
    include(git-vars)

    # configure project version
    # TODO
else()
    set(LLAMA_STANDALONE OFF)
endif()

# Choose the default for BUILD_SHARED_LIBS: static for Emscripten and
# MinGW targets, shared everywhere else. The user-visible option below
# can still override this.
if (EMSCRIPTEN)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)

    option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON)
elseif (MINGW)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)
else()
    set(BUILD_SHARED_LIBS_DEFAULT ON)
endif()

option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})

if (WIN32)
    # silence MSVC CRT deprecation warnings for the whole tree
    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
endif()

#
# option list
#

# debug
option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON)
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)

# build
option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF)

# sanitizers
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# utils
# Defaults below follow LLAMA_STANDALONE: everything extra is built only
# when llama.cpp is the top-level project, not when embedded by a parent.
option(LLAMA_BUILD_COMMON "llama: build common utils library" ${LLAMA_STANDALONE})

# extra artifacts
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER "llama: build server example" ${LLAMA_STANDALONE})

# 3rd party libs
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)

# Required for relocatable CMake package
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)

# override ggml options
# Forward the llama-level sanitizer/warning switches so ggml is built with
# the same instrumentation as llama itself.
set(GGML_SANITIZE_THREAD ${LLAMA_SANITIZE_THREAD})
set(GGML_SANITIZE_ADDRESS ${LLAMA_SANITIZE_ADDRESS})
set(GGML_SANITIZE_UNDEFINED ${LLAMA_SANITIZE_UNDEFINED})
set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS})
set(GGML_FATAL_WARNINGS ${LLAMA_FATAL_WARNINGS})

# change the default for these ggml options
if (NOT DEFINED GGML_LLAMAFILE)
    set(GGML_LLAMAFILE_DEFAULT ON)
endif()

# NOTE(review): unlike the *_DEFAULT pattern used for GGML_LLAMAFILE and
# GGML_CUDA_GRAPHS, this sets the option variable itself rather than its
# default — presumably intentional, but verify against ggml's option
# handling.
if (NOT DEFINED GGML_AMX)
    set(GGML_AMX ON)
endif()

if (NOT DEFINED GGML_CUDA_GRAPHS)
    set(GGML_CUDA_GRAPHS_DEFAULT ON)
endif()

# transition helpers
# Report a deprecated LLAMA_* option and switch on its GGML_* replacement
# in the caller's scope.
#   TYPE - message severity (WARNING, or FATAL_ERROR to abort configure)
#   OLD  - name of the deprecated cache option
#   NEW  - name of the option that supersedes it
function (llama_option_depr TYPE OLD NEW)
    # guard clause: nothing to do unless the old option is truthy
    if (NOT ${OLD})
        return()
    endif()
    message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n")
    set(${NEW} ON PARENT_SCOPE)
endfunction()

# Map each removed LLAMA_* option to its GGML_* successor.
# LLAMA_CUBLAS is rejected outright (FATAL_ERROR); the rest only warn and
# forward to the new option.
llama_option_depr(FATAL_ERROR LLAMA_CUBLAS GGML_CUDA)
llama_option_depr(WARNING LLAMA_CUDA GGML_CUDA)
llama_option_depr(WARNING LLAMA_KOMPUTE GGML_KOMPUTE)
llama_option_depr(WARNING LLAMA_METAL GGML_METAL)
llama_option_depr(WARNING LLAMA_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
llama_option_depr(WARNING LLAMA_NATIVE GGML_NATIVE)
llama_option_depr(WARNING LLAMA_RPC GGML_RPC)
llama_option_depr(WARNING LLAMA_SYCL GGML_SYCL)
llama_option_depr(WARNING LLAMA_SYCL_F16 GGML_SYCL_F16)
llama_option_depr(WARNING LLAMA_CANN GGML_CANN)

#
# build the library
#

# Add the bundled ggml only when no parent project already provides the
# `ggml` target.
if (NOT TARGET ggml)
    add_subdirectory(ggml)
    # ... otherwise assume ggml is added by a parent CMakeLists.txt
endif()
add_subdirectory(src)

#
# install
#

include(GNUInstallDirs)
include(CMakePackageConfigHelpers)

# BUILD_NUMBER / BUILD_COMMIT — presumably populated by
# cmake/build-info.cmake included earlier; verify.
set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER})
set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT})
set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER})

# Cache-overridable install destinations, defaulting to GNUInstallDirs.
set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")


# At the moment some compile definitions are placed within the ggml/src
# directory but not exported on the `ggml` target. This could be improved by
# determining _precisely_ which defines are necessary for the llama-config
# package.
#
# Gather directory-scoped and target-scoped compile definitions from ggml;
# presumably consumed by cmake/llama-config.cmake.in below — verify.
set(GGML_TRANSIENT_DEFINES)
get_target_property(GGML_DIRECTORY ggml SOURCE_DIR)
get_directory_property(GGML_DIR_DEFINES DIRECTORY ${GGML_DIRECTORY} COMPILE_DEFINITIONS)
if (GGML_DIR_DEFINES)
    list(APPEND GGML_TRANSIENT_DEFINES ${GGML_DIR_DEFINES})
endif()
get_target_property(GGML_TARGET_DEFINES ggml COMPILE_DEFINITIONS)
if (GGML_TARGET_DEFINES)
    list(APPEND GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES})
endif()
get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES)

# NOTE(review): only the LIBRARY and PUBLIC_HEADER artifact kinds are named
# here — confirm RUNTIME artifacts (Windows DLLs) install as intended.
set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h)
install(TARGETS llama LIBRARY PUBLIC_HEADER)

# Generate and install the CMake package files so downstream projects can
# locate this build with find_package(llama).
configure_package_config_file(
    "${CMAKE_CURRENT_SOURCE_DIR}/cmake/llama-config.cmake.in"
    "${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake"
    INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/llama"
    PATH_VARS
        LLAMA_INCLUDE_INSTALL_DIR
        LLAMA_LIB_INSTALL_DIR
        LLAMA_BIN_INSTALL_DIR)

write_basic_package_version_file(
    "${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake"
    VERSION ${LLAMA_INSTALL_VERSION}
    COMPATIBILITY SameMajorVersion)

install(
    FILES
        "${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake"
        "${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake"
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/llama")

# Install the HF -> GGUF conversion script as a user-executable tool.
install(
    FILES convert_hf_to_gguf.py
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        OWNER_EXECUTE
        GROUP_READ
        GROUP_EXECUTE
        WORLD_READ
        WORLD_EXECUTE
    DESTINATION ${CMAKE_INSTALL_BINDIR})

# pkg-config metadata; @ONLY so stray ${} in the template is not expanded.
configure_file(cmake/llama.pc.in
        "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        @ONLY)

# Use CMAKE_INSTALL_LIBDIR (GNUInstallDirs) rather than a hardcoded "lib"
# so multilib layouts (e.g. lib64) are respected — consistent with every
# other install destination in this file.
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)

#
# utils, programs, examples and tests
#

if (LLAMA_BUILD_COMMON)
    add_subdirectory(common)
endif()

# Tests need the common library. CMAKE_JS_VERSION — presumably set when
# building through cmake-js, where CTest is not usable; verify.
if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
    include(CTest)
    add_subdirectory(tests)
endif()

# Examples and proof-of-concept programs also depend on common.
if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES)
    add_subdirectory(examples)
    add_subdirectory(pocs)
endif()