Tamotsu Takahashi committed
Commit 41a13d4 · unverified · 1 Parent(s): cee2822

ci : build with CLBlast + ggml-opencl use GGML_API (#1576)


* Build with CLBlast

* Declare GGML_API

After rebasing, examples/talk-llama failed:

"D:\a\whisper.cpp\whisper.cpp\build\ALL_BUILD.vcxproj" (build target) (1) ->
"D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj" (default target) (14) ->
(Link target) ->
llama.obj : error LNK2019: unresolved external symbol ggml_cl_free_data referenced in function "public: __cdecl llama_model::~llama_model(void)" (??1llama_model@@QEAA@XZ) [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj]
llama.obj : error LNK2019: unresolved external symbol ggml_cl_transform_tensor referenced in function "public: void __cdecl llama_model_loader::load_all_data(struct ggml_context *,void (__cdecl*)(float,void *),void *,struct llama_mlock *)" (?load_all_data@llama_model_loader@@QEAAXPEAUggml_context@@P6AXMPEAX@Z1PEAUllama_mlock@@@Z) [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj]
D:\a\whisper.cpp\whisper.cpp\build\bin\Release\talk-llama.exe : fatal error LNK1120: 2 unresolved externals [D:\a\whisper.cpp\whisper.cpp\build\examples\talk-llama\talk-llama.vcxproj]
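
These are missing-export errors: the ggml_cl_* functions are compiled into the whisper library, but without a GGML_API qualifier on their declarations MSVC does not export them from the DLL, so llama.obj in talk-llama cannot resolve them. For context, GGML_API is the export/import macro from ggml.h; a simplified sketch of how such a macro is typically defined (the actual header may differ in detail):

// Simplified sketch of an export/import macro like GGML_API (see ggml.h for the real definition).
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)   // building the ggml/whisper DLL: export the symbol
#        else
#            define GGML_API __declspec(dllimport)   // consuming the DLL (e.g. talk-llama): import the symbol
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

Declaring the ggml-opencl.h functions with GGML_API (see the header diff below) makes the symbols visible to consumers of the whisper DLL, which resolves the two LNK2019 errors.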

.github/workflows/build.yml CHANGED
@@ -223,9 +223,12 @@ jobs:
           - arch: Win32
             obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x86.zip
             s2arc: x86
+            clblast: OFF
           - arch: x64
             obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x64.zip
             s2arc: x64
+            clblast: ON
+            clver: 1.6.1
           - sdl2: ON
             s2ver: 2.28.5

@@ -252,6 +255,18 @@ jobs:
           7z x sdl2.zip
           echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV

+      - name: Install OpenCL
+        if: matrix.clblast == 'ON'
+        run: vcpkg.exe --triplet=${{ matrix.arch }}-windows install opencl
+
+      - name: Fetch CLBlast and set CLBlast_DIR
+        if: matrix.clblast == 'ON'
+        run: |
+          C:/msys64/usr/bin/wget.exe -qO clblast.zip https://github.com/CNugteren/CLBlast/releases/download/${{ matrix.clver }}/CLBlast-${{ matrix.clver }}-windows-x64.zip
+          7z x clblast.zip
+          7z x CLBlast-${{ matrix.clver }}-windows-x64.7z
+          echo "CLBlast_DIR=$env:GITHUB_WORKSPACE/CLBlast-${{ matrix.clver }}-windows-x64/lib/cmake/CLBlast" >> $env:GITHUB_ENV
+
       - name: Configure
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
@@ -259,6 +274,7 @@ jobs:
           -DWHISPER_OPENBLAS=${{ matrix.blas }}
           -DCMAKE_LIBRARY_PATH="$env:OPENBLAS_PATH/lib"
           -DWHISPER_SDL2=${{ matrix.sdl2 }}
+          -DWHISPER_CLBLAST=${{ matrix.clblast }}

       - name: Build
         run: |
@@ -273,11 +289,15 @@ jobs:
         if: matrix.sdl2 == 'ON'
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}

+      - name: Copy clblast.dll
+        if: matrix.clblast == 'ON'
+        run: copy "$env:CLBlast_DIR/../../clblast.dll" build/bin/${{ matrix.build }}
+
       - name: Upload binaries
         if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
         uses: actions/upload-artifact@v1
         with:
-          name: whisper-blas-bin-${{ matrix.arch }}
+          name: whisper-blas${{ matrix.clblast == 'ON' && '-clblast' || ''}}-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

   windows-cublas:
examples/CMakeLists.txt CHANGED
@@ -14,6 +14,10 @@ if (WHISPER_SDL2)
     message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
 endif()

+if (WHISPER_CLBLAST)
+    find_package(CLBlast REQUIRED)
+endif()
+
 # common

 set(TARGET common)
examples/talk-llama/CMakeLists.txt CHANGED
@@ -3,7 +3,11 @@ if (WHISPER_SDL2)
     set(TARGET talk-llama)
     add_executable(${TARGET} talk-llama.cpp llama.cpp)
     target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
-    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+
+    if (WHISPER_CLBLAST)
+        set(CLBLAST_LIBNAME clblast)
+    endif ()
+    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CLBLAST_LIBNAME} ${CMAKE_THREAD_LIBS_INIT})

     if(WIN32)
         # It requires Windows 8.1 or later for PrefetchVirtualMemory
ggml-opencl.h CHANGED
@@ -6,19 +6,19 @@
 extern "C" {
 #endif

-void ggml_cl_init(void);
+GGML_API void ggml_cl_init(void);

-void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
+GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

-void * ggml_cl_host_malloc(size_t size);
-void ggml_cl_host_free(void * ptr);
+GGML_API void * ggml_cl_host_malloc(size_t size);
+GGML_API void ggml_cl_host_free(void * ptr);

-void ggml_cl_free_data(const struct ggml_tensor* tensor);
+GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);

-void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
+GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);

 #ifdef __cplusplus
 }
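
For reference, the two unresolved externals come from call sites in talk-llama's llama.cpp that are only compiled in when CLBlast is enabled (presumably guarded by GGML_USE_CLBLAST). An illustrative sketch of that kind of call site, with made-up helper names rather than the verbatim llama.cpp code:

// Illustrative sketch only -- paraphrases the call sites named in the LNK2019 errors above.
#include "ggml.h"                 // defines GGML_API and struct ggml_tensor

#ifdef GGML_USE_CLBLAST
#include "ggml-opencl.h"          // the declarations patched in this commit

// llama_model_loader::load_all_data hands loaded weights to the OpenCL backend:
static void offload_weight(void * data, struct ggml_tensor * t) {
    ggml_cl_transform_tensor(data, t);
}

// ~llama_model releases the OpenCL buffer attached to each model tensor:
static void release_weight(const struct ggml_tensor * t) {
    ggml_cl_free_data(t);
}
#endif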