author | Anas Ahouzi <112881240+aahouzi@users.noreply.github.com> | 2024-04-23 02:53:18 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-04-23 08:53:18 +0800 |
commit | 4e96a812b3ce7322a29a3008db2ed73d9087b176 (patch) | |
tree | 7c2b518d017957f5e2b903b507388c839d697177 | |
parent | 192090bae47960f0d38d4967abe398a5d190057e (diff) |
[SYCL] Windows default build instructions without -DLLAMA_SYCL_F16 flag activated (#6767)
* Fix FP32/FP16 build instructions
* Fix typo
* Recommended build instruction
Co-authored-by: Neo Zhang Jianyu <jianyu.zhang@intel.com>
* Recommended build instruction
Co-authored-by: Neo Zhang Jianyu <jianyu.zhang@intel.com>
* Recommended build instruction
Co-authored-by: Neo Zhang Jianyu <jianyu.zhang@intel.com>
* Add comments in Intel GPU linux
---------
Co-authored-by: Anas Ahouzi <112881240+aahouzi-intel@users.noreply.github.com>
Co-authored-by: Neo Zhang Jianyu <jianyu.zhang@intel.com>
-rw-r--r-- | README-sycl.md | 20 |
1 file changed, 12 insertions, 8 deletions
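The commit title concerns the Windows path: after this change, the documented default Windows build omits the `-DLLAMA_SYCL_F16` flag and compiles for FP32. The block below condenses those commands from the Windows hunk of the diff that follows; it is a sketch that assumes the Intel oneAPI Base Toolkit and MinGW Make are installed and that the commands run from the llama.cpp source root.

```
# Initialize the oneAPI environment (install path as documented in the diff below)
@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force

mkdir -p build
cd build

# Default (FP32) build: no -DLLAMA_SYCL_F16 flag
cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release

make -j
```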
````diff
diff --git a/README-sycl.md b/README-sycl.md
index 2aa46507..dc98c7b3 100644
--- a/README-sycl.md
+++ b/README-sycl.md
@@ -229,12 +229,12 @@ source /opt/intel/oneapi/setvars.sh
 # Build LLAMA with MKL BLAS acceleration for intel GPU
 mkdir -p build && cd build
 
-# Option 1: Use FP16 for better performance in long-prompt inference
-#cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
-
-# Option 2: Use FP32 by default
+# Option 1: Use FP32 (recommended for better performance in most cases)
 cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
 
+# Option 2: Use FP16
+cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
+
 #build all binary
 cmake --build . --config Release -j -v
 ```
@@ -250,12 +250,12 @@ export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR
 # Build LLAMA with Nvidia BLAS acceleration through SYCL
 mkdir -p build && cd build
 
-# Option 1: Use FP16 for better performance in long-prompt inference
-cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
-
-# Option 2: Use FP32 by default
+# Option 1: Use FP32 (recommended for better performance in most cases)
 cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
 
+# Option 2: Use FP16
+cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
+
 #build all binary
 cmake --build . --config Release -j -v
 
@@ -416,6 +416,10 @@ mkdir -p build
 cd build
 @call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
 
+# Option 1: Use FP32 (recommended for better performance in most cases)
+cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
+
+# Option 2: Or FP16
 cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
 make -j
````
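For readers who want the end state rather than the hunks, the Linux Intel GPU section now reads roughly as follows: a condensed sketch reassembled from the diff above, assuming the oneAPI toolkit is installed at /opt/intel/oneapi and the commands run from the llama.cpp source root.

```
source /opt/intel/oneapi/setvars.sh

# Build LLAMA with MKL BLAS acceleration for intel GPU
mkdir -p build && cd build

# Option 1: Use FP32 (recommended for better performance in most cases)
cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

# Option 2: Use FP16
cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON

# Build all binaries
cmake --build . --config Release -j -v
```

The reordering follows the reasoning in the old and new comments: the FP16 path mainly helped long-prompt inference, while FP32 is presented as the better default in most cases, so the flag-free FP32 invocation is listed first and FP16 remains available as an explicit opt-in.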