| author    | alwqx <kenan3015@gmail.com>                      | 2024-05-02 23:56:41 +0800 |
| committer | GitHub <noreply@github.com>                      | 2024-05-02 11:56:41 -0400 |
| commit    | 6ecf3189e00a1e8e737a78b6d10e1d7006e050a2 (patch) |                           |
| tree      | e6b87308a26b448f6f545f85fd19e9a4512c5ec4         |                           |
| parent    | b0d943de179ad5dbd83d51f327fb566066f4ccda (diff)  |                           |
chore: fix typo in llama.cpp (#7032)
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
-rw-r--r-- | llama.cpp | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
@@ -2359,7 +2359,7 @@ static bool llama_kv_cache_init(
     cache.recurrent = model.arch == LLM_ARCH_MAMBA;
     cache.v_trans = !cparams.flash_attn;
 
-    // TODO: support mixed reccurent Transformer architectues
+    // TODO: support mixed recurrent Transformer architectures
     // NOTE: (!a || b) is a logical implication (a -> b)
     GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
     GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
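The NOTE in the hunk relies on the propositional identity a -> b == (!a || b): since C++ has no implication operator, the asserts encode "if the cache is recurrent, then the GQA embedding size must match the recurrent state size". A minimal standalone sketch of the same idiom, using plain assert and hypothetical stand-in values rather than actual llama.cpp code:

    #include <cassert>

    int main() {
        bool recurrent    = true; // stand-in for cache.recurrent
        int  n_embd_k_gqa = 16;   // stand-in for the key embedding size per GQA group
        int  n_embd_k_s   = 16;   // stand-in for hparams.n_embd_k_s()

        // "recurrent implies sizes match": passes when recurrent is false
        // (vacuously true) or when the two sizes are equal.
        assert(!recurrent || n_embd_k_gqa == n_embd_k_s);
        return 0;
    }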