summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJoan Fontanals <joan.fontanals.martinez@jina.ai>2024-05-13 10:35:14 +0200
committerGitHub <noreply@github.com>2024-05-13 11:35:14 +0300
commit9aa672490c848e45eaa704a554e0f1f6df995fc8 (patch)
tree674957b27a03ce726c364471c4382d0398c1d58c
parentb1f8af1886e8187db6bb2a9b87cfc1c0f175f629 (diff)
llama : rename jina tokenizers to v2 (#7249)
* refactor: rename jina tokenizers to v2
* refactor: keep refactoring non-breaking
-rwxr-xr-xconvert-hf-to-gguf-update.py6
-rwxr-xr-xconvert-hf-to-gguf.py6
-rw-r--r--llama.cpp4
3 files changed, 9 insertions, 7 deletions
diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index cd2674a0..14aa0c45 100755
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -74,9 +74,9 @@ models = [
{"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
{"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
{"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
- {"name": "jina-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
- {"name": "jina-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
- {"name": "jina-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+ {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
+ {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
+ {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
]
# make directory "models/tokenizers" if it doesn't exist
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index ec7f4dd7..d6e5dece 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -475,13 +475,13 @@ class Model:
res = "dbrx"
if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
- res = "jina-en"
+ res = "jina-v2-en"
if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
- res = "jina-es"
+ res = "jina-v2-es"
if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
- res = "jina-de"
+ res = "jina-v2-de"
if res is None:
logger.warning("\n")
diff --git a/llama.cpp b/llama.cpp
index e91ad728..adbcc07e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4424,7 +4424,9 @@ static void llm_load_vocab(
} else if (
tokenizer_pre == "gpt-2" ||
tokenizer_pre == "jina-es" ||
- tokenizer_pre == "jina-de") {
+ tokenizer_pre == "jina-de" ||
+ tokenizer_pre == "jina-v2-es" ||
+ tokenizer_pre == "jina-v2-de") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
} else if (
tokenizer_pre == "refact") {