diff options
author | Herman Semenov <GermanAizek@yandex.ru> | 2024-05-17 07:08:49 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-05-17 10:08:49 +0300 |
commit | 359cbe3f46c90ce6f5151005e411b8fb74f8139e (patch) | |
tree | d93e39cdb25858cb7bd74337ff65e24c4520ddca /llama.cpp | |
parent | e18bc6aaf3b547890609ed254ee5248e720e5840 (diff) |
ggml-quants, llama : removed excess checks (#7274)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 8 |
1 files changed, 2 insertions, 6 deletions
@@ -13904,9 +13904,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_
         // Sample the next word X using top-k sampling
         llama_sample_top_k(nullptr, candidates, int(k), 1);
-        if (ctx) {
-            ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-        }
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
         llama_token X = llama_sample_token(ctx, candidates);
         t_start_sample_us = ggml_time_us();
@@ -13920,9 +13918,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_
         // Update mu using the learning rate and error
         *mu = *mu - eta * e;
-        if (ctx) {
-            ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-        }
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
         return X;
     }