diff options
Diffstat (limited to 'examples/server/public')
| -rw-r--r-- | examples/server/public/completion.js | 8 |
| -rw-r--r-- | examples/server/public/index.html | 8 |
2 files changed, 9 insertions, 7 deletions
diff --git a/examples/server/public/completion.js b/examples/server/public/completion.js index 835ce6e6..987b9a3b 100644 --- a/examples/server/public/completion.js +++ b/examples/server/public/completion.js @@ -21,6 +21,7 @@ let generation_settings = null; // export async function* llama(prompt, params = {}, config = {}) { let controller = config.controller; + const api_url = config.api_url || ""; if (!controller) { controller = new AbortController(); @@ -28,7 +29,7 @@ export async function* llama(prompt, params = {}, config = {}) { const completionParams = { ...paramDefaults, ...params, prompt }; - const response = await fetch("/completion", { + const response = await fetch(`${api_url}/completion`, { method: 'POST', body: JSON.stringify(completionParams), headers: { @@ -193,9 +194,10 @@ export const llamaComplete = async (params, controller, callback) => { } // Get the model info from the server. This is useful for getting the context window and so on. -export const llamaModelInfo = async () => { +export const llamaModelInfo = async (config = {}) => { if (!generation_settings) { - const props = await fetch("/props").then(r => r.json()); + const api_url = config.api_url || ""; + const props = await fetch(`${api_url}/props`).then(r => r.json()); generation_settings = props.default_generation_settings; } return generation_settings; diff --git a/examples/server/public/index.html b/examples/server/public/index.html index bbc5c2f6..2a75163b 100644 --- a/examples/server/public/index.html +++ b/examples/server/public/index.html @@ -199,10 +199,10 @@ <script type="module"> import { html, h, signal, effect, computed, render, useSignal, useEffect, useRef, Component - } from '/index.js'; + } from './index.js'; - import { llama } from '/completion.js'; - import { SchemaConverter } from '/json-schema-to-grammar.mjs'; + import { llama } from './completion.js'; + import { SchemaConverter } from './json-schema-to-grammar.mjs'; let selected_image = false; var slot_id = -1; @@ 
-405,7 +405,7 @@ throw new Error("already running"); } controller.value = new AbortController(); - for await (const chunk of llama(prompt, llamaParams, { controller: controller.value })) { + for await (const chunk of llama(prompt, llamaParams, { controller: controller.value, api_url: document.baseURI.replace(/\/+$/, '') })) { const data = chunk.data; if (data.stop) {
