Commit ad6275a
Parent(s): bd37ed6

Stop bigcode prompts (#170)
Files changed:

- .env (+3, -2)
- src/lib/buildPrompt.ts (+3, -3)
- src/lib/components/chat/ChatMessage.svelte (+13, -3)
- src/lib/components/chat/ChatMessages.svelte (+5, -1)
- src/lib/server/models.ts (+2, -1)
- src/lib/types/Model.ts (+6, -7)
- src/routes/+layout.server.ts (+1, -2)
- src/routes/conversation/[id]/+server.ts (+10, -1)
.env CHANGED

@@ -13,6 +13,7 @@ MODELS=`[
     "websiteUrl": "https://open-assistant.io",
     "userMessageToken": "<|prompter|>",
     "assistantMessageToken": "<|assistant|>",
+    "messageEndToken": "</s>",
     "preprompt": "Below are a series of dialogues between various people and an AI assistant. The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn't let caution get too much in the way of being useful.\n-----\n",
     "promptExamples": [
       {
@@ -33,7 +34,6 @@ MODELS=`[
       "top_k": 50,
       "truncate": 1000,
       "max_new_tokens": 1024,
-      "stop":["</s>"]
     }
   },
   {
@@ -61,7 +61,8 @@ MODELS=`[
       "top_p": 0.9,
       "repetition_penalty": 1.2,
       "truncate": 8000,
-      "max_new_tokens": 2000
+      "max_new_tokens": 2000,
+      "stop": ["Human:", "-----"]
     }
   }
 ]`
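The substance of the change is visible here: for the first model, `</s>` moves out of the `stop` generation parameter and into a new `messageEndToken` field used when building the prompt, while the second model gains `stop: ["Human:", "-----"]` so that completions which start inventing a new "Human:" turn, or emit the preprompt's `-----` separator, are cut short. A minimal sketch of a MODELS entry combining both new fields (the model name and parameter values are hypothetical):

```
MODELS=`[
  {
    "name": "example-org/example-model",
    "userMessageToken": "<|prompter|>",
    "assistantMessageToken": "<|assistant|>",
    "messageEndToken": "</s>",
    "parameters": {
      "temperature": 0.9,
      "truncate": 1000,
      "max_new_tokens": 1024,
      "stop": ["Human:", "-----"]
    }
  }
]`
```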
src/lib/buildPrompt.ts CHANGED

@@ -17,10 +17,10 @@ export function buildPrompt(
         (m.from === "user"
           ? model.userMessageToken + m.content
           : model.assistantMessageToken + m.content) +
-        (model.
-          ? m.content.endsWith(model.
+        (model.messageEndToken
+          ? m.content.endsWith(model.messageEndToken)
             ? ""
-          : model.
+          : model.messageEndToken
           : "")
       )
       .join("") + model.assistantMessageToken;
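The removed branch referenced a `model.` property whose name is cut off in the diff view above; the new branch appends `messageEndToken` after each message unless the content already ends with it. A self-contained sketch of just that behavior (the types and values below are illustrative, not the repo's):

```ts
// Hypothetical subset of BackendModel, just enough for the sketch.
type ExampleModel = {
  userMessageToken: string;
  assistantMessageToken: string;
  messageEndToken?: string;
};

type ExampleMessage = { from: "user" | "assistant"; content: string };

function buildPromptSketch(messages: ExampleMessage[], model: ExampleModel): string {
  return (
    messages
      .map(
        (m) =>
          (m.from === "user"
            ? model.userMessageToken + m.content
            : model.assistantMessageToken + m.content) +
          // Append the end token only when the content doesn't already carry it.
          (model.messageEndToken
            ? m.content.endsWith(model.messageEndToken)
              ? ""
              : model.messageEndToken
            : "")
      )
      .join("") + model.assistantMessageToken
  );
}

// Prints: <|prompter|>Hi</s><|assistant|>
console.log(
  buildPromptSketch([{ from: "user", content: "Hi" }], {
    userMessageToken: "<|prompter|>",
    assistantMessageToken: "<|assistant|>",
    messageEndToken: "</s>",
  })
);
```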
src/lib/components/chat/ChatMessage.svelte CHANGED

@@ -8,22 +8,32 @@
   import IconLoading from "../icons/IconLoading.svelte";
   import CarbonRotate360 from "~icons/carbon/rotate-360";
   import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken";
+  import type { Model } from "$lib/types/Model";
 
   function sanitizeMd(md: string) {
-    return md
+    let ret = md
       .replace(/<\|[a-z]*$/, "")
       .replace(/<\|[a-z]+\|$/, "")
       .replace(/<$/, "")
       .replaceAll(PUBLIC_SEP_TOKEN, " ")
       .replaceAll(/<\|[a-z]+\|>/g, " ")
       .replaceAll(/<br\s?\/?>/gi, "\n")
-      .replaceAll("<", "&lt;")
-      .trim();
+      .replaceAll("<", "&lt;")
+      .trim();
+
+    for (const stop of model.parameters.stop ?? []) {
+      if (ret.endsWith(stop)) {
+        ret = ret.slice(0, -stop.length).trim();
+      }
+    }
+
+    return ret;
   }
   function unsanitizeMd(md: string) {
     return md.replaceAll("&lt;", "<");
   }
 
+  export let model: Model;
   export let message: Message;
   export let loading = false;
 
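`sanitizeMd` now also strips any configured stop string left dangling at the end of a streamed message before it is rendered. The same trimming, as a standalone hypothetical helper:

```ts
// Hypothetical standalone version of the loop added to sanitizeMd:
// strip any configured stop string the model left at the end.
function trimStops(text: string, stops: string[]): string {
  let ret = text.trim();
  for (const stop of stops) {
    if (ret.endsWith(stop)) {
      ret = ret.slice(0, -stop.length).trim();
    }
  }
  return ret;
}

console.log(trimStops("Sure, here you go!\n-----", ["Human:", "-----"]));
// -> "Sure, here you go!"
```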
src/lib/components/chat/ChatMessages.svelte CHANGED

@@ -42,6 +42,7 @@
       <ChatMessage
         loading={loading && i === messages.length - 1}
         {message}
+        model={currentModel}
         on:retry={() => dispatch("retry", { id: message.id, content: message.content })}
       />
     {:else}
@@ -50,7 +51,10 @@
     {/if}
   {/each}
   {#if pending}
-    <ChatMessage
+    <ChatMessage
+      message={{ from: "assistant", content: "", id: randomUUID() }}
+      model={currentModel}
+    />
   {/if}
   <div class="h-32 flex-none" />
 </div>
src/lib/server/models.ts CHANGED

@@ -10,6 +10,7 @@ const modelsRaw = z
     datasetName: z.string().min(1).optional(),
     userMessageToken: z.string().min(1),
     assistantMessageToken: z.string().min(1),
+    messageEndToken: z.string().min(1).optional(),
     preprompt: z.string().default(""),
     prepromptUrl: z.string().url().optional(),
     promptExamples: z
@@ -34,7 +35,7 @@ const modelsRaw = z
         temperature: z.number().min(0).max(1),
         truncate: z.number().int().positive(),
         max_new_tokens: z.number().int().positive(),
-        stop: z.array(z.string()).
+        stop: z.array(z.string()).optional(),
       })
       .passthrough(),
   })
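Both new fields are optional, so existing configs keep validating. A trimmed-down sketch of the touched part of the schema (the real `modelsRaw` has many more properties):

```ts
import { z } from "zod";

// Sketch limited to the fields changed in this commit.
const modelSketch = z.object({
  userMessageToken: z.string().min(1),
  assistantMessageToken: z.string().min(1),
  messageEndToken: z.string().min(1).optional(),
  parameters: z
    .object({
      max_new_tokens: z.number().int().positive(),
      stop: z.array(z.string()).optional(),
    })
    .passthrough(),
});

// Both optional fields may be omitted; when present they are validated.
modelSketch.parse({
  userMessageToken: "<|prompter|>",
  assistantMessageToken: "<|assistant|>",
  messageEndToken: "</s>",
  parameters: { max_new_tokens: 2000, stop: ["Human:", "-----"] },
});
```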
src/lib/types/Model.ts CHANGED

@@ -1,7 +1,6 @@
-
-
-
-
-  datasetName
-
-}
+import type { BackendModel } from "$lib/server/models";
+
+export type Model = Pick<
+  BackendModel,
+  "name" | "displayName" | "websiteUrl" | "datasetName" | "promptExamples" | "parameters"
+>;
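The old hand-written type (mostly blanked out in the diff view above) is replaced by a projection of the server-side type: `Pick` keeps the client-facing `Model` in sync with `BackendModel` while exposing only safe fields. A tiny illustration of the pattern with hypothetical types:

```ts
// Hypothetical server-side type with a field the client must not see.
type ServerThing = {
  name: string;
  secretApiKey: string;
  parameters: { stop?: string[] };
};

// The client type is derived, so server-side renames propagate automatically.
type ClientThing = Pick<ServerThing, "name" | "parameters">;

const ok: ClientThing = { name: "demo", parameters: { stop: ["-----"] } };
// const bad: ClientThing = { name: "demo", secretApiKey: "..." }; // type error
```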
src/routes/+layout.server.ts CHANGED

@@ -33,8 +33,7 @@ export const load: LayoutServerLoad = async ({ locals, depends, url }) => {
     settings: {
       shareConversationsWithModelAuthors: settings?.shareConversationsWithModelAuthors ?? true,
       ethicsModalAcceptedAt: settings?.ethicsModalAcceptedAt ?? null,
-      activeModel:
-        url.searchParams.get("model") ?? settings?.activeModel ?? defaultModel.name,
+      activeModel: url.searchParams.get("model") ?? settings?.activeModel ?? defaultModel.name,
     },
     models: models.map((model) => ({
       name: model.name,
src/routes/conversation/[id]/+server.ts CHANGED

@@ -107,7 +107,16 @@ export async function POST({ request, fetch, locals, params }) {
     generated_text = generated_text.slice(prompt.length);
   }
 
-  generated_text = trimSuffix(
+  generated_text = trimSuffix(
+    trimPrefix(generated_text, "<|startoftext|>"),
+    PUBLIC_SEP_TOKEN
+  ).trim();
+
+  for (const stop of modelInfo?.parameters?.stop ?? []) {
+    if (generated_text.endsWith(stop)) {
+      generated_text = generated_text.slice(0, -stop.length).trim();
+    }
+  }
 
   messages.push({ from: "assistant", content: generated_text, id: crypto.randomUUID() });
 
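`trimPrefix` and `trimSuffix` are presumably the repo's own string utilities; assuming the obvious implementations, the post-processing above behaves like this sketch (the helper bodies and the `PUBLIC_SEP_TOKEN` value are assumptions, not the repo code):

```ts
// Assumed equivalents of the repo's string helpers.
function trimPrefix(s: string, prefix: string): string {
  return s.startsWith(prefix) ? s.slice(prefix.length) : s;
}

function trimSuffix(s: string, suffix: string): string {
  return s.endsWith(suffix) ? s.slice(0, -suffix.length) : s;
}

const PUBLIC_SEP_TOKEN = "</s>"; // assumed value of the repo's sep token

let generated = "<|startoftext|>Hello there!</s>";
generated = trimSuffix(trimPrefix(generated, "<|startoftext|>"), PUBLIC_SEP_TOKEN).trim();

// Then strip any configured stop strings, mirroring the loop above.
for (const stop of ["Human:", "-----"]) {
  if (generated.endsWith(stop)) {
    generated = generated.slice(0, -stop.length).trim();
  }
}
console.log(generated); // "Hello there!"
```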