diff --git a/src/lib/components/ChatMessageItem.svelte b/src/lib/components/ChatMessageItem.svelte
index 4b8d56f..015b499 100644
--- a/src/lib/components/ChatMessageItem.svelte
+++ b/src/lib/components/ChatMessageItem.svelte
@@ -9,11 +9,9 @@
   import MarkdownHtmlBlock from "./MarkdownHtmlBlock.svelte";
   import "./markdown.css";
   import { currentlyEditingMessage, inProgressMessageId } from "$lib/stores/stores";
-  import { onMount } from "svelte";
   import ChatMessageControls from "./ChatMessageControls.svelte";
   import { autosize } from "$lib/utils";
-  import { fly, slide } from "svelte/transition";
-  import { toast } from "$lib/toast";
+  import { slide } from "svelte/transition";
   let _class: string = "";
   export { _class as class };
   export let item: ChatMessage;
@@ -101,8 +99,21 @@
       class="w-full bg-transparent outline-none resize-none mb-3"
     />
   {:else if item.role === "user"}
-
-
-      {item.content}
-
+    {#if typeof item.content === "string" && item.content.startsWith("[{")}
+      {#each JSON.parse(item.content) as part}
+        {#if part.type === "image_url"}
+          Attached image
+        {:else if part.type === "text"}
+
+          {part.text}
+
+        {/if}
+      {/each}
+    {:else}
+
+      {item.content}
+
+    {/if}
   {:else}
diff --git a/src/lib/components/ImageAttachment.svelte b/src/lib/components/ImageAttachment.svelte
new file mode 100644
--- /dev/null
+++ b/src/lib/components/ImageAttachment.svelte
+<script lang="ts">
+  import { XCircle, Image } from "lucide-svelte";
+  import { attachedImage } from "$lib/stores/stores";
+  import { processImageForAI } from "$lib/utils";
+  import { toast } from "$lib/toast";
+  import { getSystem } from "$lib/gui";
+
+  const sys = getSystem();
+
+  async function handleFileSelect(e: MouseEvent) {
+    e.preventDefault();
+    e.stopPropagation();
+    try {
+      const result = await sys.chooseAndOpenImageFile();
+      if (!result) return;
+
+      const file = new File([result.data], result.name, {
+        type: result.name.endsWith(".svg") ? "image/svg+xml" : "image/jpeg", // fallback type
+      });
+
+      const processed = await processImageForAI(file);
+      attachedImage.set(processed);
+    } catch (error) {
+      console.error("Error processing file:", error);
+      toast({
+        type: "error",
+        title: "Error processing image",
+        message:
+          error instanceof Error ? error.message : "Could not process the selected image file",
+      });
+    }
+  }
+</script>
+
+
+
+
+{#if $attachedImage}
+
+  Attached
+
+
+{/if}
+
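Between these two components there is an implicit contract: `ImageAttachment` plus `sendMessage` serialize multi-part content as a JSON string, and `ChatMessageItem` sniffs for it with `startsWith("[{")`. A minimal sketch of that round trip (the `MessagePart` alias is a hypothetical name for illustration; the diff's own union type is `MessageContent` in `src/lib/stores/stores/index.ts`):

```ts
// Hypothetical alias for one element of the diff's MessageContent union.
type MessagePart =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

// Serialize (sendMessage): the image first as a base64 data URL, then the text.
const serialized = JSON.stringify([
  { type: "image_url", image_url: { url: "data:image/jpeg;base64,..." } },
  { type: "text", text: "What is in this picture?" },
] satisfies MessagePart[]);

// Render (ChatMessageItem): startsWith("[{") is a cheap heuristic that the
// string is a serialized part array, checked before paying for JSON.parse.
const parts: MessagePart[] = serialized.startsWith("[{")
  ? JSON.parse(serialized)
  : [{ type: "text", text: serialized }];
```

Note the heuristic also fires on a plain-text message that happens to begin with `[{`, in which case `JSON.parse` can throw; a `try/catch` around the parse would harden both call sites.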
diff --git a/src/lib/components/ModelPicker.svelte b/src/lib/components/ModelPicker.svelte
index 6e5feb5..78e3ba0 100644
--- a/src/lib/components/ModelPicker.svelte
+++ b/src/lib/components/ModelPicker.svelte
@@ -4,38 +4,29 @@
   import * as Popover from "$lib/components/ui/popover";
   import { Button } from "$lib/components/ui/button";
   import { cn, groupBy } from "$lib/utils";
-  import type { ComponentType } from "svelte";
+  import type { Component as SvelteComponent } from "svelte";
   import IconBrain from "$lib/components/IconBrain.svelte";
   import { onMount } from "svelte";
   import { llmProviders, chatModels, modelPickerOpen } from "$lib/stores/stores/llmProvider";
   import IconOpenAi from "./IconOpenAI.svelte";
   import { gptProfileStore } from "$lib/stores/stores/llmProfile";
   import { showInitScreen } from "$lib/stores/stores";
-  import { writable } from "svelte/store";
   import { toast } from "$lib/toast";
+  import { commandScore } from "./ui/command/command-score";

   let _class: string = "";
   export { _class as class };

-  type IconSource =
-    | { char: string }
-    | { component: ComponentType; class?: string }
-    | { src: string };
+  type IconSource = { component: SvelteComponent; class?: string };

-  type Status = {
-    value: string;
-    label: string;
-    icon?: IconSource;
-  };
-
-  $: options = $chatModels.models
-    .map((x) => {
+  $: options = [
+    ...$chatModels.models.map((x) => {
       const provider = llmProviders.byId(x.provider.id);
       let icon: IconSource | undefined = undefined;
       if (provider?.id === "prompta") {
         icon = { component: IconBrain, class: "w-5 h-5 text-[#30CEC0] " };
       } else if (provider?.id === "openai") {
-        icon = { component: IconOpenAi };
+        icon = { component: IconOpenAi, class: "" };
       }

       return {
@@ -44,8 +35,9 @@
         icon,
         provider,
       };
-    })
-    .concat(llmProviders.getSpecialProviders());
+    }),
+    ...llmProviders.getSpecialProviders(),
+  ];

   $: optionGroups = groupBy(options, (x) => x.provider?.name ?? "Other");

   let value = $gptProfileStore.model || "";
@@ -83,6 +75,30 @@
   onMount(() => {
     chatModels.refresh();
   });
+
+  let searchValue = "";
+  let selectedItem = value;
+
+  $: filteredOptions = searchValue
+    ? options.filter((opt) => {
+        const score = commandScore(opt.label.toLowerCase(), searchValue.toLowerCase());
+        return score > 0;
+      })
+    : options;
+
+  $: filteredGroups = groupBy(filteredOptions, (x) => x.provider?.name ?? "Other");
+
+  function handleSearch(event: CustomEvent) {
+    searchValue = event.detail;
+  }
+
+  function handleKeydown(event: CustomEvent) {
+    const e = event.detail;
+    if (e.key === "ArrowDown" || e.key === "ArrowUp") {
+      e.preventDefault();
+      // The CommandPrimitive will handle the actual selection
+    }
+  }
@@ -98,15 +114,15 @@
       })}
     >
-      {#if selectedStatus?.icon?.src}
-
-      {:else if selectedStatus?.icon?.component}
+      {#if selectedStatus?.icon?.component}
-      {:else if selectedStatus?.icon?.char}
-        {selectedStatus.icon.char}
       {:else}
       {/if}
@@ -121,35 +137,20 @@
     side="bottom"
     align="start"
   >
-
-
+
+
       No results found.
-      {#each Object.entries(optionGroups) as [name, models]}
+      {#each Object.entries(filteredGroups) as [name, models]}
         {#each models as opt}
-          {
-            handleChange(currentValue);
-          }}
-          >
-            {#if opt.icon?.char}
-              {opt.icon.char}
-            {:else if opt.icon?.src}
-
-            {:else if opt.icon?.component}
+
+            {#if opt.icon?.component}
diff --git a/src/lib/components/ui/command/command-input.svelte b/src/lib/components/ui/command/command-input.svelte
-  import { Command as CommandPrimitive } from "cmdk-sv";
-  import { Search } from "lucide-svelte";
-  import { cn } from "$lib/utils";
+  import { Command as CommandPrimitive } from "cmdk-sv";
+  import { Search } from "lucide-svelte";
+  import { cn } from "$lib/utils";
+  import { createEventDispatcher } from "svelte";

-  type $$Props = CommandPrimitive.InputProps;
+  type $$Props = CommandPrimitive.InputProps;

-  let className: string | undefined | null = undefined;
-  export { className as class };
-  export let value: string = "";
+  let className: string | undefined | null = undefined;
+  export { className as class };
+  export let value: string = "";
+
+  const dispatch = createEventDispatcher();
+
+  function handleInput(event: Event) {
+    const target = event.target as HTMLInputElement;
+    value = target.value;
+    dispatch("input", value);
+  }
-
-
-
+
+
+
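The reactive `filteredOptions` above delegates to the `commandScore` helper introduced below in `command-score.ts`. A quick usage sketch of the scoring tiers, assuming the implementation that follows:

```ts
import { commandScore } from "$lib/components/ui/command/command-score";

// Tiers: exact (1) > prefix (0.8) > substring (0.5) > in-order subsequence (0.3) > miss (0).
commandScore("gpt-4", "gpt-4");        // 1
commandScore("gpt-4-turbo", "gpt-4");  // 0.8
commandScore("openai/gpt-4", "gpt-4"); // 0.5
commandScore("gpt-4-turbo", "gtb");    // 0.3 ("g", "t", "b" appear in order)
commandScore("claude-3", "gpt");       // 0

// ModelPicker only filters (score > 0); ranking by score would also be cheap:
const ranked = ["gpt-4", "gpt-4-turbo", "llama-3"]
  .map((label) => ({ label, score: commandScore(label, "gpt") }))
  .filter((x) => x.score > 0)
  .sort((a, b) => b.score - a.score);
```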
diff --git a/src/lib/components/ui/command/command-score.ts b/src/lib/components/ui/command/command-score.ts
new file mode 100644
index 0000000..af2d746
--- /dev/null
+++ b/src/lib/components/ui/command/command-score.ts
@@ -0,0 +1,35 @@
+/**
+ * Calculate a score for how well a string matches a search query.
+ * Higher scores indicate better matches.
+ */
+export function commandScore(str: string, query: string): number {
+  if (!str || !query) return 0;
+
+  str = str.toLowerCase();
+  query = query.toLowerCase();
+
+  // Exact match gets highest score
+  if (str === query) return 1;
+
+  // Check if string starts with query
+  if (str.startsWith(query)) return 0.8;
+
+  // Check if string contains query
+  if (str.includes(query)) return 0.5;
+
+  // Check if all characters in query appear in order in str
+  let strIndex = 0;
+  let queryIndex = 0;
+
+  while (strIndex < str.length && queryIndex < query.length) {
+    if (str[strIndex] === query[queryIndex]) {
+      queryIndex++;
+    }
+    strIndex++;
+  }
+
+  // If all characters were found in order, give a lower score
+  if (queryIndex === query.length) return 0.3;
+
+  return 0;
+}
diff --git a/src/lib/components/ui/command/command.svelte b/src/lib/components/ui/command/command.svelte
index 2806759..cec5a34 100644
--- a/src/lib/components/ui/command/command.svelte
+++ b/src/lib/components/ui/command/command.svelte
@@ -1,22 +1,29 @@
-
+
diff --git a/src/lib/db.ts b/src/lib/db.ts
index 15ffa60..dbf172d 100644
--- a/src/lib/db.ts
+++ b/src/lib/db.ts
@@ -23,6 +23,7 @@ let schemaUrl = schema_0002;
 import { llmProviders, openAiConfig } from "./stores/stores/llmProvider";
 import { profilesStore } from "./stores/stores/llmProfile";
+import type OpenAI from "openai";

 const legacyDbNames = [
   "chat_db-v1",
@@ -306,6 +307,7 @@ export interface LLMProviderRow {
 export type LLMProvider = Omit<LLMProviderRow, "createdAt" | "enabled"> & {
   createdAt: Date;
   enabled: boolean;
+  client?: OpenAI; // Optional client property for custom SDK instances
 };

 export interface VecToFragRow {
diff --git a/src/lib/gui/browser.ts b/src/lib/gui/browser.ts
index 614c3bc..3c74894 100644
--- a/src/lib/gui/browser.ts
+++ b/src/lib/gui/browser.ts
@@ -78,3 +78,44 @@ export async function chooseAndOpenTextFile() {
     if (inputElement) document.body.removeChild(inputElement);
   });
 }
+
+export async function chooseAndOpenImageFile() {
+  let inputElement: HTMLInputElement;
+
+  return new Promise<{ name: string; data: Uint8Array } | undefined>((resolve, reject) => {
+    inputElement = document.createElement("input");
+    inputElement.type = "file";
+    inputElement.accept = "image/*";
+    inputElement.style.display = "none";
+
+    document.body.appendChild(inputElement);
+
+    inputElement.addEventListener("change", (event) => {
+      // @ts-ignore Poor typing on input events?
+      const file: File = event.target.files[0];
+      const reader = new FileReader();
+
+      reader.onload = () => {
+        const arrayBuffer = reader.result as ArrayBuffer;
+        resolve({
+          name: file.name,
+          data: new Uint8Array(arrayBuffer),
+        });
+        // @ts-ignore
+        event.target.value = null;
+      };
+
+      reader.onerror = () => {
+        reject(reader.error);
+        // @ts-ignore
+        event.target.value = null;
+      };
+
+      reader.readAsArrayBuffer(file);
+    });
+
+    inputElement.click();
+  }).finally(() => {
+    if (inputElement) document.body.removeChild(inputElement);
+  });
+}
diff --git a/src/lib/gui/index.ts b/src/lib/gui/index.ts
index 6d40467..ed7469e 100644
--- a/src/lib/gui/index.ts
+++ b/src/lib/gui/index.ts
@@ -22,6 +22,12 @@ interface SystemSpecificApi {
    * don't give you an abstract file handle.
    */
   chooseAndOpenTextFile: () => Promise<{ name: string; data: string } | undefined>;
+
+  /**
+   * Open a dialog for the user to choose an image file, then read that file.
+   * Returns undefined if cancelled.
+   */
+  chooseAndOpenImageFile: () => Promise<{ name: string; data: Uint8Array } | undefined>;
 }

 interface SystemApi extends SystemSpecificApi {
diff --git a/src/lib/gui/native.ts b/src/lib/gui/native.ts
index 13c94dc..78be557 100644
--- a/src/lib/gui/native.ts
+++ b/src/lib/gui/native.ts
@@ -33,6 +33,7 @@ export async function saveAs(filename: string, data: string) {
 export async function chooseAndOpenTextFile() {
   const file = await dialog.open({
     multiple: false,
+    directory: false,
     filters: [{ name: "JSON", extensions: ["json"] }],
   });
@@ -60,3 +61,27 @@ export async function alert(message: string) {
 export async function confirm(message: string) {
   return dialog.confirm(message, { kind: "warning" });
 }
+
+export async function chooseAndOpenImageFile() {
+  const file = await dialog.open({
+    multiple: false,
+    directory: false,
+    filters: [
+      {
+        name: "Images",
+        // Include all common image formats including SVG
+        extensions: ["jpg", "jpeg", "png", "gif", "webp", "svg", "bmp", "ico", "tiff"],
+      },
+    ],
+  });
+
+  if (!file) return;
+
+  const filePath = Array.isArray(file) ? file[0] : file;
+  const data = await fs.readFile(filePath);
+
+  return {
+    name: basename(filePath) as string,
+    data,
+  };
+}
diff --git a/src/lib/llm/openai.ts b/src/lib/llm/openai.ts
index b721fc8..d52ee9c 100644
--- a/src/lib/llm/openai.ts
+++ b/src/lib/llm/openai.ts
@@ -1,3 +1,4 @@
+import type { LLMProvider } from "$lib/db";
 import { OpenAI, type ClientOptions } from "openai";

 const headerWhitelist = new Set(["content-type", "authorization"]);
@@ -46,3 +47,15 @@ export const initOpenAi = (opts: ClientOptions) => {
     ...opts,
   });
 };
+
+export const getProviderClient = (provider: LLMProvider): OpenAI => {
+  // If provider already has a client instance, return it
+  if (provider.client) {
+    return provider.client;
+  }
+
+  return initOpenAi({
+    apiKey: provider.apiKey,
+    baseURL: provider.baseUrl,
+  });
+};
diff --git a/src/lib/stores/stores/index.ts b/src/lib/stores/stores/index.ts
index 191fd0b..f8597ff 100644
--- a/src/lib/stores/stores/index.ts
+++ b/src/lib/stores/stores/index.ts
@@ -23,6 +23,8 @@ import { createSyncer, getDefaultEndpoint, type Syncer } from "$lib/sync/vlcn";
 import { PENDING_THREAD_TITLE, hasThreadTitle, persistentStore } from "../storeUtils";
 import { chatModels, llmProviders, openAiConfig } from "./llmProvider";
 import { activeProfileName, getOpenAi, gptProfileStore } from "./llmProfile";
+import { base64FromFile } from "$lib/utils";
+import { processImageForAI } from "$lib/utils";

 export const showSettings = writable(false);
 export const showInitScreen = writable(false);
@@ -278,38 +280,34 @@ export const insertPendingMessage = ({ threadId = "", content = "", model = "" }
 export const inProgressMessageId = derived(pendingMessageStore, (x) => x?.id);

+interface StreamEvent {
+  data: OpenAI.Chat.ChatCompletionChunk;
+  id: string;
+  event: string;
+  retry: number;
+}
+
 /**
- * Handle inbound server sent events, sent by OpenAI's API. This is how we get
+ * Handle inbound stream events from the OpenAI API. This is how we get
  * the live-typing feel from the bot.
  */
-const handleSSE = (ev: EventSourceMessage) => {
-  const message = ev.data;
-
-  console.debug("[SSE]", message);
+const handleSSE = (ev: StreamEvent) => {
+  const chunk = ev.data;
+  const content = chunk.choices[0].delta.content;

-  if (message === "[DONE]") {
-    return; // Stream finished
+  if (!content) {
+    console.log("Contentless message", chunk.id, chunk.object);
+    return;
   }

-  try {
-    const parsed: OpenAI.Chat.ChatCompletionChunk = JSON.parse(message);
-    const content = parsed.choices[0].delta.content;
-    if (!content) {
-      console.log("Contentless message", parsed.id, parsed.object);
-      return;
+  pendingMessageStore.update((x) => {
+    if (!x) {
+      console.warn("should never happen", x);
+      return x;
     }
-    pendingMessageStore.update((x) => {
-      if (!x) {
-        console.warn("should never happen", x);
-        return x;
-      }
-
-      return { ...x, content: x.content + content };
-    });
-  } catch (error) {
-    console.error("Could not JSON parse stream message", message, error);
-  }
+    return { ...x, content: x.content + content };
+  });
 };

 export const currentlyEditingMessage = (() => {
@@ -401,7 +399,7 @@ export const currentChatThread = (() => {
   const promptGpt = async ({ threadId }: { threadId: string }) => {
     if (get(pendingMessageStore)) {
-      throw new Error("Already a message in progres");
+      throw new Error("Already a message in progress");
     }

     const { model: modelId, systemMessage } = get(gptProfileStore);
@@ -409,23 +407,17 @@
       throw new Error("No model. activeProfile=" + get(activeProfileName));
     }

-    const model = get(chatModels).models.find((x) => x.id === modelId);
-    if (!model) {
-      throw new Error("No model found for: " + modelId);
-    }
-
-    const provider = llmProviders.byId(model.provider.id);
-    if (!provider) {
-      throw new Error("No provider found for: " + model.provider.id);
-    }
-
-    insertPendingMessage({ threadId, model: modelId });
-
     const context = await ChatMessage.findThreadContext({ threadId });
     emit("chat message", { depth: context.length });

-    let messageContext = context.map((x) => ({ content: x.content, role: x.role }));
+    let messageContext = context.map((x) => {
+      // Parse content if it's a JSON string
+      const content =
+        typeof x.content === "string" && x.content.startsWith("[{")
+          ? JSON.parse(x.content)
+          : x.content;
+      return { content, role: x.role };
+    });

     if (systemMessage.trim()) {
       messageContext = [
@@ -437,65 +429,12 @@
       ];
     }

-    const prompt: OpenAI.Chat.CompletionCreateParamsStreaming = {
+    const botMessage = await createChatCompletion({
       messages: messageContext,
+      threadId,
       model: modelId,
-      // max_tokens: 100, // just for testing
-      stream: true,
-    };
-
-    console.log("%cprompt", "color:salmon;font-size:13px;", prompt);
-
-    abortController = new AbortController();
-
-    // NOTE the lack of leading slash.
-    //   Important for the URL to be relative to the base URL including its path
-    const endpoint = new URL("chat/completions", provider.baseUrl);
-
-    // @todo This could use the sdk now that the new version supports streaming
-    await fetchEventSource(endpoint.href, {
-      headers: {
-        "Content-Type": "application/json",
-        Authorization: `Bearer ${provider.apiKey}`, // This could be empty, but we assume that in such a case the server will ignore this header
-      },
-      method: "POST",
-      body: JSON.stringify(prompt),
-      signal: abortController.signal,
-      onerror(err) {
-        console.error("Error in stream", err);
-        toast({
-          type: "error",
-          title: "Error in stream",
-          message: err.message,
-        });
-        pendingMessageStore.set(null);
-        throw err;
-      },
-      onmessage: handleSSE,
-
-      // Very important. If the stream closes and reopens when the window is
-      // hidden (default behavior), then the chat completion with ChatGPT will
-      // get _RESTARTED_. So not only do you need to wait for a new completion,
-      // from the beginning, you're also getting overcharged since part of the
-      // explanation is likely to be the same. Also, on our end, it leads to
-      // mangled markdown since the message completion doesn't know that
-      // anything is amiss, even though the event stream starts firing off from
-      // the beginning.
-      openWhenHidden: true,
-    });
-
-    const botMessage = get(pendingMessageStore);
-
-    if (!botMessage) throw new Error("No pending message found when one was expected.");
-
-    // Store it fully in the db
-    await ChatMessage.create({
-      ...botMessage,
-      cancelled: abortController.signal.aborted,
     });

-    // Clear the pending message. Do this afterwards because it invalidates the chat message list
-    pendingMessageStore.set(null);
-
     if (!hasThreadTitle(get(currentThread))) {
       console.log("Generating thread title...");
       try {
@@ -637,20 +576,90 @@
       threadList.invalidate();
     }

-    const newMessage = await ChatMessage.create(msg);
-    const backupText = get(messageText);
+    const image = get(attachedImage);
+    const content = image
+      ? JSON.stringify([
+          {
+            type: "image_url",
+            image_url: { url: image.base64 },
+          },
+          {
+            type: "text",
+            text: msg.content,
+          },
+        ])
+      : msg.content;
+
+    const newMessage = await ChatMessage.create({
+      ...msg,
+      content,
+    });
+
     messageText.set("");
+    attachedImage.set(null);

-    promptGpt({ threadId: msg.threadId as string }).catch((err) => {
+    try {
+      const { model: modelId } = get(gptProfileStore);
+      if (!modelId) {
+        throw new Error("No model. activeProfile=" + get(activeProfileName));
+      }
+
+      const context = await ChatMessage.findThreadContext({
+        threadId: msg.threadId as string,
+      });
+
+      const messageContext = context.map((x) => {
+        // Parse content if it's a JSON string
+        const content =
+          typeof x.content === "string" && x.content.startsWith("[{")
+            ? JSON.parse(x.content)
+            : x.content;
+        return { content, role: x.role };
+      });
+
+      await createChatCompletion({
+        messages: messageContext,
+        threadId: msg.threadId as string,
+        model: modelId,
+      });
+
+      if (!hasThreadTitle(get(currentThread))) {
+        console.log("Generating thread title...");
+        try {
+          await generateThreadTitle({ threadId: newMessage.threadId });
+        } catch (error) {
+          if (error instanceof OpenAI.APIError) {
+            console.error({
+              status: error.status,
+              message: error.message,
+              code: error.code,
+              type: error.type,
+            });
+            toast({
+              type: "error",
+              title: "Error generating thread title",
+              message: error.message,
+            });
+          } else {
+            console.error(error);
+            toast({
+              type: "error",
+              title: "Unknown error generating thread title",
+              message: (error as any).message,
+            });
+          }
+        }
+      }
+    } catch (err) {
       console.error("[sendMessage]", err);
       toast({
         type: "error",
         title: "Error sending message",
         message: err.message,
       });
-      messageText.set(backupText); // Restore backup text
-      return ChatMessage.delete({ where: { id: newMessage.id } }); // Delete the message
-    });
+      messageText.set(msg.content ?? "");
+      return ChatMessage.delete({ where: { id: newMessage.id } });
+    }
   },
 };
})();
@@ -886,3 +895,95 @@ ChatMessage.onTableChange(() => {
   console.debug("%cmessage table changed", "color:salmon;");
   currentChatThread.invalidate();
 });
+
+export const attachedImage = writable<{
+  base64: string;
+  file: File;
+} | null>(null);
+
+// Add type for message content
+type MessageContent =
+  | string
+  | Array<
+      | {
+          type: "text";
+          text: string;
+        }
+      | {
+          type: "image_url";
+          image_url: {
+            url: string;
+          };
+        }
+    >;
+
+// Extract common logic into a helper function
+const createChatCompletion = async ({
+  messages,
+  threadId,
+  model: modelId,
+}: {
+  messages: OpenAI.ChatCompletionCreateParamsStreaming["messages"];
+  threadId: string;
+  model: string;
+}) => {
+  const model = get(chatModels).models.find((x) => x.id === modelId);
+  if (!model) {
+    throw new Error("No model found for: " + modelId);
+  }
+
+  const provider = llmProviders.byId(model.provider.id);
+  if (!provider) {
+    throw new Error("No provider found for: " + model.provider.id);
+  }
+
+  insertPendingMessage({ threadId, model: modelId });
+
+  const prompt: OpenAI.ChatCompletionCreateParamsStreaming = {
+    messages,
+    model: modelId,
+    stream: true,
+  };
+
+  console.log("%cprompt", "color:salmon;font-size:13px;", prompt);
+
+  const abortController = new AbortController();
+
+  try {
+    const stream = await provider.client.chat.completions.create(prompt, {
+      signal: abortController.signal,
+    });
+
+    for await (const chunk of stream) {
+      handleSSE({
+        data: chunk,
+        id: chunk.id,
+        event: "",
+        retry: 0,
+      });
+    }
+  } catch (err) {
+    console.error("Error in stream", err);
+    toast({
+      type: "error",
+      title: "Error in stream",
+      message: err.message,
+    });
+    pendingMessageStore.set(null);
+    throw err;
+  }
+
+  const botMessage = get(pendingMessageStore);
+  if (!botMessage) throw new Error("No pending message found when one was expected.");
+
+  // Store it fully in the db
+  await ChatMessage.create({
+    ...botMessage,
+    cancelled: abortController.signal.aborted,
+  });
+
+  // Clear the pending message
+  pendingMessageStore.set(null);
+
+  return botMessage;
+};
diff --git a/src/lib/stores/stores/llmProvider.ts b/src/lib/stores/stores/llmProvider.ts
index a61b0a0..218cb7a 100644
--- a/src/lib/stores/stores/llmProvider.ts
+++ b/src/lib/stores/stores/llmProvider.ts
@@ -9,7 +9,7 @@
 import IconOpenAi from "$lib/components/IconOpenAI.svelte";
 import IconBrain from "$lib/components/IconBrain.svelte";
 import { env } from "$env/dynamic/public";
-import { initOpenAi } from "$lib/llm/openai";
+import { initOpenAi, getProviderClient } from "$lib/llm/openai";

 const promptaBaseUrl = env.PUBLIC_PROMPTA_API_URL || "https://api.prompta.dev/v1/";
@@ -181,7 +181,15 @@ export const llmProviders = (() => {
     },

     byId: (id: string) => {
-      return get(store).providers.find((p) => p.id === id);
+      const provider = get(store).providers.find((p) => p.id === id);
+      if (provider) {
+        // Ensure provider has a client
+        return {
+          ...provider,
+          client: getProviderClient(provider),
+        };
+      }
+      return undefined;
     },

     getOpenAi: () => {
diff --git a/src/lib/utils.ts b/src/lib/utils.ts
index 1ed05b6..6199f20 100644
--- a/src/lib/utils.ts
+++ b/src/lib/utils.ts
@@ -198,3 +198,83 @@ export const autosize = (node: HTMLElement) => {
     },
   };
 };
+
+export const base64FromFile = (file: File): Promise<string> => {
+  return new Promise((resolve, reject) => {
+    const reader = new FileReader();
+    reader.readAsDataURL(file);
+    reader.onload = () => resolve(reader.result as string);
+    reader.onerror = (error) => reject(error);
+  });
+};
+
+/**
+ * Resizes an image, maintaining aspect ratio, and returns base64 data
+ */
+export async function processImageForAI(file: File): Promise<{ base64: string; file: File }> {
+  // Check file size (10MB limit)
+  const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB in bytes
+  if (file.size > MAX_FILE_SIZE) {
+    throw new Error("Image size must be less than 10MB");
+  }
+
+  // Create an image element to load the file
+  const img = new Image();
+  const imageUrl = URL.createObjectURL(file);
+
+  await new Promise((resolve, reject) => {
+    img.onload = resolve;
+    img.onerror = reject;
+    img.src = imageUrl;
+  });
+
+  // Target width for AI processing (adjust as needed)
+  const MAX_WIDTH = 1024;
+  const MAX_HEIGHT = 1024;
+
+  let width = img.width;
+  let height = img.height;
+
+  // Calculate new dimensions maintaining aspect ratio
+  if (width > height) {
+    if (width > MAX_WIDTH) {
+      height = Math.round((height * MAX_WIDTH) / width);
+      width = MAX_WIDTH;
+    }
+  } else {
+    if (height > MAX_HEIGHT) {
+      width = Math.round((width * MAX_HEIGHT) / height);
+      height = MAX_HEIGHT;
+    }
+  }
+
+  // Create canvas and resize image
+  const canvas = document.createElement("canvas");
+  canvas.width = width;
+  canvas.height = height;
+  const ctx = canvas.getContext("2d");
+  if (!ctx) throw new Error("Could not get canvas context");
+
+  ctx.drawImage(img, 0, 0, width, height);
+
+  // Convert to base64 with reduced quality
+  const base64 = canvas.toDataURL("image/jpeg", 0.8);
+
+  // Clean up
+  URL.revokeObjectURL(imageUrl);
+
+  // Convert base64 to File object
+  const byteString = atob(base64.split(",")[1]);
+  const mimeString = base64.split(",")[0].split(":")[1].split(";")[0];
+  const ab = new ArrayBuffer(byteString.length);
+  const ia = new Uint8Array(ab);
+  for (let i = 0; i < byteString.length; i++) {
+    ia[i] = byteString.charCodeAt(i);
+  }
+  const processedFile = new File([ab], file.name, { type: mimeString });
+
+  return {
+    base64,
+    file: processedFile,
+  };
+}
diff --git a/src/routes/+page.svelte b/src/routes/+page.svelte
index 422e8d1..27f1b1d 100644
--- a/src/routes/+page.svelte
+++ b/src/routes/+page.svelte
@@ -28,6 +28,7 @@
   import SyncModal from "$lib/components/SyncModal.svelte";
   import ModelPicker from "$lib/components/ModelPicker.svelte";
   import { chatModels } from "$lib/stores/stores/llmProvider";
+  import ImageAttachment from "$lib/components/ImageAttachment.svelte";
   const sys = getSystem();
   let textarea: HTMLTextAreaElement | null = null;
@@ -253,11 +254,13 @@
         e.preventDefault();
         handleSubmit($messageText);
       }}
-      class={classNames("flex flex-1 items-end rounded-lg border border-zinc-700", {
+      class={classNames("flex flex-1 rounded-lg border border-zinc-700", {
         "shadow-[0_0_0_2px_#5baba4] bg-teal-800/20 text-teal-200": isCommand,
         "bg-zinc-800": !isCommand,
       })}
     >
+      <ImageAttachment />
+
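End to end, the new send path threads these pieces together. A condensed sketch, not new API surface: `sendWithOptionalImage` is a hypothetical wrapper, and `createChatCompletion` is module-private to `src/lib/stores/stores/index.ts` in this diff; it is shown as callable here purely for illustration.

```ts
import { get } from "svelte/store";

async function sendWithOptionalImage(text: string, threadId: string, modelId: string) {
  // 1. ImageAttachment.svelte -> processImageForAI: resize to fit 1024x1024,
  //    re-encode as JPEG (quality 0.8), and keep the result as a data URL.
  const image = get(attachedImage);

  // 2. sendMessage: persist either plain text or the serialized part array.
  const content = image
    ? JSON.stringify([
        { type: "image_url", image_url: { url: image.base64 } },
        { type: "text", text },
      ])
    : text;
  const message = await ChatMessage.create({ threadId, role: "user", content });

  // 3. createChatCompletion: re-hydrate part arrays from the thread context,
  //    then stream through the provider's OpenAI client, which replaces the
  //    old hand-rolled fetchEventSource loop.
  const context = await ChatMessage.findThreadContext({ threadId });
  const messages = context.map((x) => ({
    role: x.role,
    content: x.content.startsWith("[{") ? JSON.parse(x.content) : x.content,
  }));
  await createChatCompletion({ messages, threadId, model: modelId });
  return message;
}
```

One thing worth double-checking in review: the old `promptGpt` assigned to what appears to be an outer `abortController` binding, while the new `createChatCompletion` declares a local `const abortController`. If a stop/cancel control relied on the outer binding, it may no longer be able to abort the stream.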