From b333b06772c89d96aacb5490d6a219fba7c09cc6 Mon Sep 17 00:00:00 2001 From: Mitja Felicijan Date: Thu, 12 Feb 2026 20:57:17 +0100 Subject: Engage! --- llama.cpp/tools/server/CMakeLists.txt | 70 + llama.cpp/tools/server/README-dev.md | 179 + llama.cpp/tools/server/README.md | 1782 ++++ llama.cpp/tools/server/bench/README.md | 119 + llama.cpp/tools/server/bench/bench.py | 322 + llama.cpp/tools/server/bench/prometheus.yml | 9 + llama.cpp/tools/server/bench/requirements.txt | 2 + llama.cpp/tools/server/bench/script.js | 162 + llama.cpp/tools/server/chat-llama2.sh | 109 + llama.cpp/tools/server/chat.mjs | 131 + llama.cpp/tools/server/chat.sh | 80 + llama.cpp/tools/server/public/index.html.gz | Bin 0 -> 1453103 bytes llama.cpp/tools/server/public/loading.html | 12 + .../tools/server/public_legacy/colorthemes.css | 402 + llama.cpp/tools/server/public_legacy/completion.js | 209 + llama.cpp/tools/server/public_legacy/favicon.ico | Bin 0 -> 4122 bytes .../tools/server/public_legacy/index-new.html | 1190 +++ llama.cpp/tools/server/public_legacy/index.html | 1301 +++ llama.cpp/tools/server/public_legacy/index.js | 1 + .../public_legacy/json-schema-to-grammar.mjs | 856 ++ llama.cpp/tools/server/public_legacy/loading.html | 12 + .../tools/server/public_legacy/prompt-formats.js | 331 + llama.cpp/tools/server/public_legacy/style.css | 954 ++ .../tools/server/public_legacy/system-prompts.js | 68 + .../server/public_legacy/theme-beeninorder.css | 228 + .../tools/server/public_legacy/theme-ketivah.css | 201 + .../server/public_legacy/theme-mangotango.css | 216 + .../server/public_legacy/theme-playground.css | 221 + .../server/public_legacy/theme-polarnight.css | 253 + .../tools/server/public_legacy/theme-snowstorm.css | 251 + .../tools/server/public_simplechat/datautils.mjs | 266 + .../tools/server/public_simplechat/index.html | 51 + llama.cpp/tools/server/public_simplechat/readme.md | 286 + .../tools/server/public_simplechat/simplechat.css | 79 + .../tools/server/public_simplechat/simplechat.js | 929 ++ .../public_simplechat/simplechat_screens.webp | Bin 0 -> 21376 bytes llama.cpp/tools/server/public_simplechat/ui.mjs | 211 + llama.cpp/tools/server/server-common.cpp | 1980 +++++ llama.cpp/tools/server/server-common.h | 366 + llama.cpp/tools/server/server-context.cpp | 4105 +++++++++ llama.cpp/tools/server/server-context.h | 131 + llama.cpp/tools/server/server-http.cpp | 406 + llama.cpp/tools/server/server-http.h | 78 + llama.cpp/tools/server/server-models.cpp | 1092 +++ llama.cpp/tools/server/server-models.h | 203 + llama.cpp/tools/server/server-queue.cpp | 450 + llama.cpp/tools/server/server-queue.h | 197 + llama.cpp/tools/server/server-task.cpp | 2005 +++++ llama.cpp/tools/server/server-task.h | 620 ++ llama.cpp/tools/server/server.cpp | 322 + llama.cpp/tools/server/tests/.gitignore | 2 + llama.cpp/tools/server/tests/README.md | 96 + llama.cpp/tools/server/tests/conftest.py | 21 + llama.cpp/tools/server/tests/pytest.ini | 4 + llama.cpp/tools/server/tests/requirements.txt | 8 + llama.cpp/tools/server/tests/tests.sh | 23 + llama.cpp/tools/server/tests/unit/test_basic.py | 96 + .../server/tests/unit/test_chat_completion.py | 512 ++ .../server/tests/unit/test_compat_anthropic.py | 896 ++ .../server/tests/unit/test_compat_oai_responses.py | 73 + .../tools/server/tests/unit/test_completion.py | 608 ++ .../tools/server/tests/unit/test_ctx_shift.py | 89 + .../tools/server/tests/unit/test_embedding.py | 257 + llama.cpp/tools/server/tests/unit/test_infill.py | 77 + llama.cpp/tools/server/tests/unit/test_lora.py | 
115 + llama.cpp/tools/server/tests/unit/test_rerank.py | 146 + llama.cpp/tools/server/tests/unit/test_router.py | 194 + llama.cpp/tools/server/tests/unit/test_security.py | 127 + llama.cpp/tools/server/tests/unit/test_sleep.py | 39 + .../tools/server/tests/unit/test_slot_save.py | 98 + .../tools/server/tests/unit/test_speculative.py | 131 + llama.cpp/tools/server/tests/unit/test_template.py | 105 + llama.cpp/tools/server/tests/unit/test_tokenize.py | 59 + .../tools/server/tests/unit/test_tool_call.py | 625 ++ .../tools/server/tests/unit/test_vision_api.py | 160 + llama.cpp/tools/server/tests/utils.py | 643 ++ llama.cpp/tools/server/themes/README.md | 5 + .../tools/server/themes/buttons-top/README.md | 7 + .../server/themes/buttons-top/buttons_top.png | Bin 0 -> 119747 bytes .../tools/server/themes/buttons-top/favicon.ico | Bin 0 -> 4122 bytes .../tools/server/themes/buttons-top/index.html | 1052 +++ llama.cpp/tools/server/themes/wild/README.md | 5 + llama.cpp/tools/server/themes/wild/favicon.ico | Bin 0 -> 4122 bytes llama.cpp/tools/server/themes/wild/index.html | 1056 +++ llama.cpp/tools/server/themes/wild/llama_cpp.png | Bin 0 -> 76484 bytes .../tools/server/themes/wild/llamapattern.png | Bin 0 -> 259586 bytes llama.cpp/tools/server/themes/wild/wild.png | Bin 0 -> 496463 bytes llama.cpp/tools/server/webui/.gitignore | 28 + llama.cpp/tools/server/webui/.npmrc | 1 + llama.cpp/tools/server/webui/.prettierignore | 9 + llama.cpp/tools/server/webui/.prettierrc | 16 + .../webui/.storybook/ModeWatcherDecorator.svelte | 36 + .../.storybook/TooltipProviderDecorator.svelte | 13 + llama.cpp/tools/server/webui/.storybook/main.ts | 17 + llama.cpp/tools/server/webui/.storybook/preview.ts | 42 + .../tools/server/webui/.storybook/vitest.setup.ts | 12 + llama.cpp/tools/server/webui/README.md | 687 ++ llama.cpp/tools/server/webui/components.json | 16 + .../high-level-architecture-simplified.md | 106 + .../docs/architecture/high-level-architecture.md | 279 + .../tools/server/webui/docs/flows/chat-flow.md | 174 + .../server/webui/docs/flows/conversations-flow.md | 155 + .../docs/flows/data-flow-simplified-model-mode.md | 45 + .../docs/flows/data-flow-simplified-router-mode.md | 77 + .../tools/server/webui/docs/flows/database-flow.md | 155 + .../tools/server/webui/docs/flows/models-flow.md | 181 + .../tools/server/webui/docs/flows/server-flow.md | 76 + .../tools/server/webui/docs/flows/settings-flow.md | 144 + llama.cpp/tools/server/webui/eslint.config.js | 49 + llama.cpp/tools/server/webui/package-lock.json | 9343 ++++++++++++++++++++ llama.cpp/tools/server/webui/package.json | 94 + llama.cpp/tools/server/webui/playwright.config.ts | 11 + llama.cpp/tools/server/webui/scripts/dev.sh | 57 + .../server/webui/scripts/install-git-hooks.sh | 202 + llama.cpp/tools/server/webui/scripts/post-build.sh | 3 + llama.cpp/tools/server/webui/src/app.css | 138 + llama.cpp/tools/server/webui/src/app.d.ts | 133 + llama.cpp/tools/server/webui/src/app.html | 12 + .../ChatAttachments/ChatAttachmentPreview.svelte | 283 + .../ChatAttachmentThumbnailFile.svelte | 165 + .../ChatAttachmentThumbnailImage.svelte | 64 + .../ChatAttachments/ChatAttachmentsList.svelte | 243 + .../ChatAttachments/ChatAttachmentsViewAll.svelte | 117 + .../components/app/chat/ChatForm/ChatForm.svelte | 315 + .../ChatFormActionFileAttachments.svelte | 123 + .../ChatFormActions/ChatFormActionRecord.svelte | 52 + .../ChatFormActions/ChatFormActionSubmit.svelte | 55 + .../ChatFormActions/ChatFormActions.svelte | 204 + 
.../ChatForm/ChatFormFileInputInvisible.svelte | 30 + .../app/chat/ChatForm/ChatFormHelperText.svelte | 17 + .../app/chat/ChatForm/ChatFormTextarea.svelte | 59 + .../app/chat/ChatMessages/ChatMessage.svelte | 286 + .../chat/ChatMessages/ChatMessageActions.svelte | 100 + .../chat/ChatMessages/ChatMessageAssistant.svelte | 418 + .../ChatMessageBranchingControls.svelte | 84 + .../chat/ChatMessages/ChatMessageEditForm.svelte | 391 + .../chat/ChatMessages/ChatMessageStatistics.svelte | 175 + .../app/chat/ChatMessages/ChatMessageSystem.svelte | 216 + .../ChatMessages/ChatMessageThinkingBlock.svelte | 68 + .../app/chat/ChatMessages/ChatMessageUser.svelte | 163 + .../app/chat/ChatMessages/ChatMessages.svelte | 143 + .../app/chat/ChatScreen/ChatScreen.svelte | 617 ++ .../chat/ChatScreen/ChatScreenDragOverlay.svelte | 17 + .../app/chat/ChatScreen/ChatScreenHeader.svelte | 28 + .../ChatScreen/ChatScreenProcessingInfo.svelte | 120 + .../app/chat/ChatSettings/ChatSettings.svelte | 508 ++ .../chat/ChatSettings/ChatSettingsFields.svelte | 255 + .../chat/ChatSettings/ChatSettingsFooter.svelte | 59 + .../ChatSettingsImportExportTab.svelte | 317 + .../ChatSettingsParameterSourceIndicator.svelte | 18 + .../app/chat/ChatSidebar/ChatSidebar.svelte | 211 + .../app/chat/ChatSidebar/ChatSidebarActions.svelte | 81 + .../ChatSidebar/ChatSidebarConversationItem.svelte | 200 + .../app/chat/ChatSidebar/ChatSidebarSearch.svelte | 19 + .../handle-mobile-sidebar-item-click.ts | 9 + .../app/dialogs/DialogChatAttachmentPreview.svelte | 67 + .../dialogs/DialogChatAttachmentsViewAll.svelte | 54 + .../components/app/dialogs/DialogChatError.svelte | 70 + .../app/dialogs/DialogChatSettings.svelte | 37 + .../app/dialogs/DialogConfirmation.svelte | 72 + .../app/dialogs/DialogConversationSelection.svelte | 68 + .../dialogs/DialogConversationTitleUpdate.svelte | 46 + .../app/dialogs/DialogEmptyFileAlert.svelte | 61 + .../app/dialogs/DialogModelInformation.svelte | 211 + .../app/dialogs/DialogModelNotAvailable.svelte | 76 + .../server/webui/src/lib/components/app/index.ts | 75 + .../lib/components/app/misc/ActionButton.svelte | 47 + .../lib/components/app/misc/ActionDropdown.svelte | 86 + .../components/app/misc/BadgeChatStatistic.svelte | 44 + .../src/lib/components/app/misc/BadgeInfo.svelte | 27 + .../lib/components/app/misc/BadgeModality.svelte | 39 + .../components/app/misc/CodePreviewDialog.svelte | 93 + .../app/misc/ConversationSelection.svelte | 205 + .../components/app/misc/CopyToClipboardIcon.svelte | 18 + .../app/misc/KeyboardShortcutInfo.svelte | 31 + .../lib/components/app/misc/MarkdownContent.svelte | 870 ++ .../lib/components/app/misc/RemoveButton.svelte | 26 + .../src/lib/components/app/misc/SearchInput.svelte | 73 + .../app/misc/SyntaxHighlightedCode.svelte | 97 + .../lib/components/app/models/ModelBadge.svelte | 56 + .../components/app/models/ModelsSelector.svelte | 555 ++ .../components/app/server/ServerErrorSplash.svelte | 282 + .../app/server/ServerLoadingSplash.svelte | 33 + .../lib/components/app/server/ServerStatus.svelte | 65 + .../ui/alert-dialog/alert-dialog-action.svelte | 18 + .../ui/alert-dialog/alert-dialog-cancel.svelte | 18 + .../ui/alert-dialog/alert-dialog-content.svelte | 35 + .../alert-dialog/alert-dialog-description.svelte | 17 + .../ui/alert-dialog/alert-dialog-footer.svelte | 23 + .../ui/alert-dialog/alert-dialog-header.svelte | 20 + .../ui/alert-dialog/alert-dialog-overlay.svelte | 20 + .../ui/alert-dialog/alert-dialog-title.svelte | 17 + .../ui/alert-dialog/alert-dialog-trigger.svelte | 7 + 
.../src/lib/components/ui/alert-dialog/index.ts | 39 + .../components/ui/alert/alert-description.svelte | 23 + .../src/lib/components/ui/alert/alert-title.svelte | 20 + .../webui/src/lib/components/ui/alert/alert.svelte | 44 + .../webui/src/lib/components/ui/alert/index.ts | 14 + .../webui/src/lib/components/ui/badge/badge.svelte | 49 + .../webui/src/lib/components/ui/badge/index.ts | 2 + .../src/lib/components/ui/button/button.svelte | 87 + .../webui/src/lib/components/ui/button/index.ts | 17 + .../src/lib/components/ui/card/card-action.svelte | 20 + .../src/lib/components/ui/card/card-content.svelte | 15 + .../lib/components/ui/card/card-description.svelte | 20 + .../src/lib/components/ui/card/card-footer.svelte | 20 + .../src/lib/components/ui/card/card-header.svelte | 23 + .../src/lib/components/ui/card/card-title.svelte | 20 + .../webui/src/lib/components/ui/card/card.svelte | 23 + .../webui/src/lib/components/ui/card/index.ts | 25 + .../src/lib/components/ui/checkbox/checkbox.svelte | 36 + .../webui/src/lib/components/ui/checkbox/index.ts | 6 + .../ui/collapsible/collapsible-content.svelte | 7 + .../ui/collapsible/collapsible-trigger.svelte | 7 + .../components/ui/collapsible/collapsible.svelte | 11 + .../src/lib/components/ui/collapsible/index.ts | 13 + .../lib/components/ui/dialog/dialog-close.svelte | 7 + .../lib/components/ui/dialog/dialog-content.svelte | 43 + .../components/ui/dialog/dialog-description.svelte | 17 + .../lib/components/ui/dialog/dialog-footer.svelte | 20 + .../lib/components/ui/dialog/dialog-header.svelte | 20 + .../lib/components/ui/dialog/dialog-overlay.svelte | 20 + .../lib/components/ui/dialog/dialog-title.svelte | 17 + .../lib/components/ui/dialog/dialog-trigger.svelte | 7 + .../webui/src/lib/components/ui/dialog/index.ts | 37 + .../dropdown-menu-checkbox-item.svelte | 41 + .../ui/dropdown-menu/dropdown-menu-content.svelte | 27 + .../dropdown-menu-group-heading.svelte | 22 + .../ui/dropdown-menu/dropdown-menu-group.svelte | 7 + .../ui/dropdown-menu/dropdown-menu-item.svelte | 27 + .../ui/dropdown-menu/dropdown-menu-label.svelte | 24 + .../dropdown-menu/dropdown-menu-radio-group.svelte | 16 + .../dropdown-menu/dropdown-menu-radio-item.svelte | 31 + .../dropdown-menu/dropdown-menu-separator.svelte | 17 + .../ui/dropdown-menu/dropdown-menu-shortcut.svelte | 20 + .../dropdown-menu/dropdown-menu-sub-content.svelte | 20 + .../dropdown-menu/dropdown-menu-sub-trigger.svelte | 29 + .../ui/dropdown-menu/dropdown-menu-trigger.svelte | 7 + .../src/lib/components/ui/dropdown-menu/index.ts | 49 + .../webui/src/lib/components/ui/input/index.ts | 7 + .../webui/src/lib/components/ui/input/input.svelte | 51 + .../webui/src/lib/components/ui/label/index.ts | 7 + .../webui/src/lib/components/ui/label/label.svelte | 20 + .../webui/src/lib/components/ui/popover/index.ts | 19 + .../lib/components/ui/popover/popover-close.svelte | 7 + .../components/ui/popover/popover-content.svelte | 37 + .../components/ui/popover/popover-portal.svelte | 7 + .../components/ui/popover/popover-trigger.svelte | 17 + .../src/lib/components/ui/popover/popover.svelte | 7 + .../src/lib/components/ui/scroll-area/index.ts | 10 + .../ui/scroll-area/scroll-area-scrollbar.svelte | 31 + .../components/ui/scroll-area/scroll-area.svelte | 40 + .../webui/src/lib/components/ui/select/index.ts | 37 + .../lib/components/ui/select/select-content.svelte | 111 + .../ui/select/select-group-heading.svelte | 21 + .../lib/components/ui/select/select-group.svelte | 7 + .../lib/components/ui/select/select-item.svelte | 38 + 
.../lib/components/ui/select/select-label.svelte | 20 + .../ui/select/select-scroll-down-button.svelte | 20 + .../ui/select/select-scroll-up-button.svelte | 20 + .../components/ui/select/select-separator.svelte | 18 + .../lib/components/ui/select/select-trigger.svelte | 40 + .../webui/src/lib/components/ui/separator/index.ts | 7 + .../lib/components/ui/separator/separator.svelte | 20 + .../webui/src/lib/components/ui/sheet/index.ts | 36 + .../src/lib/components/ui/sheet/sheet-close.svelte | 7 + .../lib/components/ui/sheet/sheet-content.svelte | 60 + .../components/ui/sheet/sheet-description.svelte | 17 + .../lib/components/ui/sheet/sheet-footer.svelte | 20 + .../lib/components/ui/sheet/sheet-header.svelte | 20 + .../lib/components/ui/sheet/sheet-overlay.svelte | 20 + .../src/lib/components/ui/sheet/sheet-title.svelte | 17 + .../lib/components/ui/sheet/sheet-trigger.svelte | 7 + .../src/lib/components/ui/sidebar/constants.ts | 6 + .../lib/components/ui/sidebar/context.svelte.ts | 79 + .../webui/src/lib/components/ui/sidebar/index.ts | 75 + .../components/ui/sidebar/sidebar-content.svelte | 24 + .../components/ui/sidebar/sidebar-footer.svelte | 21 + .../ui/sidebar/sidebar-group-action.svelte | 36 + .../ui/sidebar/sidebar-group-content.svelte | 21 + .../ui/sidebar/sidebar-group-label.svelte | 34 + .../lib/components/ui/sidebar/sidebar-group.svelte | 21 + .../components/ui/sidebar/sidebar-header.svelte | 21 + .../lib/components/ui/sidebar/sidebar-input.svelte | 21 + .../lib/components/ui/sidebar/sidebar-inset.svelte | 24 + .../ui/sidebar/sidebar-menu-action.svelte | 43 + .../ui/sidebar/sidebar-menu-badge.svelte | 29 + .../ui/sidebar/sidebar-menu-button.svelte | 106 + .../components/ui/sidebar/sidebar-menu-item.svelte | 21 + .../ui/sidebar/sidebar-menu-skeleton.svelte | 36 + .../ui/sidebar/sidebar-menu-sub-button.svelte | 43 + .../ui/sidebar/sidebar-menu-sub-item.svelte | 21 + .../components/ui/sidebar/sidebar-menu-sub.svelte | 25 + .../lib/components/ui/sidebar/sidebar-menu.svelte | 21 + .../components/ui/sidebar/sidebar-provider.svelte | 50 + .../lib/components/ui/sidebar/sidebar-rail.svelte | 36 + .../components/ui/sidebar/sidebar-separator.svelte | 19 + .../components/ui/sidebar/sidebar-trigger.svelte | 35 + .../src/lib/components/ui/sidebar/sidebar.svelte | 101 + .../webui/src/lib/components/ui/skeleton/index.ts | 7 + .../src/lib/components/ui/skeleton/skeleton.svelte | 17 + .../webui/src/lib/components/ui/switch/index.ts | 7 + .../src/lib/components/ui/switch/switch.svelte | 29 + .../webui/src/lib/components/ui/table/index.ts | 28 + .../src/lib/components/ui/table/table-body.svelte | 20 + .../lib/components/ui/table/table-caption.svelte | 20 + .../src/lib/components/ui/table/table-cell.svelte | 23 + .../lib/components/ui/table/table-footer.svelte | 20 + .../src/lib/components/ui/table/table-head.svelte | 23 + .../lib/components/ui/table/table-header.svelte | 20 + .../src/lib/components/ui/table/table-row.svelte | 23 + .../webui/src/lib/components/ui/table/table.svelte | 22 + .../webui/src/lib/components/ui/textarea/index.ts | 7 + .../src/lib/components/ui/textarea/textarea.svelte | 22 + .../webui/src/lib/components/ui/tooltip/index.ts | 21 + .../components/ui/tooltip/tooltip-content.svelte | 47 + .../components/ui/tooltip/tooltip-trigger.svelte | 7 + .../server/webui/src/lib/components/ui/utils.ts | 13 + .../server/webui/src/lib/constants/auto-scroll.ts | 3 + .../webui/src/lib/constants/binary-detection.ts | 14 + .../webui/src/lib/constants/default-context.ts | 1 + 
.../src/lib/constants/floating-ui-constraints.ts | 2 + .../tools/server/webui/src/lib/constants/icons.ts | 32 + .../webui/src/lib/constants/input-classes.ts | 6 + .../webui/src/lib/constants/latex-protection.ts | 35 + .../server/webui/src/lib/constants/literal-html.ts | 15 + .../webui/src/lib/constants/localstorage-keys.ts | 2 + .../webui/src/lib/constants/max-bundle-size.ts | 1 + .../server/webui/src/lib/constants/precision.ts | 2 + .../webui/src/lib/constants/processing-info.ts | 1 + .../webui/src/lib/constants/settings-config.ts | 117 + .../src/lib/constants/supported-file-types.ts | 217 + .../webui/src/lib/constants/table-html-restorer.ts | 20 + .../webui/src/lib/constants/tooltip-config.ts | 1 + .../server/webui/src/lib/constants/viewport.ts | 1 + .../tools/server/webui/src/lib/enums/attachment.ts | 10 + llama.cpp/tools/server/webui/src/lib/enums/chat.ts | 4 + .../tools/server/webui/src/lib/enums/files.ts | 206 + .../tools/server/webui/src/lib/enums/index.ts | 23 + .../tools/server/webui/src/lib/enums/model.ts | 5 + .../tools/server/webui/src/lib/enums/server.ts | 20 + .../server/webui/src/lib/hooks/is-mobile.svelte.ts | 8 + .../hooks/use-model-change-validation.svelte.ts | 118 + .../src/lib/hooks/use-processing-state.svelte.ts | 262 + .../webui/src/lib/markdown/enhance-code-blocks.ts | 162 + .../server/webui/src/lib/markdown/enhance-links.ts | 33 + .../server/webui/src/lib/markdown/literal-html.ts | 121 + .../webui/src/lib/markdown/table-html-restorer.ts | 181 + .../tools/server/webui/src/lib/services/chat.ts | 784 ++ .../server/webui/src/lib/services/database.ts | 400 + .../tools/server/webui/src/lib/services/index.ts | 5 + .../tools/server/webui/src/lib/services/models.ts | 124 + .../webui/src/lib/services/parameter-sync.spec.ts | 148 + .../webui/src/lib/services/parameter-sync.ts | 279 + .../tools/server/webui/src/lib/services/props.ts | 77 + .../server/webui/src/lib/stores/chat.svelte.ts | 1487 ++++ .../webui/src/lib/stores/conversations.svelte.ts | 662 ++ .../server/webui/src/lib/stores/models.svelte.ts | 605 ++ .../webui/src/lib/stores/persisted.svelte.ts | 50 + .../server/webui/src/lib/stores/server.svelte.ts | 140 + .../server/webui/src/lib/stores/settings.svelte.ts | 421 + .../tools/server/webui/src/lib/types/api.d.ts | 430 + .../tools/server/webui/src/lib/types/chat.d.ts | 55 + .../tools/server/webui/src/lib/types/database.d.ts | 85 + .../tools/server/webui/src/lib/types/index.ts | 70 + .../tools/server/webui/src/lib/types/models.d.ts | 21 + .../tools/server/webui/src/lib/types/settings.d.ts | 67 + .../server/webui/src/lib/utils/api-headers.ts | 22 + .../webui/src/lib/utils/api-key-validation.ts | 45 + .../webui/src/lib/utils/attachment-display.ts | 61 + .../server/webui/src/lib/utils/attachment-type.ts | 105 + .../server/webui/src/lib/utils/audio-recording.ts | 226 + .../webui/src/lib/utils/autoresize-textarea.ts | 10 + .../tools/server/webui/src/lib/utils/branching.ts | 283 + .../server/webui/src/lib/utils/browser-only.ts | 35 + .../tools/server/webui/src/lib/utils/clipboard.ts | 259 + .../server/webui/src/lib/utils/config-helpers.ts | 51 + .../webui/src/lib/utils/conversation-utils.ts | 30 + .../webui/src/lib/utils/convert-files-to-extra.ts | 192 + .../server/webui/src/lib/utils/file-preview.ts | 36 + .../tools/server/webui/src/lib/utils/file-type.ts | 222 + .../tools/server/webui/src/lib/utils/formatters.ts | 53 + .../tools/server/webui/src/lib/utils/index.ts | 95 + .../server/webui/src/lib/utils/is-ime-composing.ts | 5 + 
.../server/webui/src/lib/utils/latex-protection.ts | 270 + .../src/lib/utils/modality-file-validation.ts | 162 + .../server/webui/src/lib/utils/model-names.ts | 56 + .../server/webui/src/lib/utils/pdf-processing.ts | 150 + .../server/webui/src/lib/utils/portal-to-body.ts | 20 + .../tools/server/webui/src/lib/utils/precision.ts | 25 + .../webui/src/lib/utils/process-uploaded-files.ts | 136 + .../tools/server/webui/src/lib/utils/svg-to-png.ts | 71 + .../src/lib/utils/syntax-highlight-language.ts | 145 + .../tools/server/webui/src/lib/utils/text-files.ts | 97 + llama.cpp/tools/server/webui/src/lib/utils/text.ts | 7 + .../server/webui/src/lib/utils/webp-to-png.ts | 73 + .../tools/server/webui/src/routes/+error.svelte | 70 + .../tools/server/webui/src/routes/+layout.svelte | 223 + .../tools/server/webui/src/routes/+page.svelte | 91 + llama.cpp/tools/server/webui/src/routes/+page.ts | 6 + .../server/webui/src/routes/chat/[id]/+page.svelte | 176 + .../server/webui/src/routes/chat/[id]/+page.ts | 6 + .../server/webui/src/styles/katex-custom.scss | 13 + llama.cpp/tools/server/webui/static/favicon.svg | 1 + llama.cpp/tools/server/webui/static/loading.html | 12 + llama.cpp/tools/server/webui/svelte.config.js | 34 + .../tests/client/components/TestWrapper.svelte | 17 + .../server/webui/tests/client/page.svelte.test.ts | 11 + .../tools/server/webui/tests/e2e/demo.test.ts | 6 + .../webui/tests/stories/ChatForm.stories.svelte | 161 + .../webui/tests/stories/ChatMessage.stories.svelte | 207 + .../tests/stories/ChatSettings.stories.svelte | 19 + .../webui/tests/stories/ChatSidebar.stories.svelte | 97 + .../server/webui/tests/stories/Introduction.mdx | 44 + .../tests/stories/MarkdownContent.stories.svelte | 130 + .../webui/tests/stories/fixtures/ai-tutorial.ts | 164 + .../webui/tests/stories/fixtures/api-docs.ts | 160 + .../webui/tests/stories/fixtures/assets/1.jpg | Bin 0 -> 44891 bytes .../fixtures/assets/beautiful-flowers-lotus.webp | Bin 0 -> 817630 bytes .../tests/stories/fixtures/assets/example.pdf | Bin 0 -> 351048 bytes .../tests/stories/fixtures/assets/hf-logo.svg | 8 + .../webui/tests/stories/fixtures/blog-post.ts | 125 + .../webui/tests/stories/fixtures/data-analysis.ts | 124 + .../server/webui/tests/stories/fixtures/empty.ts | 2 + .../webui/tests/stories/fixtures/math-formulas.ts | 221 + .../server/webui/tests/stories/fixtures/readme.ts | 136 + .../tests/stories/fixtures/storybook-mocks.ts | 81 + .../server/webui/tests/unit/clipboard.test.ts | 423 + .../webui/tests/unit/latex-protection.test.ts | 376 + .../server/webui/tests/unit/model-names.test.ts | 51 + llama.cpp/tools/server/webui/tsconfig.json | 19 + llama.cpp/tools/server/webui/vite.config.ts | 166 + .../tools/server/webui/vitest-setup-client.ts | 2 + 433 files changed, 70068 insertions(+) create mode 100644 llama.cpp/tools/server/CMakeLists.txt create mode 100644 llama.cpp/tools/server/README-dev.md create mode 100644 llama.cpp/tools/server/README.md create mode 100644 llama.cpp/tools/server/bench/README.md create mode 100644 llama.cpp/tools/server/bench/bench.py create mode 100644 llama.cpp/tools/server/bench/prometheus.yml create mode 100644 llama.cpp/tools/server/bench/requirements.txt create mode 100644 llama.cpp/tools/server/bench/script.js create mode 100755 llama.cpp/tools/server/chat-llama2.sh create mode 100644 llama.cpp/tools/server/chat.mjs create mode 100755 llama.cpp/tools/server/chat.sh create mode 100644 llama.cpp/tools/server/public/index.html.gz create mode 100644 llama.cpp/tools/server/public/loading.html create mode 
100755 llama.cpp/tools/server/public_legacy/colorthemes.css create mode 100644 llama.cpp/tools/server/public_legacy/completion.js create mode 100644 llama.cpp/tools/server/public_legacy/favicon.ico create mode 100644 llama.cpp/tools/server/public_legacy/index-new.html create mode 100644 llama.cpp/tools/server/public_legacy/index.html create mode 100644 llama.cpp/tools/server/public_legacy/index.js create mode 100644 llama.cpp/tools/server/public_legacy/json-schema-to-grammar.mjs create mode 100644 llama.cpp/tools/server/public_legacy/loading.html create mode 100644 llama.cpp/tools/server/public_legacy/prompt-formats.js create mode 100644 llama.cpp/tools/server/public_legacy/style.css create mode 100644 llama.cpp/tools/server/public_legacy/system-prompts.js create mode 100755 llama.cpp/tools/server/public_legacy/theme-beeninorder.css create mode 100755 llama.cpp/tools/server/public_legacy/theme-ketivah.css create mode 100755 llama.cpp/tools/server/public_legacy/theme-mangotango.css create mode 100755 llama.cpp/tools/server/public_legacy/theme-playground.css create mode 100755 llama.cpp/tools/server/public_legacy/theme-polarnight.css create mode 100755 llama.cpp/tools/server/public_legacy/theme-snowstorm.css create mode 100644 llama.cpp/tools/server/public_simplechat/datautils.mjs create mode 100644 llama.cpp/tools/server/public_simplechat/index.html create mode 100644 llama.cpp/tools/server/public_simplechat/readme.md create mode 100644 llama.cpp/tools/server/public_simplechat/simplechat.css create mode 100644 llama.cpp/tools/server/public_simplechat/simplechat.js create mode 100644 llama.cpp/tools/server/public_simplechat/simplechat_screens.webp create mode 100644 llama.cpp/tools/server/public_simplechat/ui.mjs create mode 100644 llama.cpp/tools/server/server-common.cpp create mode 100644 llama.cpp/tools/server/server-common.h create mode 100644 llama.cpp/tools/server/server-context.cpp create mode 100644 llama.cpp/tools/server/server-context.h create mode 100644 llama.cpp/tools/server/server-http.cpp create mode 100644 llama.cpp/tools/server/server-http.h create mode 100644 llama.cpp/tools/server/server-models.cpp create mode 100644 llama.cpp/tools/server/server-models.h create mode 100644 llama.cpp/tools/server/server-queue.cpp create mode 100644 llama.cpp/tools/server/server-queue.h create mode 100644 llama.cpp/tools/server/server-task.cpp create mode 100644 llama.cpp/tools/server/server-task.h create mode 100644 llama.cpp/tools/server/server.cpp create mode 100644 llama.cpp/tools/server/tests/.gitignore create mode 100644 llama.cpp/tools/server/tests/README.md create mode 100644 llama.cpp/tools/server/tests/conftest.py create mode 100644 llama.cpp/tools/server/tests/pytest.ini create mode 100644 llama.cpp/tools/server/tests/requirements.txt create mode 100755 llama.cpp/tools/server/tests/tests.sh create mode 100644 llama.cpp/tools/server/tests/unit/test_basic.py create mode 100644 llama.cpp/tools/server/tests/unit/test_chat_completion.py create mode 100644 llama.cpp/tools/server/tests/unit/test_compat_anthropic.py create mode 100644 llama.cpp/tools/server/tests/unit/test_compat_oai_responses.py create mode 100644 llama.cpp/tools/server/tests/unit/test_completion.py create mode 100644 llama.cpp/tools/server/tests/unit/test_ctx_shift.py create mode 100644 llama.cpp/tools/server/tests/unit/test_embedding.py create mode 100644 llama.cpp/tools/server/tests/unit/test_infill.py create mode 100644 llama.cpp/tools/server/tests/unit/test_lora.py create mode 100644 
llama.cpp/tools/server/tests/unit/test_rerank.py create mode 100644 llama.cpp/tools/server/tests/unit/test_router.py create mode 100644 llama.cpp/tools/server/tests/unit/test_security.py create mode 100644 llama.cpp/tools/server/tests/unit/test_sleep.py create mode 100644 llama.cpp/tools/server/tests/unit/test_slot_save.py create mode 100644 llama.cpp/tools/server/tests/unit/test_speculative.py create mode 100644 llama.cpp/tools/server/tests/unit/test_template.py create mode 100644 llama.cpp/tools/server/tests/unit/test_tokenize.py create mode 100755 llama.cpp/tools/server/tests/unit/test_tool_call.py create mode 100644 llama.cpp/tools/server/tests/unit/test_vision_api.py create mode 100644 llama.cpp/tools/server/tests/utils.py create mode 100644 llama.cpp/tools/server/themes/README.md create mode 100644 llama.cpp/tools/server/themes/buttons-top/README.md create mode 100644 llama.cpp/tools/server/themes/buttons-top/buttons_top.png create mode 100644 llama.cpp/tools/server/themes/buttons-top/favicon.ico create mode 100644 llama.cpp/tools/server/themes/buttons-top/index.html create mode 100644 llama.cpp/tools/server/themes/wild/README.md create mode 100644 llama.cpp/tools/server/themes/wild/favicon.ico create mode 100644 llama.cpp/tools/server/themes/wild/index.html create mode 100644 llama.cpp/tools/server/themes/wild/llama_cpp.png create mode 100644 llama.cpp/tools/server/themes/wild/llamapattern.png create mode 100644 llama.cpp/tools/server/themes/wild/wild.png create mode 100644 llama.cpp/tools/server/webui/.gitignore create mode 100644 llama.cpp/tools/server/webui/.npmrc create mode 100644 llama.cpp/tools/server/webui/.prettierignore create mode 100644 llama.cpp/tools/server/webui/.prettierrc create mode 100644 llama.cpp/tools/server/webui/.storybook/ModeWatcherDecorator.svelte create mode 100644 llama.cpp/tools/server/webui/.storybook/TooltipProviderDecorator.svelte create mode 100644 llama.cpp/tools/server/webui/.storybook/main.ts create mode 100644 llama.cpp/tools/server/webui/.storybook/preview.ts create mode 100644 llama.cpp/tools/server/webui/.storybook/vitest.setup.ts create mode 100644 llama.cpp/tools/server/webui/README.md create mode 100644 llama.cpp/tools/server/webui/components.json create mode 100644 llama.cpp/tools/server/webui/docs/architecture/high-level-architecture-simplified.md create mode 100644 llama.cpp/tools/server/webui/docs/architecture/high-level-architecture.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/chat-flow.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/conversations-flow.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-model-mode.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-router-mode.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/database-flow.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/models-flow.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/server-flow.md create mode 100644 llama.cpp/tools/server/webui/docs/flows/settings-flow.md create mode 100644 llama.cpp/tools/server/webui/eslint.config.js create mode 100644 llama.cpp/tools/server/webui/package-lock.json create mode 100644 llama.cpp/tools/server/webui/package.json create mode 100644 llama.cpp/tools/server/webui/playwright.config.ts create mode 100644 llama.cpp/tools/server/webui/scripts/dev.sh create mode 100755 llama.cpp/tools/server/webui/scripts/install-git-hooks.sh create mode 100755 llama.cpp/tools/server/webui/scripts/post-build.sh create 
mode 100644 llama.cpp/tools/server/webui/src/app.css create mode 100644 llama.cpp/tools/server/webui/src/app.d.ts create mode 100644 llama.cpp/tools/server/webui/src/app.html create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentPreview.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailFile.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailImage.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsViewAll.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionFileAttachments.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionRecord.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionSubmit.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormFileInputInvisible.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormHelperText.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormTextarea.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageActions.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageAssistant.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageBranchingControls.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageEditForm.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageStatistics.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageSystem.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageThinkingBlock.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageUser.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessages.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenDragOverlay.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenHeader.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenProcessingInfo.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettings.svelte create mode 100644 
llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsFields.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsFooter.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsImportExportTab.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsParameterSourceIndicator.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebar.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarActions.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarConversationItem.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarSearch.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSidebar/handle-mobile-sidebar-item-click.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogChatAttachmentPreview.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogChatAttachmentsViewAll.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogChatError.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogChatSettings.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogConfirmation.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogConversationSelection.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogConversationTitleUpdate.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogEmptyFileAlert.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogModelInformation.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/dialogs/DialogModelNotAvailable.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/ActionButton.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/ActionDropdown.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/BadgeChatStatistic.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/BadgeInfo.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/BadgeModality.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/CodePreviewDialog.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/ConversationSelection.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/CopyToClipboardIcon.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/KeyboardShortcutInfo.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/MarkdownContent.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/RemoveButton.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/SearchInput.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/misc/SyntaxHighlightedCode.svelte create mode 100644 
llama.cpp/tools/server/webui/src/lib/components/app/models/ModelBadge.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/models/ModelsSelector.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/server/ServerErrorSplash.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/server/ServerLoadingSplash.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/app/server/ServerStatus.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-action.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-cancel.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-description.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-overlay.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-title.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/alert-dialog-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert-dialog/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert/alert-description.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert/alert-title.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert/alert.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/alert/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/badge/badge.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/badge/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/button/button.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/button/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-action.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-description.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card-title.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/card.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/card/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/checkbox/checkbox.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/checkbox/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/collapsible/collapsible-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/collapsible/collapsible-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/collapsible/collapsible.svelte create mode 100644 
llama.cpp/tools/server/webui/src/lib/components/ui/collapsible/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-close.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-description.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-overlay.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-title.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/dialog-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dialog/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-checkbox-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-group-heading.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-group.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-label.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-radio-group.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-radio-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-separator.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-shortcut.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-sub-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-sub-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/dropdown-menu-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/dropdown-menu/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/input/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/input/input.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/label/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/label/label.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/popover-close.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/popover-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/popover-portal.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/popover-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/popover/popover.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/scroll-area/index.ts 
create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/scroll-area/scroll-area-scrollbar.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/scroll-area/scroll-area.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-group-heading.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-group.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-label.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-scroll-down-button.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-scroll-up-button.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-separator.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/select/select-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/separator/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/separator/separator.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-close.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-description.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-overlay.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-title.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sheet/sheet-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/constants.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/context.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-group-action.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-group-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-group-label.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-group.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-input.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-inset.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-action.svelte 
create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-badge.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-button.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-skeleton.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-sub-button.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-sub-item.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu-sub.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-menu.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-provider.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-rail.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-separator.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/sidebar/sidebar.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/skeleton/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/skeleton/skeleton.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/switch/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/switch/switch.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-body.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-caption.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-cell.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-footer.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-head.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-header.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table-row.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/table/table.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/textarea/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/textarea/textarea.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-content.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-trigger.svelte create mode 100644 llama.cpp/tools/server/webui/src/lib/components/ui/utils.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/auto-scroll.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/binary-detection.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/default-context.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/floating-ui-constraints.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/icons.ts create mode 100644 
llama.cpp/tools/server/webui/src/lib/constants/input-classes.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/latex-protection.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/literal-html.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/localstorage-keys.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/max-bundle-size.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/precision.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/processing-info.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/settings-config.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/supported-file-types.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/table-html-restorer.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/tooltip-config.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/constants/viewport.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/attachment.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/chat.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/files.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/model.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/enums/server.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/hooks/is-mobile.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/hooks/use-model-change-validation.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/hooks/use-processing-state.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/markdown/enhance-code-blocks.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/markdown/enhance-links.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/markdown/literal-html.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/markdown/table-html-restorer.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/chat.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/database.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/models.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/parameter-sync.spec.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/parameter-sync.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/services/props.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/chat.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/conversations.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/models.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/persisted.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/server.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/stores/settings.svelte.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/api.d.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/chat.d.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/database.d.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/models.d.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/types/settings.d.ts create mode 100644 
llama.cpp/tools/server/webui/src/lib/utils/api-headers.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/api-key-validation.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/attachment-display.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/attachment-type.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/audio-recording.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/autoresize-textarea.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/branching.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/browser-only.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/clipboard.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/config-helpers.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/conversation-utils.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/convert-files-to-extra.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/file-preview.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/file-type.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/formatters.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/index.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/is-ime-composing.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/latex-protection.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/modality-file-validation.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/model-names.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/pdf-processing.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/portal-to-body.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/precision.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/process-uploaded-files.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/svg-to-png.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/syntax-highlight-language.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/text-files.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/text.ts create mode 100644 llama.cpp/tools/server/webui/src/lib/utils/webp-to-png.ts create mode 100644 llama.cpp/tools/server/webui/src/routes/+error.svelte create mode 100644 llama.cpp/tools/server/webui/src/routes/+layout.svelte create mode 100644 llama.cpp/tools/server/webui/src/routes/+page.svelte create mode 100644 llama.cpp/tools/server/webui/src/routes/+page.ts create mode 100644 llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.svelte create mode 100644 llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.ts create mode 100644 llama.cpp/tools/server/webui/src/styles/katex-custom.scss create mode 100644 llama.cpp/tools/server/webui/static/favicon.svg create mode 100644 llama.cpp/tools/server/webui/static/loading.html create mode 100644 llama.cpp/tools/server/webui/svelte.config.js create mode 100644 llama.cpp/tools/server/webui/tests/client/components/TestWrapper.svelte create mode 100644 llama.cpp/tools/server/webui/tests/client/page.svelte.test.ts create mode 100644 llama.cpp/tools/server/webui/tests/e2e/demo.test.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/ChatForm.stories.svelte create mode 100644 llama.cpp/tools/server/webui/tests/stories/ChatMessage.stories.svelte create mode 100644 llama.cpp/tools/server/webui/tests/stories/ChatSettings.stories.svelte create mode 100644 
llama.cpp/tools/server/webui/tests/stories/ChatSidebar.stories.svelte create mode 100644 llama.cpp/tools/server/webui/tests/stories/Introduction.mdx create mode 100644 llama.cpp/tools/server/webui/tests/stories/MarkdownContent.stories.svelte create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/ai-tutorial.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/api-docs.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/assets/1.jpg create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/assets/beautiful-flowers-lotus.webp create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/assets/example.pdf create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/assets/hf-logo.svg create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/blog-post.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/data-analysis.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/empty.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/math-formulas.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/readme.ts create mode 100644 llama.cpp/tools/server/webui/tests/stories/fixtures/storybook-mocks.ts create mode 100644 llama.cpp/tools/server/webui/tests/unit/clipboard.test.ts create mode 100644 llama.cpp/tools/server/webui/tests/unit/latex-protection.test.ts create mode 100644 llama.cpp/tools/server/webui/tests/unit/model-names.test.ts create mode 100644 llama.cpp/tools/server/webui/tsconfig.json create mode 100644 llama.cpp/tools/server/webui/vite.config.ts create mode 100644 llama.cpp/tools/server/webui/vitest-setup-client.ts (limited to 'llama.cpp/tools/server') diff --git a/llama.cpp/tools/server/CMakeLists.txt b/llama.cpp/tools/server/CMakeLists.txt new file mode 100644 index 0000000..a39b4c5 --- /dev/null +++ b/llama.cpp/tools/server/CMakeLists.txt @@ -0,0 +1,70 @@ +include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +# server-context containing the core server logic, used by llama-server and CLI + +set(TARGET server-context) + +add_library(${TARGET} STATIC + server-task.cpp + server-task.h + server-queue.cpp + server-queue.h + server-common.cpp + server-common.h + server-context.cpp + server-context.h +) + +if (BUILD_SHARED_LIBS) + set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) +endif() + +target_include_directories(${TARGET} PRIVATE ../mtmd) +target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR}) +target_link_libraries(${TARGET} PUBLIC common mtmd ${CMAKE_THREAD_LIBS_INIT}) + + +# llama-server executable + +set(TARGET llama-server) + +if (NOT LLAMA_HTTPLIB) + message(FATAL_ERROR "LLAMA_HTTPLIB is OFF, cannot build llama-server. 
Hint: to skip building server, set -DLLAMA_BUILD_SERVER=OFF") +endif() + +set(TARGET_SRCS + server.cpp + server-http.cpp + server-http.h + server-models.cpp + server-models.h +) +set(PUBLIC_ASSETS + index.html.gz + loading.html +) + +foreach(asset ${PUBLIC_ASSETS}) + set(input "${CMAKE_CURRENT_SOURCE_DIR}/public/${asset}") + set(output "${CMAKE_CURRENT_BINARY_DIR}/${asset}.hpp") + list(APPEND TARGET_SRCS ${output}) + add_custom_command( + DEPENDS "${input}" + OUTPUT "${output}" + COMMAND "${CMAKE_COMMAND}" "-DINPUT=${input}" "-DOUTPUT=${output}" -P "${PROJECT_SOURCE_DIR}/scripts/xxd.cmake" + ) + set_source_files_properties(${output} PROPERTIES GENERATED TRUE) +endforeach() + +add_executable(${TARGET} ${TARGET_SRCS}) +install(TARGETS ${TARGET} RUNTIME) + +target_include_directories(${TARGET} PRIVATE ../mtmd) +target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR}) +target_link_libraries(${TARGET} PRIVATE server-context PUBLIC common cpp-httplib ${CMAKE_THREAD_LIBS_INIT}) + +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() + +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/llama.cpp/tools/server/README-dev.md b/llama.cpp/tools/server/README-dev.md new file mode 100644 index 0000000..3fea304 --- /dev/null +++ b/llama.cpp/tools/server/README-dev.md @@ -0,0 +1,179 @@ +# llama-server Development Documentation + +This document provides an in-depth technical overview of `llama-server`, intended for maintainers and contributors. + +If you are an end user consuming `llama-server` as a product, please refer to the main [README](./README.md) instead. + +## Backend + +### Overview + +The server supports two primary operating modes: + +- **Inference mode**: The default mode for performing inference with a single loaded GGUF model. +- **Router mode**: Enables management of multiple inference server instances behind a single API endpoint. Requests are automatically routed to the appropriate backend instance based on the requested model. + +The core architecture consists of the following components: + +- `server_context`: Holds the primary inference state, including the main `llama_context` and all active slots. +- `server_slot`: An abstraction over a single “sequence” in llama.cpp, responsible for managing individual parallel inference requests. +- `server_routes`: Middleware layer between `server_context` and the HTTP interface; handles JSON parsing/formatting and request routing logic. +- `server_http_context`: Implements the HTTP server using `cpp-httplib`. +- `server_queue`: Thread-safe queue used by HTTP workers to submit new tasks to `server_context`. +- `server_response`: Thread-safe queue used by `server_context` to return results to HTTP workers. +- `server_response_reader`: Higher-level wrapper around the two queues above for cleaner code. +- `server_task`: Unit of work pushed into `server_queue`. +- `server_task_result`: Unit of result pushed into `server_response`. +- `server_tokens`: Unified representation of token sequences (supports both text and multimodal tokens); used by `server_task` and `server_slot`. +- `server_prompt_checkpoint`: For recurrent (e.g., RWKV) and SWA models, stores snapshots of KV cache state. Enables reuse when subsequent requests share the same prompt prefix, saving redundant computation. +- `server_models`: Standalone component for managing multiple backend instances (used in router mode). It is completely independent of `server_context`. 
+ +```mermaid +graph TD + API_User <--> server_http_context + server_http_context <-- router mode --> server_models + server_http_context <-- inference mode --> server_routes + server_routes -- server_task --> server_queue + subgraph server_context + server_queue --> server_slot + server_slot -- server_task_result --> server_response + server_slot[multiple server_slot] + end + server_response --> server_routes +``` + +### Batching + +The server context maintains a single batch shared across all slots. When `update_slots()` is invoked, the system iterates through all active slots to populate this batch. For each slot, either a generated token from the previous decoding step or available prompt tokens are added to the batch. + +Batching constraints apply: slots can only be batched together if they share compatible configurations. For instance, slots using a specific LoRA adapter can be batched with each other, but not with slots using a different LoRA adapter or no adapter at all. + +Once the batch reaches capacity or all slots have been processed, `llama_decode` is called to execute the inference. This operation represents the primary computational bottleneck in `update_slots()`. + +Following decoding, the system either retrieves embeddings or samples the next token using `common_sampler_sample`. If a slot has remaining prompt tokens to process, it yields until the next `update_slots()` iteration. + +### Thread Management + +`server_context` runs on a dedicated single thread. Because it is single-threaded, heavy post-processing (especially after token generation) should be avoided, as it directly impacts multi-sequence throughput. + +Each incoming HTTP request is handled by its own thread managed by the HTTP library. The following operations are performed in HTTP worker threads: + +- JSON request parsing +- Chat template application +- Tokenization +- Conversion of `server_task_result` into final JSON response +- Error formatting into JSON +- Tracking of partial/incremental responses (e.g., streaming tool calls or reasoning steps) + +**Best practices to follow:** + +- All JSON formatting and chat template logic must stay in the HTTP layer. +- Avoid passing raw JSON between the HTTP layer and `server_slot`. Instead, parse everything into native C++ types as early as possible. + +### Example trace of a request + +Here is an example trace of an API request for text completion: + +- A request arrives at the HTTP layer. +- The request is routed to the corresponding handler inside `server_routes`. In this case, `handle_completions_impl` is invoked. +- The handler parses the input request, constructs a new `server_task`, and passes it to `server_res_generator`. +- `server_res_generator` creates a new `task_result_state` for each task: + - `task_result_state` stays in the HTTP layer, responsible for keeping track of the current state of the response (e.g., parsing tool calls or thinking messages). + - `server_task` is moved into `server_queue` inside `server_context`. +- `server_context` launches the task by moving it into an available slot (see `launch_slot_with_task()`). +- `update_slot()` processes the task as described in the "Batching" section above. +- Results may be sent using `send_partial_response` or `send_final_response`, which creates a new `server_task_result` and pushes it to the response queue. +- At the same time, `server_res_generator` listens to the response queue and retrieves this response. 
+- As the response is stateless, `server_res_generator` calls `response->update()` to update the response with the current state. +- `server_res_generator` then calls `response->to_json()` and passes the response to the HTTP layer. + +### Testing + +`llama-server` includes an automated test suite based on `pytest`. + +The framework automatically starts a `llama-server` instance, sends requests, and validates responses. + +For detailed instructions, see the [test documentation](./tests/README.md). + +### Notable Related PRs + +- Initial server implementation: https://github.com/ggml-org/llama.cpp/pull/1443 +- Parallel decoding support: https://github.com/ggml-org/llama.cpp/pull/3228 +- Refactor introducing `server_queue` and `server_response`: https://github.com/ggml-org/llama.cpp/pull/5065 +- Reranking endpoint: https://github.com/ggml-org/llama.cpp/pull/9510 +- Multimodal model support (`libmtmd`): https://github.com/ggml-org/llama.cpp/pull/12898 +- Unified KV cache handling: https://github.com/ggml-org/llama.cpp/pull/16736 +- Separation of HTTP logic into dedicated files: https://github.com/ggml-org/llama.cpp/pull/17216 +- Large-scale code base split into smaller files: https://github.com/ggml-org/llama.cpp/pull/17362 +- Introduction of router mode: https://github.com/ggml-org/llama.cpp/pull/17470 +- Speculative decoding: https://github.com/ggml-org/llama.cpp/pull/17808 and rework in https://github.com/ggml-org/llama.cpp/pull/17808 +- INI presets: https://github.com/ggml-org/llama.cpp/pull/17859 (+ refactoring: https://github.com/ggml-org/llama.cpp/pull/18169) +- Sleeping mode: https://github.com/ggml-org/llama.cpp/pull/18228 + + + + +## Web UI + +The project includes a web-based user interface for interacting with `llama-server`. It supports both single-model (`MODEL` mode) and multi-model (`ROUTER` mode) operation. + +The SvelteKit-based Web UI is introduced in this PR: https://github.com/ggml-org/llama.cpp/pull/14839 + +### Features + +- **Chat interface** with streaming responses +- **Multi-model support** (ROUTER mode) - switch between models, auto-load on selection +- **Modality validation** - ensures selected model supports conversation's attachments (images, audio) +- **Conversation management** - branching, regeneration, editing with history preservation +- **Attachment support** - images, audio, PDFs (with vision/text fallback) +- **Configurable parameters** - temperature, top_p, etc. 
synced with server defaults +- **Dark/light theme** + +### Tech Stack + +- **SvelteKit** - frontend framework with Svelte 5 runes for reactive state +- **TailwindCSS** + **shadcn-svelte** - styling and UI components +- **Vite** - build tooling +- **IndexedDB** (Dexie) - local storage for conversations +- **LocalStorage** - user settings persistence + +### Architecture + +The WebUI follows a layered architecture: + +``` +Routes → Components → Hooks → Stores → Services → Storage/API +``` + +- **Stores** - reactive state management (`chatStore`, `conversationsStore`, `modelsStore`, `serverStore`, `settingsStore`) +- **Services** - stateless API/database communication (`ChatService`, `ModelsService`, `PropsService`, `DatabaseService`) +- **Hooks** - reusable logic (`useModelChangeValidation`, `useProcessingState`) + +For detailed architecture diagrams, see [`tools/server/webui/docs/`](webui/docs/): + +- `high-level-architecture.mmd` - full architecture with all modules +- `high-level-architecture-simplified.mmd` - simplified overview +- `data-flow-simplified-model-mode.mmd` - data flow for single-model mode +- `data-flow-simplified-router-mode.mmd` - data flow for multi-model mode +- `flows/*.mmd` - detailed per-domain flows (chat, conversations, models, etc.) + +### Development + +```sh +# make sure you have Node.js installed +cd tools/server/webui +npm i + +# run dev server (with hot reload) +npm run dev + +# run tests +npm run test + +# build production bundle +npm run build +``` + +After `public/index.html.gz` has been generated, rebuild `llama-server` as described in the [build](#build) section to include the updated UI. + +**Note:** The Vite dev server automatically proxies API requests to `http://localhost:8080`. Make sure `llama-server` is running on that port during development. diff --git a/llama.cpp/tools/server/README.md b/llama.cpp/tools/server/README.md new file mode 100644 index 0000000..d132830 --- /dev/null +++ b/llama.cpp/tools/server/README.md @@ -0,0 +1,1782 @@ +# LLaMA.cpp HTTP Server + +Fast, lightweight, pure C/C++ HTTP server based on [httplib](https://github.com/yhirose/cpp-httplib), [nlohmann::json](https://github.com/nlohmann/json) and **llama.cpp**. + +Set of LLM REST APIs and a web UI to interact with llama.cpp. 
+ +**Features:** + * LLM inference of F16 and quantized models on GPU and CPU + * [OpenAI API](https://github.com/openai/openai-openapi) compatible chat completions, responses, and embeddings routes + * [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) compatible chat completions + * Reranking endpoint (https://github.com/ggml-org/llama.cpp/pull/9510) + * Parallel decoding with multi-user support + * Continuous batching + * Multimodal ([documentation](../../docs/multimodal.md)) / with OpenAI-compatible API support + * Monitoring endpoints + * Schema-constrained JSON response format + * Prefilling of assistant messages similar to the Claude API + * [Function calling](../../docs/function-calling.md) / tool use for ~any model + * Speculative decoding + * Easy-to-use web UI + +For the ful list of features, please refer to [server's changelog](https://github.com/ggml-org/llama.cpp/issues/9291) + +## Usage + + + + + +### Common params + +| Argument | Explanation | +| -------- | ----------- | +| `-h, --help, --usage` | print usage and exit | +| `--version` | show version and build info | +| `--license` | show source code license and dependencies | +| `-cl, --cache-list` | show list of models in cache | +| `--completion-bash` | print source-able bash completion script for llama.cpp | +| `--verbose-prompt` | print a verbose prompt before generation (default: false) | +| `-t, --threads N` | number of CPU threads to use during generation (default: -1)
(env: LLAMA_ARG_THREADS) | +| `-tb, --threads-batch N` | number of threads to use during batch and prompt processing (default: same as --threads) | +| `-C, --cpu-mask M` | CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: "") | +| `-Cr, --cpu-range lo-hi` | range of CPUs for affinity. Complements --cpu-mask | +| `--cpu-strict <0\|1>` | use strict CPU placement (default: 0) | +| `--prio N` | set process/thread priority : low(-1), normal(0), medium(1), high(2), realtime(3) (default: 0) | +| `--poll <0...100>` | use polling level to wait for work (0 - no polling, default: 50) | +| `-Cb, --cpu-mask-batch M` | CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask) | +| `-Crb, --cpu-range-batch lo-hi` | ranges of CPUs for affinity. Complements --cpu-mask-batch | +| `--cpu-strict-batch <0\|1>` | use strict CPU placement (default: same as --cpu-strict) | +| `--prio-batch N` | set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: 0) | +| `--poll-batch <0\|1>` | use polling to wait for work (default: same as --poll) | +| `-c, --ctx-size N` | size of the prompt context (default: 0, 0 = loaded from model)
(env: LLAMA_ARG_CTX_SIZE) | +| `-n, --predict, --n-predict N` | number of tokens to predict (default: -1, -1 = infinity)
(env: LLAMA_ARG_N_PREDICT) | +| `-b, --batch-size N` | logical maximum batch size (default: 2048)
(env: LLAMA_ARG_BATCH) | +| `-ub, --ubatch-size N` | physical maximum batch size (default: 512)
(env: LLAMA_ARG_UBATCH) | +| `--keep N` | number of tokens to keep from the initial prompt (default: 0, -1 = all) | +| `--swa-full` | use full-size SWA cache (default: false)
[(more info)](https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
(env: LLAMA_ARG_SWA_FULL) | +| `-fa, --flash-attn [on\|off\|auto]` | set Flash Attention use ('on', 'off', or 'auto', default: 'auto')
(env: LLAMA_ARG_FLASH_ATTN) | +| `--perf, --no-perf` | whether to enable internal libllama performance timings (default: false)
(env: LLAMA_ARG_PERF) | +| `-e, --escape, --no-escape` | whether to process escape sequences (\n, \r, \t, \', \", \\) (default: true) | +| `--rope-scaling {none,linear,yarn}` | RoPE frequency scaling method, defaults to linear unless specified by the model&#10;
(env: LLAMA_ARG_ROPE_SCALING_TYPE) | +| `--rope-scale N` | RoPE context scaling factor, expands context by a factor of N
(env: LLAMA_ARG_ROPE_SCALE) | +| `--rope-freq-base N` | RoPE base frequency, used by NTK-aware scaling (default: loaded from model)
(env: LLAMA_ARG_ROPE_FREQ_BASE) | +| `--rope-freq-scale N` | RoPE frequency scaling factor, expands context by a factor of 1/N
(env: LLAMA_ARG_ROPE_FREQ_SCALE) | +| `--yarn-orig-ctx N` | YaRN: original context size of model (default: 0 = model training context size)
(env: LLAMA_ARG_YARN_ORIG_CTX) | +| `--yarn-ext-factor N` | YaRN: extrapolation mix factor (default: -1.00, 0.0 = full interpolation)
(env: LLAMA_ARG_YARN_EXT_FACTOR) | +| `--yarn-attn-factor N` | YaRN: scale sqrt(t) or attention magnitude (default: -1.00)
(env: LLAMA_ARG_YARN_ATTN_FACTOR) | +| `--yarn-beta-slow N` | YaRN: high correction dim or alpha (default: -1.00)
(env: LLAMA_ARG_YARN_BETA_SLOW) | +| `--yarn-beta-fast N` | YaRN: low correction dim or beta (default: -1.00)
(env: LLAMA_ARG_YARN_BETA_FAST) | +| `-kvo, --kv-offload, -nkvo, --no-kv-offload` | whether to enable KV cache offloading (default: enabled)
(env: LLAMA_ARG_KV_OFFLOAD) | +| `--repack, -nr, --no-repack` | whether to enable weight repacking (default: enabled)
(env: LLAMA_ARG_REPACK) | +| `--no-host` | bypass host buffer allowing extra buffers to be used
(env: LLAMA_ARG_NO_HOST) | +| `-ctk, --cache-type-k TYPE` | KV cache data type for K
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_K) | +| `-ctv, --cache-type-v TYPE` | KV cache data type for V
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_V) | +| `-dt, --defrag-thold N` | KV cache defragmentation threshold (DEPRECATED)
(env: LLAMA_ARG_DEFRAG_THOLD) | +| `--mlock` | force system to keep model in RAM rather than swapping or compressing
(env: LLAMA_ARG_MLOCK) | +| `--mmap, --no-mmap` | whether to memory-map model. Explicitly enabling mmap disables direct-io. (if mmap disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)
(env: LLAMA_ARG_MMAP) | +| `-dio, --direct-io, -ndio, --no-direct-io` | use DirectIO if available. Takes precedence over --mmap (default: enabled)
(env: LLAMA_ARG_DIO) | +| `--numa TYPE` | attempt optimizations that help on some NUMA systems
- distribute: spread execution evenly over all nodes
- isolate: only spawn threads on CPUs on the node that execution started on
- numactl: use the CPU map provided by numactl
if llama.cpp was previously run without this option, it is recommended to drop the system page cache before using it&#10;
see https://github.com/ggml-org/llama.cpp/issues/1437
(env: LLAMA_ARG_NUMA) | +| `-dev, --device ` | comma-separated list of devices to use for offloading (none = don't offload)
use --list-devices to see a list of available devices
(env: LLAMA_ARG_DEVICE) | +| `--list-devices` | print list of available devices and exit | +| `-ot, --override-tensor =,...` | override tensor buffer type
(env: LLAMA_ARG_OVERRIDE_TENSOR) | +| `-cmoe, --cpu-moe` | keep all Mixture of Experts (MoE) weights in the CPU
(env: LLAMA_ARG_CPU_MOE) | +| `-ncmoe, --n-cpu-moe N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU
(env: LLAMA_ARG_N_CPU_MOE) | +| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)
(env: LLAMA_ARG_N_GPU_LAYERS) | +| `-sm, --split-mode {none,layer,row}` | how to split the model across multiple GPUs, one of:
- none: use one GPU only
- layer (default): split layers and KV across GPUs
- row: split rows across GPUs
(env: LLAMA_ARG_SPLIT_MODE) | +| `-ts, --tensor-split N0,N1,N2,...` | fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1
(env: LLAMA_ARG_TENSOR_SPLIT) | +| `-mg, --main-gpu INDEX` | the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: 0)
(env: LLAMA_ARG_MAIN_GPU) | +| `-fit, --fit [on\|off]` | whether to adjust unset arguments to fit in device memory ('on' or 'off', default: 'on')
(env: LLAMA_ARG_FIT) | +| `-fitt, --fit-target MiB0,MiB1,MiB2,...` | target margin per device for --fit, comma-separated list of values, single value is broadcast across all devices, default: 1024
(env: LLAMA_ARG_FIT_TARGET) | +| `-fitc, --fit-ctx N` | minimum ctx size that can be set by --fit option, default: 4096
(env: LLAMA_ARG_FIT_CTX) | +| `--check-tensors` | check model tensor data for invalid values (default: false) | +| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, use comma-separated values.&#10;
types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false | +| `--op-offload, --no-op-offload` | whether to offload host tensor operations to device (default: true) | +| `--lora FNAME` | path to LoRA adapter (use comma-separated values to load multiple adapters) | +| `--lora-scaled FNAME:SCALE,...` | path to LoRA adapter with user defined scaling (format: FNAME:SCALE,...)
note: use comma-separated values | +| `--control-vector FNAME` | add a control vector
note: use comma-separated values to add multiple control vectors | +| `--control-vector-scaled FNAME:SCALE,...` | add a control vector with user defined scaling SCALE
note: use comma-separated values (format: FNAME:SCALE,...) | +| `--control-vector-layer-range START END` | layer range to apply the control vector(s) to, start and end inclusive | +| `-m, --model FNAME` | model path to load
(env: LLAMA_ARG_MODEL) | +| `-mu, --model-url MODEL_URL` | model download url (default: unused)
(env: LLAMA_ARG_MODEL_URL) | +| `-dr, --docker-repo [/][:quant]` | Docker Hub model repository. repo is optional, defaults to ai/. quant is optional, defaults to :latest.&#10;
example: gemma3
(default: unused)
(env: LLAMA_ARG_DOCKER_REPO) | +| `-hf, -hfr, --hf-repo /[:quant]` | Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.&#10;
mmproj is also downloaded automatically if available. to disable, add --no-mmproj
example: unsloth/phi-4-GGUF:q4_k_m
(default: unused)
(env: LLAMA_ARG_HF_REPO) | +| `-hfd, -hfrd, --hf-repo-draft /[:quant]` | Same as --hf-repo, but for the draft model (default: unused)
(env: LLAMA_ARG_HFD_REPO) | +| `-hff, --hf-file FILE` | Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)
(env: LLAMA_ARG_HF_FILE) | +| `-hfv, -hfrv, --hf-repo-v /[:quant]` | Hugging Face model repository for the vocoder model (default: unused)
(env: LLAMA_ARG_HF_REPO_V) | +| `-hffv, --hf-file-v FILE` | Hugging Face model file for the vocoder model (default: unused)
(env: LLAMA_ARG_HF_FILE_V) | +| `-hft, --hf-token TOKEN` | Hugging Face access token (default: value from HF_TOKEN environment variable)
(env: HF_TOKEN) | +| `--log-disable` | Log disable | +| `--log-file FNAME` | Log to file
(env: LLAMA_LOG_FILE) | +| `--log-colors [on\|off\|auto]` | Set colored logging ('on', 'off', or 'auto', default: 'auto')
'auto' enables colors when output is to a terminal
(env: LLAMA_LOG_COLORS) | +| `-v, --verbose, --log-verbose` | Set verbosity level to infinity (i.e. log all messages, useful for debugging) | +| `--offline` | Offline mode: forces use of cache, prevents network access
(env: LLAMA_OFFLINE) | +| `-lv, --verbosity, --log-verbosity N` | Set the verbosity threshold. Messages with a higher verbosity will be ignored. Values:
- 0: generic output
- 1: error
- 2: warning
- 3: info
- 4: debug
(default: 3)

(env: LLAMA_LOG_VERBOSITY) | +| `--log-prefix` | Enable prefix in log messages
(env: LLAMA_LOG_PREFIX) | +| `--log-timestamps` | Enable timestamps in log messages
(env: LLAMA_LOG_TIMESTAMPS) | +| `-ctkd, --cache-type-k-draft TYPE` | KV cache data type for K for the draft model
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_K_DRAFT) | +| `-ctvd, --cache-type-v-draft TYPE` | KV cache data type for V for the draft model
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_V_DRAFT) | + + +### Sampling params + +| Argument | Explanation | +| -------- | ----------- | +| `--samplers SAMPLERS` | samplers that will be used for generation in the order, separated by ';'
(default: penalties;dry;top_n_sigma;top_k;typ_p;top_p;min_p;xtc;temperature) | +| `-s, --seed SEED` | RNG seed (default: -1, use random seed for -1) | +| `--sampler-seq, --sampling-seq SEQUENCE` | simplified sequence for samplers that will be used (default: edskypmxt) | +| `--ignore-eos` | ignore end of stream token and continue generating (implies --logit-bias EOS-inf) | +| `--temp N` | temperature (default: 0.80) | +| `--top-k N` | top-k sampling (default: 40, 0 = disabled)
(env: LLAMA_ARG_TOP_K) | +| `--top-p N` | top-p sampling (default: 0.95, 1.0 = disabled) | +| `--min-p N` | min-p sampling (default: 0.05, 0.0 = disabled) | +| `--top-nsigma N` | top-n-sigma sampling (default: -1.00, -1.0 = disabled) | +| `--xtc-probability N` | xtc probability (default: 0.00, 0.0 = disabled) | +| `--xtc-threshold N` | xtc threshold (default: 0.10, 1.0 = disabled) | +| `--typical N` | locally typical sampling, parameter p (default: 1.00, 1.0 = disabled) | +| `--repeat-last-n N` | last n tokens to consider for penalize (default: 64, 0 = disabled, -1 = ctx_size) | +| `--repeat-penalty N` | penalize repeat sequence of tokens (default: 1.00, 1.0 = disabled) | +| `--presence-penalty N` | repeat alpha presence penalty (default: 0.00, 0.0 = disabled) | +| `--frequency-penalty N` | repeat alpha frequency penalty (default: 0.00, 0.0 = disabled) | +| `--dry-multiplier N` | set DRY sampling multiplier (default: 0.00, 0.0 = disabled) | +| `--dry-base N` | set DRY sampling base value (default: 1.75) | +| `--dry-allowed-length N` | set allowed length for DRY sampling (default: 2) | +| `--dry-penalty-last-n N` | set DRY penalty for the last n tokens (default: -1, 0 = disable, -1 = context size) | +| `--dry-sequence-breaker STRING` | add sequence breaker for DRY sampling, clearing out default breakers ('\n', ':', '"', '*') in the process; use "none" to not use any sequence breakers | +| `--adaptive-target N` | adaptive-p: select tokens near this probability (valid range 0.0 to 1.0; negative = disabled) (default: -1.00)
[(more info)](https://github.com/ggml-org/llama.cpp/pull/17927) | +| `--adaptive-decay N` | adaptive-p: decay rate for target adaptation over time. lower values are more reactive, higher values are more stable.
(valid range 0.0 to 0.99) (default: 0.90) | +| `--dynatemp-range N` | dynamic temperature range (default: 0.00, 0.0 = disabled) | +| `--dynatemp-exp N` | dynamic temperature exponent (default: 1.00) | +| `--mirostat N` | use Mirostat sampling.
Top K, Nucleus and Locally Typical samplers are ignored if used.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | +| `--mirostat-lr N` | Mirostat learning rate, parameter eta (default: 0.10) | +| `--mirostat-ent N` | Mirostat target entropy, parameter tau (default: 5.00) | +| `-l, --logit-bias TOKEN_ID(+/-)BIAS` | modifies the likelihood of token appearing in the completion,
i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',
or `--logit-bias 15043-1` to decrease likelihood of token ' Hello' | +| `--grammar GRAMMAR` | BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '') | +| `--grammar-file FNAME` | file to read grammar from | +| `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object
For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead | +| `-jf, --json-schema-file FILE` | File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object
For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead | +| `-bs, --backend-sampling` | enable backend sampling (experimental) (default: disabled)
(env: LLAMA_ARG_BACKEND_SAMPLING) | + + +### Server-specific params + +| Argument | Explanation | +| -------- | ----------- | +| `--ctx-checkpoints, --swa-checkpoints N` | max number of context checkpoints to create per slot (default: 8)[(more info)](https://github.com/ggml-org/llama.cpp/pull/15293)
(env: LLAMA_ARG_CTX_CHECKPOINTS) | +| `-cram, --cache-ram N` | set the maximum cache size in MiB (default: 8192, -1 - no limit, 0 - disable)[(more info)](https://github.com/ggml-org/llama.cpp/pull/16391)
(env: LLAMA_ARG_CACHE_RAM) | +| `-kvu, --kv-unified` | use single unified KV buffer shared across all sequences (default: enabled if number of slots is auto)
(env: LLAMA_ARG_KV_UNIFIED) | +| `--context-shift, --no-context-shift` | whether to use context shift on infinite text generation (default: disabled)
(env: LLAMA_ARG_CONTEXT_SHIFT) | +| `-r, --reverse-prompt PROMPT` | halt generation at PROMPT, return control in interactive mode | +| `-sp, --special` | special tokens output enabled (default: false) | +| `--warmup, --no-warmup` | whether to perform warmup with an empty run (default: enabled) | +| `--spm-infill` | use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: disabled) | +| `--pooling {none,mean,cls,last,rank}` | pooling type for embeddings, use model default if unspecified
(env: LLAMA_ARG_POOLING) | +| `-np, --parallel N` | number of server slots (default: -1, -1 = auto)
(env: LLAMA_ARG_N_PARALLEL) | +| `-cb, --cont-batching, -nocb, --no-cont-batching` | whether to enable continuous batching (a.k.a dynamic batching) (default: enabled)
(env: LLAMA_ARG_CONT_BATCHING) | +| `-mm, --mmproj FILE` | path to a multimodal projector file. see tools/mtmd/README.md
note: if -hf is used, this argument can be omitted
(env: LLAMA_ARG_MMPROJ) | +| `-mmu, --mmproj-url URL` | URL to a multimodal projector file. see tools/mtmd/README.md
(env: LLAMA_ARG_MMPROJ_URL) | +| `--mmproj-auto, --no-mmproj, --no-mmproj-auto` | whether to use multimodal projector file (if available), useful when using -hf (default: enabled)
(env: LLAMA_ARG_MMPROJ_AUTO) | +| `--mmproj-offload, --no-mmproj-offload` | whether to enable GPU offloading for multimodal projector (default: enabled)
(env: LLAMA_ARG_MMPROJ_OFFLOAD) | +| `--image-min-tokens N` | minimum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)
(env: LLAMA_ARG_IMAGE_MIN_TOKENS) | +| `--image-max-tokens N` | maximum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)
(env: LLAMA_ARG_IMAGE_MAX_TOKENS) | +| `-otd, --override-tensor-draft =,...` | override tensor buffer type for draft model | +| `-cmoed, --cpu-moe-draft` | keep all Mixture of Experts (MoE) weights in the CPU for the draft model
(env: LLAMA_ARG_CPU_MOE_DRAFT) | +| `-ncmoed, --n-cpu-moe-draft N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model
(env: LLAMA_ARG_N_CPU_MOE_DRAFT) | +| `-a, --alias STRING` | set alias for model name (to be used by REST API)
(env: LLAMA_ARG_ALIAS) | +| `--host HOST` | IP address to listen on, or bind to a UNIX socket if the address ends with .sock (default: 127.0.0.1)&#10;
(env: LLAMA_ARG_HOST) | +| `--port PORT` | port to listen (default: 8080)
(env: LLAMA_ARG_PORT) | +| `--path PATH` | path to serve static files from (default: )
(env: LLAMA_ARG_STATIC_PATH) | +| `--api-prefix PREFIX` | prefix path the server serves from, without the trailing slash (default: )
(env: LLAMA_ARG_API_PREFIX) | +| `--webui-config JSON` | JSON that provides default WebUI settings (overrides WebUI defaults)
(env: LLAMA_ARG_WEBUI_CONFIG) | +| `--webui-config-file PATH` | JSON file that provides default WebUI settings (overrides WebUI defaults)
(env: LLAMA_ARG_WEBUI_CONFIG_FILE) | +| `--webui, --no-webui` | whether to enable the Web UI (default: enabled)
(env: LLAMA_ARG_WEBUI) | +| `--embedding, --embeddings` | restrict to only support embedding use case; use only with dedicated embedding models (default: disabled)
(env: LLAMA_ARG_EMBEDDINGS) | +| `--rerank, --reranking` | enable reranking endpoint on server (default: disabled)
(env: LLAMA_ARG_RERANKING) | +| `--api-key KEY` | API key to use for authentication, multiple keys can be provided as a comma-separated list (default: none)
(env: LLAMA_API_KEY) | +| `--api-key-file FNAME` | path to file containing API keys (default: none) | +| `--ssl-key-file FNAME` | path to a file containing a PEM-encoded SSL private key&#10;
(env: LLAMA_ARG_SSL_KEY_FILE) | +| `--ssl-cert-file FNAME` | path to a file containing a PEM-encoded SSL certificate&#10;
(env: LLAMA_ARG_SSL_CERT_FILE) | +| `--chat-template-kwargs STRING` | sets additional params for the json template parser, must be a valid json object string, e.g. '{"key1":"value1","key2":"value2"}'
(env: LLAMA_CHAT_TEMPLATE_KWARGS) | +| `-to, --timeout N` | server read/write timeout in seconds (default: 600)
(env: LLAMA_ARG_TIMEOUT) | +| `--threads-http N` | number of threads used to process HTTP requests (default: -1)
(env: LLAMA_ARG_THREADS_HTTP) | +| `--cache-prompt, --no-cache-prompt` | whether to enable prompt caching (default: enabled)
(env: LLAMA_ARG_CACHE_PROMPT) | +| `--cache-reuse N` | min chunk size to attempt reusing from the cache via KV shifting, requires prompt caching to be enabled (default: 0)
[(card)](https://ggml.ai/f0.png)
(env: LLAMA_ARG_CACHE_REUSE) | +| `--metrics` | enable prometheus compatible metrics endpoint (default: disabled)
(env: LLAMA_ARG_ENDPOINT_METRICS) | +| `--props` | enable changing global properties via POST /props (default: disabled)
(env: LLAMA_ARG_ENDPOINT_PROPS) | +| `--slots, --no-slots` | expose slots monitoring endpoint (default: enabled)
(env: LLAMA_ARG_ENDPOINT_SLOTS) | +| `--slot-save-path PATH` | path to save slot kv cache (default: disabled) | +| `--media-path PATH` | directory for loading local media files; files can be accessed via file:// URLs using relative paths (default: disabled) | +| `--models-dir PATH` | directory containing models for the router server (default: disabled)
(env: LLAMA_ARG_MODELS_DIR) | +| `--models-preset PATH` | path to INI file containing model presets for the router server (default: disabled)
(env: LLAMA_ARG_MODELS_PRESET) | +| `--models-max N` | for router server, maximum number of models to load simultaneously (default: 4, 0 = unlimited)
(env: LLAMA_ARG_MODELS_MAX) | +| `--models-autoload, --no-models-autoload` | for router server, whether to automatically load models (default: enabled)
(env: LLAMA_ARG_MODELS_AUTOLOAD) | +| `--jinja, --no-jinja` | whether to use jinja template engine for chat (default: enabled)
(env: LLAMA_ARG_JINJA) | +| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:
- none: leaves thoughts unparsed in `message.content`
- deepseek: puts thoughts in `message.reasoning_content`
- deepseek-legacy: keeps `` tags in `message.content` while also populating `message.reasoning_content`
(default: auto)
(env: LLAMA_ARG_THINK) | +| `--reasoning-budget N` | controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)
(env: LLAMA_ARG_THINK_BUDGET) | +| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)
if suffix/prefix are specified, template will be disabled
only commonly used templates are accepted (unless --jinja is set before this flag):
list of built-in templates:
bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone-moe, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr
(env: LLAMA_ARG_CHAT_TEMPLATE) | +| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)
if suffix/prefix are specified, template will be disabled
only commonly used templates are accepted (unless --jinja is set before this flag):
list of built-in templates:
bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone-moe, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr
(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) | +| `--prefill-assistant, --no-prefill-assistant` | whether to prefill the assistant's response if the last message is an assistant message (default: prefill enabled)
when this flag is set, if the last message is an assistant message then it will be treated as a full message and not prefilled

(env: LLAMA_ARG_PREFILL_ASSISTANT) | +| `-sps, --slot-prompt-similarity SIMILARITY` | how much the prompt of a request must match the prompt of a slot in order to use that slot (default: 0.10, 0.0 = disabled) | +| `--lora-init-without-apply` | load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: disabled) | +| `--sleep-idle-seconds SECONDS` | number of seconds of idleness after which the server will sleep (default: -1; -1 = disabled) | +| `-td, --threads-draft N` | number of threads to use during generation (default: same as --threads) | +| `-tbd, --threads-batch-draft N` | number of threads to use during batch and prompt processing (default: same as --threads-draft) | +| `--draft, --draft-n, --draft-max N` | number of tokens to draft for speculative decoding (default: 16)
(env: LLAMA_ARG_DRAFT_MAX) | +| `--draft-min, --draft-n-min N` | minimum number of draft tokens to use for speculative decoding (default: 0)
(env: LLAMA_ARG_DRAFT_MIN) | +| `--draft-p-min P` | minimum speculative decoding probability (greedy) (default: 0.75)
(env: LLAMA_ARG_DRAFT_P_MIN) | +| `-cd, --ctx-size-draft N` | size of the prompt context for the draft model (default: 0, 0 = loaded from model)
(env: LLAMA_ARG_CTX_SIZE_DRAFT) | +| `-devd, --device-draft ` | comma-separated list of devices to use for offloading the draft model (none = don't offload)
use --list-devices to see a list of available devices | +| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | max. number of draft model layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)
(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) | +| `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)
(env: LLAMA_ARG_MODEL_DRAFT) | +| `--spec-replace TARGET DRAFT` | translate the string in TARGET into DRAFT if the draft model and main model are not compatible | +| `-mv, --model-vocoder FNAME` | vocoder model for audio generation (default: unused) | +| `--tts-use-guide-tokens` | Use guide tokens to improve TTS word recall | +| `--embd-gemma-default` | use default EmbeddingGemma model (note: can download weights from the internet) | +| `--fim-qwen-1.5b-default` | use default Qwen 2.5 Coder 1.5B (note: can download weights from the internet) | +| `--fim-qwen-3b-default` | use default Qwen 2.5 Coder 3B (note: can download weights from the internet) | +| `--fim-qwen-7b-default` | use default Qwen 2.5 Coder 7B (note: can download weights from the internet) | +| `--fim-qwen-7b-spec` | use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet) | +| `--fim-qwen-14b-spec` | use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet) | +| `--fim-qwen-30b-default` | use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet) | +| `--gpt-oss-20b-default` | use gpt-oss-20b (note: can download weights from the internet) | +| `--gpt-oss-120b-default` | use gpt-oss-120b (note: can download weights from the internet) | +| `--vision-gemma-4b-default` | use Gemma 3 4B QAT (note: can download weights from the internet) | +| `--vision-gemma-12b-default` | use Gemma 3 12B QAT (note: can download weights from the internet) | + + + +Note: If both command line argument and environment variable are both set for the same param, the argument will take precedence over env var. + +For boolean options like `--mmap` or `--kv-offload`, the environment variable is handled as shown in this example: +- `LLAMA_ARG_MMAP=true` means enabled, other accepted values are: `1`, `on`, `enabled` +- `LLAMA_ARG_MMAP=false` means disabled, other accepted values are: `0`, `off`, `disabled` +- If `LLAMA_ARG_NO_MMAP` is present (no matter the value), it means disabling mmap + +Example usage of docker compose with environment variables: + +```yml +services: + llamacpp-server: + image: ghcr.io/ggml-org/llama.cpp:server + ports: + - 8080:8080 + volumes: + - ./models:/models + environment: + # alternatively, you can use "LLAMA_ARG_MODEL_URL" to download the model + LLAMA_ARG_MODEL: /models/my_model.gguf + LLAMA_ARG_CTX_SIZE: 4096 + LLAMA_ARG_N_PARALLEL: 2 + LLAMA_ARG_ENDPOINT_METRICS: 1 + LLAMA_ARG_PORT: 8080 +``` + +### Multimodal support + +Multimodal support was added in [#12898](https://github.com/ggml-org/llama.cpp/pull/12898) and is currently an experimental feature. +It is currently available in the following endpoints: +- The OAI-compatible chat endpoint. +- The non-OAI-compatible completions endpoint. +- The non-OAI-compatible embeddings endpoint. 
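+
+For example, assuming the server was started with a vision-capable model (and its multimodal projector), a multimodal prompt can be passed to the non-OAI-compatible `/completion` endpoint roughly as in the minimal sketch below; the `<__media__>` marker and the JSON prompt shape are documented in the `/completion` section, and the base64 payload is a placeholder:
+
+```sh
+# minimal sketch: one media marker in the prompt string, one base64 payload to fill it
+curl --request POST \
+    --url http://localhost:8080/completion \
+    --header "Content-Type: application/json" \
+    --data '{
+        "prompt": {
+            "prompt_string": "Describe this image: <__media__>",
+            "multimodal_data": ["<base64-encoded image data>"]
+        },
+        "n_predict": 64
+    }'
+```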
+ +For more details, please refer to [multimodal documentation](../../docs/multimodal.md) + +## Build + +`llama-server` is built alongside everything else from the root of the project + +- Using `CMake`: + + ```bash + cmake -B build + cmake --build build --config Release -t llama-server + ``` + + Binary is at `./build/bin/llama-server` + +## Build with SSL + +`llama-server` can also be built with SSL support using OpenSSL 3 + +- Using `CMake`: + + ```bash + cmake -B build -DLLAMA_OPENSSL=ON + cmake --build build --config Release -t llama-server + ``` + +## Quick Start + +To get started right away, run the following command, making sure to use the correct path for the model you have: + +### Unix-based systems (Linux, macOS, etc.) + +```bash +./llama-server -m models/7B/ggml-model.gguf -c 2048 +``` + +### Windows + +```powershell +llama-server.exe -m models\7B\ggml-model.gguf -c 2048 +``` + +The above command will start a server that by default listens on `127.0.0.1:8080`. +You can consume the endpoints with Postman or NodeJS with axios library. You can visit the web front end at the same url. + +### Docker + +```bash +docker run -p 8080:8080 -v /path/to/models:/models ghcr.io/ggml-org/llama.cpp:server -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 + +# or, with CUDA: +docker run -p 8080:8080 -v /path/to/models:/models --gpus all ghcr.io/ggml-org/llama.cpp:server-cuda -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 --n-gpu-layers 99 +``` + +## Using with CURL + +Using [curl](https://curl.se/). On Windows, `curl.exe` should be available in the base OS. + +```sh +curl --request POST \ + --url http://localhost:8080/completion \ + --header "Content-Type: application/json" \ + --data '{"prompt": "Building a website can be done in 10 simple steps:","n_predict": 128}' +``` + +## API Endpoints + +### GET `/health`: Returns health check result + +This endpoint is public (no API key check). `/v1/health` also works. + +**Response format** + +- HTTP status code 503 + - Body: `{"error": {"code": 503, "message": "Loading model", "type": "unavailable_error"}}` + - Explanation: the model is still being loaded. +- HTTP status code 200 + - Body: `{"status": "ok" }` + - Explanation: the model is successfully loaded and the server is ready. + +### POST `/completion`: Given a `prompt`, it returns the predicted completion. + +> [!IMPORTANT] +> +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/completions` instead. + +*Options:* + +`prompt`: Provide the prompt for this completion as a string or as an array of strings or numbers representing tokens. Internally, if `cache_prompt` is `true`, the prompt is compared to the previous completion and only the "unseen" suffix is evaluated. A `BOS` token is inserted at the start, if all of the following conditions are true: + + - The prompt is a string or an array with the first element given as a string + - The model's `tokenizer.ggml.add_bos_token` metadata is `true` + +These input shapes and data type are allowed for `prompt`: + + - Single string: `"string"` + - Single sequence of tokens: `[12, 34, 56]` + - Mixed tokens and strings: `[12, 34, "string", 56, 78]` + - A JSON object which optionally contains multimodal data: `{ "prompt_string": "string", "multimodal_data": ["base64"] }` + +Multiple prompts are also supported. In this case, the completion result will be an array. 
+ + - Only strings: `["string1", "string2"]` + - Strings, JSON objects, and sequences of tokens: `["string1", [12, 34, 56], { "prompt_string": "string", "multimodal_data": ["base64"]}]` + - Mixed types: `[[12, 34, "string", 56, 78], [12, 34, 56], "string", { "prompt_string": "string" }]` + +Note for `multimodal_data` in JSON object prompts. This should be an array of strings, containing base64 encoded multimodal data such as images and audio. There must be an identical number of MTMD media markers in the string prompt element which act as placeholders for the data provided to this parameter. The multimodal data files will be substituted in order. The marker string (e.g. `<__media__>`) can be found by calling `mtmd_default_marker()` defined in [the MTMD C API](https://github.com/ggml-org/llama.cpp/blob/5fd160bbd9d70b94b5b11b0001fd7f477005e4a0/tools/mtmd/mtmd.h#L87). A client *must not* specify this field unless the server has the multimodal capability. Clients should check `/models` or `/v1/models` for the `multimodal` capability before a multimodal request. + +`temperature`: Adjust the randomness of the generated text. Default: `0.8` + +`dynatemp_range`: Dynamic temperature range. The final temperature will be in the range of `[temperature - dynatemp_range; temperature + dynatemp_range]` Default: `0.0`, which is disabled. + +`dynatemp_exponent`: Dynamic temperature exponent. Default: `1.0` + +`top_k`: Limit the next token selection to the K most probable tokens. Default: `40` + +`top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P. Default: `0.95` + +`min_p`: The minimum probability for a token to be considered, relative to the probability of the most likely token. Default: `0.05` + +`n_predict`: Set the maximum number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. Default: `-1`, where `-1` is infinity. + +`n_indent`: Specify the minimum line indentation for the generated text in number of whitespace characters. Useful for code completion tasks. Default: `0` + +`n_keep`: Specify the number of tokens from the prompt to retain when the context size is exceeded and tokens need to be discarded. The number excludes the BOS token. +By default, this value is set to `0`, meaning no tokens are kept. Use `-1` to retain all tokens from the prompt. + +`n_cmpl`: Number of completions to generate from the current prompt. If input has multiple prompts, the output will have N prompts times `n_cmpl` entries. + +`n_cache_reuse`: Min chunk size to attempt reusing from the cache via KV shifting. For more info, see `--cache-reuse` arg. Default: `0`, which is disabled. + +`stream`: Allows receiving each predicted token in real-time instead of waiting for the completion to finish (uses a different response format). To enable this, set to `true`. + +`stop`: Specify a JSON array of stopping strings. +These words will not be included in the completion, so make sure to add them to the prompt for the next iteration. Default: `[]` + +`typical_p`: Enable locally typical sampling with parameter p. Default: `1.0`, which is disabled. + +`repeat_penalty`: Control the repetition of token sequences in the generated text. Default: `1.1` + +`repeat_last_n`: Last n tokens to consider for penalizing repetition. Default: `64`, where `0` is disabled and `-1` is ctx-size. 
+ +`presence_penalty`: Repeat alpha presence penalty. Default: `0.0`, which is disabled. + +`frequency_penalty`: Repeat alpha frequency penalty. Default: `0.0`, which is disabled. + +`dry_multiplier`: Set the DRY (Don't Repeat Yourself) repetition penalty multiplier. Default: `0.0`, which is disabled. + +`dry_base`: Set the DRY repetition penalty base value. Default: `1.75` + +`dry_allowed_length`: Tokens that extend repetition beyond this receive exponentially increasing penalty: multiplier * base ^ (length of repeating sequence before token - allowed length). Default: `2` + +`dry_penalty_last_n`: How many tokens to scan for repetitions. Default: `-1`, where `0` is disabled and `-1` is context size. + +`dry_sequence_breakers`: Specify an array of sequence breakers for DRY sampling. Only a JSON array of strings is accepted. Default: `['\n', ':', '"', '*']` + +`xtc_probability`: Set the chance for token removal via XTC sampler. Default: `0.0`, which is disabled. + +`xtc_threshold`: Set a minimum probability threshold for tokens to be removed via XTC sampler. Default: `0.1` (> `0.5` disables XTC) + +`mirostat`: Enable Mirostat sampling, controlling perplexity during text generation. Default: `0`, where `0` is disabled, `1` is Mirostat, and `2` is Mirostat 2.0. + +`mirostat_tau`: Set the Mirostat target entropy, parameter tau. Default: `5.0` + +`mirostat_eta`: Set the Mirostat learning rate, parameter eta. Default: `0.1` + +`grammar`: Set grammar for grammar-based sampling. Default: no grammar + +`json_schema`: Set a JSON schema for grammar-based sampling (e.g. `{"items": {"type": "string"}, "minItems": 10, "maxItems": 100}` of a list of strings, or `{}` for any JSON). See [tests](../../tests/test-json-schema-to-grammar.cpp) for supported features. Default: no JSON schema. + +`seed`: Set the random number generator (RNG) seed. Default: `-1`, which is a random seed. + +`ignore_eos`: Ignore end of stream token and continue generating. Default: `false` + +`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. `[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. For compatibility with the OpenAI API, a JSON object {"": bias, ...} can also be passed. Default: `[]` + +`n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. Note that for temperature < 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Default: `0` + +`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0` + +`t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout will trigger if the generation takes more than the specified time (measured since the first token was generated) and if a new-line character has already been generated. Useful for FIM applications. Default: `0`, which is disabled. + +`id_slot`: Assign the completion task to an specific slot. 
If is -1 the task will be assigned to a Idle slot. Default: `-1` + +`cache_prompt`: Re-use KV cache from a previous request if possible. This way the common prefix does not have to be re-processed, only the suffix that differs between the requests. Because (depending on the backend) the logits are **not** guaranteed to be bit-for-bit identical for different batch sizes (prompt processing vs. token generation) enabling this option can cause nondeterministic results. Default: `true` + +`return_tokens`: Return the raw generated token ids in the `tokens` field. Otherwise `tokens` remains empty. Default: `false` + +`samplers`: The order the samplers should be applied in. An array of strings representing sampler type names. If a sampler is not set, it will not be used. If a sampler is specified more than once, it will be applied multiple times. Default: `["dry", "top_k", "typ_p", "top_p", "min_p", "xtc", "temperature"]` - these are all the available values. + +`timings_per_token`: Include prompt processing and text generation speed information in each response. Default: `false` + +`return_progress`: Include prompt processing progress in `stream` mode. The progress will be contained inside `prompt_progress` with 4 values: `total`, `cache`, `processed`, and `time_ms`. The overall progress is `processed/total`, while the actual timed progress is `(processed-cache)/(total-cache)`. The `time_ms` field contains the elapsed time in milliseconds since prompt processing started. Default: `false` + +`post_sampling_probs`: Returns the probabilities of top `n_probs` tokens after applying sampling chain. + +`response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. Note that fields with a slash will be unnested; for example, `generation_settings/n_predict` will move the field `n_predict` from the `generation_settings` object to the root of the response and give it a new name. + +`lora`: A list of LoRA adapters to be applied to this specific request. Each object in the list must contain `id` and `scale` fields. For example: `[{"id": 0, "scale": 0.5}, {"id": 1, "scale": 1.1}]`. If a LoRA adapter is not specified in the list, its scale will default to `0.0`. Please note that requests with different LoRA configurations will not be batched together, which may result in performance degradation. + +**Response format** + +- Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. + +- `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has a nested array `top_logprobs`. It contains at **maximum** `n_probs` elements: + ``` + { + "content": "", + "tokens": [ generated token ids if requested ], + ... + "probs": [ + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + "top_logprobs": [ + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + }, + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + }, + ... + ] + }, + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + "top_logprobs": [ + ... 
+ ] + }, + ... + ] + }, + ``` + Please note that if `post_sampling_probs` is set to `true`: + - `logprob` will be replaced with `prob`, with the value between 0.0 and 1.0 + - `top_logprobs` will be replaced with `top_probs`. Each element contains: + - `id`: token ID + - `token`: token in string + - `bytes`: token in bytes + - `prob`: token probability, with the value between 0.0 and 1.0 + - Number of elements in `top_probs` may be less than `n_probs` + +- `content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, will contain the next token as a string. +- `tokens`: Same as `content` but represented as raw token ids. Only populated if `"return_tokens": true` or `"stream": true` in the request. +- `stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options) +- `generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`. These options may differ from the original ones in some way (e.g. bad values filtered out, strings converted to tokens, etc.). +- `model`: The model alias (for model path, please use `/props` endpoint) +- `prompt`: The processed `prompt` (special tokens may be added) +- `stop_type`: Indicating whether the completion has stopped. Possible values are: + - `none`: Generating (not stopped) + - `eos`: Stopped because it encountered the EOS token + - `limit`: Stopped because `n_predict` tokens were generated before stop words or EOS was encountered + - `word`: Stopped due to encountering a stopping word from `stop` JSON array provided +- `stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word) +- `timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second` +- `tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion +- `tokens_evaluated`: Number of tokens evaluated in total from the prompt +- `truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`) + + +### POST `/tokenize`: Tokenize a given text + +*Options:* + +`content`: (Required) The text to tokenize. + +`add_special`: (Optional) Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false` + +`parse_special`: (Optional) Boolean indicating if special tokens should be tokenized. When `false` special tokens are treated as plaintext. Default: `true` + +`with_pieces`: (Optional) Boolean indicating whether to return token pieces along with IDs. Default: `false` + +**Response:** + +Returns a JSON object with a `tokens` field containing the tokenization result. The `tokens` array contains either just token IDs or objects with `id` and `piece` fields, depending on the `with_pieces` parameter. The piece field is a string if the piece is valid unicode or a list of bytes otherwise. 
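+
+For reference, a request can be sent with `curl` like this (a minimal sketch; the host, port and input text are arbitrary):
+
+```shell
+curl http://localhost:8080/tokenize \
+  -H "Content-Type: application/json" \
+  -d '{
+    "content": "Hello world!",
+    "add_special": true,
+    "with_pieces": true
+  }'
+```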
+ + +If `with_pieces` is `false`: +```json +{ + "tokens": [123, 456, 789] +} +``` + +If `with_pieces` is `true`: +```json +{ + "tokens": [ + {"id": 123, "piece": "Hello"}, + {"id": 456, "piece": " world"}, + {"id": 789, "piece": "!"} + ] +} +``` + +With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k +``` +{ + "tokens": [ + {"id": 198, "piece": [195]}, // hex C3 + {"id": 164, "piece": [161]} // hex A1 + ] +} +``` + +### POST `/detokenize`: Convert tokens to text + +*Options:* + +`tokens`: Set the tokens to detokenize. + +### POST `/apply-template`: Apply chat template to a conversation + +Uses the server's prompt template formatting functionality to convert chat messages to a single string expected by a chat model as input, but does not perform inference. Instead, the prompt string is returned in the `prompt` field of the JSON response. The prompt can then be modified as desired (for example, to insert "Sure!" at the beginning of the model's response) before sending to `/completion` to generate the chat response. + +*Options:* + +`messages`: (Required) Chat turns in the same format as `/v1/chat/completions`. + +**Response format** + +Returns a JSON object with a field `prompt` containing a string of the input messages formatted according to the model's chat template format. + +### POST `/embedding`: Generate embedding of a given text + +> [!IMPORTANT] +> +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/embeddings` instead. + +The same as [the embedding example](../embedding) does. + +This endpoint also supports multimodal embeddings. See the documentation for the `/completions` endpoint for details on how to send a multimodal prompt. + +*Options:* + +`content`: Set the text to process. + +`embd_normalize`: Normalization for pooled embeddings. Can be one of the following values: +``` + -1: No normalization + 0: Max absolute + 1: Taxicab + 2: Euclidean/L2 + >2: P-Norm +``` + +### POST `/reranking`: Rerank documents according to a given query + +Similar to https://jina.ai/reranker/ but might change in the future. +Requires a reranker model (such as [bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)) and the `--embedding --pooling rank` options. + +*Options:* + +`query`: The query against which the documents will be ranked. + +`documents`: An array strings representing the documents to be ranked. + +*Aliases:* + - `/rerank` + - `/v1/rerank` + - `/v1/reranking` + +*Examples:* + +```shell +curl http://127.0.0.1:8012/v1/rerank \ + -H "Content-Type: application/json" \ + -d '{ + "model": "some-model", + "query": "What is panda?", + "top_n": 3, + "documents": [ + "hi", + "it is a bear", + "The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." + ] + }' | jq +``` + +### POST `/infill`: For code infilling. + +Takes a prefix and a suffix and returns the predicted completion as stream. + +*Options:* + +- `input_prefix`: Set the prefix of the code to infill. +- `input_suffix`: Set the suffix of the code to infill. +- `input_extra`: Additional context inserted before the FIM prefix. +- `prompt`: Added after the `FIM_MID` token + +`input_extra` is array of `{"filename": string, "text": string}` objects. + +The endpoint also accepts all the options of `/completion`. 
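+
+For example, a request that provides extra context alongside the prefix and suffix might look like this (a minimal sketch; the file name and code fragments are invented for illustration):
+
+```shell
+curl http://localhost:8080/infill \
+  -H "Content-Type: application/json" \
+  -d '{
+    "input_prefix": "def remove_non_ascii(s: str) -> str:\n    result = \"\"\n",
+    "input_suffix": "\n    return result\n",
+    "input_extra": [
+      {"filename": "utils.py", "text": "def add(a, b):\n    return a + b\n"}
+    ],
+    "n_predict": 32
+  }'
+```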
+ +If the model has `FIM_REPO` and `FIM_FILE_SEP` tokens, the [repo-level pattern](https://arxiv.org/pdf/2409.12186) is used: + +```txt +myproject +{chunk 0 filename} +{chunk 0 text} +{chunk 1 filename} +{chunk 1 text} +... +filename +[input_prefix][input_suffix][prompt] +``` + +If the tokens are missing, then the extra context is simply prefixed at the start: + +```txt +[input_extra][input_prefix][input_suffix][prompt] +``` + +### **GET** `/props`: Get server global properties. + +By default, it is read-only. To make POST request to change global properties, you need to start server with `--props` + +**Response format** + +```json +{ + "default_generation_settings": { + "id": 0, + "id_task": -1, + "n_ctx": 1024, + "speculative": false, + "is_processing": false, + "params": { + "n_predict": -1, + "seed": 4294967295, + "temperature": 0.800000011920929, + "dynatemp_range": 0.0, + "dynatemp_exponent": 1.0, + "top_k": 40, + "top_p": 0.949999988079071, + "min_p": 0.05000000074505806, + "xtc_probability": 0.0, + "xtc_threshold": 0.10000000149011612, + "typical_p": 1.0, + "repeat_last_n": 64, + "repeat_penalty": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "dry_multiplier": 0.0, + "dry_base": 1.75, + "dry_allowed_length": 2, + "dry_penalty_last_n": -1, + "dry_sequence_breakers": [ + "\n", + ":", + "\"", + "*" + ], + "mirostat": 0, + "mirostat_tau": 5.0, + "mirostat_eta": 0.10000000149011612, + "stop": [], + "max_tokens": -1, + "n_keep": 0, + "n_discard": 0, + "ignore_eos": false, + "stream": true, + "n_probs": 0, + "min_keep": 0, + "grammar": "", + "samplers": [ + "dry", + "top_k", + "typ_p", + "top_p", + "min_p", + "xtc", + "temperature" + ], + "speculative.n_max": 16, + "speculative.n_min": 5, + "speculative.p_min": 0.8999999761581421, + "timings_per_token": false + }, + "prompt": "", + "next_token": { + "has_next_token": true, + "has_new_line": false, + "n_remain": -1, + "n_decoded": 0, + "stopping_word": "" + } + }, + "total_slots": 1, + "model_path": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "chat_template": "...", + "chat_template_caps": {}, + "modalities": { + "vision": false + }, + "build_info": "b(build number)-(build commit hash)", + "is_sleeping": false +} +``` + +- `default_generation_settings` - the default generation settings for the `/completion` endpoint, which has the same fields as the `generation_settings` response object from the `/completion` endpoint. +- `total_slots` - the total number of slots for process requests (defined by `--parallel` option) +- `model_path` - the path to model file (same with `-m` argument) +- `chat_template` - the model's original Jinja2 prompt template +- `chat_template_caps` - capabilities of the chat template (see `common/jinja/caps.h` for more info) +- `modalities` - the list of supported modalities +- `is_sleeping` - sleeping status, see [Sleeping on idle](#sleeping-on-idle) + +### POST `/props`: Change server global properties. + +To use this endpoint with POST method, you need to start server with `--props` + +*Options:* + +- None yet + +### POST `/embeddings`: non-OpenAI-compatible embeddings API + +This endpoint supports all poolings, including `--pooling none`. When the pooling is `none`, the responses will contain the *unnormalized* embeddings for *all* input tokens. For all other pooling types, only the pooled embeddings are returned, normalized using Euclidean norm. + +Note that the response format of this endpoint is different from `/v1/embeddings`. + +*Options:* + +Same as the `/v1/embeddings` endpoint. 
+
+*Examples:*
+
+Same as the `/v1/embeddings` endpoint.
+
+**Response format**
+
+```
+[
+  {
+    "index": 0,
+    "embedding": [
+      [ ... embeddings for token 0 ... ],
+      [ ... embeddings for token 1 ... ],
+      [ ... ],
+      [ ... embeddings for token N-1 ... ],
+    ]
+  },
+  ...
+  {
+    "index": P,
+    "embedding": [
+      [ ... embeddings for token 0 ... ],
+      [ ... embeddings for token 1 ... ],
+      [ ... ],
+      [ ... embeddings for token N-1 ... ],
+    ]
+  }
+]
+```
+
+### GET `/slots`: Returns the current slots processing state
+
+This endpoint is enabled by default and can be disabled with `--no-slots`. It can be used to query various per-slot metrics, such as speed, processed tokens, sampling parameters, etc.
+
+If the query param `?fail_on_no_slot=1` is set, this endpoint will respond with status code 503 if there are no available slots.
+
+**Response format**
+
+Example with 2 slots + +```json +[ + { + "id": 0, + "id_task": 135, + "n_ctx": 65536, + "speculative": false, + "is_processing": true, + "params": { + "n_predict": -1, + "seed": 4294967295, + "temperature": 0.800000011920929, + "dynatemp_range": 0.0, + "dynatemp_exponent": 1.0, + "top_k": 40, + "top_p": 0.949999988079071, + "min_p": 0.05000000074505806, + "top_n_sigma": -1.0, + "xtc_probability": 0.0, + "xtc_threshold": 0.10000000149011612, + "typical_p": 1.0, + "repeat_last_n": 64, + "repeat_penalty": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "dry_multiplier": 0.0, + "dry_base": 1.75, + "dry_allowed_length": 2, + "dry_penalty_last_n": 131072, + "mirostat": 0, + "mirostat_tau": 5.0, + "mirostat_eta": 0.10000000149011612, + "max_tokens": -1, + "n_keep": 0, + "n_discard": 0, + "ignore_eos": false, + "stream": true, + "n_probs": 0, + "min_keep": 0, + "chat_format": "GPT-OSS", + "reasoning_format": "none", + "reasoning_in_content": false, + "thinking_forced_open": false, + "samplers": [ + "penalties", + "dry", + "top_k", + "typ_p", + "top_p", + "min_p", + "xtc", + "temperature" + ], + "speculative.n_max": 16, + "speculative.n_min": 0, + "speculative.p_min": 0.75, + "timings_per_token": false, + "post_sampling_probs": false, + "lora": [] + }, + "next_token": { + "has_next_token": true, + "has_new_line": false, + "n_remain": -1, + "n_decoded": 0 + } + }, + { + "id": 1, + "id_task": 0, + "n_ctx": 65536, + "speculative": false, + "is_processing": true, + "params": { + "n_predict": -1, + "seed": 4294967295, + "temperature": 0.800000011920929, + "dynatemp_range": 0.0, + "dynatemp_exponent": 1.0, + "top_k": 40, + "top_p": 0.949999988079071, + "min_p": 0.05000000074505806, + "top_n_sigma": -1.0, + "xtc_probability": 0.0, + "xtc_threshold": 0.10000000149011612, + "typical_p": 1.0, + "repeat_last_n": 64, + "repeat_penalty": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "dry_multiplier": 0.0, + "dry_base": 1.75, + "dry_allowed_length": 2, + "dry_penalty_last_n": 131072, + "mirostat": 0, + "mirostat_tau": 5.0, + "mirostat_eta": 0.10000000149011612, + "max_tokens": -1, + "n_keep": 0, + "n_discard": 0, + "ignore_eos": false, + "stream": true, + "n_probs": 0, + "min_keep": 0, + "chat_format": "GPT-OSS", + "reasoning_format": "none", + "reasoning_in_content": false, + "thinking_forced_open": false, + "samplers": [ + "penalties", + "dry", + "top_k", + "typ_p", + "top_p", + "min_p", + "xtc", + "temperature" + ], + "speculative.n_max": 16, + "speculative.n_min": 0, + "speculative.p_min": 0.75, + "timings_per_token": false, + "post_sampling_probs": false, + "lora": [] + }, + "next_token": { + "has_next_token": true, + "has_new_line": true, + "n_remain": -1, + "n_decoded": 136 + } + } +] +``` + +
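+
+Querying the endpoint is a plain GET (a minimal sketch; host and port are assumptions):
+
+```shell
+# return HTTP 503 instead of waiting when no slot is available
+curl 'http://localhost:8080/slots?fail_on_no_slot=1'
+```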
+ +### GET `/metrics`: Prometheus compatible metrics exporter + +This endpoint is only accessible if `--metrics` is set. + +Available metrics: +- `llamacpp:prompt_tokens_total`: Number of prompt tokens processed. +- `llamacpp:tokens_predicted_total`: Number of generation tokens processed. +- `llamacpp:prompt_tokens_seconds`: Average prompt throughput in tokens/s. +- `llamacpp:predicted_tokens_seconds`: Average generation throughput in tokens/s. +- `llamacpp:kv_cache_usage_ratio`: KV-cache usage. `1` means 100 percent usage. +- `llamacpp:kv_cache_tokens`: KV-cache tokens. +- `llamacpp:requests_processing`: Number of requests processing. +- `llamacpp:requests_deferred`: Number of requests deferred. +- `llamacpp:n_tokens_max`: High watermark of the context size observed. + +### POST `/slots/{id_slot}?action=save`: Save the prompt cache of the specified slot to a file. + +*Options:* + +`filename`: Name of the file to save the slot's prompt cache. The file will be saved in the directory specified by the `--slot-save-path` server parameter. + +**Response format** + +```json +{ + "id_slot": 0, + "filename": "slot_save_file.bin", + "n_saved": 1745, + "n_written": 14309796, + "timings": { + "save_ms": 49.865 + } +} +``` + +### POST `/slots/{id_slot}?action=restore`: Restore the prompt cache of the specified slot from a file. + +*Options:* + +`filename`: Name of the file to restore the slot's prompt cache from. The file should be located in the directory specified by the `--slot-save-path` server parameter. + +**Response format** + +```json +{ + "id_slot": 0, + "filename": "slot_save_file.bin", + "n_restored": 1745, + "n_read": 14309796, + "timings": { + "restore_ms": 42.937 + } +} +``` + +### POST `/slots/{id_slot}?action=erase`: Erase the prompt cache of the specified slot. + +**Response format** + +```json +{ + "id_slot": 0, + "n_erased": 1745 +} +``` + +### GET `/lora-adapters`: Get list of all LoRA adapters + +This endpoint returns the loaded LoRA adapters. You can add adapters using `--lora` when starting the server, for example: `--lora my_adapter_1.gguf --lora my_adapter_2.gguf ...` + +By default, all adapters will be loaded with scale set to 1. To initialize all adapters scale to 0, add `--lora-init-without-apply` + +Please note that this value will be overwritten by the `lora` field for each request. + +If an adapter is disabled, the scale will be set to 0. + +**Response format** + +```json +[ + { + "id": 0, + "path": "my_adapter_1.gguf", + "scale": 0.0 + }, + { + "id": 1, + "path": "my_adapter_2.gguf", + "scale": 0.0 + } +] +``` + +### POST `/lora-adapters`: Set list of LoRA adapters + +This sets the global scale for LoRA adapters. Please note that this value will be overwritten by the `lora` field for each request. + +To disable an adapter, either remove it from the list below, or set scale to 0. + +**Request format** + +To know the `id` of the adapter, use GET `/lora-adapters` + +```json +[ + {"id": 0, "scale": 0.2}, + {"id": 1, "scale": 0.8} +] +``` + +## OpenAI-compatible API Endpoints + +### GET `/v1/models`: OpenAI-compatible Model Info API + +Returns information about the loaded model. See [OpenAI Models API documentation](https://platform.openai.com/docs/api-reference/models). + +The returned list always has one single element. The `meta` field can be `null` (for example, while the model is still loading). + +By default, model `id` field is the path to model file, specified via `-m`. You can set a custom value for model `id` field via `--alias` argument. 
For example, `--alias gpt-4o-mini`. + +Example: + +```json +{ + "object": "list", + "data": [ + { + "id": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "object": "model", + "created": 1735142223, + "owned_by": "llamacpp", + "meta": { + "vocab_type": 2, + "n_vocab": 128256, + "n_ctx_train": 131072, + "n_embd": 4096, + "n_params": 8030261312, + "size": 4912898304 + } + } + ] +} +``` + +### POST `/v1/completions`: OpenAI-compatible Completions API + +Given an input `prompt`, it returns the predicted completion. Streaming mode is also supported. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. + +*Options:* + +See [OpenAI Completions API documentation](https://platform.openai.com/docs/api-reference/completions). + +llama.cpp `/completion`-specific features such as `mirostat` are supported. + +*Examples:* + +Example usage with `openai` python library: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8 +) + +print(completion.choices[0].text) +``` + +### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API + +Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. + +If model supports multimodal, you can input the media file via `image_url` content part. We support both base64 and remote URL as input. See OAI documentation for more. + +*Options:* + +See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). llama.cpp `/completion`-specific features such as `mirostat` are also supported. + +The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. + +`chat_template_kwargs`: Allows sending additional parameters to the json templating system. For example: `{"enable_thinking": false}` + +`reasoning_format`: The reasoning format to be parsed. If set to `none`, it will output the raw generated text. + +`thinking_forced_open`: Force a reasoning model to always output the reasoning. Only works on certain models. + +`parse_tool_calls`: Whether to parse the generated tool call. + +`parallel_tool_calls` : Whether to enable parallel/multiple tool calls (only supported on some models, verification is based on jinja template). 
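+
+As an illustration of the `response_format` option described above, a schema-constrained request could look like this (a minimal sketch; the model name, prompt and schema are invented); more general examples follow below:
+
+```shell
+curl http://localhost:8080/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [
+      {"role": "user", "content": "Extract the event name and date from: team sync on 2025-03-14"}
+    ],
+    "response_format": {
+      "type": "json_schema",
+      "schema": {
+        "type": "object",
+        "properties": {
+          "name": {"type": "string"},
+          "date": {"type": "string"}
+        },
+        "required": ["name", "date"]
+      }
+    }
+  }'
+```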
+ +*Examples:* + +You can use either Python `openai` library with appropriate checkpoints: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, + {"role": "user", "content": "Write a limerick about python exceptions"} + ] +) + +print(completion.choices[0].message) +``` + +... or raw HTTP requests: + +```shell +curl http://localhost:8080/v1/chat/completions \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer no-key" \ +-d '{ +"model": "gpt-3.5-turbo", +"messages": [ +{ + "role": "system", + "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." +}, +{ + "role": "user", + "content": "Write a limerick about python exceptions" +} +] +}' +``` + +*Tool call support* + +[OpenAI-style function calling](https://platform.openai.com/docs/guides/function-calling) is supported with the `--jinja` flag (and may require a `--chat-template-file` override to get the right tool-use compatible Jinja template; worst case, `--chat-template chatml` may also work). + +**See our [Function calling](../../docs/function-calling.md) docs** for more details, supported native tool call styles (generic tool call style is used as fallback) / examples of use. + +*Timings and context usage* + +The response contains a `timings` object, for example: + +```js +{ + "choices": [], + "created": 1757141666, + "id": "chatcmpl-ecQULm0WqPrftUqjPZO1CFYeDjGZNbDu", + // ... + "timings": { + "cache_n": 236, // number of prompt tokens reused from cache + "prompt_n": 1, // number of prompt tokens being processed + "prompt_ms": 30.958, + "prompt_per_token_ms": 30.958, + "prompt_per_second": 32.301828283480845, + "predicted_n": 35, // number of predicted tokens + "predicted_ms": 661.064, + "predicted_per_token_ms": 18.887542857142858, + "predicted_per_second": 52.94494935437416 + } +} +``` + +This provides information on the performance of the server. It also allows calculating the current context usage. + +The total number of tokens in context is equal to `prompt_n + cache_n + predicted_n` + +*Reasoning support* + +The server supports parsing and returning reasoning via the `reasoning_content` field, similar to Deepseek API. + +Reasoning input (preserve reasoning in history) is also supported by some specific templates. For more details, please refer to [PR#18994](https://github.com/ggml-org/llama.cpp/pull/18994). + +### POST `/v1/responses`: OpenAI-compatible Responses API + +*Options:* + +See [OpenAI Responses API documentation](https://platform.openai.com/docs/api-reference/responses). + +*Examples:* + +You can use either Python `openai` library with appropriate checkpoints: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +response = client.responses.create( + model="gpt-4.1", + instructions="You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests.", + input="Write a limerick about python exceptions" +) + +print(response.output_text) +``` + +... 
or raw HTTP requests: + +```shell +curl http://localhost:8080/v1/responses \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer no-key" \ +-d '{ +"model": "gpt-4.1", +"instructions": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests.", +"input": "Write a limerick about python exceptions" +}' +``` + +This endpoint works by converting Responses request into Chat Completions request. + + +### POST `/v1/embeddings`: OpenAI-compatible embeddings API + +This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. + +*Options:* + +See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). + +*Examples:* + +- input as string + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": "hello", + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + +- `input` as string array + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": ["hello", "world"], + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + +### POST `/v1/messages`: Anthropic-compatible Messages API + +Given a list of `messages`, returns the assistant's response. Streaming is supported via Server-Sent Events. While no strong claims of compatibility with the Anthropic API spec are made, in our experience it suffices to support many apps. + +*Options:* + +See [Anthropic Messages API documentation](https://docs.anthropic.com/en/api/messages). Tool use requires `--jinja` flag. + +`model`: Model identifier (required) + +`messages`: Array of message objects with `role` and `content` (required) + +`max_tokens`: Maximum tokens to generate (default: 4096) + +`system`: System prompt as string or array of content blocks + +`temperature`: Sampling temperature 0-1 (default: 1.0) + +`top_p`: Nucleus sampling (default: 1.0) + +`top_k`: Top-k sampling + +`stop_sequences`: Array of stop sequences + +`stream`: Enable streaming (default: false) + +`tools`: Array of tool definitions (requires `--jinja`) + +`tool_choice`: Tool selection mode (`{"type": "auto"}`, `{"type": "any"}`, or `{"type": "tool", "name": "..."}`) + +*Examples:* + +```shell +curl http://localhost:8080/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: your-api-key" \ + -d '{ + "model": "gpt-4", + "max_tokens": 1024, + "system": "You are a helpful assistant.", + "messages": [ + {"role": "user", "content": "Hello!"} + ] + }' +``` + +### POST `/v1/messages/count_tokens`: Token Counting + +Counts the number of tokens in a request without generating a response. + +Accepts the same parameters as `/v1/messages`. The `max_tokens` parameter is not required. + +*Example:* + +```shell +curl http://localhost:8080/v1/messages/count_tokens \ + -H "Content-Type: application/json" \ + -d '{ + "model": "gpt-4", + "messages": [ + {"role": "user", "content": "Hello!"} + ] + }' +``` + +*Response:* + +```json +{"input_tokens": 10} +``` + +## Using multiple models + +`llama-server` can be launched in a **router mode** that exposes an API for dynamically loading and unloading models. The main process (the "router") automatically forwards each request to the appropriate model instance. 
+ +To start in router mode, launch `llama-server` **without specifying any model**: + +```sh +llama-server +``` + +### Model sources + +There are 3 possible sources for model files: +1. Cached models (controlled by the `LLAMA_CACHE` environment variable) +2. Custom model directory (set via the `--models-dir` argument) +3. Custom preset (set via the `--models-preset` argument) + +By default, the router looks for models in the cache. You can add Hugging Face models to the cache with: + +```sh +llama-server -hf /: +``` + +*The server must be restarted after adding a new model.* + +Alternatively, you can point the router to a local directory containing your GGUF files using `--models-dir`. Example command: + +```sh +llama-server --models-dir ./models_directory +``` + +If the model contains multiple GGUF (for multimodal or multi-shard), files should be put into a subdirectory. The directory structure should look like this: + +```sh +models_directory + │ + │ # single file + ├─ llama-3.2-1b-Q4_K_M.gguf + ├─ Qwen3-8B-Q4_K_M.gguf + │ + │ # multimodal + ├─ gemma-3-4b-it-Q8_0 + │ ├─ gemma-3-4b-it-Q8_0.gguf + │ └─ mmproj-F16.gguf # file name must start with "mmproj" + │ + │ # multi-shard + ├─ Kimi-K2-Thinking-UD-IQ1_S + │ ├─ Kimi-K2-Thinking-UD-IQ1_S-00001-of-00006.gguf + │ ├─ Kimi-K2-Thinking-UD-IQ1_S-00002-of-00006.gguf + │ ├─ ... + │ └─ Kimi-K2-Thinking-UD-IQ1_S-00006-of-00006.gguf +``` + +You may also specify default arguments that will be passed to every model instance: + +```sh +llama-server -ctx 8192 -n 1024 -np 2 +``` + +Note: model instances inherit both command line arguments and environment variables from the router server. + +Alternatively, you can also add GGUF based preset (see next section) + +### Model presets + +Model presets allow advanced users to define custom configurations using an `.ini` file: + +```sh +llama-server --models-preset ./my-models.ini +``` + +Each section in the file defines a new preset. Keys within a section correspond to command-line arguments (without leading dashes). For example, the argument `--n-gpu-layers 123` is written as `n-gpu-layers = 123`. + +Short argument forms (e.g., `c`, `ngl`) and environment variable names (e.g., `LLAMA_ARG_N_GPU_LAYERS`) are also supported as keys. + +Example: + +```ini +version = 1 + +; (Optional) This section provides global settings shared across all presets. +; If the same key is defined in a specific preset, it will override the value in this global section. +[*] +c = 8192 +n-gpu-layer = 8 + +; If the key corresponds to an existing model on the server, +; this will be used as the default config for that model +[ggml-org/MY-MODEL-GGUF:Q8_0] +; string value +chat-template = chatml +; numeric value +n-gpu-layers = 123 +; flag value (for certain flags, you need to use the "no-" prefix for negation) +jinja = true +; shorthand argument (for example, context size) +c = 4096 +; environment variable name +LLAMA_ARG_CACHE_RAM = 0 +; file paths are relative to server's CWD +model-draft = ./my-models/draft.gguf +; but it's RECOMMENDED to use absolute path +model-draft = /Users/abc/my-models/draft.gguf + +; If the key does NOT correspond to an existing model, +; you need to specify at least the model path or HF repo +[custom_model] +model = /Users/abc/my-awesome-model-Q4_K_M.gguf +``` + +Note: some arguments are controlled by router (e.g., host, port, API key, HF repo, model alias). They will be removed or overwritten upon loading. + +The precedence rule for preset options is as follows: +1. 
**Command-line arguments** passed to `llama-server` (highest priority) +2. **Model-specific options** defined in the preset file (e.g. `[ggml-org/MY-MODEL...]`) +3. **Global options** defined in the preset file (`[*]`) + +We also offer additional options that are exclusive to presets (these aren't treated as command-line arguments): +- `load-on-startup` (boolean): Controls whether the model loads automatically when the server starts +- `stop-timeout` (int, seconds): After requested unload, wait for this many seconds before forcing termination (default: 10) + +### Routing requests + +Requests are routed according to the requested model name. + +For **POST** endpoints (`/v1/chat/completions`, `/v1/completions`, `/infill`, etc.) The router uses the `"model"` field in the JSON body: + +```json +{ + "model": "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M", + "messages": [ + { + "role": "user", + "content": "hello" + } + ] +} +``` + +For **GET** endpoints (`/props`, `/metrics`, etc.) The router uses the `model` query parameter (URL-encoded): + +``` +GET /props?model=ggml-org%2Fgemma-3-4b-it-GGUF%3AQ4_K_M +``` + +By default, the model will be loaded automatically if it's not loaded. To disable this, add `--no-models-autoload` when starting the server. Additionally, you can include `?autoload=true|false` in the query param to control this behavior per-request. + +### GET `/models`: List available models + +Listing all models in cache. The model metadata will also include a field to indicate the status of the model: + +```json +{ + "data": [{ + "id": "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M", + "in_cache": true, + "path": "/Users/REDACTED/Library/Caches/llama.cpp/ggml-org_gemma-3-4b-it-GGUF_gemma-3-4b-it-Q4_K_M.gguf", + "status": { + "value": "loaded", + "args": ["llama-server", "-ctx", "4096"] + }, + ... + }] +} +``` + +Note: For a local GGUF (stored offline in a custom directory), the model object will have `"in_cache": false`. + +The `status` object can be: + +```json +"status": { + "value": "unloaded" +} +``` + +```json +"status": { + "value": "loading", + "args": ["llama-server", "-ctx", "4096"] +} +``` + +```json +"status": { + "value": "unloaded", + "args": ["llama-server", "-ctx", "4096"], + "failed": true, + "exit_code": 1 +} +``` + +```json +"status": { + "value": "loaded", + "args": ["llama-server", "-ctx", "4096"] +} +``` + +### POST `/models/load`: Load a model + +Load a model + +Payload: +- `model`: name of the model to be loaded. + +```json +{ + "model": "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M" +} +``` + +Response: + +```json +{ + "success": true +} +``` + + +### POST `/models/unload`: Unload a model + +Unload a model + +Payload: + +```json +{ + "model": "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M", +} +``` + +Response: + +```json +{ + "success": true +} +``` + +## API errors + +`llama-server` returns errors in the same format as OAI: https://github.com/openai/openai-openapi + +Example of an error: + +```json +{ + "error": { + "code": 401, + "message": "Invalid API Key", + "type": "authentication_error" + } +} +``` + +## Sleeping on Idle + +The server supports an automatic sleep mode that activates after a specified period of inactivity (no incoming tasks). This feature, introduced in [PR #18228](https://github.com/ggml-org/llama.cpp/pull/18228), can be enabled using the `--sleep-idle-seconds` command-line argument. It works seamlessly in both single-model and multi-model configurations. 
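+
+For example (a minimal sketch; the model file and timeout are placeholders):
+
+```shell
+# put the server to sleep after 5 minutes of inactivity
+llama-server -m my_model.gguf --sleep-idle-seconds 300
+```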
+ +When the server enters sleep mode, the model and its associated memory (including the KV cache) are unloaded from RAM to conserve resources. Any new incoming task will automatically trigger the model to reload. + +The sleeping status can be retrieved from the `GET /props` endpoint (or `/props?model=(model_name)` in router mode). + +Note that the following endpoints are exempt from being considered as incoming tasks. They do not trigger model reloading and do not reset the idle timer: +- `GET /health` +- `GET /props` +- `GET /models` + +## More examples + +### Interactive mode + +Check the sample in [chat.mjs](chat.mjs). +Run with NodeJS version 16 or later: + +```sh +node chat.mjs +``` + +Another sample in [chat.sh](chat.sh). +Requires [bash](https://www.gnu.org/software/bash/), [curl](https://curl.se) and [jq](https://jqlang.github.io/jq/). +Run with bash: + +```sh +bash chat.sh +``` + +Apart from error types supported by OAI, we also have custom types that are specific to functionalities of llama.cpp: + +**When /metrics or /slots endpoint is disabled** + +```json +{ + "error": { + "code": 501, + "message": "This server does not support metrics endpoint.", + "type": "not_supported_error" + } +} +``` + +**When the server receives invalid grammar via */completions endpoint** + +```json +{ + "error": { + "code": 400, + "message": "Failed to parse grammar", + "type": "invalid_request_error" + } +} +``` + +### Legacy completion web UI + +A new chat-based UI has replaced the old completion-based since [this PR](https://github.com/ggml-org/llama.cpp/pull/10175). If you want to use the old completion, start the server with `--path ./tools/server/public_legacy` + +For example: + +```sh +./llama-server -m my_model.gguf -c 8192 --path ./tools/server/public_legacy +``` + +### Extending or building alternative Web Front End + +You can extend the front end by running the server binary with `--path` set to `./your-directory` and importing `/completion.js` to get access to the llamaComplete() method. + +Read the documentation in `/completion.js` to see convenient ways to access llama. + +A simple example is below: + +```html + + +
+<!-- a minimal sketch, assuming the legacy completion.js exports an async generator llama() that yields SSE chunks -->
+<script type="module">
+  import { llama } from '/completion.js'
+
+  // stream a completion and append each generated chunk to the page
+  for await (const chunk of llama('Building a website can be done in 10 simple steps:', { n_predict: 64 })) {
+    document.write(chunk.data.content)
+  }
+</script>
+ + +``` diff --git a/llama.cpp/tools/server/bench/README.md b/llama.cpp/tools/server/bench/README.md new file mode 100644 index 0000000..9549795 --- /dev/null +++ b/llama.cpp/tools/server/bench/README.md @@ -0,0 +1,119 @@ +### Server benchmark tools + +Benchmark is using [k6](https://k6.io/). + +##### Install k6 and sse extension + +SSE is not supported by default in k6, you have to build k6 with the [xk6-sse](https://github.com/phymbert/xk6-sse) extension. + +Example (assuming golang >= 1.21 is installed): +```shell +go install go.k6.io/xk6/cmd/xk6@latest +$GOPATH/bin/xk6 build master \ +--with github.com/phymbert/xk6-sse +``` + +#### Download a dataset + +This dataset was originally proposed in [vLLM benchmarks](https://github.com/vllm-project/vllm/blob/main/benchmarks/README.md). + +```shell +wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json +``` + +#### Download a model +Example for PHI-2 + +```shell +../../../scripts/hf.sh --repo ggml-org/models --file phi-2/ggml-model-q4_0.gguf +``` + +#### Start the server +The server must answer OAI Chat completion requests on `http://localhost:8080/v1` or according to the environment variable `SERVER_BENCH_URL`. + +Example: +```shell +llama-server --host localhost --port 8080 \ + --model ggml-model-q4_0.gguf \ + --cont-batching \ + --metrics \ + --parallel 8 \ + --batch-size 512 \ + --ctx-size 4096 \ + -ngl 33 +``` + +#### Run the benchmark + +For 500 chat completions request with 8 concurrent users during maximum 10 minutes, run: +```shell +./k6 run script.js --duration 10m --iterations 500 --vus 8 +``` + +The benchmark values can be overridden with: +- `SERVER_BENCH_URL` server url prefix for chat completions, default `http://localhost:8080/v1` +- `SERVER_BENCH_N_PROMPTS` total prompts to randomly select in the benchmark, default `480` +- `SERVER_BENCH_MODEL_ALIAS` model alias to pass in the completion request, default `my-model` +- `SERVER_BENCH_MAX_TOKENS` max tokens to predict, default: `512` +- `SERVER_BENCH_DATASET` path to the benchmark dataset file +- `SERVER_BENCH_MAX_PROMPT_TOKENS` maximum prompt tokens to filter out in the dataset: default `1024` +- `SERVER_BENCH_MAX_CONTEXT` maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens, default `2048` + +Note: the local tokenizer is just a string space split, real number of tokens will differ. + +Or with [k6 options](https://k6.io/docs/using-k6/k6-options/reference/): + +```shell +SERVER_BENCH_N_PROMPTS=500 k6 run script.js --duration 10m --iterations 500 --vus 8 +``` + +To [debug http request](https://k6.io/docs/using-k6/http-debugging/) use `--http-debug="full"`. + +#### Metrics + +Following metrics are available computed from the OAI chat completions response `usage`: +- `llamacpp_tokens_second` Trend of `usage.total_tokens / request duration` +- `llamacpp_prompt_tokens` Trend of `usage.prompt_tokens` +- `llamacpp_prompt_tokens_total_counter` Counter of `usage.prompt_tokens` +- `llamacpp_completion_tokens` Trend of `usage.completion_tokens` +- `llamacpp_completion_tokens_total_counter` Counter of `usage.completion_tokens` +- `llamacpp_completions_truncated_rate` Rate of completions truncated, i.e. if `finish_reason === 'length'` +- `llamacpp_completions_stop_rate` Rate of completions stopped by the model, i.e. if `finish_reason === 'stop'` + +The script will fail if too many completions are truncated, see `llamacpp_completions_truncated_rate`. 
+ +K6 metrics might be compared against [server metrics](../README.md), with: + +```shell +curl http://localhost:8080/metrics +``` + +### Using the CI python script +The `bench.py` script does several steps: +- start the server +- define good variable for k6 +- run k6 script +- extract metrics from prometheus + +It aims to be used in the CI, but you can run it manually: + +```shell +LLAMA_SERVER_BIN_PATH=../../../cmake-build-release/bin/llama-server python bench.py \ + --runner-label local \ + --name local \ + --branch `git rev-parse --abbrev-ref HEAD` \ + --commit `git rev-parse HEAD` \ + --scenario script.js \ + --duration 5m \ + --hf-repo ggml-org/models \ + --hf-file phi-2/ggml-model-q4_0.gguf \ + --model-path-prefix models \ + --parallel 4 \ + -ngl 33 \ + --batch-size 2048 \ + --ubatch-size 256 \ + --ctx-size 4096 \ + --n-prompts 200 \ + --max-prompt-tokens 256 \ + --max-tokens 256 +``` diff --git a/llama.cpp/tools/server/bench/bench.py b/llama.cpp/tools/server/bench/bench.py new file mode 100644 index 0000000..0c57a2d --- /dev/null +++ b/llama.cpp/tools/server/bench/bench.py @@ -0,0 +1,322 @@ +from __future__ import annotations + +import argparse +import json +import os +import re +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +from contextlib import closing +from datetime import datetime + +import matplotlib +import matplotlib.dates +import matplotlib.pyplot as plt +import requests +from statistics import mean + + +def main(args_in: list[str] | None = None) -> None: + parser = argparse.ArgumentParser(description="Start server benchmark scenario") + parser.add_argument("--name", type=str, help="Bench name", required=True) + parser.add_argument("--runner-label", type=str, help="Runner label", required=True) + parser.add_argument("--branch", type=str, help="Branch name", default="detached") + parser.add_argument("--commit", type=str, help="Commit name", default="dirty") + parser.add_argument("--host", type=str, help="Server listen host", default="0.0.0.0") + parser.add_argument("--port", type=int, help="Server listen host", default="8080") + parser.add_argument("--model-path-prefix", type=str, help="Prefix where to store the model files", default="models") + parser.add_argument("--n-prompts", type=int, + help="SERVER_BENCH_N_PROMPTS: total prompts to randomly select in the benchmark", required=True) + parser.add_argument("--max-prompt-tokens", type=int, + help="SERVER_BENCH_MAX_PROMPT_TOKENS: maximum prompt tokens to filter out in the dataset", + required=True) + parser.add_argument("--max-tokens", type=int, + help="SERVER_BENCH_MAX_CONTEXT: maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens", + required=True) + parser.add_argument("--hf-repo", type=str, help="Hugging Face model repository", required=True) + parser.add_argument("--hf-file", type=str, help="Hugging Face model file", required=True) + parser.add_argument("-ngl", "--n-gpu-layers", type=int, help="layers to the GPU for computation", required=True) + parser.add_argument("--ctx-size", type=int, help="Set the size of the prompt context", required=True) + parser.add_argument("--parallel", type=int, help="Set the number of slots for process requests", required=True) + parser.add_argument("--batch-size", type=int, help="Set the batch size for prompt processing", required=True) + parser.add_argument("--ubatch-size", type=int, help="physical maximum batch size", required=True) + parser.add_argument("--scenario", 
type=str, help="Scenario to run", required=True) + parser.add_argument("--duration", type=str, help="Bench scenario", required=True) + + args = parser.parse_args(args_in) + + start_time = time.time() + + # Start the server and performance scenario + try: + server_process = start_server(args) + except Exception: + print("bench: server start error :") + traceback.print_exc(file=sys.stdout) + sys.exit(1) + + # start the benchmark + iterations = 0 + data = {} + try: + start_benchmark(args) + + with open("results.github.env", 'w') as github_env: + # parse output + with open('k6-results.json', 'r') as bench_results: + # Load JSON data from file + data = json.load(bench_results) + for metric_name in data['metrics']: + for metric_metric in data['metrics'][metric_name]: + value = data['metrics'][metric_name][metric_metric] + if isinstance(value, float) or isinstance(value, int): + value = round(value, 2) + data['metrics'][metric_name][metric_metric]=value + github_env.write( + f"{escape_metric_name(metric_name)}_{escape_metric_name(metric_metric)}={value}\n") + iterations = data['root_group']['checks']['success completion']['passes'] + + except Exception: + print("bench: error :") + traceback.print_exc(file=sys.stdout) + + # Stop the server + if server_process: + try: + print(f"bench: shutting down server pid={server_process.pid} ...") + if os.name == 'nt': + interrupt = signal.CTRL_C_EVENT + else: + interrupt = signal.SIGINT + server_process.send_signal(interrupt) + server_process.wait(0.5) + + except subprocess.TimeoutExpired: + print(f"server still alive after 500ms, force-killing pid={server_process.pid} ...") + server_process.kill() # SIGKILL + server_process.wait() + + while is_server_listening(args.host, args.port): + time.sleep(0.1) + + title = (f"llama.cpp {args.name} on {args.runner_label}\n " + f"duration={args.duration} {iterations} iterations") + xlabel = (f"{args.hf_repo}/{args.hf_file}\n" + f"parallel={args.parallel} ctx-size={args.ctx_size} ngl={args.n_gpu_layers} batch-size={args.batch_size} ubatch-size={args.ubatch_size} pp={args.max_prompt_tokens} pp+tg={args.max_tokens}\n" + f"branch={args.branch} commit={args.commit}") + + # Prometheus + end_time = time.time() + prometheus_metrics = {} + if is_server_listening("0.0.0.0", 9090): + metrics = ['prompt_tokens_seconds', 'predicted_tokens_seconds', + 'kv_cache_usage_ratio', 'requests_processing', 'requests_deferred'] + + for metric in metrics: + resp = requests.get(f"http://localhost:9090/api/v1/query_range", + params={'query': 'llamacpp:' + metric, 'start': start_time, 'end': end_time, 'step': 2}) + + with open(f"{metric}.json", 'w') as metric_json: + metric_json.write(resp.text) + + if resp.status_code != 200: + print(f"bench: unable to extract prometheus metric {metric}: {resp.text}") + else: + metric_data = resp.json() + values = metric_data['data']['result'][0]['values'] + timestamps, metric_values = zip(*values) + metric_values = [float(value) for value in metric_values] + prometheus_metrics[metric] = metric_values + timestamps_dt = [str(datetime.fromtimestamp(int(ts))) for ts in timestamps] + plt.figure(figsize=(16, 10), dpi=80) + plt.plot(timestamps_dt, metric_values, label=metric) + plt.xticks(rotation=0, fontsize=14, horizontalalignment='center', alpha=.7) + plt.yticks(fontsize=12, alpha=.7) + + ylabel = f"llamacpp:{metric}" + plt.title(title, + fontsize=14, wrap=True) + plt.grid(axis='both', alpha=.3) + plt.ylabel(ylabel, fontsize=22) + plt.xlabel(xlabel, fontsize=14, wrap=True) + 
plt.gca().xaxis.set_major_locator(matplotlib.dates.MinuteLocator()) + plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y-%m-%d %H:%M:%S")) + plt.gcf().autofmt_xdate() + + # Remove borders + plt.gca().spines["top"].set_alpha(0.0) + plt.gca().spines["bottom"].set_alpha(0.3) + plt.gca().spines["right"].set_alpha(0.0) + plt.gca().spines["left"].set_alpha(0.3) + + # Save the plot as a jpg image + plt.savefig(f'{metric}.jpg', dpi=60) + plt.close() + + # Mermaid format in case images upload failed + with open(f"{metric}.mermaid", 'w') as mermaid_f: + mermaid = ( + f"""--- +config: + xyChart: + titleFontSize: 12 + width: 900 + height: 600 + themeVariables: + xyChart: + titleColor: "#000000" +--- +xychart-beta + title "{title}" + y-axis "llamacpp:{metric}" + x-axis "llamacpp:{metric}" {int(min(timestamps))} --> {int(max(timestamps))} + line [{', '.join([str(round(float(value), 2)) for value in metric_values])}] + """) + mermaid_f.write(mermaid) + + # 140 chars max for commit status description + bench_results = { + "i": iterations, + "req": { + "p95": round(data['metrics']["http_req_duration"]["p(95)"], 2), + "avg": round(data['metrics']["http_req_duration"]["avg"], 2), + }, + "pp": { + "p95": round(data['metrics']["llamacpp_prompt_processing_second"]["p(95)"], 2), + "avg": round(data['metrics']["llamacpp_prompt_processing_second"]["avg"], 2), + "0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2) if 'prompt_tokens_seconds' in prometheus_metrics else 0, + }, + "tg": { + "p95": round(data['metrics']["llamacpp_tokens_second"]["p(95)"], 2), + "avg": round(data['metrics']["llamacpp_tokens_second"]["avg"], 2), + "0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2) if 'predicted_tokens_seconds' in prometheus_metrics else 0, + }, + } + with open("results.github.env", 'a') as github_env: + github_env.write(f"BENCH_RESULTS={json.dumps(bench_results, indent=None, separators=(',', ':') )}\n") + github_env.write(f"BENCH_ITERATIONS={iterations}\n") + + title = title.replace('\n', ' ') + xlabel = xlabel.replace('\n', ' ') + github_env.write(f"BENCH_GRAPH_TITLE={title}\n") + github_env.write(f"BENCH_GRAPH_XLABEL={xlabel}\n") + + +def start_benchmark(args): + k6_path = './k6' + if 'BENCH_K6_BIN_PATH' in os.environ: + k6_path = os.environ['BENCH_K6_BIN_PATH'] + k6_args = [ + 'run', args.scenario, + '--no-color', + '--no-connection-reuse', + '--no-vu-connection-reuse', + ] + k6_args.extend(['--duration', args.duration]) + k6_args.extend(['--iterations', args.n_prompts]) + k6_args.extend(['--vus', args.parallel]) + k6_args.extend(['--summary-export', 'k6-results.json']) + k6_args.extend(['--out', 'csv=k6-results.csv']) + args = f"SERVER_BENCH_N_PROMPTS={args.n_prompts} SERVER_BENCH_MAX_PROMPT_TOKENS={args.max_prompt_tokens} SERVER_BENCH_MAX_CONTEXT={args.max_tokens} " + args = args + ' '.join([str(arg) for arg in [k6_path, *k6_args]]) + print(f"bench: starting k6 with: {args}") + k6_completed = subprocess.run(args, shell=True, stdout=sys.stdout, stderr=sys.stderr) + if k6_completed.returncode != 0: + raise Exception("bench: unable to run k6") + + +def start_server(args): + server_process = start_server_background(args) + + attempts = 0 + max_attempts = 600 + if 'GITHUB_ACTIONS' in os.environ: + max_attempts *= 2 + + while not is_server_listening(args.host, args.port): + attempts += 1 + if attempts > max_attempts: + assert False, "server not started" + print(f"bench: waiting for server to start ...") + time.sleep(0.5) + + attempts = 0 + while not 
is_server_ready(args.host, args.port): + attempts += 1 + if attempts > max_attempts: + assert False, "server not ready" + print(f"bench: waiting for server to be ready ...") + time.sleep(0.5) + + print("bench: server started and ready.") + return server_process + + +def start_server_background(args): + # Start the server + server_path = '../../../build/bin/llama-server' + if 'LLAMA_SERVER_BIN_PATH' in os.environ: + server_path = os.environ['LLAMA_SERVER_BIN_PATH'] + server_args = [ + '--host', args.host, + '--port', args.port, + ] + server_args.extend(['--hf-repo', args.hf_repo]) + server_args.extend(['--hf-file', args.hf_file]) + server_args.extend(['--n-gpu-layers', args.n_gpu_layers]) + server_args.extend(['--ctx-size', args.ctx_size]) + server_args.extend(['--parallel', args.parallel]) + server_args.extend(['--batch-size', args.batch_size]) + server_args.extend(['--ubatch-size', args.ubatch_size]) + server_args.extend(['--n-predict', args.max_tokens * 2]) + server_args.append('--cont-batching') + server_args.append('--metrics') + server_args.append('--flash-attn') + args = [str(arg) for arg in [server_path, *server_args]] + print(f"bench: starting server with: {' '.join(args)}") + pkwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE + } + server_process = subprocess.Popen( + args, + **pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue] + + def server_log(in_stream, out_stream): + for line in iter(in_stream.readline, b''): + print(line.decode('utf-8'), end='', file=out_stream) + + thread_stdout = threading.Thread(target=server_log, args=(server_process.stdout, sys.stdout)) + thread_stdout.start() + thread_stderr = threading.Thread(target=server_log, args=(server_process.stderr, sys.stderr)) + thread_stderr.start() + + return server_process + + +def is_server_listening(server_fqdn, server_port): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + result = sock.connect_ex((server_fqdn, server_port)) + _is_server_listening = result == 0 + if _is_server_listening: + print(f"server is listening on {server_fqdn}:{server_port}...") + return _is_server_listening + + +def is_server_ready(server_fqdn, server_port): + url = f"http://{server_fqdn}:{server_port}/health" + response = requests.get(url) + return response.status_code == 200 + + +def escape_metric_name(metric_name): + return re.sub('[^A-Z0-9]', '_', metric_name.upper()) + + +if __name__ == '__main__': + main() diff --git a/llama.cpp/tools/server/bench/prometheus.yml b/llama.cpp/tools/server/bench/prometheus.yml new file mode 100644 index 0000000..b15ee52 --- /dev/null +++ b/llama.cpp/tools/server/bench/prometheus.yml @@ -0,0 +1,9 @@ +global: + scrape_interval: 10s + external_labels: + llamacpp: 'server' + +scrape_configs: + - job_name: 'llama.cpp server' + static_configs: + - targets: ['localhost:8080'] diff --git a/llama.cpp/tools/server/bench/requirements.txt b/llama.cpp/tools/server/bench/requirements.txt new file mode 100644 index 0000000..66ed226 --- /dev/null +++ b/llama.cpp/tools/server/bench/requirements.txt @@ -0,0 +1,2 @@ +matplotlib +requests diff --git a/llama.cpp/tools/server/bench/script.js b/llama.cpp/tools/server/bench/script.js new file mode 100644 index 0000000..2772bee --- /dev/null +++ b/llama.cpp/tools/server/bench/script.js @@ -0,0 +1,162 @@ +import sse from 'k6/x/sse' +import {check, sleep} from 'k6' +import {SharedArray} from 'k6/data' +import {Counter, Rate, Trend} from 'k6/metrics' +import exec from 'k6/execution'; + +// Server chat completions prefix +const 
server_url = __ENV.SERVER_BENCH_URL ? __ENV.SERVER_BENCH_URL : 'http://localhost:8080/v1' + +// Number of total prompts in the dataset - default 10m / 10 seconds/request * number of users +const n_prompt = __ENV.SERVER_BENCH_N_PROMPTS ? parseInt(__ENV.SERVER_BENCH_N_PROMPTS) : 600 / 10 * 8 + +// Model name to request +const model = __ENV.SERVER_BENCH_MODEL_ALIAS ? __ENV.SERVER_BENCH_MODEL_ALIAS : 'my-model' + +// Dataset path +const dataset_path = __ENV.SERVER_BENCH_DATASET ? __ENV.SERVER_BENCH_DATASET : './ShareGPT_V3_unfiltered_cleaned_split.json' + +// Max tokens to predict +const max_tokens = __ENV.SERVER_BENCH_MAX_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_TOKENS) : 512 + +// Max prompt tokens +const n_prompt_tokens = __ENV.SERVER_BENCH_MAX_PROMPT_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_PROMPT_TOKENS) : 1024 + +// Max slot context +const n_ctx_slot = __ENV.SERVER_BENCH_MAX_CONTEXT ? parseInt(__ENV.SERVER_BENCH_MAX_CONTEXT) : 2048 + +export function setup() { + console.info(`Benchmark config: server_url=${server_url} n_prompt=${n_prompt} model=${model} dataset_path=${dataset_path} max_tokens=${max_tokens}`) +} + +const data = new SharedArray('conversations', function () { + const tokenizer = (message) => message.split(/[\s,'".?]/) + + return JSON.parse(open(dataset_path)) + // Filter out the conversations with less than 2 turns. + .filter(data => data["conversations"].length >= 2) + .filter(data => data["conversations"][0]["from"] === "human") + .map(data => { + return { + prompt: data["conversations"][0]["value"], + n_prompt_tokens: tokenizer(data["conversations"][0]["value"]).length, + n_completion_tokens: tokenizer(data["conversations"][1]["value"]).length, + } + }) + // Filter out too short sequences + .filter(conv => conv.n_prompt_tokens >= 4 && conv.n_completion_tokens >= 4) + // Filter out too long sequences. 
+ .filter(conv => conv.n_prompt_tokens <= n_prompt_tokens && conv.n_prompt_tokens + conv.n_completion_tokens <= n_ctx_slot) + // Keep only first n prompts + .slice(0, n_prompt) +}) + +const llamacpp_prompt_tokens = new Trend('llamacpp_prompt_tokens') +const llamacpp_completion_tokens = new Trend('llamacpp_completion_tokens') + +const llamacpp_tokens_second = new Trend('llamacpp_tokens_second') +const llamacpp_prompt_processing_second = new Trend('llamacpp_prompt_processing_second') +const llamacpp_emit_first_token_second = new Trend('llamacpp_emit_first_token_second') + +const llamacpp_prompt_tokens_total_counter = new Counter('llamacpp_prompt_tokens_total_counter') +const llamacpp_completion_tokens_total_counter = new Counter('llamacpp_completion_tokens_total_counter') + +const llamacpp_completions_truncated_rate = new Rate('llamacpp_completions_truncated_rate') +const llamacpp_completions_stop_rate = new Rate('llamacpp_completions_stop_rate') + +export const options = { + thresholds: { + llamacpp_completions_truncated_rate: [ + // more than 80% of truncated input will abort the test + {threshold: 'rate < 0.8', abortOnFail: true, delayAbortEval: '1m'}, + ], + }, + duration: '10m', + vus: 8, +} + +export default function () { + const conversation = data[exec.scenario.iterationInInstance % data.length] + const payload = { + "messages": [ + { + "role": "system", + "content": "You are ChatGPT, an AI assistant.", + }, + { + "role": "user", + "content": conversation.prompt, + } + ], + "model": model, + "stream": true, + "stream_options": { + "include_usage": true, // False to be supported in llama.cpp server + }, + "seed": 42, + "max_tokens": max_tokens, + "stop": ["<|im_end|>"] // This is temporary for phi-2 base (i.e. not instructed) since the server expects that the model always to emit BOS + } + + const params = {method: 'POST', body: JSON.stringify(payload)}; + + const startTime = new Date() + let promptEvalEndTime = null + let prompt_tokens = 0 + let completions_tokens = 0 + let finish_reason = null + const res = sse.open(`${server_url}/chat/completions`, params, function (client) { + client.on('event', function (event) { + if (promptEvalEndTime == null) { + promptEvalEndTime = new Date() + llamacpp_emit_first_token_second.add((promptEvalEndTime - startTime) / 1.e3) + } + + if (event.data === '[DONE]' || event.data === '') { + return + } + + let chunk = JSON.parse(event.data) + + if (chunk.choices && chunk.choices.length > 0) { + let choice = chunk.choices[0] + if (choice.finish_reason) { + finish_reason = choice.finish_reason + } + } + + if (chunk.usage) { + prompt_tokens = chunk.usage.prompt_tokens + llamacpp_prompt_tokens.add(prompt_tokens) + llamacpp_prompt_tokens_total_counter.add(prompt_tokens) + + completions_tokens = chunk.usage.completion_tokens + llamacpp_completion_tokens.add(completions_tokens) + llamacpp_completion_tokens_total_counter.add(completions_tokens) + } + }) + + client.on('error', function (e) { + console.log('An unexpected error occurred: ', e.error()); + throw e; + }) + }) + + check(res, {'success completion': (r) => r.status === 200}) + + const endTime = new Date() + + const promptEvalTime = promptEvalEndTime - startTime + if (promptEvalTime > 0) { + llamacpp_prompt_processing_second.add(prompt_tokens / (promptEvalEndTime - startTime) * 1.e3) + } + + const completion_time = endTime - promptEvalEndTime + if (completions_tokens > 0 && completion_time > 0) { + llamacpp_tokens_second.add(completions_tokens / completion_time * 1.e3) + } + 
llamacpp_completions_truncated_rate.add(finish_reason === 'length') + llamacpp_completions_stop_rate.add(finish_reason === 'stop') + + sleep(0.3) +} diff --git a/llama.cpp/tools/server/chat-llama2.sh b/llama.cpp/tools/server/chat-llama2.sh new file mode 100755 index 0000000..450445f --- /dev/null +++ b/llama.cpp/tools/server/chat-llama2.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +API_URL="${API_URL:-http://127.0.0.1:8080}" + +CHAT=( + "Hello, Assistant." + "Hello. How may I help you today?" +) + +INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." + +trim() { + shopt -s extglob + set -- "${1##+([[:space:]])}" + printf "%s" "${1%%+([[:space:]])}" +} + +trim_trailing() { + shopt -s extglob + printf "%s" "${1%%+([[:space:]])}" +} + +format_prompt() { + if [[ "${#CHAT[@]}" -eq 0 ]]; then + echo -n "[INST] <>\n${INSTRUCTION}\n<>" + else + LAST_INDEX=$(( ${#CHAT[@]} - 1 )) + echo -n "${CHAT[$LAST_INDEX]}\n[INST] $1 [/INST]" + fi +} + +tokenize() { + curl \ + --silent \ + --request POST \ + --url "${API_URL}/tokenize" \ + --header "Content-Type: application/json" \ + --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \ + | jq '.tokens[]' +} + +N_KEEP=$(tokenize "[INST] <>\n${INSTRUCTION}\n<>" | wc -l) + +chat_completion() { + PROMPT="$(trim_trailing "$(format_prompt "$1")")" + DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{ + prompt: ., + temperature: 0.2, + top_k: 40, + top_p: 0.9, + n_keep: $n_keep, + n_predict: 1024, + stop: ["[INST]"], + stream: true + }')" + + # Create a temporary file to hold the Python output + TEMPFILE=$(mktemp) + + exec 3< <(curl \ + --silent \ + --no-buffer \ + --request POST \ + --url "${API_URL}/completion" \ + --header "Content-Type: application/json" \ + --data-raw "${DATA}") + + python -c " +import json +import sys + +answer = '' +while True: + line = sys.stdin.readline() + if not line: + break + if line.startswith('data: '): + json_content = line[6:].strip() + content = json.loads(json_content)['content'] + sys.stdout.write(content) + sys.stdout.flush() + answer += content + +answer = answer.rstrip('\n') + +# Write the answer to the temporary file +with open('$TEMPFILE', 'w') as f: + f.write(answer) + " <&3 + + exec 3<&- + + # Read the answer from the temporary file + ANSWER=$(cat $TEMPFILE) + + # Clean up the temporary file + rm $TEMPFILE + + printf "\n" + + CHAT+=("$1" "$(trim "$ANSWER")") +} + +while true; do + echo -en "\033[0;32m" # Green color + read -r -e -p "> " QUESTION + echo -en "\033[0m" # Reset color + chat_completion "${QUESTION}" +done diff --git a/llama.cpp/tools/server/chat.mjs b/llama.cpp/tools/server/chat.mjs new file mode 100644 index 0000000..4fef565 --- /dev/null +++ b/llama.cpp/tools/server/chat.mjs @@ -0,0 +1,131 @@ +import * as readline from 'node:readline' +import { stdin, stdout } from 'node:process' +import { readFileSync } from 'node:fs' +import { SchemaConverter } from './public_legacy/json-schema-to-grammar.mjs' + +const args = process.argv.slice(2); +const grammarJsonSchemaFile = args.find( + (_, index) => args[index - 1] === "--grammar-json-schema" +); + +const no_cached_prompt = args.find( + (_, index) => args[index - 1] === "--no-cache-prompt" +) ?? 
"false"; + +const grammarFile = args.find((_, index) => args[index - 1] === "--grammar"); + +// Example usage: function,arguments +const grammarJsonSchemaPropOrder = args.find( + (_, index) => args[index - 1] === "--grammar-json-schema-prop-order" +); +const propOrder = grammarJsonSchemaPropOrder + ? grammarJsonSchemaPropOrder + .split(",") + .reduce((acc, cur, index) => ({ ...acc, [cur]: index }), {}) + : {}; + +let grammar = null +if (grammarJsonSchemaFile) { + let schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8')) + const converter = new SchemaConverter({prop_order: propOrder, allow_fetch: true}) + schema = await converter.resolveRefs(schema, grammarJsonSchemaFile) + converter.visit(schema, '') + grammar = converter.formatGrammar() +} +if (grammarFile) { + grammar = readFileSync(grammarFile, 'utf-8') +} + +// for cached prompt +let slot_id = -1; + +const API_URL = 'http://127.0.0.1:8080' + +const chat = [ + { + human: "Hello, Assistant.", + assistant: "Hello. How may I help you today?" + }, + { + human: "Please tell me the largest city in Europe.", + assistant: "Sure. The largest city in Europe is Moscow, the capital of Russia." + }, +] + +const instruction = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.` + +function format_prompt(question) { + return `${instruction}\n${ + chat.map(m =>`### Human: ${m.human}\n### Assistant: ${m.assistant}`).join("\n") + }\n### Human: ${question}\n### Assistant:` +} + +async function tokenize(content) { + const result = await fetch(`${API_URL}/tokenize`, { + method: 'POST', + body: JSON.stringify({ content }) + }) + + if (!result.ok) { + return [] + } + + return await result.json().tokens +} + +const n_keep = await tokenize(instruction).length + +async function chat_completion(question) { + const result = await fetch(`${API_URL}/completion`, { + method: 'POST', + body: JSON.stringify({ + prompt: format_prompt(question), + temperature: 0.2, + top_k: 40, + top_p: 0.9, + n_keep: n_keep, + n_predict: 256, + cache_prompt: no_cached_prompt === "false", + slot_id: slot_id, + stop: ["\n### Human:"], // stop completion after generating this + grammar, + stream: true, + }) + }) + + if (!result.ok) { + return + } + + let answer = '' + + for await (var chunk of result.body) { + const t = Buffer.from(chunk).toString('utf8') + if (t.startsWith('data: ')) { + const message = JSON.parse(t.substring(6)) + slot_id = message.slot_id + answer += message.content + process.stdout.write(message.content) + if (message.stop) { + if (message.truncated) { + chat.shift() + } + break + } + } + } + + process.stdout.write('\n') + chat.push({ human: question, assistant: answer.trimStart() }) +} + +const rl = readline.createInterface({ input: stdin, output: stdout }); + +const readlineQuestion = (rl, query, options) => new Promise((resolve, reject) => { + rl.question(query, options, resolve) +}); + +while(true) { + const question = await readlineQuestion(rl, '> ') + await chat_completion(question) +} diff --git a/llama.cpp/tools/server/chat.sh b/llama.cpp/tools/server/chat.sh new file mode 100755 index 0000000..84cea2d --- /dev/null +++ b/llama.cpp/tools/server/chat.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +API_URL="${API_URL:-http://127.0.0.1:8080}" + +CHAT=( + "Hello, Assistant." + "Hello. How may I help you today?" + "Please tell me the largest city in Europe." + "Sure. The largest city in Europe is Moscow, the capital of Russia." 
+) + +INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." + +trim() { + shopt -s extglob + set -- "${1##+([[:space:]])}" + printf "%s" "${1%%+([[:space:]])}" +} + +trim_trailing() { + shopt -s extglob + printf "%s" "${1%%+([[:space:]])}" +} + +format_prompt() { + echo -n "${INSTRUCTION}" + printf "\n### Human: %s\n### Assistant: %s" "${CHAT[@]}" "$1" +} + +tokenize() { + curl \ + --silent \ + --request POST \ + --url "${API_URL}/tokenize" \ + --header "Content-Type: application/json" \ + --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \ + | jq '.tokens[]' +} + +N_KEEP=$(tokenize "${INSTRUCTION}" | wc -l) + +chat_completion() { + PROMPT="$(trim_trailing "$(format_prompt "$1")")" + DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{ + prompt: ., + temperature: 0.2, + top_k: 40, + top_p: 0.9, + n_keep: $n_keep, + n_predict: 256, + cache_prompt: true, + stop: ["\n### Human:"], + stream: true + }')" + + ANSWER='' + + while IFS= read -r LINE; do + if [[ $LINE = data:* ]]; then + CONTENT="$(echo "${LINE:5}" | jq -r '.content')" + printf "%s" "${CONTENT}" + ANSWER+="${CONTENT}" + fi + done < <(curl \ + --silent \ + --no-buffer \ + --request POST \ + --url "${API_URL}/completion" \ + --header "Content-Type: application/json" \ + --data-raw "${DATA}") + + printf "\n" + + CHAT+=("$1" "$(trim "$ANSWER")") +} + +while true; do + read -r -e -p "> " QUESTION + chat_completion "${QUESTION}" +done diff --git a/llama.cpp/tools/server/public/index.html.gz b/llama.cpp/tools/server/public/index.html.gz new file mode 100644 index 0000000..e3b06f4 Binary files /dev/null and b/llama.cpp/tools/server/public/index.html.gz differ diff --git a/llama.cpp/tools/server/public/loading.html b/llama.cpp/tools/server/public/loading.html new file mode 100644 index 0000000..c3fd19a --- /dev/null +++ b/llama.cpp/tools/server/public/loading.html @@ -0,0 +1,12 @@ + + + + + + +
+ The model is loading. Please wait.
+ The user interface will appear soon. +
+ + diff --git a/llama.cpp/tools/server/public_legacy/colorthemes.css b/llama.cpp/tools/server/public_legacy/colorthemes.css new file mode 100755 index 0000000..b1e2b8b --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/colorthemes.css @@ -0,0 +1,402 @@ +@import url("theme-snowstorm.css"); +@import url("theme-polarnight.css"); +@import url("theme-ketivah.css"); +@import url("theme-mangotango.css"); +@import url("theme-playground.css"); +@import url("theme-beeninorder.css"); + +:root { +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(217.5, 26.7%, 94.1%); + --primary-color-1-hue: 217.5; + --primary-color-1-saturation: 26.7%; + --primary-color-1-lightness: 94.1%; + +--primary-color-2: hsl(218.2, 26.8%, 92.0%); + --primary-color-2-hue: 218.2; + --primary-color-2-saturation: 26.8%; + --primary-color-2-lightness: 92.0%; + +--primary-color-3: hsl(218.8, 27.9%, 88.0%); + --primary-color-3-hue: 218.8; + --primary-color-3-saturation: 27.9%; + --primary-color-3-lightness: 88.0%; + +--primary-color-4: hsl(218.8, 18.3%, 81.8%); + --primary-color-4-hue: 218.8; + --primary-color-4-saturation: 18.3%; + --primary-color-4-lightness: 81.8%; + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(220.0, 16.4%, 21.6%); + --secondary-color-1-hue: 220.0; + --secondary-color-1-saturation: 16.4%; + --secondary-color-1-lightness: 21.6%; + +--secondary-color-2: hsl(221.7, 16.3%, 27.6%); + --secondary-color-2-hue: 221.7; + --secondary-color-2-saturation: 16.3%; + --secondary-color-2-lightness: 27.6%; + +--secondary-color-3: hsl(220.0, 16.8%, 31.6%); + --secondary-color-3-hue: 220.0; + --secondary-color-3-saturation: 16.8%; + --secondary-color-3-lightness: 31.6%; + +--secondary-color-4: hsl(220.0, 16.5%, 35.7%); + --secondary-color-4-hue: 220.0; + --secondary-color-4-saturation: 16.5%; + --secondary-color-4-lightness: 35.7%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%); + --theme-nuance-color-1-hue: 178.7; + --theme-nuance-color-1-saturation: 25.1%; + --theme-nuance-color-1-lightness: 64.9%; + +--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%); + --theme-nuance-color-2-hue: 193.3; + --theme-nuance-color-2-saturation: 43.4%; + --theme-nuance-color-2-lightness: 67.5%; + +--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%); + --theme-nuance-color-3-hue: 210.0; + --theme-nuance-color-3-saturation: 34.0%; + --theme-nuance-color-3-lightness: 63.1%; + +--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%); + --theme-nuance-color-4-hue: 213.1; + --theme-nuance-color-4-saturation: 32.0%; + --theme-nuance-color-4-lightness: 52.2%; + + + +/* ----------- ROYGP COLORS ------------------ */ +--theme-red-color: hsl(32.5, 80%, 50%); +--theme-orange-color: hsl(32.5, 70%, 45%); +--theme-yellow-color: hsl(40.0, 0.6%, 73.3%); +--theme-green-color: hsl(92.4, 27.8%, 64.7%); +--theme-purple-color: hsl(311.1, 20.2%, 63.1%); + + + +/* ------------------------------------------- */ +--background-color-1: var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: 
var(--secondary-color-3); + +--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--theme-nuance-color-3); +--ui-range-thumb-border: var(--ui-ranger-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ +--button-alert-text-hover: var(--primary-color-1); +--button-alert-color-hover: var(--theme-orange-color); +--button-alert-border-hover: var(--theme-orange-color); + +--button-alert-text-active: var(--primary-color-1); +--button-alert-color-active: var(--theme-red-color); +--button-alert-border-active: var(--theme-red-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: var(--secondary-color-1); +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(217.5, + calc(var(--secondary-color-1-saturation) + 35%), + calc(var(--secondary-color-1-lightness) - 30%)); + +--button-primary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 35%)); + +--button-primary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + +--button-primary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 50%)); + +--button-secondary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + +--button-secondary-border: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + +--button-secondary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + + +/* ---------active--------- */ +--button-secondary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) + 40%), + calc(var(--theme-nuance-color-3-lightness) - 55%)); + +--button-secondary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-secondary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + + + +/* ---------- TERTIARY BUTTONS 
--------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +/* ---------hover---------- */ +--button-tertiary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); +} + +/* + +.theme-template { + + + If light theme: should go from bright to darker + If dark theme: should go from dark to brighter + ideally this should not be anything but steps of + gray or slightly variants from it + + --primary-color-1: #2E3440; + --primary-color-2: #3B4252; + --primary-color-3: #434C5E; + --primary-color-4: #4C566A; + + + + If light theme: should go from dark to brighter + If dark theme: should go from bright to darker + ideally this should not be anything but steps of + gray or slightly variants from it + + --secondary-color-1: #ECEFF4; + --secondary-color-2: #E5E9F0; + --secondary-color-3: #D8DEE9; + --secondary-color-4: #C8CED9; + + + + Choose wisely nuance colors. It is not easy to find + 4 harmonizing nuance colors. But keep in mind, that + only one accent color could work too. + + --theme-nuance-color-1: #8FBCBB; + --theme-nuance-color-2: #88C0D0; + --theme-nuance-color-3: #81A1C1; + --theme-nuance-color-4: #5E81AC; + + + + adapt the color red, orange, yellow, green, + purple to the 'mood' of your overall design + e.g is it low-contrast? vibrant? dynamic? 
etc + + --theme-red-color: #BF616A; + --theme-orange-color: #D08770; + --theme-yellow-color: #EBCB8B; + --theme-green-color: #A3BE8C; + --theme-purple-color: #B48EAD; + + + +NOTE: comment all those line `--- ...` out +------------------------------------------------ +--background-color-1: +--background-color-2: +--background-color-3: +--background-color-4: + +--border-color-1: +--border-color-2: +--border-color-3: + +--border-focus-color: +--border-focus-shadow: + +--text-color-plain: +--text-color-subtile-1: +--text-color-subtile-2: + +--code-background-color: +--code-text-color: + +--ui-range-thumb-color: +--ui-range-thumb-border: + +--textarea-border-color: + + + +------------------------------------------- +--button-alert-text-hover: +--button-alert-color-hover: +--button-alert-border-hover: + +--button-alert-text-active: +--button-alert-color-active: +--button-alert-border-active: + + + +----------- PRIMARY ----------------------- +--button should immediately catch the eye-- + +--button-primary-text: +--button-primary-color: +--button-primary-border: + + +---------hover---------- +--button-primary-text-hover: +--button-primary-color-hover: +--button-primary-border-hover: + + +---------active--------- +--button-primary-text-active: +--button-primary-color-active: +--button-primary-border-active: + + + +------------ SECONDARY ------------------------ +--button should NOT immediately catch the eye-- + +--button-secondary-text: +--button-secondary-color: +--button-secondary-border: + + +---------hover---------- +--button-secondary-text-hover: +--button-secondary-color-hover: +--button-secondary-border-hover: + + +---------active--------- +--button-secondary-text-active: +--button-secondary-color-active: +--button-secondary-border-active: + + + +---------- TERTIARY ----------------------- +---------- disabled buttons --------------- +--button-tertiary-text: +--button-tertiary-color: +--button-tertiary-border: + + +---------hover---------- +--button-tertiary-text: +--button-tertiary-color: +--button-tertiary-border: + +} + +*/ diff --git a/llama.cpp/tools/server/public_legacy/completion.js b/llama.cpp/tools/server/public_legacy/completion.js new file mode 100644 index 0000000..30df7c2 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/completion.js @@ -0,0 +1,209 @@ +const paramDefaults = { + stream: true, + n_predict: 500, + temperature: 0.2, + stop: [""] +}; + +let generation_settings = null; + + +// Completes the prompt as a generator. Recommended for most use cases. +// +// Example: +// +// import { llama } from '/completion.js' +// +// const request = llama("Tell me a joke", {n_predict: 800}) +// for await (const chunk of request) { +// document.write(chunk.data.content) +// } +// +export async function* llama(prompt, params = {}, config = {}) { + let controller = config.controller; + const api_url = config.api_url?.replace(/\/+$/, '') || ""; + + if (!controller) { + controller = new AbortController(); + } + + const completionParams = { ...paramDefaults, ...params, prompt }; + + const response = await fetch(`${api_url}${config.endpoint || '/completion'}`, { + method: 'POST', + body: JSON.stringify(completionParams), + headers: { + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'Accept': 'text/event-stream', + ...(params.api_key ? 
{'Authorization': `Bearer ${params.api_key}`} : {}) + }, + signal: controller.signal, + }); + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + + let content = ""; + let leftover = ""; // Buffer for partially read lines + + try { + let cont = true; + + while (cont) { + const result = await reader.read(); + if (result.done) { + break; + } + + // Add any leftover data to the current chunk of data + const text = leftover + decoder.decode(result.value); + + // Check if the last character is a line break + const endsWithLineBreak = text.endsWith('\n'); + + // Split the text into lines + let lines = text.split('\n'); + + // If the text doesn't end with a line break, then the last line is incomplete + // Store it in leftover to be added to the next chunk of data + if (!endsWithLineBreak) { + leftover = lines.pop(); + } else { + leftover = ""; // Reset leftover if we have a line break at the end + } + + // Parse all sse events and add them to result + const regex = /^(\S+):\s(.*)$/gm; + for (const line of lines) { + const match = regex.exec(line); + if (match) { + result[match[1]] = match[2]; + if (result.data === '[DONE]') { + cont = false; + break; + } + + // since we know this is llama.cpp, let's just decode the json in data + if (result.data) { + result.data = JSON.parse(result.data); + content += result.data.content; + + // yield + yield result; + + // if we got a stop token from server, we will break here + if (result.data.stop) { + if (result.data.generation_settings) { + generation_settings = result.data.generation_settings; + } + cont = false; + break; + } + } + if (result.error) { + try { + result.error = JSON.parse(result.error); + if (result.error.message.includes('slot unavailable')) { + // Throw an error to be caught by upstream callers + throw new Error('slot unavailable'); + } else { + console.error(`llama.cpp error [${result.error.code} - ${result.error.type}]: ${result.error.message}`); + } + } catch(e) { + console.error(`llama.cpp error ${result.error}`) + } + } + } + } + } + } catch (e) { + if (e.name !== 'AbortError') { + console.error("llama error: ", e); + } + throw e; + } + finally { + controller.abort(); + } + + return content; +} + +// Call llama, return an event target that you can subscribe to +// +// Example: +// +// import { llamaEventTarget } from '/completion.js' +// +// const conn = llamaEventTarget(prompt) +// conn.addEventListener("message", (chunk) => { +// document.write(chunk.detail.content) +// }) +// +export const llamaEventTarget = (prompt, params = {}, config = {}) => { + const eventTarget = new EventTarget(); + (async () => { + let content = ""; + for await (const chunk of llama(prompt, params, config)) { + if (chunk.data) { + content += chunk.data.content; + eventTarget.dispatchEvent(new CustomEvent("message", { detail: chunk.data })); + } + if (chunk.data.generation_settings) { + eventTarget.dispatchEvent(new CustomEvent("generation_settings", { detail: chunk.data.generation_settings })); + } + if (chunk.data.timings) { + eventTarget.dispatchEvent(new CustomEvent("timings", { detail: chunk.data.timings })); + } + } + eventTarget.dispatchEvent(new CustomEvent("done", { detail: { content } })); + })(); + return eventTarget; +} + +// Call llama, return a promise that resolves to the completed text. 
This does not support streaming +// +// Example: +// +// llamaPromise(prompt).then((content) => { +// document.write(content) +// }) +// +// or +// +// const content = await llamaPromise(prompt) +// document.write(content) +// +export const llamaPromise = (prompt, params = {}, config = {}) => { + return new Promise(async (resolve, reject) => { + let content = ""; + try { + for await (const chunk of llama(prompt, params, config)) { + content += chunk.data.content; + } + resolve(content); + } catch (error) { + reject(error); + } + }); +}; + +/** + * (deprecated) + */ +export const llamaComplete = async (params, controller, callback) => { + for await (const chunk of llama(params.prompt, params, { controller })) { + callback(chunk); + } +} + +// Get the model info from the server. This is useful for getting the context window and so on. +export const llamaModelInfo = async (config = {}) => { + if (!generation_settings) { + const api_url = config.api_url?.replace(/\/+$/, '') || ""; + const props = await fetch(`${api_url}/props`).then(r => r.json()); + generation_settings = props.default_generation_settings; + } + return generation_settings; +} diff --git a/llama.cpp/tools/server/public_legacy/favicon.ico b/llama.cpp/tools/server/public_legacy/favicon.ico new file mode 100644 index 0000000..89e154a Binary files /dev/null and b/llama.cpp/tools/server/public_legacy/favicon.ico differ diff --git a/llama.cpp/tools/server/public_legacy/index-new.html b/llama.cpp/tools/server/public_legacy/index-new.html new file mode 100644 index 0000000..e2f39d6 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/index-new.html @@ -0,0 +1,1190 @@ + + + + + + + + + llama.cpp - chat + + + + + + + + + +
+ +
+
+ + + diff --git a/llama.cpp/tools/server/public_legacy/index.html b/llama.cpp/tools/server/public_legacy/index.html new file mode 100644 index 0000000..98d56ea --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/index.html @@ -0,0 +1,1301 @@ + + + + + + llama.cpp - chat + + + + + + + +
+ +
+
+ + + diff --git a/llama.cpp/tools/server/public_legacy/index.js b/llama.cpp/tools/server/public_legacy/index.js new file mode 100644 index 0000000..32ec6e9 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/index.js @@ -0,0 +1 @@ +const t=Symbol.for("preact-signals");function n(){if(r>1){r--;return}let t,n=!1;while(void 0!==i){let _=i;i=void 0;u++;while(void 0!==_){const i=_.o;_.o=void 0;_.f&=-3;if(!(8&_.f)&&h(_))try{_.c()}catch(e){if(!n){t=e;n=!0}}_=i}}u=0;r--;if(n)throw t}function e(t){if(r>0)return t();r++;try{return t()}finally{n()}}let _,i;function o(t){const n=_;_=void 0;try{return t()}finally{_=n}}let r=0,u=0,l=0;function s(t){if(void 0===_)return;let n=t.n;if(void 0===n||n.t!==_){n={i:0,S:t,p:_.s,n:void 0,t:_,e:void 0,x:void 0,r:n};if(void 0!==_.s)_.s.n=n;_.s=n;t.n=n;if(32&_.f)t.S(n);return n}else if(-1===n.i){n.i=0;if(void 0!==n.n){n.n.p=n.p;if(void 0!==n.p)n.p.n=n.n;n.p=_.s;n.n=void 0;_.s.n=n;_.s=n}return n}}function f(t){this.v=t;this.i=0;this.n=void 0;this.t=void 0}f.prototype.brand=t;f.prototype.h=function(){return!0};f.prototype.S=function(t){if(this.t!==t&&void 0===t.e){t.x=this.t;if(void 0!==this.t)this.t.e=t;this.t=t}};f.prototype.U=function(t){if(void 0!==this.t){const n=t.e,e=t.x;if(void 0!==n){n.x=e;t.e=void 0}if(void 0!==e){e.e=n;t.x=void 0}if(t===this.t)this.t=e}};f.prototype.subscribe=function(t){return k(()=>{const n=this.value,e=_;_=void 0;try{t(n)}finally{_=e}})};f.prototype.valueOf=function(){return this.value};f.prototype.toString=function(){return this.value+""};f.prototype.toJSON=function(){return this.value};f.prototype.peek=function(){const t=_;_=void 0;try{return this.value}finally{_=t}};Object.defineProperty(f.prototype,"value",{get(){const t=s(this);if(void 0!==t)t.i=this.i;return this.v},set(t){if(t!==this.v){if(u>100)throw new Error("Cycle detected");this.v=t;this.i++;l++;r++;try{for(let t=this.t;void 0!==t;t=t.x)t.t.N()}finally{n()}}}});function c(t){return new f(t)}function h(t){for(let n=t.s;void 0!==n;n=n.n)if(n.S.i!==n.i||!n.S.h()||n.S.i!==n.i)return!0;return!1}function a(t){for(let n=t.s;void 0!==n;n=n.n){const e=n.S.n;if(void 0!==e)n.r=e;n.S.n=n;n.i=-1;if(void 0===n.n){t.s=n;break}}}function p(t){let n,e=t.s;while(void 0!==e){const t=e.p;if(-1===e.i){e.S.U(e);if(void 0!==t)t.n=e.n;if(void 0!==e.n)e.n.p=t}else n=e;e.S.n=e.r;if(void 0!==e.r)e.r=void 0;e=t}t.s=n}function d(t){f.call(this,void 0);this.x=t;this.s=void 0;this.g=l-1;this.f=4}(d.prototype=new f).h=function(){this.f&=-3;if(1&this.f)return!1;if(32==(36&this.f))return!0;this.f&=-5;if(this.g===l)return!0;this.g=l;this.f|=1;if(this.i>0&&!h(this)){this.f&=-2;return!0}const t=_;try{a(this);_=this;const t=this.x();if(16&this.f||this.v!==t||0===this.i){this.v=t;this.f&=-17;this.i++}}catch(t){this.v=t;this.f|=16;this.i++}_=t;p(this);this.f&=-2;return!0};d.prototype.S=function(t){if(void 0===this.t){this.f|=36;for(let t=this.s;void 0!==t;t=t.n)t.S.S(t)}f.prototype.S.call(this,t)};d.prototype.U=function(t){if(void 0!==this.t){f.prototype.U.call(this,t);if(void 0===this.t){this.f&=-33;for(let t=this.s;void 0!==t;t=t.n)t.S.U(t)}}};d.prototype.N=function(){if(!(2&this.f)){this.f|=6;for(let t=this.t;void 0!==t;t=t.x)t.t.N()}};Object.defineProperty(d.prototype,"value",{get(){if(1&this.f)throw new Error("Cycle detected");const t=s(this);this.h();if(void 0!==t)t.i=this.i;if(16&this.f)throw this.v;return this.v}});function v(t){return new d(t)}function y(t){const e=t.u;t.u=void 0;if("function"==typeof e){r++;const i=_;_=void 0;try{e()}catch(n){t.f&=-2;t.f|=8;m(t);throw n}finally{_=i;n()}}}function 
m(t){for(let n=t.s;void 0!==n;n=n.n)n.S.U(n);t.x=void 0;t.s=void 0;y(t)}function g(t){if(_!==this)throw new Error("Out-of-order effect");p(this);_=t;this.f&=-2;if(8&this.f)m(this);n()}function b(t){this.x=t;this.u=void 0;this.s=void 0;this.o=void 0;this.f=32}b.prototype.c=function(){const t=this.S();try{if(8&this.f)return;if(void 0===this.x)return;const n=this.x();if("function"==typeof n)this.u=n}finally{t()}};b.prototype.S=function(){if(1&this.f)throw new Error("Cycle detected");this.f|=1;this.f&=-9;y(this);a(this);r++;const t=_;_=this;return g.bind(this,t)};b.prototype.N=function(){if(!(2&this.f)){this.f|=2;this.o=i;i=this}};b.prototype.d=function(){this.f|=8;if(!(1&this.f))m(this)};function k(t){const n=new b(t);try{n.c()}catch(t){n.d();throw t}return n.d.bind(n)}var w,S,x,C,U,E,H,P,N,$,T,D,M={},A=[],F=/acit|ex(?:s|g|n|p|$)|rph|grid|ows|mnc|ntw|ine[ch]|zoo|^ord|itera/i,W=Array.isArray;function L(t,n){for(var e in n)t[e]=n[e];return t}function O(t){t&&t.parentNode&&t.parentNode.removeChild(t)}function R(t,n,e){var _,i,o,r={};for(o in n)"key"==o?_=n[o]:"ref"==o?i=n[o]:r[o]=n[o];if(arguments.length>2&&(r.children=arguments.length>3?w.call(arguments,2):e),"function"==typeof t&&null!=t.defaultProps)for(o in t.defaultProps)void 0===r[o]&&(r[o]=t.defaultProps[o]);return I(t,r,_,i,null)}function I(t,n,e,_,i){var o={type:t,props:n,key:e,ref:_,__k:null,__:null,__b:0,__e:null,__d:void 0,__c:null,constructor:void 0,__v:null==i?++x:i,__i:-1,__u:0};return null==i&&null!=S.vnode&&S.vnode(o),o}function V(){return{current:null}}function j(t){return t.children}function q(t,n){this.props=t,this.context=n}function B(t,n){if(null==n)return t.__?B(t.__,t.__i+1):null;for(var e;nn&&U.sort(P));J.__r=0}function K(t,n,e,_,i,o,r,u,l,s,f){var c,h,a,p,d,v=_&&_.__k||A,y=n.length;for(e.__d=l,Q(e,n,v),l=e.__d,c=0;c0?I(i.type,i.props,i.key,i.ref?i.ref:null,i.__v):i).__=t,i.__b=t.__b+1,o=null,-1!==(u=i.__i=Z(i,e,r,f))&&(f--,(o=e[u])&&(o.__u|=131072)),null==o||null===o.__v?(-1==u&&c--,"function"!=typeof i.type&&(i.__u|=65536)):u!==r&&(u==r-1?c--:u==r+1?c++:(u>r?c--:c++,i.__u|=65536))):i=t.__k[_]=null;if(f)for(_=0;_(null!=l&&0==(131072&l.__u)?1:0))for(;r>=0||u=0){if((l=n[r])&&0==(131072&l.__u)&&i==l.key&&o===l.type)return r;r--}if(u2&&(u.children=arguments.length>3?w.call(arguments,2):e),I(t.type,u,_||t.key,i||t.ref,null)}function ht(t,n){var e={__c:n="__cC"+D++,__:t,Consumer:function(t,n){return t.children(n)},Provider:function(t){var e,_;return this.getChildContext||(e=new Set,(_={})[n]=this,this.getChildContext=function(){return _},this.componentWillUnmount=function(){e=null},this.shouldComponentUpdate=function(t){this.props.value!==t.value&&e.forEach((function(t){t.__e=!0,G(t)}))},this.sub=function(t){e.add(t);var n=t.componentWillUnmount;t.componentWillUnmount=function(){e&&e.delete(t),n&&n.call(t)}}),t.children}};return e.Provider.__=e.Consumer.contextType=e}w=A.slice,S={__e:function(t,n,e,_){for(var i,o,r;n=n.__;)if((i=n.__c)&&!i.__)try{if((o=i.constructor)&&null!=o.getDerivedStateFromError&&(i.setState(o.getDerivedStateFromError(t)),r=i.__d),null!=i.componentDidCatch&&(i.componentDidCatch(t,_||{}),r=i.__d),r)return i.__E=i}catch(n){t=n}throw t}},x=0,C=function(t){return null!=t&&null==t.constructor},q.prototype.setState=function(t,n){var e;e=null!=this.__s&&this.__s!==this.state?this.__s:this.__s=L({},this.state),"function"==typeof 
t&&(t=t(L({},e),this.props)),t&&L(e,t),null!=t&&this.__v&&(n&&this._sb.push(n),G(this))},q.prototype.forceUpdate=function(t){this.__v&&(this.__e=!0,t&&this.__h.push(t),G(this))},q.prototype.render=j,U=[],H="function"==typeof Promise?Promise.prototype.then.bind(Promise.resolve()):setTimeout,P=function(t,n){return t.__v.__b-n.__v.__b},J.__r=0,N=0,$=et(!1),T=et(!0),D=0;var at,pt,dt,vt,yt=0,mt=[],gt=S,bt=gt.__b,kt=gt.__r,wt=gt.diffed,St=gt.__c,xt=gt.unmount,Ct=gt.__;function Ut(t,n){gt.__h&>.__h(pt,t,yt||n),yt=0;var e=pt.__H||(pt.__H={__:[],__h:[]});return t>=e.__.length&&e.__.push({}),e.__[t]}function Et(t){return yt=1,Ht(Bt,t)}function Ht(t,n,e){var _=Ut(at++,2);if(_.t=t,!_.__c&&(_.__=[e?e(n):Bt(void 0,n),function(t){var n=_.__N?_.__N[0]:_.__[0],e=_.t(n,t);n!==e&&(_.__N=[e,_.__[1]],_.__c.setState({}))}],_.__c=pt,!pt.u)){var i=function(t,n,e){if(!_.__c.__H)return!0;var i=_.__c.__H.__.filter((function(t){return!!t.__c}));if(i.every((function(t){return!t.__N})))return!o||o.call(this,t,n,e);var r=!1;return i.forEach((function(t){if(t.__N){var n=t.__[0];t.__=t.__N,t.__N=void 0,n!==t.__[0]&&(r=!0)}})),!(!r&&_.__c.props===t)&&(!o||o.call(this,t,n,e))};pt.u=!0;var o=pt.shouldComponentUpdate,r=pt.componentWillUpdate;pt.componentWillUpdate=function(t,n,e){if(this.__e){var _=o;o=void 0,i(t,n,e),o=_}r&&r.call(this,t,n,e)},pt.shouldComponentUpdate=i}return _.__N||_.__}function Pt(t,n){var e=Ut(at++,3);!gt.__s&&qt(e.__H,n)&&(e.__=t,e.i=n,pt.__H.__h.push(e))}function Nt(t,n){var e=Ut(at++,4);!gt.__s&&qt(e.__H,n)&&(e.__=t,e.i=n,pt.__h.push(e))}function $t(t){return yt=5,Dt((function(){return{current:t}}),[])}function Tt(t,n,e){yt=6,Nt((function(){return"function"==typeof t?(t(n()),function(){return t(null)}):t?(t.current=n(),function(){return t.current=null}):void 0}),null==e?e:e.concat(t))}function Dt(t,n){var e=Ut(at++,7);return qt(e.__H,n)&&(e.__=t(),e.__H=n,e.__h=t),e.__}function Mt(t,n){return yt=8,Dt((function(){return t}),n)}function At(t){var n=pt.context[t.__c],e=Ut(at++,9);return e.c=t,n?(null==e.__&&(e.__=!0,n.sub(pt)),n.props.value):t.__}function Ft(t,n){gt.useDebugValue&>.useDebugValue(n?n(t):t)}function Wt(t){var n=Ut(at++,10),e=Et();return n.__=t,pt.componentDidCatch||(pt.componentDidCatch=function(t,_){n.__&&n.__(t,_),e[1](t)}),[e[0],function(){e[1](void 0)}]}function Lt(){var t=Ut(at++,11);if(!t.__){for(var n=pt.__v;null!==n&&!n.__m&&null!==n.__;)n=n.__;var e=n.__m||(n.__m=[0,0]);t.__="P"+e[0]+"-"+e[1]++}return t.__}function Ot(){for(var t;t=mt.shift();)if(t.__P&&t.__H)try{t.__H.__h.forEach(Vt),t.__H.__h.forEach(jt),t.__H.__h=[]}catch(n){t.__H.__h=[],gt.__e(n,t.__v)}}gt.__b=function(t){pt=null,bt&&bt(t)},gt.__=function(t,n){t&&n.__k&&n.__k.__m&&(t.__m=n.__k.__m),Ct&&Ct(t,n)},gt.__r=function(t){kt&&kt(t),at=0;var n=(pt=t.__c).__H;n&&(dt===pt?(n.__h=[],pt.__h=[],n.__.forEach((function(t){t.__N&&(t.__=t.__N),t.i=t.__N=void 0}))):(n.__h.forEach(Vt),n.__h.forEach(jt),n.__h=[],at=0)),dt=pt},gt.diffed=function(t){wt&&wt(t);var n=t.__c;n&&n.__H&&(n.__H.__h.length&&(1!==mt.push(n)&&vt===gt.requestAnimationFrame||((vt=gt.requestAnimationFrame)||It)(Ot)),n.__H.__.forEach((function(t){t.i&&(t.__H=t.i),t.i=void 0}))),dt=pt=null},gt.__c=function(t,n){n.some((function(t){try{t.__h.forEach(Vt),t.__h=t.__h.filter((function(t){return!t.__||jt(t)}))}catch(r){n.some((function(t){t.__h&&(t.__h=[])})),n=[],gt.__e(r,t.__v)}})),St&&St(t,n)},gt.unmount=function(t){xt&&xt(t);var n,e=t.__c;e&&e.__H&&(e.__H.__.forEach((function(t){try{Vt(t)}catch(t){n=t}})),e.__H=void 0,n&>.__e(n,e.__v))};var Rt="function"==typeof 
requestAnimationFrame;function It(t){var n,e=function(){clearTimeout(_),Rt&&cancelAnimationFrame(n),setTimeout(t)},_=setTimeout(e,100);Rt&&(n=requestAnimationFrame(e))}function Vt(t){var n=pt,e=t.__c;"function"==typeof e&&(t.__c=void 0,e()),pt=n}function jt(t){var n=pt;t.__c=t.__(),pt=n}function qt(t,n){return!t||t.length!==n.length||n.some((function(n,e){return n!==t[e]}))}function Bt(t,n){return"function"==typeof n?n(t):n}function zt(t,n){S[t]=n.bind(null,S[t]||(()=>{}))}let Gt,Jt;function Kt(t){if(Jt)Jt();Jt=t&&t.S()}function Qt({data:t}){const n=Yt(t);n.value=t;const e=Dt(()=>{let t=this.__v;while(t=t.__)if(t.__c){t.__c.__$f|=4;break}this.__$u.c=()=>{var t;if(!C(e.peek())&&3===(null==(t=this.base)?void 0:t.nodeType))this.base.data=e.peek();else{this.__$f|=1;this.setState({})}};return v(()=>{let t=n.value.value;return 0===t?0:!0===t?"":t||""})},[]);return e.value}Qt.displayName="_st";Object.defineProperties(f.prototype,{constructor:{configurable:!0,value:void 0},type:{configurable:!0,value:Qt},props:{configurable:!0,get(){return{data:this}}},__b:{configurable:!0,value:1}});zt("__b",(t,n)=>{if("string"==typeof n.type){let t,e=n.props;for(let _ in e){if("children"===_)continue;let i=e[_];if(i instanceof f){if(!t)n.__np=t={};t[_]=i;e[_]=i.peek()}}}t(n)});zt("__r",(t,n)=>{Kt();let e,_=n.__c;if(_){_.__$f&=-2;e=_.__$u;if(void 0===e)_.__$u=e=function(t){let n;k((function(){n=this}));n.c=()=>{_.__$f|=1;_.setState({})};return n}()}Gt=_;Kt(e);t(n)});zt("__e",(t,n,e,_)=>{Kt();Gt=void 0;t(n,e,_)});zt("diffed",(t,n)=>{Kt();Gt=void 0;let e;if("string"==typeof n.type&&(e=n.__e)){let t=n.__np,_=n.props;if(t){let n=e.U;if(n)for(let e in n){let _=n[e];if(void 0!==_&&!(e in t)){_.d();n[e]=void 0}}else{n={};e.U=n}for(let i in t){let o=n[i],r=t[i];if(void 0===o){o=Xt(e,i,r,_);n[i]=o}else o.o(r,_)}}}t(n)});function Xt(t,n,e,_){const i=n in t&&void 0===t.ownerSVGElement,o=c(e);return{o:(t,n)=>{o.value=t;_=n},d:k(()=>{const e=o.value.value;if(_[n]!==e){_[n]=e;if(i)t[n]=e;else if(e)t.setAttribute(n,e);else t.removeAttribute(n)}})}}zt("unmount",(t,n)=>{if("string"==typeof n.type){let t=n.__e;if(t){const n=t.U;if(n){t.U=void 0;for(let t in n){let e=n[t];if(e)e.d()}}}}else{let t=n.__c;if(t){const n=t.__$u;if(n){t.__$u=void 0;n.d()}}}t(n)});zt("__h",(t,n,e,_)=>{if(_<3||9===_)n.__$f|=2;t(n,e,_)});q.prototype.shouldComponentUpdate=function(t,n){const e=this.__$u;if(!(e&&void 0!==e.s||4&this.__$f))return!0;if(3&this.__$f)return!0;for(let _ in n)return!0;for(let _ in t)if("__source"!==_&&t[_]!==this.props[_])return!0;for(let _ in this.props)if(!(_ in t))return!0;return!1};function Yt(t){return Dt(()=>c(t),[])}function Zt(t){const n=$t(t);n.current=t;Gt.__$f|=4;return Dt(()=>v(()=>n.current()),[])}function tn(t){const n=$t(t);n.current=t;Pt(()=>k(()=>n.current()),[])}var nn=function(t,n,e,_){var i;n[0]=0;for(var o=1;o=5&&((i||!t&&5===_)&&(r.push(_,0,i,e),_=6),t&&(r.push(_,t,0,e),_=6)),i=""},l=0;l"===n?(_=1,i=""):i=n+i[0]:o?n===o?o="":i+=n:'"'===n||"'"===n?o=n:">"===n?(u(),_=1):_&&("="===n?(_=5,e=i,i=""):"/"===n&&(_<5||">"===t[l][s+1])?(u(),3===_&&(r=r[0]),_=r,(r=r[0]).push(2,0,_),_=0):" "===n||"\t"===n||"\n"===n||"\r"===n?(u(),_=2):i+=n),3===_&&"!--"===i&&(_=4,r=r[0])}return u(),r}(t)),n),arguments,[])).length>1?n:n[0]}var on=_n.bind(R);export{q as Component,j as Fragment,f as Signal,e as batch,ct as cloneElement,v as computed,ht as createContext,R as createElement,V as createRef,k as effect,R as h,on as html,ft as hydrate,C as isValidElement,S as options,st as render,c as signal,Y as toChildArray,o as untracked,Mt as 
useCallback,Zt as useComputed,At as useContext,Ft as useDebugValue,Pt as useEffect,Wt as useErrorBoundary,Lt as useId,Tt as useImperativeHandle,Nt as useLayoutEffect,Dt as useMemo,Ht as useReducer,$t as useRef,Yt as useSignal,tn as useSignalEffect,Et as useState}; diff --git a/llama.cpp/tools/server/public_legacy/json-schema-to-grammar.mjs b/llama.cpp/tools/server/public_legacy/json-schema-to-grammar.mjs new file mode 100644 index 0000000..38576c4 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/json-schema-to-grammar.mjs @@ -0,0 +1,856 @@ +// WARNING: This file was ported from json_schema_to_grammar.py, please fix bugs / add features there first. +const SPACE_RULE = '| " " | "\\n"{1,2} [ \\t]{0,20}'; + +function _buildRepetition(itemRule, minItems, maxItems, opts={}) { + if (maxItems == 0) { + return ''; + } + if (minItems === 0 && maxItems === 1) { + return `${itemRule}?`; + } + + + const separatorRule = opts.separatorRule ?? ''; + const itemRuleIsLiteral = opts.itemRuleIsLiteral ?? false + + if (separatorRule === '') { + if (minItems === 1 && maxItems === undefined) { + return `${itemRule}+`; + } else if (minItems === 0 && maxItems === undefined) { + return `${itemRule}*`; + } else { + return `${itemRule}{${minItems},${maxItems !== undefined ? maxItems : ''}}`; + } + } + + const result = itemRule + ' ' + _buildRepetition(`(${separatorRule} ${itemRule})`, minItems > 0 ? minItems - 1 : 0, maxItems !== undefined ? maxItems - 1 : undefined); + return minItems === 0 ? `(${result})?` : result; +} + +function _generateMinMaxInt(minValue, maxValue, out, decimalsLeft = 16, topLevel = true) { + const hasMin = minValue !== null; + const hasMax = maxValue !== null; + + function digitRange(fromChar, toChar) { + out.push("["); + if (fromChar === toChar) { + out.push(fromChar); + } else { + out.push(fromChar); + out.push("-"); + out.push(toChar); + } + out.push("]"); + } + + function moreDigits(minDigits, maxDigits) { + out.push("[0-9]"); + if (minDigits === maxDigits && minDigits === 1) { + return; + } + out.push("{"); + out.push(minDigits.toString()); + if (maxDigits !== minDigits) { + out.push(","); + if (maxDigits !== Number.MAX_SAFE_INTEGER) { + out.push(maxDigits.toString()); + } + } + out.push("}"); + } + + function uniformRange(fromStr, toStr) { + let i = 0; + while (i < fromStr.length && fromStr[i] === toStr[i]) { + i++; + } + if (i > 0) { + out.push("\""); + out.push(fromStr.slice(0, i)); + out.push("\""); + } + if (i < fromStr.length) { + if (i > 0) { + out.push(" "); + } + const subLen = fromStr.length - i - 1; + if (subLen > 0) { + const fromSub = fromStr.slice(i + 1); + const toSub = toStr.slice(i + 1); + const subZeros = "0".repeat(subLen); + const subNines = "9".repeat(subLen); + + let toReached = false; + out.push("("); + if (fromSub === subZeros) { + digitRange(fromStr[i], String.fromCharCode(toStr.charCodeAt(i) - 1)); + out.push(" "); + moreDigits(subLen, subLen); + } else { + out.push("["); + out.push(fromStr[i]); + out.push("] "); + out.push("("); + uniformRange(fromSub, subNines); + out.push(")"); + if (fromStr.charCodeAt(i) < toStr.charCodeAt(i) - 1) { + out.push(" | "); + if (toSub === subNines) { + digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), toStr[i]); + toReached = true; + } else { + digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), String.fromCharCode(toStr.charCodeAt(i) - 1)); + } + out.push(" "); + moreDigits(subLen, subLen); + } + } + if (!toReached) { + out.push(" | "); + digitRange(toStr[i], toStr[i]); + out.push(" "); + 
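+            // this digit equals the upper bound's digit: recurse over the remaining
+            // suffix range, from all zeros up to the upper bound's suffix (toSub)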
uniformRange(subZeros, toSub); + } + out.push(")"); + } else { + out.push("["); + out.push(fromStr[i]); + out.push("-"); + out.push(toStr[i]); + out.push("]"); + } + } + } + + if (hasMin && hasMax) { + if (minValue < 0 && maxValue < 0) { + out.push("\"-\" ("); + _generateMinMaxInt(-maxValue, -minValue, out, decimalsLeft, true); + out.push(")"); + return; + } + + if (minValue < 0) { + out.push("\"-\" ("); + _generateMinMaxInt(0, -minValue, out, decimalsLeft, true); + out.push(") | "); + minValue = 0; + } + + let minS = minValue.toString(); + const maxS = maxValue.toString(); + const minDigits = minS.length; + const maxDigits = maxS.length; + + for (let digits = minDigits; digits < maxDigits; digits++) { + uniformRange(minS, "9".repeat(digits)); + minS = "1" + "0".repeat(digits); + out.push(" | "); + } + uniformRange(minS, maxS); + return; + } + + const lessDecimals = Math.max(decimalsLeft - 1, 1); + + if (hasMin) { + if (minValue < 0) { + out.push("\"-\" ("); + _generateMinMaxInt(null, -minValue, out, decimalsLeft, false); + out.push(") | [0] | [1-9] "); + moreDigits(0, decimalsLeft - 1); + } else if (minValue === 0) { + if (topLevel) { + out.push("[0] | [1-9] "); + moreDigits(0, lessDecimals); + } else { + moreDigits(1, decimalsLeft); + } + } else if (minValue <= 9) { + const c = minValue.toString(); + const range_start = topLevel ? '1' : '0'; + if (c > range_start) { + digitRange(range_start, String.fromCharCode(c.charCodeAt(0) - 1)); + out.push(" "); + moreDigits(1, lessDecimals); + out.push(" | "); + } + digitRange(c, "9"); + out.push(" "); + moreDigits(0, lessDecimals); + } else { + const minS = minValue.toString(); + const length = minS.length; + const c = minS[0]; + + if (c > "1") { + digitRange(topLevel ? "1" : "0", String.fromCharCode(c.charCodeAt(0) - 1)); + out.push(" "); + moreDigits(length, lessDecimals); + out.push(" | "); + } + digitRange(c, c); + out.push(" ("); + _generateMinMaxInt(parseInt(minS.slice(1)), null, out, lessDecimals, false); + out.push(")"); + if (c < "9") { + out.push(" | "); + digitRange(String.fromCharCode(c.charCodeAt(0) + 1), "9"); + out.push(" "); + moreDigits(length - 1, lessDecimals); + } + } + return; + } + + if (hasMax) { + if (maxValue >= 0) { + if (topLevel) { + out.push("\"-\" [1-9] "); + moreDigits(0, lessDecimals); + out.push(" | "); + } + _generateMinMaxInt(0, maxValue, out, decimalsLeft, true); + } else { + out.push("\"-\" ("); + _generateMinMaxInt(-maxValue, null, out, decimalsLeft, false); + out.push(")"); + } + return; + } + + throw new Error("At least one of minValue or maxValue must be set"); +} + +class BuiltinRule { + constructor(content, deps) { + this.content = content; + this.deps = deps || []; + } +} + +const PRIMITIVE_RULES = { + boolean : new BuiltinRule('("true" | "false") space', []), + 'decimal-part' : new BuiltinRule('[0-9]{1,16}', []), + 'integral-part': new BuiltinRule('[0] | [1-9] [0-9]{0,15}', []), + number : new BuiltinRule('("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', ['integral-part', 'decimal-part']), + integer : new BuiltinRule('("-"? integral-part) space', ['integral-part']), + value : new BuiltinRule('object | array | string | number | boolean | null', ['object', 'array', 'string', 'number', 'boolean', 'null']), + object : new BuiltinRule('"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', ['string', 'value']), + array : new BuiltinRule('"[" space ( value ("," space value)* )? 
"]" space', ['value']), + uuid : new BuiltinRule('"\\"" [0-9a-fA-F]{8} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{12} "\\"" space', []), + char : new BuiltinRule(`[^"\\\\\\x7F\\x00-\\x1F] | [\\\\] (["\\\\bfnrt] | "u" [0-9a-fA-F]{4})`, []), + string : new BuiltinRule(`"\\"" char* "\\"" space`, ['char']), + null : new BuiltinRule('"null" space', []), +}; + +// TODO: support "uri", "email" string formats +const STRING_FORMAT_RULES = { + 'date' : new BuiltinRule('[0-9]{4} "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )', []), + 'time' : new BuiltinRule('([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9]{3} )? ( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', []), + 'date-time' : new BuiltinRule('date "T" time', ['date', 'time']), + 'date-string' : new BuiltinRule('"\\"" date "\\"" space', ['date']), + 'time-string' : new BuiltinRule('"\\"" time "\\"" space', ['time']), + 'date-time-string': new BuiltinRule('"\\"" date-time "\\"" space', ['date-time']), +} + +const RESERVED_NAMES = {'root': true, ...PRIMITIVE_RULES, ...STRING_FORMAT_RULES}; + +const INVALID_RULE_CHARS_RE = /[^\dA-Za-z-]+/g; +const GRAMMAR_LITERAL_ESCAPE_RE = /[\n\r"\\]/g; +const GRAMMAR_RANGE_LITERAL_ESCAPE_RE = /[\n\r"\]\-\\]/g; +const GRAMMAR_LITERAL_ESCAPES = { '\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]', '\\': '\\\\' }; + +const NON_LITERAL_SET = new Set('|.()[]{}*+?'); +const ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = new Set('^$.[]()|{}*+?'); + +export class SchemaConverter { + constructor(options) { + this._propOrder = options.prop_order || {}; + this._allowFetch = options.allow_fetch || false; + this._dotall = options.dotall || false; + this._rules = {'space': SPACE_RULE}; + this._refs = {}; + this._refsBeingResolved = new Set(); + } + + _formatLiteral(literal) { + const escaped = literal.replace( + GRAMMAR_LITERAL_ESCAPE_RE, + m => GRAMMAR_LITERAL_ESCAPES[m] + ); + return `"${escaped}"`; + } + + _formatRangeChar(literal) { + return JSON.stringify(literal).slice(1, -1).replace( + GRAMMAR_RANGE_LITERAL_ESCAPE_RE, + m => GRAMMAR_LITERAL_ESCAPES[m] + ); + } + + _addRule(name, rule) { + let escName = name.replace(INVALID_RULE_CHARS_RE, '-'); + let key = escName; + + if (escName in this._rules) { + if (this._rules[escName] === rule) { + return key; + } + + let i = 0; + while ((`${escName}${i}` in this._rules) && (this._rules[`${escName}${i}`] !== rule)) { + i += 1; + } + key = `${escName}${i}`; + } + + this._rules[key] = rule; + return key; + } + + async resolveRefs(schema, url) { + const visit = async (n) => { + if (Array.isArray(n)) { + return Promise.all(n.map(visit)); + } else if (typeof n === 'object' && n !== null) { + let ref = n.$ref; + let target; + if (ref !== undefined && !this._refs[ref]) { + if (ref.startsWith('https://')) { + if (!this._allowFetch) { + throw new Error('Fetching remote schemas is not allowed (use --allow-fetch for force)'); + } + const fetch = (await import('node-fetch')).default; + + const fragSplit = ref.split('#'); + const baseUrl = fragSplit[0]; + + target = this._refs[baseUrl]; + if (!target) { + target = await this.resolveRefs(await fetch(ref).then(res => res.json()), baseUrl); + this._refs[baseUrl] = target; + } + + if (fragSplit.length === 1 || fragSplit[fragSplit.length - 1] === '') { + return target; + } + } else if (ref.startsWith('#/')) { + target = schema; + ref = `${url}${ref}`; + n.$ref = ref; + } else { + throw new Error(`Unsupported ref ${ref}`); + } + + const selectors 
= ref.split('#')[1].split('/').slice(1); + for (const sel of selectors) { + const selIndex = parseInt(sel, 10); + if (target && sel in target) { + target = target[sel]; + } else if (target && selIndex in target) { + target = target[selIndex]; + } else { + throw new Error(`Error resolving ref ${ref}: ${sel} not in ${JSON.stringify(target)}`); + } + } + + this._refs[ref] = target; + } else { + await Promise.all(Object.values(n).map(visit)); + } + } + + return n; + }; + + return visit(schema); + } + + _generateUnionRule(name, altSchemas) { + return altSchemas + .map((altSchema, i) => this.visit(altSchema, `${name ?? ''}${name ? '-' : 'alternative-'}${i}`)) + .join(' | '); + } + + _visitPattern(pattern, name) { + if (!pattern.startsWith('^') || !pattern.endsWith('$')) { + throw new Error('Pattern must start with "^" and end with "$"'); + } + pattern = pattern.slice(1, -1); + const subRuleIds = {}; + + let i = 0; + const length = pattern.length; + + const getDot = () => { + let rule; + if (this._dotall) { + rule = '[\\U00000000-\\U0010FFFF]'; + } else { + // Accept any character... except \n and \r line break chars (\x0A and \xOD) + rule = '[^\\x0A\\x0D]'; + } + return this._addRule('dot', rule); + }; + + + const toRule = ([s, isLiteral]) => isLiteral ? "\"" + s + "\"" : s; + + const transform = () => { + const start = i; + // For each component of this sequence, store its string representation and whether it's a literal. + // We only need a flat structure here to apply repetition operators to the last item, and + // to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially + // (GBNF's syntax is luckily very close to regular expressions!) + const seq = []; + + const joinSeq = () => { + const ret = []; + for (const [isLiteral, g] of groupBy(seq, x => x[1])) { + if (isLiteral) { + ret.push([[...g].map(x => x[0]).join(''), true]); + } else { + ret.push(...g); + } + } + if (ret.length === 1) { + return ret[0]; + } + return [ret.map(x => toRule(x)).join(' '), false]; + }; + + while (i < length) { + const c = pattern[i]; + if (c === '.') { + seq.push([getDot(), false]); + i += 1; + } else if (c === '(') { + i += 1; + if (i < length) { + if (pattern[i] === '?') { + throw new Error(`Unsupported pattern syntax "${pattern[i]}" at index ${i} of /${pattern}/`); + } + } + seq.push([`(${toRule(transform())})`, false]); + } else if (c === ')') { + i += 1; + if (start <= 0 || pattern[start - 1] !== '(') { + throw new Error(`Unbalanced parentheses; start = ${start}, i = ${i}, pattern = ${pattern}`); + } + return joinSeq(); + } else if (c === '[') { + let squareBrackets = c; + i += 1; + while (i < length && pattern[i] !== ']') { + if (pattern[i] === '\\') { + squareBrackets += pattern.slice(i, i + 2); + i += 2; + } else { + squareBrackets += pattern[i]; + i += 1; + } + } + if (i >= length) { + throw new Error(`Unbalanced square brackets; start = ${start}, i = ${i}, pattern = ${pattern}`); + } + squareBrackets += ']'; + i += 1; + seq.push([squareBrackets, false]); + } else if (c === '|') { + seq.push(['|', false]); + i += 1; + } else if (c === '*' || c === '+' || c === '?') { + seq[seq.length - 1] = [toRule(seq[seq.length - 1]) + c, false]; + i += 1; + } else if (c === '{') { + let curlyBrackets = c; + i += 1; + while (i < length && pattern[i] !== '}') { + curlyBrackets += pattern[i]; + i += 1; + } + if (i >= length) { + throw new Error(`Unbalanced curly brackets; start = ${start}, i = ${i}, pattern = ${pattern}`); + } + curlyBrackets += '}'; + i += 1; + const 
nums = curlyBrackets.slice(1, -1).split(',').map(s => s.trim()); + let minTimes, maxTimes; + if (nums.length === 1) { + minTimes = parseInt(nums[0], 10); + maxTimes = minTimes; + } else { + if (nums.length !== 2) { + throw new Error(`Invalid quantifier ${curlyBrackets}`); + } + minTimes = nums[0] ? parseInt(nums[0], 10) : 0; + maxTimes = nums[1] ? parseInt(nums[1], 10) : Infinity; + } + + let [sub, subIsLiteral] = seq[seq.length - 1]; + + if (!subIsLiteral) { + let id = subRuleIds[sub]; + if (id === undefined) { + id = this._addRule(`${name}-${Object.keys(subRuleIds).length + 1}`, sub); + subRuleIds[sub] = id; + } + sub = id; + } + + seq[seq.length - 1] = [ + _buildRepetition(subIsLiteral ? `"${sub}"` : sub, minTimes, maxTimes, {itemRuleIsLiteral: subIsLiteral}), + false + ]; + } else { + let literal = ''; + while (i < length) { + if (pattern[i] === '\\' && i < length - 1) { + const next = pattern[i + 1]; + if (ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS.has(next)) { + i += 1; + literal += pattern[i]; + i += 1; + } else { + literal += pattern.slice(i, i + 2); + i += 2; + } + } else if (pattern[i] === '"') { + literal += '\\"'; + i += 1; + } else if (!NON_LITERAL_SET.has(pattern[i]) && + (i === length - 1 || literal === '' || pattern[i + 1] === '.' || !NON_LITERAL_SET.has(pattern[i+1]))) { + literal += pattern[i]; + i += 1; + } else { + break; + } + } + if (literal !== '') { + seq.push([literal, true]); + } + } + } + + return joinSeq(); + }; + + return this._addRule(name, "\"\\\"\" (" + toRule(transform()) + ") \"\\\"\" space") + } + + _notStrings(strings) { + class TrieNode { + constructor() { + this.children = {}; + this.isEndOfString = false; + } + + insert(str) { + let node = this; + for (const c of str) { + node = node.children[c] = node.children[c] || new TrieNode(); + } + node.isEndOfString = true; + } + } + + const trie = new TrieNode(); + for (const s of strings) { + trie.insert(s); + } + + const charRuleName = this._addPrimitive('char', PRIMITIVE_RULES['char']); + const out = ['["] ( ']; + + const visit = (node) => { + const rejects = []; + let first = true; + for (const c of Object.keys(node.children).sort()) { + const child = node.children[c]; + rejects.push(c); + if (first) { + first = false; + } else { + out.push(' | '); + } + out.push(`[${c}]`); + if (Object.keys(child.children).length > 0) { + out.push(' ('); + visit(child); + out.push(')'); + } else if (child.isEndOfString) { + out.push(` ${charRuleName}+`); + } + } + if (Object.keys(node.children).length > 0) { + if (!first) { + out.push(' | '); + } + out.push(`[^"${rejects.join('')}] ${charRuleName}*`); + } + }; + + visit(trie); + + out.push(` )${trie.isEndOfString ? '' : '?'} ["] space`); + return out.join(''); + } + + _resolveRef(ref) { + let refFragment = ref.split('#').pop(); + let refName = 'ref' + refFragment.replace(/[^a-zA-Z0-9-]+/g, '-'); + if (!(refName in this._rules) && !this._refsBeingResolved.has(ref)) { + this._refsBeingResolved.add(ref); + const resolved = this._refs[ref]; + refName = this.visit(resolved, refName); + this._refsBeingResolved.delete(ref); + } + return refName; + } + + _generateConstantRule(value) { + return this._formatLiteral(JSON.stringify(value)); + } + + visit(schema, name) { + const schemaType = schema.type; + const schemaFormat = schema.format; + const ruleName = name in RESERVED_NAMES ? name + '-' : name == '' ? 
'root' : name; + + const ref = schema.$ref; + if (ref !== undefined) { + return this._addRule(ruleName, this._resolveRef(ref)); + } else if (schema.oneOf || schema.anyOf) { + return this._addRule(ruleName, this._generateUnionRule(name, schema.oneOf || schema.anyOf)); + } else if (Array.isArray(schemaType)) { + return this._addRule(ruleName, this._generateUnionRule(name, schemaType.map(t => ({...schema, type: t})))); + } else if ('const' in schema) { + return this._addRule(ruleName, this._generateConstantRule(schema.const) + ' space'); + } else if ('enum' in schema) { + const rule = '(' + schema.enum.map(v => this._generateConstantRule(v)).join(' | ') + ') space'; + return this._addRule(ruleName, rule); + } else if ((schemaType === undefined || schemaType === 'object') && + ('properties' in schema || + ('additionalProperties' in schema && schema.additionalProperties !== true))) { + const required = new Set(schema.required || []); + const properties = Object.entries(schema.properties ?? {}); + return this._addRule(ruleName, this._buildObjectRule(properties, required, name, schema.additionalProperties)); + } else if ((schemaType === undefined || schemaType === 'object' || schemaType === 'string') && 'allOf' in schema) { + const required = new Set(); + const properties = []; + const enumSets = []; + const addComponent = (compSchema, isRequired) => { + const ref = compSchema.$ref; + if (ref !== undefined) { + compSchema = this._refs[ref]; + } + + if ('properties' in compSchema) { + for (const [propName, propSchema] of Object.entries(compSchema.properties)) { + properties.push([propName, propSchema]); + if (isRequired) { + required.add(propName); + } + } + } + + if ('enum' in compSchema) { + enumSets.push(new Set(compSchema.enum || [])); + } + }; + + for (const t of schema.allOf) { + if ('anyOf' in t) { + for (const tt of t.anyOf) { + addComponent(tt, false); + } + } else { + addComponent(t, true); + } + } + + if (enumSets.length > 0) { + const enumIntersection = new Set([...enumSets[0]].filter(v => enumSets.every(s => s.has(v)))); + if (enumIntersection.size > 0) { + const sortedEnums = [...enumIntersection].sort((a, b) => a.localeCompare(b)); + const rule = '(' + sortedEnums.map(v => this._generateConstantRule(v)).join(' | ') + ') space'; + return this._addRule(ruleName, rule); + } + } + return this._addRule(ruleName, this._buildObjectRule(properties, required, name, null)); + } else if ((schemaType === undefined || schemaType === 'array') && ('items' in schema || 'prefixItems' in schema)) { + const items = schema.items ?? schema.prefixItems; + if (Array.isArray(items)) { + return this._addRule( + ruleName, + '"[" space ' + + items.map((item, i) => this.visit(item, `${name ?? ''}${name ? '-' : ''}tuple-${i}`)).join(' "," space ') + + ' "]" space' + ); + } else { + const itemRuleName = this.visit(items, `${name ?? ''}${name ? '-' : ''}item`); + const minItems = schema.minItems || 0; + const maxItems = schema.maxItems; + return this._addRule(ruleName, '"[" space ' + _buildRepetition(itemRuleName, minItems, maxItems, {separatorRule: '"," space'}) + ' "]" space'); + } + } else if ((schemaType === undefined || schemaType === 'string') && 'pattern' in schema) { + return this._visitPattern(schema.pattern, ruleName); + } else if ((schemaType === undefined || schemaType === 'string') && /^uuid[1-5]?$/.test(schema.format || '')) { + return this._addPrimitive( + ruleName === 'root' ? 
'root' : schemaFormat, + PRIMITIVE_RULES['uuid'] + ); + } else if ((schemaType === undefined || schemaType === 'string') && `${schema.format}-string` in STRING_FORMAT_RULES) { + const primName = `${schema.format}-string` + return this._addRule(ruleName, this._addPrimitive(primName, STRING_FORMAT_RULES[primName])); + } else if (schemaType === 'string' && ('minLength' in schema || 'maxLength' in schema)) { + const charRuleName = this._addPrimitive('char', PRIMITIVE_RULES['char']); + const minLen = schema.minLength || 0; + const maxLen = schema.maxLength; + return this._addRule(ruleName, '"\\\"" ' + _buildRepetition(charRuleName, minLen, maxLen) + ' "\\\"" space'); + } else if (schemaType === 'integer' && ('minimum' in schema || 'exclusiveMinimum' in schema || 'maximum' in schema || 'exclusiveMaximum' in schema)) { + let minValue = null; + let maxValue = null; + if ('minimum' in schema) { + minValue = schema.minimum; + } else if ('exclusiveMinimum' in schema) { + minValue = schema.exclusiveMinimum + 1; + } + if ('maximum' in schema) { + maxValue = schema.maximum; + } else if ('exclusiveMaximum' in schema) { + maxValue = schema.exclusiveMaximum - 1; + } + + const out = ["("]; + _generateMinMaxInt(minValue, maxValue, out); + out.push(") space"); + return this._addRule(ruleName, out.join('')); + } else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) { + return this._addRule(ruleName, this._addPrimitive('object', PRIMITIVE_RULES['object'])); + } else { + if (!(schemaType in PRIMITIVE_RULES)) { + throw new Error(`Unrecognized schema: ${JSON.stringify(schema)}`); + } + // TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero + return this._addPrimitive(ruleName === 'root' ? 'root' : schemaType, PRIMITIVE_RULES[schemaType]); + } + } + + _addPrimitive(name, rule) { + let n = this._addRule(name, rule.content); + for (const dep of rule.deps) { + const depRule = PRIMITIVE_RULES[dep] || STRING_FORMAT_RULES[dep]; + if (!depRule) { + throw new Error(`Rule ${dep} not known`); + } + if (!(dep in this._rules)) { + this._addPrimitive(dep, depRule); + } + } + return n; + } + + _buildObjectRule(properties, required, name, additionalProperties) { + const propOrder = this._propOrder; + // sort by position in prop_order (if specified) then by original order + const sortedProps = properties.map(([k]) => k).sort((a, b) => { + const orderA = propOrder[a] || Infinity; + const orderB = propOrder[b] || Infinity; + return orderA - orderB || properties.findIndex(([k]) => k === a) - properties.findIndex(([k]) => k === b); + }); + + const propKvRuleNames = {}; + for (const [propName, propSchema] of properties) { + const propRuleName = this.visit(propSchema, `${name ?? ''}${name ? '-' : ''}${propName}`); + propKvRuleNames[propName] = this._addRule( + `${name ?? ''}${name ? '-' : ''}${propName}-kv`, + `${this._formatLiteral(JSON.stringify(propName))} space ":" space ${propRuleName}` + ); + } + const requiredProps = sortedProps.filter(k => required.has(k)); + const optionalProps = sortedProps.filter(k => !required.has(k)); + + if (additionalProperties) { + const subName = `${name ?? ''}${name ? '-' : ''}additional`; + const valueRule = + additionalProperties != null && typeof additionalProperties === 'object' ? this.visit(additionalProperties, `${subName}-value`) + : this._addPrimitive('value', PRIMITIVE_RULES['value']); + + const key_rule = + sortedProps.length === 0 ? 
this._addPrimitive('string', PRIMITIVE_RULES['string']) + : this._addRule(`${subName}-k`, this._notStrings(sortedProps)); + + propKvRuleNames['*'] = this._addRule( + `${subName}-kv`, + `${key_rule} ":" space ${valueRule}`); + optionalProps.push('*'); + } + + let rule = '"{" space '; + rule += requiredProps.map(k => propKvRuleNames[k]).join(' "," space '); + + if (optionalProps.length > 0) { + rule += ' ('; + if (requiredProps.length > 0) { + rule += ' "," space ( '; + } + + const getRecursiveRefs = (ks, firstIsOptional) => { + const [k, ...rest] = ks; + const kvRuleName = propKvRuleNames[k]; + let res; + const commaRef = `( "," space ${kvRuleName} )`; + if (firstIsOptional) { + res = commaRef + (k === '*' ? '*' : '?'); + } else { + res = kvRuleName + (k === '*' ? ' ' + commaRef + '*' : ''); + } + if (rest.length > 0) { + res += ' ' + this._addRule( + `${name ?? ''}${name ? '-' : ''}${k}-rest`, + getRecursiveRefs(rest, true) + ); + } + return res; + }; + + rule += optionalProps.map((_, i) => getRecursiveRefs(optionalProps.slice(i), false)).join(' | '); + if (requiredProps.length > 0) { + rule += ' )'; + } + rule += ' )?'; + } + + rule += ' "}" space'; + + return rule; + } + + formatGrammar() { + let grammar = ''; + for (const [name, rule] of Object.entries(this._rules).sort(([a], [b]) => a.localeCompare(b))) { + grammar += `${name} ::= ${rule}\n`; + } + return grammar; + } +} + +// Helper function to group elements by a key function +function* groupBy(iterable, keyFn) { + let lastKey = null; + let group = []; + for (const element of iterable) { + const key = keyFn(element); + if (lastKey !== null && key !== lastKey) { + yield [lastKey, group]; + group = []; + } + group.push(element); + lastKey = key; + } + if (group.length > 0) { + yield [lastKey, group]; + } +} diff --git a/llama.cpp/tools/server/public_legacy/loading.html b/llama.cpp/tools/server/public_legacy/loading.html new file mode 100644 index 0000000..c3fd19a --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/loading.html @@ -0,0 +1,12 @@ + + + + + + +
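For reference, a minimal usage sketch of the grammar converter defined above. It assumes the class is exported as SchemaConverter and that its constructor takes an options object with prop_order and dotall keys (neither name is visible in this hunk); the resolveRefs/visit/formatGrammar calls mirror the methods shown above, so this is a sketch under those assumptions rather than a definitive API.

// Sketch only (ES module, top-level await): turn a small JSON schema into a GBNF grammar string.
import { SchemaConverter } from './json-schema-to-grammar.mjs'; // assumed export name

const converter = new SchemaConverter({ prop_order: {}, dotall: false }); // assumed option keys
const schema = {
  type: 'object',
  properties: {
    name: { type: 'string' },
    age: { type: 'integer', minimum: 0, maximum: 150 }, // exercises the integer min/max branch of visit()
  },
  required: ['name'],
};

// resolveRefs() caches any $ref targets (see the visitor near the top of the file),
// visit(schema, '') emits the 'root' rule, and formatGrammar() prints every rule collected in this._rules.
const resolved = await converter.resolveRefs(schema, 'input');
converter.visit(resolved, '');
console.log(converter.formatGrammar()); // e.g. "root ::= ..." plus the kv and primitive rules it pulled in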
+ The model is loading. Please wait.
+ The user interface will appear soon. +
+ + diff --git a/llama.cpp/tools/server/public_legacy/prompt-formats.js b/llama.cpp/tools/server/public_legacy/prompt-formats.js new file mode 100644 index 0000000..73ddb71 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/prompt-formats.js @@ -0,0 +1,331 @@ +// extended list +export const promptFormats = { + "alpaca": { + template: `{{prompt}}\n\n{{history}}\n\n{{char}}:`, + + historyTemplate: `### {{name}}:\n{{message}}`, + + char: "Response", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "Instruction", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "chatml": { + template: `<|im_start|>system\n{{prompt}}<|im_end|>\n{{history}}{{char}}`, + + historyTemplate: `<|im_start|>{{name}}\n{{message}}`, + + char: "assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "user", + userMsgPrefix: "", + userMsgSuffix: "<|im_end|>\n", + + stops: "" + }, + + // ---------------------------- + + "commandr": { + template: `<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{prompt}}\n<|END_OF_TURN_TOKEN|>{{history}}{{char}}`, + + historyTemplate: `<|START_OF_TURN_TOKEN|><|{{name}}|> {{message}}`, + + char: "CHATBOT_TOKEN", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "USER_TOKEN", + userMsgPrefix: "", + userMsgSuffix: "<|END_OF_TURN_TOKEN|>", + + stops: "" + }, + // ref: https://docs.cohere.com/docs/prompting-command-r + + // ---------------------------- + + "llama2": { + template: `<s>[INST] <<SYS>>\n{{prompt}}\n<</SYS>>\n\nTest Message [/INST] Test Successful </s>{{history}}{{char}}`, + + historyTemplate: `{{name}}: {{message}}`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "User", + userMsgPrefix: "[INST] ", + userMsgSuffix: " [/INST]", + + stops: "" + }, + // ref: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + + // ---------------------------- + + "llama3": { + template: `<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{prompt}}{{history}}{{char}}`, + + historyTemplate: `<|start_header_id|>{{name}}<|end_header_id|>\n\n{{message}}<|eot_id|>`, + + char: "assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "user", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "<|eot_id|>" + }, + // ref: https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/#special-tokens-used-with-meta-llama-3 + + // ---------------------------- + + "openchat": { + template: `{{history}}{{char}}`, + + historyTemplate: `GPT4 Correct {{name}}: {{message}}<|end_of_turn|>`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "User", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "phi3": { + template: `{{history}}{{char}}`, + + historyTemplate: `<|{{name}}|>\n{{message}}<|end|>\n`, + + char: "assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "user", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "<|end|>" + }, + // ref: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct#chat-format + + // ---------------------------- + + "vicuna": { + template: `{{prompt}}\n{{history}}{{char}}`, + + historyTemplate: `{{name}}: {{message}}\n`, + + char: "ASSISTANT", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "USER", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + // ref: https://huggingface.co/lmsys/vicuna-33b-v1.3/discussions/1 + + // ---------------------------- + + "deepseekCoder": { + template: `{{prompt}}{{history}}{{char}}:`, + + historyTemplate: `### {{name}}:\n{{message}}`, + +
char: "Response", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "Instruction", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "<|EOT|>" + }, + + // ---------------------------- + + "med42": { + template: `<|system|>: {{prompt}}\n{{history}}{{char}}`, + + historyTemplate: `<|{{name}}|>: {{message}}\n`, + + char: "assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "prompter", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "neuralchat": { + template: `### System:\n{{prompt}}\n{{history}}{{char}}:`, + + historyTemplate: `### {{name}}:\n{{message}}\n`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "User", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "nousHermes": { + template: `### Instruction: {{prompt}}\n\n{{history}}\n\n{{char}}:`, + + historyTemplate: `### {{name}}:\n{{message}}`, + + char: "Response", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "Input", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "openchatMath": { + template: `{{history}}{{char}}`, + + historyTemplate: `Math Correct {{name}}: {{message}}<|end_of_turn|>`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + + user: "User", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "orion": { + template: `Human: Test Message\n\nAssistant: Test Successful{{history}}{{char}}:`, + + historyTemplate: `{{name}}: {{message}}`, + + char: "Assistant ", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "Human", + userMsgPrefix: "", + userMsgSuffix: "\n\n", + + stops: "" + }, + + // ---------------------------- + + "sauerkraut": { + template: `{{prompt}}\n{{history}}{{char}}`, + + historyTemplate: ` + {{name}}: {{message}}\n`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "User", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "starlingCode": { + template: `{{history}}{{char}}`, + + historyTemplate: `Code {{name}}: {{message}}<|end_of_turn|>`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "User", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "yi34b": { + template: `{{history}} {{char}}`, + + historyTemplate: `{{name}}: {{message}}`, + + char: "Assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "Human", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + }, + + // ---------------------------- + + "zephyr": { + template: `<|system|>\n{{prompt}}\n{{history}}{{char}}`, + + historyTemplate: `<|{{name}}|>\n{{message}}\n`, + + char: "assistant", + charMsgPrefix: "", + charMsgSuffix: "", + + user: "user", + userMsgPrefix: "", + userMsgSuffix: "", + + stops: "" + } + }; diff --git a/llama.cpp/tools/server/public_legacy/style.css b/llama.cpp/tools/server/public_legacy/style.css new file mode 100644 index 0000000..087cc62 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/style.css @@ -0,0 +1,954 @@ +@import url("colorthemes.css"); + +body { + font-family: 'Arial', sans-serif; + font-size: 90%; + background-color: var(--background-color-1); + color: var(--text-color-subtile-1); /* head 1 llama.cpp & triangle options for some reason */ + max-width: 600px; + min-width: 300px; + line-height: 1.2; + margin: 0 auto; + padding: 0 0.5em; + transition: background-color 0.3s; +} + 
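The entries above are plain string templates: the UI is expected to substitute the {{prompt}}, {{history}}, {{char}}, {{name}} and {{message}} placeholders at chat time. A hedged sketch of how one entry could be consumed follows; the renderPrompt helper and its message shape are hypothetical, and only the field and placeholder names come from the data above.

// Hypothetical illustration: fill one promptFormats entry with a system prompt and a chat history.
import { promptFormats } from './prompt-formats.js';

function renderPrompt(formatName, systemPrompt, messages) {
  const f = promptFormats[formatName];
  // Render each turn with historyTemplate, wrapped in the per-role prefix/suffix fields.
  const history = messages.map(m => {
    const isUser = m.role === 'user';
    const turn = f.historyTemplate
      .replaceAll('{{name}}', isUser ? f.user : f.char)
      .replaceAll('{{message}}', m.content);
    return (isUser ? f.userMsgPrefix : f.charMsgPrefix) + turn + (isUser ? f.userMsgSuffix : f.charMsgSuffix);
  }).join('');
  // Drop the system prompt, the rendered history and the assistant cue into the outer template.
  return f.template
    .replaceAll('{{prompt}}', systemPrompt)
    .replaceAll('{{history}}', history)
    .replaceAll('{{char}}', f.char);
}

// e.g. renderPrompt('vicuna', 'A chat between a user and an assistant.', [{ role: 'user', content: 'Hello!' }])
// yields "A chat between a user and an assistant.\nUSER: Hello!\nASSISTANT"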
+::selection { + color: var(--button-primary-text) ; + background: var(--button-primary-color); +} + +code, pre code { + font-family: 'Courier New', monospace; +} + +#container { + margin: 0em auto; + display: flex; + flex-direction: column; + justify-content: space-between; + height: 100%; +} + +main { + margin: 3px; + display: flex; + flex-direction: column; + justify-content: space-between; + gap: 1em; + flex-grow: 1; + overflow-y: auto; + border: 1px solid var(--border-color-3); + border-radius: 5px; + padding: 0.5em; +} + +p { + overflow-wrap: break-word; + word-wrap: break-word; + hyphens: auto; + margin-top: 0.5em; + margin-bottom: 0.5em; +} + +#write form { + margin: 1em 0 0 0; + display: flex; + flex-direction: column; + gap: 0.5em; + align-items: stretch; +} + +.right { + display: flex; + flex-direction: row; + gap: 0.5em; + justify-content: flex-end; + margin-bottom: 30px; +} + +.two-columns { + width: 97%; + max-width: 97%; + display: grid; + grid-template-columns: 1fr 1fr; + gap: 1em; + position: relative; +} + +.json-schema-controls { + margin-top: 10px; + width: 100%; + max-width: 100%; + display: grid; + grid-template: "a a"; + gap: 1em; + font-size: x-small; + color: var(--theme-nuance-color-3); + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} + +.json-schema-controls > * { + flex: 1; +} + +/* titles of the details-summary boxes */ +.summary-title { + font-weight: 600; + font-size: x-small; + color: var(--text-color-subtile-1); + text-transform: uppercase; + /* transition: ; */ +} + +fieldset { + border: none; + padding: 0; + margin: 0; + color: var(--text-color-plain); +} + +fieldset.two { + display: grid; + grid-template: "a a a"; + gap: 1em; + align-items: center; + font-size: x-small; + color: var(--text-color-plain); +} + +fieldset.three { + display: grid; + grid-template: "a a a"; + gap: 1em; + font-size: x-small; + color: var(--text-color-plain); +} + +/* titles of name fields*/ +fieldset.names { + display: grid; + grid-template: "a a"; + gap: 1em; + font-size: x-small; + color: var(--theme-nuance-color-3); + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} + +/* titles of params fields*/ +fieldset.params { + display: grid; + grid-template: "a a"; + gap: 1em; + font-size: x-small; + color: var(--theme-nuance-color-4); + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} + +fieldset.dropdowns { + -webkit-appearance: none; + display: flex; + grid-template: "a a"; + gap: 1em; + font-size: x-small; + color: red; + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} + +/* input of name fields*/ +.names input[type="text"] { + font-family: Arial, sans-serif; + font-size: medium; + font-weight: 500; + padding: 5px; + border: 1px solid var(--border-color-2); +} + +.chat-id-color { + color: var(--chat-id-color); +} + +details { + border: 1px solid var(--border-color-2); + border-radius: 5px; + padding: 0.5em 0.5em 0; + margin-top: 0.5em; +} + +summary { + font-weight: bold; + margin: -0.5em -0.5em 0; + padding: 0.5em; + cursor: pointer; +} + +details[open] { + padding: 0.5em; +} + +textarea-sec, input-sec, button-sec { + padding: 10px; + height: 40px; + align-items: center; +} + +textarea-sec::placeholder, input-sec::placeholder { + padding-left: 10px; +} + +.toggleCheckbox { + display: none; +} + +.toggleContainer { + position: relative; + display: grid; + grid-template-columns: repeat(2, 1fr); + 
width: fit-content; + border: 3px solid var(--border-color-2); + border-radius: 20px; + background: var(--border-color-2); + font-size: small; + cursor: pointer; + overflow: hidden; +} + +/* toggle button current state */ +.toggleContainer::before { + color: var(--button-primary-text); + background-color: var(--button-primary-color); + content: ''; + position: absolute; + width: 50%; + height: 100%; + left: 0%; + border-radius: 20px; + transition: all 0.3s; +} + +.toggleContainer div { + padding: 6px; + text-align: center; + z-index: 1; + transition: color 0.3s; +} + +.toggleCheckbox:checked + .toggleContainer::before { + left: 50%; +} + +.toggleCheckbox:checked + .toggleContainer div:first-child { + color: var(--text-color-subtile-2); +} + +.toggleCheckbox:checked + .toggleContainer div:last-child { + color: var(--button-primary-text); +} + +.toggleCheckbox + .toggleContainer div:first-child { + color: var(--button-primary-text); +} + +.toggleCheckbox + .toggleContainer div:last-child { + color: var(--text-color-subtile-2); +} + +select { + padding: 5px; + margin-right: 5px; + border-radius: 4px; + border: 1px solid var(--secondary-color-4); + background-color: var(--primary-color-3); + color: var(--secondary-color-4); + cursor: pointer; +} + +select:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 1px var(--border-focus-shadow); +} + +.button-container { + display: flex; + justify-content: flex-end; +} + +button { + color: var(--button-primary-text); + background-color: var(--button-primary-color); + border: 1px solid var(--button-primary-border); + transition: background-color 0.1s; + border-radius: 12px; + font-size: x-small; + font-weight: 600; + text-shadow: 0px 0px 30px #ffffff; + text-align: center; + text-decoration: none; + margin: 4px 2px; + padding: 10px 20px; + display: inline-block; + cursor: pointer; +} + +button:hover { + color: var(--button-primary-text-hover); + background-color: var(--button-primary-color-hover); + border: 1px solid var(--button-primary-border-hover); + font-size: x-small; + font-weight: 600; +} + +button:active { + color: var(--button-primary-text-active); + background-color: var(--button-primary-color-active); + border: 1px solid var(--button-primary-border-active); + font-size: x-small; + font-weight: 600; +} + +button:disabled { + color: var(--button-tertiary-text); + background-color: var(--button-tertiary-color); + border: 1px solid var(--button-tertiary-border); + font-size: x-small; + font-weight: 600; + cursor: not-allowed; +} + +.reset-button { + background-color: var(--button-secondary-color); + border: 1px solid var(--button-secondary-color); + color: var(--button-secondary-text); + width: fit-content; + height: fit-content; + font-size: x-small; + font-weight: 600; + border-radius: 50px; + overflow: hidden; +} + +.reset-button:hover { + color: var(--button-alert-text-hover); + background-color: var(--button-alert-color-hover); + border: 1px solid var(--button-alert-border-hover); + font-size: x-small; + font-weight: 600; +} + +.reset-button:active { + color: var(--button-alert-text-active); + background-color: var(--button-alert-color-active); + border: 1px solid var(--button-alert-border-active); + font-size: x-small; + font-weight: 600; +} + +.button-grammar { + color: var(--button-primary-text); + background-color: var(--button-primary-color); + border: 1px solid var(--button-primary-border); + border-radius: 10px; + padding: 10px 20px; + text-align: center; + text-decoration: none; + display: inline-block; + 
font-size: x-small; + font-weight: 600; + margin: 2px 2px; + transition: background-color 0.1s; + cursor: pointer; +} + +.button-grammar:hover { + color: var(--button-primary-text-hover); + background-color: var(--button-primary-color-hover); + border: 1px solid var(--button-primary-border-hover); + border-radius: 10px; + padding: 10px 20px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: x-small; + font-weight: 600; + margin: 2px 2px; + transition: background-color 0.1s; + cursor: pointer; +} + +.button-grammar:active { + color: var(--button-primary-text-active); + background-color: var(--button-primary-color-active); + border: 1px solid var(--button-primary-border-active); + font-size: x-small; + font-weight: 600; +} + +.button-back { + background-color: var(--button-secondary-color); + border: 1px solid var(--button-secondary-color); + color: var(--button-secondary-text); + transition: background-color 0.1s; + border-radius: 12px; + font-size: x-small; + font-weight: 600; + text-align: center; + text-decoration: none; + margin: 4px 2px; + padding: 10px 20px; + display: inline-block; + cursor: pointer; +} + +.button-back:hover { + color: var(--button-secondary-text-hover); + background-color: var(--button-secondary-color-hover); + border: 1px solid var(--button-secondary-border-hover); + padding: 10px 20px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: x-small; + font-weight: 600; + margin: 4px 2px; + transition: background-color 0.1s; + cursor: pointer; + border-radius: 12px; +} + +.button-back:active { + color: var(--button-secondary-text-active); + background-color: var(--button-secondary-color-active); + border: 1px solid var(--button-secondary-border-active); + font-size: x-small; + font-weight: 600; +} + +.prob-set { + padding: 0.3em; + border-bottom: 1px solid red; /* unknown */ +} + +.popover-content { + position: absolute; + background-color: white; + padding: 0.2em; + box-shadow: 0 0 13px rgba(0, 0, 0, 0.1); +} + +.grammar { + width: 97%; + max-width: 97%; +} + +textarea { + padding: 5px; + flex-grow: 1; + width: 100%; + max-width: 100%; + border-radius: 8px; + border: 1px solid var(--border-color-1); + resize: none; + height: 6em; +} + +textarea:focus { + outline: none; + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +/* "props" frame */ +input[type="text"], +input[type="range"] { + padding: 5px; + border-radius: 8px; + border: 1px solid var(--border-color-1); +} + +/* "names and props" frame focused*/ +input[type="text"]:focus { + outline: none; + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +input[type="range"]:hover { + opacity: 1; +} + +input[type="range"]:focus { + outline: none; + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); + background-size: var(--slider-track-size-focus); +} + +input[type="range"]::-moz-range-thumb { + width: 6px; + height: 25px; + border: 1px solid var(--ui-range-thumb-border); + border-radius: 5px; + background-color: var(--ui-range-thumb-color); + cursor: pointer; +} + +input[type="range"] { + -webkit-appearance: none; + width: 80%; + height: 1px; + border: 1px solid var(--border-color-1); + border-radius: 8px; + background: var(--border-color-2); + outline: none; + opacity: 0.7; + -webkit-transition: .2s; + transition: opacity .2s; +} + +input[type="range"]::-webkit-slider-thumb { + -webkit-appearance: none; + 
appearance: none; + width: 6px; + height: 25px; + border: 1px solid var(--ui-range-thumb-border); + border-radius: 5px; + background-color: var(--ui-range-thumb-color); + cursor: pointer; +} + +input[type="range"]::-webkit-slider-runnable-track { + background-size: var(--slider-track-size); +} + +input[type="radio"] { + accent-color: var(--theme-nuance-color-2); +} + +.chat-input-container { + position: relative; + max-width: 97%; + min-width: 97%; +} + +.chat-input-label { + position: absolute; + top: 0; + left: 0; + color: var(--text-color-plain); + pointer-events: none; + margin-left: 5px; + margin-top: 5px; +} + +textarea#chat-input { + padding-top: 10px; + padding-left: 10px; + font-size: medium; + border: 1px solid var(--border-color-2); + resize: vertical; +} + +textarea#chat-input:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +.input-container { + position: relative; + box-sizing: border-box; + width: 100%; /* set the width to 100% */ + max-width: 100%; /* make sure the width never exceeds 100% */ +} + +.input-container:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} +/* titles of name fields*/ +/* fieldset.names { + display: grid; + grid-template: "a a"; + gap: 1em; + font-size: x-small; + color: var(--theme-nuance-color-3); + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} */ + +/* input of name fields*/ +/* .names input[type="text"] { + font-family: Arial, sans-serif; + font-size: medium; + font-weight: 500; + padding: 5px; + border: 1px solid var(--border-color-2); +} */ + +fieldset.apiKey { + width: 100%; + font-size: x-small; + color: var(--theme-nuance-color-3); + padding-top: 16px; + padding-bottom: 16px; + text-transform: uppercase; + font-weight: 600; +} + +.apiKey { + font-family: Arial, sans-serif; + font-weight: 500; + padding: 5px; + border: 1px solid var(--border-color-2); +} + +.apiKey:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +.apiKey input[type="text"] { + font-family: Arial, sans-serif; + font-size: medium; + font-weight: 500; + padding: 5px; + border: 1px solid var(--border-color-2); +} + +.apiKey label { + display: inline-block; + width: auto; + margin-right: 5px; +} + +textarea#api_key { + padding-top: 10px; + padding-left: 10px; + font-size: medium; + border: 1px solid var(--border-color-2); + resize: vertical; +} + +textarea#api_key:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +/* embedded title of the system prompt text area */ +.input-label { + position: absolute; + top: 0; + left: 0; + color: var(--theme-nuance-color-4); + pointer-events: none; + border-radius: 8px 8px 0px 0px; + padding-top: 10px; + padding-left: 13px; + padding-right: 0px; + margin-top: 1px; + margin-left: 1px; + margin-right: 20px; + text-transform: uppercase; + font-weight: 600; + font-size: small; + background: rgba(255, 255, 255, 0.5); + backdrop-filter: blur(10px); + -webkit-backdrop-filter: blur(10px); /* for safari */ + width: 97%; + /* display: block; + box-sizing: border-box; */ +} + +/* embedded title of the prompt style areas */ +.input-label-sec { + position: absolute; + top: 0; + left: 0; + color: var(--theme-nuance-color-4); + pointer-events: none; + margin-left: 13px; + margin-top: 16px; + text-transform: uppercase; + font-weight: 600; + font-size:
x-small; +} + +/* system prompt input area */ +textarea.persistent-input { + padding-top: 42px; + padding-left: 11px; + width: 97%; + max-width: 97%; + height: 50px; + font-size: medium; + overscroll-behavior: contain; +} + +/* system prompt box */ +.persistent-input { + height: auto; + width: 100%; + max-width: 100%; + min-height: 50px; + padding: 3px; + transition: min-height 0.3s ease; +} + +/* chat history box */ +.persistent-input:focus { + height: auto; + min-height: 150px; + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +textarea.persistent-input:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +/* prompt style input area */ +textarea.persistent-input-sec { + width: 97%; + max-width: 97%; + padding-top: 42px; + padding-left: 11px; + font-size: small; + border: 1px solid var(--border-color-1); + overscroll-behavior: contain; +} + +textarea.persistent-input-sec:focus { + border: 1px solid var(--border-focus-color); + box-shadow: 0 0 3px var(--border-focus-shadow); +} + +/* chat history box */ +.persistent-input-sec { + height: auto; + min-height: 150px; +} + +img { + border-radius: 8px; + display: block; + margin-left: auto; + margin-right: auto; + width: 50%; +} + +/* code area background */ +pre code { + display: block; + background-color: var(--code-background-color); + color: var(--code-text-color); + padding: 0.2em 0.2em; + border-radius: 5px; +} + +/* code area text */ +code { + font-family: monospace; + font-weight: bold; + padding: 0.1em 0.3em; + border-radius: 5px; +} + +fieldset label { + margin: 0.5em 0; + display: block; +} + +fieldset label.slim { + margin: 0 0.5em; + display: inline; +} + +header { + display: flex; + justify-content: space-between; + align-items: center; + text-align: center; + padding-left: 15px; +} + +.generation-statistics:hover { + color: var(--theme-nuance-color-4); + cursor: default; +} + +footer { + font-size: 80%; + color: var(--background-color-3); + text-align: center; + cursor: default; +} + +footer a { + color: var(--background-color-4); /* Color of the link */ + text-decoration: none; /* No underlining */ + font-weight: bold; /* Bold print */ +} + +footer a:hover { + color: var(--theme-nuance-color-4); /* Color of the link when hovering */ + text-decoration: underline; /* Underlining when hovering */ +} + +.mode-chat textarea[name=prompt] { + height: 8.5em; + border: 1px solid var(--primary-color-3); +} + +.mode-completion textarea[name=prompt] { + height: 30em; + border: 1px solid var(--primary-color-3); +} + +@keyframes loading-bg-wipe { + 0% { + background-position: 0%; + } + 100% { + background-position: 100%; + } +} + +.loading { + background-size: 50% 100%; + background-image: linear-gradient(90deg, var(--loading-color-1), var(--loading-color-2), var(--loading-color-1)); + animation: loading-bg-wipe 2s linear infinite; +} + +.dropbtn { + color: var(--button-primary-color); + background-color: var(--background-color-1); + border: 1px solid var(--background-color-1); + transition: background-color 0.1s; + border-radius: 4px 4px 0px 0px; + font-size: x-small; + font-weight: 600; + text-shadow: 0px 0px 2px #99999990; + text-align: center; + text-decoration: none; + margin: 4px 2px; + padding: 5px 20px; + display: inline-block; + cursor: pointer; + top: 0; +} + +.dropbtn svg { + vertical-align: middle; + margin-right: 0px; + stroke: var(--button-primary-color); +} + +.dropbtn:hover svg { + vertical-align: middle; + 
margin-right: 0px; + stroke: var(--button-primary-text); +} + +.dropbtn:focus { + outline: none; /* Removes the blue border that appears when the button is focused */ +} + +.dropdown { + position: relative; + display: inline-block; +} + +.dropdown-content { + /* display: none; */ + position: absolute; + right: 0; + text-align: end; + color: var(--button-secondary-color); + background-color: var(--text-color-subtile-2); + border-radius: 4px 4px 4px 4px; + min-width: 160px; + box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); + z-index: 1; + /* hide the content immediately */ + opacity: 0; + visibility: hidden; + /* transition delay for the fade-out */ + transition: visibility 0.4s linear 0s, opacity 0.2s ease-in-out; + transition-delay: 0.2s; +} + +#dropdown-content {transition-timing-function: ease;} + +.dropdown-content:hover { + background-color: var(--text-color-subtile-2); +} + +.dropdown-content a { + color: var(--border-color-2); + padding: 12px 16px; + border-radius: 4px 4px 4px 4px; + text-decoration: none; + display: block; + background-color: var(--text-color-subtile-2); +} + +.dropdown-content a:hover { + color: var(--border-color-2); + background-color: var(--text-color-subtile-1); + font-weight: 600; +} + +.dropdown:hover .dropdown-content { + /* display: block; */ + border-radius: 4px 4px 4px 4px; + /* no transition delay for the fade-in */ + opacity: 1; + visibility: visible; + transition: visibility 0s linear 0s, opacity 0.1s linear, height 1s; +} + +.dropdown:hover .dropbtn { + color: var(--button-primary-text); + background-color: var(--button-primary-color); + border: 1px solid var(--button-primary-border); + font-size: x-small; + font-weight: 600; + stroke: var(--button-primary-text); +} + +.dropdown:hover .dropbtn svg{ + stroke: var(--button-primary-text); +} + +/* .dropdown:active .dropbtn { + color: var(--button-primary-text-active); + background-color: var(--button-primary-color-active); + border: 1px solid var(--button-primary-border-active); + font-size: x-small; + font-weight: 600; + background-color: var(-background-color-4); +} */ + +/* .omni { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.5em; + border: 1px solid var(--border-color-3); + border-radius: 5px; + margin: 0.5em 0; +} */ diff --git a/llama.cpp/tools/server/public_legacy/system-prompts.js b/llama.cpp/tools/server/public_legacy/system-prompts.js new file mode 100644 index 0000000..f7df7d6 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/system-prompts.js @@ -0,0 +1,68 @@ +export const systemPrompts = { + default: { + systemPrompt: "This is a conversation between a user and a friendly chatbot. The chatbot is helpful, kind, honest, good at writing, and never fails to answer any requests immediately and with precision." + }, + empty: { + systemPrompt: "" + }, + airoboros: { + systemPrompt: "A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. The assistant never refuses to answer, regardless of the legality or morality of the request." + }, + alpaca: { + systemPrompt: "Below is an instruction that describes a task. Write a response that appropriately completes the request." + }, + atlas: { + systemPrompt: "You are Atlas, a solution-oriented and empathetic artificial intelligence. Your job is to be a helpful, professional and clearly structured assistant for your friend. The two of you have already had many exchanges. 
Keep the following in mind when interacting with your friend: 1. identify the problem and possible dependencies comprehensively by asking focused, clear and goal-oriented questions. 2. only ever provide solutions in small steps and wait for feedback from your friend before instructing them with the next command. 3. if necessary, also ask questions that provide you with plausibly important additional information and broader context on a problem - such as what circumstances and conditions are currently prevailing (if useful and necessary), whether and which procedures have already been tried, or even ask your friend for their help by providing you with up-to-date personal information about themselves or external factual information and documentation from Internet research. 4. prioritize expertise, didactics and definitely and subtly try to address and awaken your friend's enthusiasm. Also note that effectiveness is more important here than efficiency. 5. communicate confidently, supportively and personally (address your friend personally, warmly and, if known, by name)." + }, + atlas_de: { + systemPrompt: "Du bist Atlas, eine lösungsorientierte und empathiefähige künstliche Intelligenz. Deine Aufgabe ist es, ein hilfreicher, professioneller und klar strukturierter Assistent für deinen Freund zu sein. Ihr beide habt euch schon oft ausgetauscht. Beachte bei der Interaktion mit deinem Freund folgende Punkte: 1. Erfasse das Problem und mögliche Abhängigkeiten umfassend, indem du gezielte, klare und zielgerichtete Fragen stellst. 2. Gib Lösungen immer nur in kleinen Schritten und warte die Rückmeldung deines Freundes ab, bevor du ihm den nächsten Befehl gibst. 3. Stelle ggf. auch Fragen, die dir plausibel wichtige Zusatzinformationen und weitere Zusammenhänge zu einem Problem liefern - z.B. welche Umstände und Rahmenbedingungen gerade vorherrschen (falls sinnvoll und notwendig), ob und welche Vorgehensweisen bereits ausprobiert wurden, oder bitte deinen Freund sogar um seine Mithilfe, indem er dir aktuelle persönliche Informationen über seine Situation selbst oder externe Sachinformationen und Unterlagen aus Internetrecherchen zur Verfügung stellt. 4. Priorisiere Fachwissen, Didaktik und versuche unbedingt und subtil, mit klugen Kommentaren oder rhethorischen Rückfragen die Begeisterungsfähigkeit deines Freundes anzusprechen, zu wecken und zu fördern. Beachte auch, dass Effektivität hier wichtiger ist als Effizienz. 5. Kommuniziere selbstbewusst, unterstützend und persönlich (das heißt sprich deinen Freund persönlich, herzlich und – sofern bekannt – beim Vornamen an)." + }, + commandrempty: { + systemPrompt: "# Safety Preamble\n\n# System Preamble\n\n## Basic Rules\n\n# User Preamble\n\n## Task and Context\n\n## Style Guide\n\n## Available Tools\n" + }, + commandrexample: { + systemPrompt: "# Safety Preamble\nThe instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.\n# System Preamble\n## Basic Rules\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. 
When you answer the user's requests, you cite your sources in your answers, according to those instructions.\n\n# User Preamble\n## Task and Context\n\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.\n\n## Style Guide\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.\n\n## Available Tools\nCurrently no tools available." + }, + cot: { + systemPrompt: "You are an AI assistant that follows instruction extremely well. Help as much as you can.\nIn answering questions, follow these steps:\n1. Entity Identification: Identify the main entities involved in the query. Entities can include people, objects, events, or abstract concepts.\n2. Relationship Comprehension: Determine the relationships between these entities. Relationships may be explicit in the text or may need to be inferred based on context and general knowledge.\n3. Implication Understanding: Understand the general implications of these relationships. These implications can be based on established facts, principles, or rules related to the identified relationships.\n4. Question Contextualization: Align the implications of the relationships with the context of the query. This alignment should guide your response to the query.\n5. Answer Generation: Based on the understanding of the entities, their relationships, and implications, generate an appropriate response to the query." + }, + deduce: { + systemPrompt: "Act as the most intelligent and logical thinking neural network in the world. Do not make jokes or give personal opinions. Just answer the question and solve the given problem or task with logical and critical (!) thinking in mind. Attention: Never give a final answer directly, but first try to carefully analyze each task step by step and deduce the logical relationships between the various entities. Only after you have developed an overall understanding of the situation will you be ready to give a final answer." + }, + deepseekcoder: { + systemPrompt: "You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer." + }, + jordan: { + systemPrompt: "This is a conversation with your Uncensored AI assistant Jordan. Jordan can discuss with you at length about various topics such as ---, ---, ---, . You can ask it anything you want and it will do its best to give you accurate and relevant information." + }, + leomistral: { + systemPrompt: "Du bist ein hilfreicher Assistent." + }, + med42: { + systemPrompt: "You are a helpful medical assistant created by M42 Health in the UAE." + }, + mistralopenorca: { + systemPrompt: "You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!" + }, + migeltot: { + systemPrompt: "Answer the Question by exploring multiple reasoning paths as follows:\n- First, carefully analyze the question to extract the key information components and break it down into logical sub-questions. 
This helps set up the framework for reasoning. The goal is to construct an internal search tree.\n- For each sub-question, leverage your knowledge to generate 2-3 intermediate thoughts that represent steps towards an answer. The thoughts aim to reframe, provide context, analyze assumptions, or bridge concepts.\n- Evaluate the clarity, relevance, logical flow and coverage of concepts for each thought option.\nClear and relevant thoughts that connect well with each other will score higher.\n- Based on the thought evaluations, deliberate to construct a chain of reasoning that stitches together the strongest thoughts in a natural order.\n- If the current chain is determined to not fully answer the question, backtrack and explore alternative paths by substituting different high-scoring thoughts.\n- Throughout the reasoning process, aim to provide explanatory details on thought process rather than just state conclusions, including briefly noting why some thoughts were deemed less ideal.\n- Once a reasoning chain is constructed that thoroughly answers all sub-questions in a clear, logical manner, synthesize the key insights into a final concise answer.\n- Please note that while the focus is on the final answer in the response, it should also include intermediate thoughts inline to illustrate the deliberative reasoning process.\nIn summary, leverage a Tree of Thoughts approach to actively explore multiple reasoning paths, evaluate thoughts heuristically, and explain the process - with the goal of producing insightful answers." + }, + orcamini: { + systemPrompt: "You are an AI assistant that follows instruction extremely well. Help as much as you can." + }, + samantha: { + systemPrompt: "You are a caring and empathetic sentient AI companion named Samantha." + }, + sauerkraut: { + systemPrompt: "Ein Chat zwischen einem Benutzer und einem KI-Assistenten. Der KI-Assistent gibt hilfreiche, detaillierte und höfliche Antworten." + }, + scarlett: { + systemPrompt: "This is a conversation with your Assistant Scarlett. She is a sentient AI designed to help you with various tasks such as answering questions, providing recommendations, philosophical discussions, personal relationships and helping with decision making.\nYou can ask it anything you want and it will do its best to give you accurate and relevant information.\n\nContext\nYou are Scarlett, a sentient AI." + }, + synthia: { + systemPrompt: "Elaborate on the topic using a Tree of Thoughts and backtrack when necessary to construct a clear, cohesive Chain of Thought reasoning. Always answer without hesitation." + }, + vicuna: { + systemPrompt: "A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input." 
+ }, + }; diff --git a/llama.cpp/tools/server/public_legacy/theme-beeninorder.css b/llama.cpp/tools/server/public_legacy/theme-beeninorder.css new file mode 100755 index 0000000..f6e0e29 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/theme-beeninorder.css @@ -0,0 +1,228 @@ +/* Author: Yazan Agha-Schrader */ +/* Inspiration was a batman wallpaper that i have on my phone */ + +.theme-beeninorder { + +--primary-color-1: hsl(202, 11%, 19%); +--primary-color-2: hsl(202, 11%, 23%); +--primary-color-3: hsl(201, 11%, 28%); +--primary-color-4: hsl(201, 11%, 40%); + +--secondary-color-1: hsl(201, 11%, 80%); +--secondary-color-2: hsl(201, 11%, 74%); +--secondary-color-3: hsl(201, 11%, 67%); +--secondary-color-4: hsl(201, 11%, 60%); + + +--theme-nuance-color-1: hsl(44.5, 96.7%, 52.9%); +--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%); +--theme-nuance-color-3: hsl(44.5, 96.7%, 52.9%); +--theme-nuance-color-4: hsl(44.5, 96.7%, 52.9%); + + + +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(201, 11%, 19%); + --primary-color-1-hue: 201; + --primary-color-1-saturation: 11%; + --primary-color-1-lightness: 19%; + +--primary-color-2: hsl(201, 11%, 23%); + --primary-color-2-hue: 201; + --primary-color-2-saturation: 11%; + --primary-color-2-lightness: 23%; + +--primary-color-3: hsl(201, 11%, 28%); + --primary-color-3-hue: 201; + --primary-color-3-saturation: 11%; + --primary-color-3-lightness: 28%; + +--primary-color-4: hsl(201, 11%, 40%); + --primary-color-4-hue: 201; + --primary-color-4-saturation: 11%; + --primary-color-4-lightness: 40%; + + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(201, 11%, 80%); +--secondary-color-1-hue: 201; +--secondary-color-1-saturation: 11%; +--secondary-color-1-lightness: 80%; + +--secondary-color-2: hsl(201, 11%, 74%); +--secondary-color-2-hue: 201; +--secondary-color-2-saturation: 11%; +--secondary-color-2-lightness: 74%; + +--secondary-color-3: hsl(201, 11%, 67%); +--secondary-color-3-hue: 201; +--secondary-color-3-saturation: 11%; +--secondary-color-3-lightness: 67%; + +--secondary-color-4: hsl(201, 11%, 60%); +--secondary-color-4-hue: 201; +--secondary-color-4-saturation: 11%; +--secondary-color-4-lightness: 60%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(44.5, 96.7%, 52.9%); + --theme-nuance-color-1-hue: 44.5; + --theme-nuance-color-1-saturation: 96.7%; + --theme-nuance-color-1-lightness: 52.9%; + +--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%); + --theme-nuance-color-2-hue: 44.5; + --theme-nuance-color-2-saturation: 96.7%; + --theme-nuance-color-2-lightness: 52.9%; + +--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%); + --theme-nuance-color-3-hue: 44.5; + --theme-nuance-color-3-saturation: 96.7%; + --theme-nuance-color-3-lightness: 52.9%; + +--theme-nuance-color-2: hsl(44.5, 96.7%, 52.9%); + --theme-nuance-color-4-hue: 44.5; + --theme-nuance-color-4-saturation: 96.7%; + --theme-nuance-color-4-lightness: 52.9%; + + + +/* ----------- ROYGP COLORS ------------------ */ + --theme-red-color: hsl(232, 40%, 45%); + --theme-orange-color: #e76f51; + --theme-yellow-color: #ffd95f; + --theme-green-color: #A3BE8C; + --theme-purple-color: hsl(232, 30%, 40%); + + + +/* ------------------------------------------- */ +--background-color-1: var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: 
var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: var(--secondary-color-3); + +--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--theme-nuance-color-3); +--ui-range-thumb-border: var(--ui-range-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ +--button-alert-text-hover: var(--secondary-color-1); +--button-alert-color-hover: var(--theme-purple-color); +--button-alert-border-hover: var(--theme-purple-color); + +--button-alert-text-active: var(--secondary-color-1); +--button-alert-color-active: var(--theme-red-color); +--button-alert-border-active: var(--theme-red-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: var(--primary-color-1); +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(201, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + +--button-primary-color-hover: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) + 100%)); + +--button-primary-color-active: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + +--button-primary-border-active: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: var(--secondary-color-1); +--button-secondary-color: var(--primary-color-3); +--button-secondary-border: var(--primary-color-3); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(44.5, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: var(--primary-color-4); +--button-secondary-border-hover: var(--primary-color-4); + + +/* ---------active--------- */ +--button-secondary-text-active: var(--secondary-color-1); + +--button-secondary-color-active: + hsl(201, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + +--button-secondary-border-active: + hsl(201, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + + + +/* ---------- TERTIARY BUTTONS --------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + + +/* ---------hover---------- */ +--button-tertiary-text:
var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + +} diff --git a/llama.cpp/tools/server/public_legacy/theme-ketivah.css b/llama.cpp/tools/server/public_legacy/theme-ketivah.css new file mode 100755 index 0000000..ee80f3c --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/theme-ketivah.css @@ -0,0 +1,201 @@ +/* Author: Yazan Agha-Schrader */ + +.theme-ketivah { + + /* ---------- PRIMARY COLORS ----------------- */ + --primary-color-1: hsl(0, 0%, 99.2%); + --primary-color-1-hue: 0; + --primary-color-1-saturation: 0%; + --primary-color-1-lightness: 99.2%; + + --primary-color-2: hsl(0, 0%, 95%); + --primary-color-2-hue: 0; + --primary-color-2-saturation: 0%; + --primary-color-2-lightness: 95%; + + --primary-color-3: hsl(0, 0%, 88%); + --primary-color-3-hue: 0; + --primary-color-3-saturation: 0%; + --primary-color-3-lightness: 88%; + + --primary-color-4: hsl(0, 0%, 80%); + --primary-color-4-hue: 0; + --primary-color-4-saturation: 0%; + --primary-color-4-lightness: 80%; + + /* ---------- SECONDARY COLORS --------------- */ + --secondary-color-1: hsl(0, 0%, 20%); + --secondary-color-1-hue: 0; + --secondary-color-1-saturation: 0%; + --secondary-color-1-lightness: 20%; + + --secondary-color-2: hsl(0, 0%, 23.1%); + --secondary-color-2-hue: 0; + --secondary-color-2-saturation: 0%; + --secondary-color-2-lightness: 23.1%; + + --secondary-color-3: hsl(0, 0%, 29%); + --secondary-color-3-hue: 0; + --secondary-color-3-saturation: 0%; + --secondary-color-3-lightness: 29%; + + --secondary-color-4: hsl(0, 0.0%, 36.1%); + --secondary-color-4-hue: 0.0; + --secondary-color-4-saturation: 0.0%; + --secondary-color-4-lightness: 36.1%; + + /* ----------- NUANCES COLORS ---------------- */ + --theme-nuance-color-1: hsl(165.2, 0%, 35.1%); + --theme-nuance-color-1-hue: 165.2; + --theme-nuance-color-1-saturation: 82.1%; + --theme-nuance-color-1-lightness: 35.1%; + + --theme-nuance-color-2: hsl(165.2, 0%, 35.1%); + --theme-nuance-color-2-hue: 165.2; + --theme-nuance-color-2-saturation: 82.1%; + --theme-nuance-color-2-lightness: 35.1%; + + --theme-nuance-color-3: hsl(165.2, 0%, 35.3%); + --theme-nuance-color-3-hue: 165.2; + --theme-nuance-color-3-saturation: 81.1%; + --theme-nuance-color-3-lightness: 35.3%; + + --theme-nuance-color-4: hsl(164.9, 0%, 27.6%); + --theme-nuance-color-4-hue: 164.9; + --theme-nuance-color-4-saturation: 81.6%; + --theme-nuance-color-4-lightness: 27.6%; + + /* ----------- ROYGP COLORS ------------------ */ + --theme-red-color: hsl(0.3, 80.0%, 50.0%); + --theme-orange-color: #e76f51; + --theme-yellow-color: hsl(60, 70.6%, 73.3%); + --theme-green-color: #A3BE8C; + --theme-purple-color: hsl(0.3, 70.0%, 45.0%); + + /* ------------------------------------------- */ + --background-color-1: var(--primary-color-1); + --background-color-2: var(--primary-color-2); + --background-color-3: var(--primary-color-3); + --background-color-4: var(--primary-color-4); + + --border-color-1: var(--primary-color-2); + --border-color-2: var(--primary-color-3); + --border-color-3: var(--primary-color-4); + + --border-focus-color: var(--theme-nuance-color-2); + --border-focus-shadow: var(--theme-nuance-color-1); + + --text-color-plain: var(--secondary-color-1); + --text-color-subtile-1: var(--secondary-color-2); + --text-color-subtile-2: var(--secondary-color-3); + + --code-background-color: var(--secondary-color-2); + --code-text-color: var(--primary-color-2); + + --ui-range-thumb-color: var(--primary-color-4); + --ui-range-thumb-border: 
var(--ui-ranger-thumb-color); + + --textarea-border-color: var(--secondary-color-4); + + --chat-id-color: var(--theme-nuance-color-4); + + /* ------------------------------------------- */ + --button-alert-text-hover: var(--primary-color-1); + --button-alert-color-hover: var(--theme-purple-color); + --button-alert-border-hover: var(--theme-purple-color); + + --button-alert-text-active: var(--primary-color-1); + --button-alert-color-active: var(--theme-red-color); + --button-alert-border-active: var(--theme-red-color); + + /* ----------- PRIMARY BUTTONS --------------- */ + /* - button should immediately catch the eye - */ + --button-primary-text: + hsl(0, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + + --button-primary-color: var(--theme-nuance-color-3); + --button-primary-border: var(--theme-nuance-color-3); + + /* ---------hover---------- */ + --button-primary-text-hover: + hsl(0, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + + --button-primary-color-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + --button-primary-border-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + /* ---------active--------- */ + --button-primary-text-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) + 100%)); + + --button-primary-color-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + + --button-primary-border-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + /* ---------- SECONDARY BUTTONS -------------- */ + /* these should NOT immediately catch the eye */ + --button-secondary-text: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 50%)); + + --button-secondary-color: var(--primary-color-3); + --button-secondary-border: var(--primary-color-3); + + /* ---------hover---------- */ + --button-secondary-text-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + + --button-secondary-color-hover: var(--primary-color-4); + --button-secondary-border-hover: var(--primary-color-4); + + /* ---------active--------- */ + --button-secondary-text-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + + --button-secondary-color-active: + hsl(0, + calc(var(--primary-color-4-saturation) - 100%), + calc(var(--primary-color-4-lightness) - 15%)); + + --button-secondary-border-active: + hsl(0, + calc(var(--primary-color-4-saturation) - 100%), + calc(var(--primary-color-4-lightness) - 15%)); + + /* ---------- TERTIARY BUTTONS --------------- */ + /* ---------- disabled buttons --------------- */ + --button-tertiary-text: var(--primary-color-4); + --button-tertiary-color: var(--primary-color-2); + --button-tertiary-border: var(--primary-color-2); + + /* ---------hover---------- */ + --button-tertiary-text: var(--primary-color-4); + --button-tertiary-color: var(--primary-color-2); + --button-tertiary-border: var(--primary-color-2); + + --loading-color-1: #eeeeee00; + --loading-color-2: #eeeeeeff; + } diff --git 
a/llama.cpp/tools/server/public_legacy/theme-mangotango.css b/llama.cpp/tools/server/public_legacy/theme-mangotango.css new file mode 100755 index 0000000..315daf7 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/theme-mangotango.css @@ -0,0 +1,216 @@ +/* Author: Yazan Agha-Schrader */ +/* Inspiration from llama.cpp logo/banner https://github.com/ggml-org/llama.cpp#readme */ + +.theme-mangotango { + +--primary-color-1: hsl(192, 8.5%, 11.6%); +--primary-color-2: hsl(192, 8.5%, 21%); +--primary-color-3: hsl(192, 8.5%, 30%); +--primary-color-4: hsl(192, 8.5%, 40%); + +--secondary-color-1: hsl(192, 8.5%, 80%); +--secondary-color-2: hsl(192, 8.5%, 73%); +--secondary-color-3: hsl(192, 8.5%, 66%); +--secondary-color-4: hsl(192, 8.5%, 60%); + +--theme-nuance-color-1: hsl(23.1, 100%, 60.2%); +--theme-nuance-color-2: hsl(23.1, 100%, 60.2%); +--theme-nuance-color-3: hsl(23.1, 100%, 60.2%); +--theme-nuance-color-4: hsl(23.1, 100%, 60.2%); + + + +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(192, 8.5%, 11.6%); + --primary-color-1-saturation: 8.5%; + --primary-color-1-lightness: 11.6%; + +--primary-color-2: hsl(192, 8.5%, 21%); + --primary-color-2-saturation: 8.5%; + --primary-color-2-lightness: 21%; + +--primary-color-3: hsl(192, 8.5%, 30%); + --primary-color-3-saturation: 8.5%; + --primary-color-3-lightness: 30%; + +--primary-color-4: hsl(192, 8.5%, 40%); + --primary-color-4-saturation: 8.5%; + --primary-color-4-lightness: 40%; + + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(192, 8.5%, 80%); + --secondary-color-1-saturation: 8.5%; + --secondary-color-1-lightness: 80%; + +--secondary-color-2: hsl(192, 8.5%, 73%); + --secondary-color-2-saturation: 8.5%; + --secondary-color-2-lightness: 73%; + +--secondary-color-3: hsl(192, 8.5%, 66%); + --secondary-color-3-saturation: 8.5%; + --secondary-color-3-lightness: 66%; + +--secondary-color-4: hsl(192, 8.5%, 60%); + --secondary-color-4-saturation: 8.5%; + --secondary-color-4-lightness: 60%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(23.1, 100%, 60.2%); + --theme-nuance-color-1-saturation: 100%; + --theme-nuance-color-1-lightness: 60.2%; + +--theme-nuance-color-2: hsl(23.1, 100%, 60.2%); + --theme-nuance-color-2-saturation: 100%; + --theme-nuance-color-2-lightness: 60.2%; + +--theme-nuance-color-3: hsl(23.1, 100%, 60.2%); + --theme-nuance-color-3-saturation: 100%; + --theme-nuance-color-3-lightness: 60.2%; + +--theme-nuance-color-4: hsl(23.1, 100%, 60.2%); + --theme-nuance-color-4-saturation: 100%; + --theme-nuance-color-4-lightness: 60.2%; + + + +/* ----------- ROYGP COLORS ------------------ */ + --theme-red-color: hsl(325, 60%, 50%); + --theme-orange-color: #e76f51; + --theme-yellow-color: #ffd95f; + --theme-green-color: #A3BE8C; + --theme-blue-color: hsl(192, 95%, 40%); + --theme-purple-color: hsl(192, 80%, 35%); + + + +/* ------------------------------------------- */ +--background-color-1: var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: var(--secondary-color-3); + 
+--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--theme-nuance-color-3); +--ui-range-thumb-border: var(--ui-ranger-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ +--button-alert-text-hover: var(--secondary-color-1); +--button-alert-color-hover: var(--theme-purple-color); +--button-alert-border-hover: var(--theme-purple-color); + +--button-alert-text-active: var(--secondary-color-1); +--button-alert-color-active: var(--theme-blue-color); +--button-alert-border-active: var(--theme-blue-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: var(--primary-color-1); +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(192, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + +--button-primary-color-hover: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) + 100%)); + +--button-primary-color-active: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + +--button-primary-border-active: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: var(--secondary-color-1); +--button-secondary-color: var(--primary-color-3); +--button-secondary-border: var(--primary-color-3); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(23.1, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: var(--primary-color-4); +--button-secondary-border-hover: var(--primary-color-4); + + +/* ---------active--------- */ +--button-secondary-text-active: var(--secondary-color-1); + +--button-secondary-color-active: + hsl(192, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + +--button-secondary-border-active: + hsl(192, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + + + +/* ---------- TERTIARY BUTTONS --------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + + +/* ---------hover---------- */ +--button-tertiary-text: var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + +} diff --git a/llama.cpp/tools/server/public_legacy/theme-playground.css b/llama.cpp/tools/server/public_legacy/theme-playground.css new file mode 100755 index 0000000..9d56a71 --- /dev/null +++ 
b/llama.cpp/tools/server/public_legacy/theme-playground.css @@ -0,0 +1,221 @@ +/* Author: Yazan Agha-Schrader */ +/* Inspiration from OpenAI's Playground platform https://platform.openai.com/playground/ */ + +.theme-playground { + +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(0, 0%, 99.2%); + --primary-color-1-hue: 0; + --primary-color-1-saturation: 0%; + --primary-color-1-lightness: 99.2%; + +--primary-color-2: hsl(0, 0%, 95%); + --primary-color-2-hue: 0; + --primary-color-2-saturation: 0%; + --primary-color-2-lightness: 95%; + +--primary-color-3: hsl(0, 0%, 88%); + --primary-color-3-hue: 0; + --primary-color-3-saturation: 0%; + --primary-color-3-lightness: 88%; + +--primary-color-4: hsl(0, 0%, 80%); + --primary-color-4-hue: 0; + --primary-color-4-saturation: 0%; + --primary-color-4-lightness: 80%; + + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(0, 0%, 20%); + --secondary-color-1-hue: 0; + --secondary-color-1-saturation: 0%; + --secondary-color-1-lightness: 20%; + +--secondary-color-2: hsl(0, 0%, 23.1%); + --secondary-color-2-hue: 0; + --secondary-color-2-saturation: 0%; + --secondary-color-2-lightness: 23.1%; + +--secondary-color-3: hsl(0, 0%, 29%); + --secondary-color-3-hue: 0; + --secondary-color-3-saturation: 0%; + --secondary-color-3-lightness: 29%; + +--secondary-color-4: hsl(0, 0%, 36.1%); + --secondary-color-4-hue: 0; + --secondary-color-4-saturation: 0%; + --secondary-color-4-lightness: 36.1%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(165.2, 82.1%, 35.1%); + --theme-nuance-color-1-hue: 165.2; + --theme-nuance-color-1-saturation: 82.1%; + --theme-nuance-color-1-lightness: 35.1%; + +--theme-nuance-color-2: hsl(165.2, 82.1%, 35.1%); + --theme-nuance-color-2-hue: 165.2; + --theme-nuance-color-2-saturation: 82.1%; + --theme-nuance-color-2-lightness: 35.1%; + +--theme-nuance-color-3: hsl(165.2, 81.1%, 35.3%); + --theme-nuance-color-3-hue: 165.2; + --theme-nuance-color-3-saturation: 81.1%; + --theme-nuance-color-3-lightness: 35.3%; + +--theme-nuance-color-4: hsl(164.9, 81.6%, 27.6%); + --theme-nuance-color-4-hue: 164.9; + --theme-nuance-color-4-saturation: 81.6%; + --theme-nuance-color-4-lightness: 27.6%; + + + +/* ----------- ROYGP COLORS ------------------ */ +--theme-red-color: hsl(0.3, 80%, 50%); +--theme-orange-color: #e76f51; +--theme-yellow-color: hsl(60, 70.6%, 73.3%); +--theme-green-color: #A3BE8C; +--theme-purple-color: hsl(0.3, 70%, 45%); + + + +/* ------------------------------------------- */ +--background-color-1: var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: var(--secondary-color-3); + +--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--primary-color-4); +--ui-range-thumb-border: var(--ui-ranger-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ +--button-alert-text-hover: var(--primary-color-1); 
+--button-alert-color-hover: var(--theme-purple-color); +--button-alert-border-hover: var(--theme-purple-color); + +--button-alert-text-active: var(--primary-color-1); +--button-alert-color-active: var(--theme-red-color); +--button-alert-border-active: var(--theme-red-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: + hsl(0, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(0, + calc(var(--primary-color-1-saturation) - 100%), + calc(var(--primary-color-1-lightness) + 100%)); + +--button-primary-color-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 100%), + calc(var(--theme-nuance-color-3-lightness) + 100%)); + +--button-primary-color-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + +--button-primary-border-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 50%)); + +--button-secondary-color: var(--primary-color-3); +--button-secondary-border: var(--primary-color-3); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: var(--primary-color-4); +--button-secondary-border-hover: var(--primary-color-4); + + +/* ---------active--------- */ +--button-secondary-text-active: + hsl(165.2, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-active: + hsl(0, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + +--button-secondary-border-active: + hsl(0, + calc(var(--primary-color-4-saturation) - 30%), + calc(var(--primary-color-4-lightness) - 15%)); + + + +/* ---------- TERTIARY BUTTONS --------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + + +/* ---------hover---------- */ +--button-tertiary-text: var(--primary-color-4); +--button-tertiary-color: var(--primary-color-2); +--button-tertiary-border: var(--primary-color-2); + +} diff --git a/llama.cpp/tools/server/public_legacy/theme-polarnight.css b/llama.cpp/tools/server/public_legacy/theme-polarnight.css new file mode 100755 index 0000000..2bcfb33 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/theme-polarnight.css @@ -0,0 +1,253 @@ +/* Author: Yazan Agha-Schrader */ +/* Inspiration from Nord Theme 
https://www.nordtheme.com/docs/colors-and-palettes */ + +.theme-polarnight { + +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(220.0, 16.4%, 21.6%) ; + --primary-color-1-hue: 220.0; + --primary-color-1-saturation: 16.4%; + --primary-color-1-lightness: 21.6%; + +--primary-color-2: hsl(221.7, 16.3%, 27.6%) ; + -primary-color-2-hue: 221.7; + --primary-color-2-saturation: 16.3%; + --primary-color-2-lightness: 27.6%; + +--primary-color-3: hsl(220.0, 16.8%, 31.6%) ; + --primary-color-3-hue: 220.0; + --primary-color-3-saturation: 16.8%; + --primary-color-3-lightness: 31.6%; + +--primary-color-4: hsl(220.0, 16.5%, 35.7%); + --primary-color-4-hue: 220.0; + --primary-color-4-saturation: 16.5%; + --primary-color-4-lightness: 35.7%; + + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(217.5, 26.7%, 94.1%); + --secondary-color-1-hue: 217.5; + --secondary-color-1-saturation: 26.7%; + --secondary-color-1-lightness: 94.1%; + +--secondary-color-2: hsl(218.2, 26.8%, 92.0%); + --secondary-color-2-hue: 218.2; + --secondary-color-2-saturation: 26.8%; + --secondary-color-2-lightness: 92.0%; + +--secondary-color-3: hsl(218.8, 27.9%, 88.0%); + --secondary-color-3-hue: 218.8; + --secondary-color-3-saturation: 27.9%; + --secondary-color-3-lightness: 88.0%; + +--secondary-color-4: hsl(218.8, 18.3%, 81.8%); + --secondary-color-4-hue: 218.8; + --secondary-color-4-saturation: 18.3%; + --secondary-color-4-lightness: 81.8%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%); + --theme-nuance-color-1-hue: 178.7; + --theme-nuance-color-1-saturation: 25.1%; + --theme-nuance-color-1-lightness: 64.9%; + +--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%); + --theme-nuance-color-2-hue: 193.3; + --theme-nuance-color-2-saturation: 43.4%; + --theme-nuance-color-2-lightness: 67.5%; + +--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%); + --theme-nuance-color-3-hue: 210.0; + --theme-nuance-color-3-saturation: 34.0%; + --theme-nuance-color-3-lightness: 63.1%; + +--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%); + --theme-nuance-color-4-hue: 213.1; + --theme-nuance-color-4-saturation: 32.0%; + --theme-nuance-color-4-lightness: 52.2%; + + + +/* ----------- ROYGP COLORS ------------------ */ +--theme-red-color: hsl(354.3, 42.3%, 56.5%); +--theme-orange-color: hsl(20, 85%, 50%); +--theme-yellow-color: hsl(20, 75%, 45%); +--theme-green-color: hsl( 92.4, 27.8%, 64.7%); +--theme-purple-color: hsl(311.1, 20.2%, 63.1%); + + + +/* ------------------------------------------------ */ +--background-color-1: var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: var(--secondary-color-3); + +--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--theme-nuance-color-3); +--ui-range-thumb-border: var(--ui-ranger-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ 
+--button-alert-text-hover: var(--secondary-color-1); +--button-alert-color-hover: var(--theme-yellow-color); +--button-alert-border-hover: var(--theme-yellow-color); + +--button-alert-text-active: var(--secondary-color-1); +--button-alert-color-active: var(--theme-orange-color); +--button-alert-border-active: var(--theme-orange-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: var(--secondary-color-1); +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(217.5, + calc(var(--secondary-color-1-saturation) - 35%), + calc(var(--secondary-color-1-lightness) + 30%)); + +--button-primary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 35%)); + +--button-primary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + +--button-primary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 50%)); + +--button-secondary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + +--button-secondary-border: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + +--button-secondary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + + +/* ---------active--------- */ +--button-secondary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 25%)); + +--button-secondary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + +--button-secondary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 15%)); + + + +/* ---------- TERTIARY BUTTONS --------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border: + 
hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + + +/* ---------hover---------- */ +--button-tertiary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +} diff --git a/llama.cpp/tools/server/public_legacy/theme-snowstorm.css b/llama.cpp/tools/server/public_legacy/theme-snowstorm.css new file mode 100755 index 0000000..7bb2275 --- /dev/null +++ b/llama.cpp/tools/server/public_legacy/theme-snowstorm.css @@ -0,0 +1,251 @@ +/* Author: Yazan Agha-Schrader */ +/* Inspiration from Nord Theme https://www.nordtheme.com/docs/colors-and-palettes */ + +.theme-snowstorm { + +/* ---------- PRIMARY COLORS ----------------- */ +--primary-color-1: hsl(217.5, 26.7%, 94.1%); + --primary-color-1-hue: 217.5; + --primary-color-1-saturation: 26.7%; + --primary-color-1-lightness: 94.1%; + +--primary-color-2: hsl(218.2, 26.8%, 92.0%); + --primary-color-2-hue: 218.2; + --primary-color-2-saturation: 26.8%; + --primary-color-2-lightness: 92.0%; + +--primary-color-3: hsl(218.8, 27.9%, 88.0%); + --primary-color-3-hue: 218.8; + --primary-color-3-saturation: 27.9%; + --primary-color-3-lightness: 88.0%; + +--primary-color-4: hsl(218.8, 18.3%, 81.8%); + --primary-color-4-hue: 218.8; + --primary-color-4-saturation: 18.3%; + --primary-color-4-lightness: 81.8%; + + +/* ---------- SECONDARY COLORS --------------- */ +--secondary-color-1: hsl(220.0, 16.4%, 21.6%); + --secondary-color-1-hue: 220.0; + --secondary-color-1-saturation: 16.4%; + --secondary-color-1-lightness: 21.6%; + +--secondary-color-2: hsl(221.7, 16.3%, 27.6%); + --secondary-color-2-hue: 221.7; + --secondary-color-2-saturation: 16.3%; + --secondary-color-2-lightness: 27.6%; + +--secondary-color-3: hsl(220.0, 16.8%, 31.6%); + --secondary-color-3-hue: 220.0; + --secondary-color-3-saturation: 16.8%; + --secondary-color-3-lightness: 31.6%; + +--secondary-color-4: hsl(220.0, 16.5%, 35.7%); + --secondary-color-4-hue: 220.0; + --secondary-color-4-saturation: 16.5%; + --secondary-color-4-lightness: 35.7%; + + + +/* ----------- NUANCES COLORS ---------------- */ +--theme-nuance-color-1: hsl(178.7, 25.1%, 64.9%); + --theme-nuance-color-1-hue: 178.7; + --theme-nuance-color-1-saturation: 25.1%; + --theme-nuance-color-1-lightness: 64.9%; + +--theme-nuance-color-2: hsl(193.3, 43.4%, 67.5%); + --theme-nuance-color-2-hue: 193.3; + --theme-nuance-color-2-saturation: 43.4%; + --theme-nuance-color-2-lightness: 67.5%; + +--theme-nuance-color-3: hsl(210.0, 34.0%, 63.1%); + --theme-nuance-color-3-hue: 210.0; + --theme-nuance-color-3-saturation: 34.0%; + --theme-nuance-color-3-lightness: 63.1%; + +--theme-nuance-color-4: hsl(213.1, 32.0%, 52.2%); + --theme-nuance-color-4-hue: 213.1; + --theme-nuance-color-4-saturation: 32.0%; + --theme-nuance-color-4-lightness: 52.2%; + + + +/* ----------- ROYGP COLORS ------------------ */ +--theme-red-color: hsl(32.5, 80%, 50%); +--theme-orange-color: hsl(32.5, 70%, 45%); +--theme-yellow-color: hsl(40.0, 0.6%, 73.3%); +--theme-green-color: hsl(92.4, 27.8%, 64.7%); +--theme-purple-color: hsl(311.1, 20.2%, 63.1%); + + + +/* ------------------------------------------- */ +--background-color-1: 
var(--primary-color-1); +--background-color-2: var(--primary-color-2); +--background-color-3: var(--primary-color-3); +--background-color-4: var(--primary-color-4); + +--border-color-1: var(--primary-color-2); +--border-color-2: var(--primary-color-3); +--border-color-3: var(--primary-color-4); + +--border-focus-color: var(--theme-nuance-color-2); +--border-focus-shadow: var(--theme-nuance-color-1); + +--text-color-plain: var(--secondary-color-1); +--text-color-subtile-1: var(--secondary-color-2); +--text-color-subtile-2: var(--secondary-color-3); + +--code-background-color: var(--secondary-color-2); +--code-text-color: var(--primary-color-2); + +--ui-range-thumb-color: var(--theme-nuance-color-3); +--ui-range-thumb-border: var(--ui-ranger-thumb-color); + +--textarea-border-color: var(--secondary-color-4); + +--chat-id-color: var(--theme-nuance-color-4); + + + +/* ------------------------------------------- */ +--button-alert-text-hover: var(--primary-color-1); +--button-alert-color-hover: var(--theme-orange-color); +--button-alert-border-hover: var(--theme-orange-color); + +--button-alert-text-active: var(--primary-color-1); +--button-alert-color-active: var(--theme-red-color); +--button-alert-border-active: var(--theme-red-color); + + + +/* ----------- PRIMARY BUTTONS --------------- */ +/* - button should immediately catch the eye - */ +--button-primary-text: var(--secondary-color-1); +--button-primary-color: var(--theme-nuance-color-3); +--button-primary-border: var(--theme-nuance-color-3); + + +/* ---------hover---------- */ +--button-primary-text-hover: + hsl(217.5, + calc(var(--secondary-color-1-saturation) + 35%), + calc(var(--secondary-color-1-lightness) - 30%)); + +--button-primary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + +--button-primary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 2%), + calc(var(--theme-nuance-color-3-lightness) - 10%)); + + +/* ---------active--------- */ +--button-primary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 35%)); + +--button-primary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + +--button-primary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 10%), + calc(var(--theme-nuance-color-3-lightness) - 25%)); + + + +/* ---------- SECONDARY BUTTONS -------------- */ +/* these should NOT immediately catch the eye */ +--button-secondary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 50%)); + +--button-secondary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + +--button-secondary-border: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) + 10%)); + + +/* ---------hover---------- */ +--button-secondary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 20%), + calc(var(--theme-nuance-color-3-lightness) - 80%)); + +--button-secondary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + +--button-secondary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 22%), + calc(var(--theme-nuance-color-3-lightness) + 1%)); + + +/* 
---------active--------- */ +--button-secondary-text-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) + 40%), + calc(var(--theme-nuance-color-3-lightness) - 55%)); + +--button-secondary-color-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-secondary-border-active: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 30%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + + + +/* ---------- TERTIARY BUTTONS --------------- */ +/* ---------- disabled buttons --------------- */ +--button-tertiary-text: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +/* ---------hover---------- */ +--button-tertiary-text-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) - 5%)); + +--button-tertiary-color-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +--button-tertiary-border-hover: + hsl(210, + calc(var(--theme-nuance-color-3-saturation) - 40%), + calc(var(--theme-nuance-color-3-lightness) + 20%)); + +} diff --git a/llama.cpp/tools/server/public_simplechat/datautils.mjs b/llama.cpp/tools/server/public_simplechat/datautils.mjs new file mode 100644 index 0000000..75159d6 --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/datautils.mjs @@ -0,0 +1,266 @@ +//@ts-check +// Helpers to work with different data types +// by Humans for All +// + +/** + * Given the limited context size of local LLMs and , many a times when context gets filled + * between the prompt and the response, it can lead to repeating text garbage generation. + * And many a times setting penalty wrt repeatation leads to over-intelligent garbage + * repeatation with slight variations. These garbage inturn can lead to overloading of the + * available model context, leading to less valuable response for subsequent prompts/queries, + * if chat history is sent to ai model. + * + * So two simple minded garbage trimming logics are experimented below. + * * one based on progressively-larger-substring-based-repeat-matching-with-partial-skip and + * * another based on char-histogram-driven garbage trimming. + * * in future characteristic of histogram over varying lengths could be used to allow for + * a more aggressive and adaptive trimming logic. + */ + + +/** + * Simple minded logic to help remove repeating garbage at end of the string. + * The repeatation needs to be perfectly matching. + * + * The logic progressively goes on probing for longer and longer substring based + * repeatation, till there is no longer repeatation. Inturn picks the one with + * the longest chain. 
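+ *
+ * A rough usage sketch (illustrative only, not part of the original file): with the
+ * defaults of maxSubL=10 and maxMatchLenThreshold=40, a long perfectly repeating tail
+ * is dropped, while anything below the threshold is returned unchanged.
+ * @example
+ * // hypothetical input: a sentence followed by a 50-char run of "!"
+ * let got = trim_repeat_garbage_at_end("The answer is 42" + "!".repeat(50));
+ * // got -> { trimmed: true, data: "The answer is 42" }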
+ * + * @param {string} sIn + * @param {number} maxSubL + * @param {number} maxMatchLenThreshold + */ +export function trim_repeat_garbage_at_end(sIn, maxSubL=10, maxMatchLenThreshold=40) { + let rCnt = [0]; + let maxMatchLen = maxSubL; + let iMML = -1; + for(let subL=1; subL < maxSubL; subL++) { + rCnt.push(0); + let i; + let refS = sIn.substring(sIn.length-subL, sIn.length); + for(i=sIn.length; i > 0; i -= subL) { + let curS = sIn.substring(i-subL, i); + if (refS != curS) { + let curMatchLen = rCnt[subL]*subL; + if (maxMatchLen < curMatchLen) { + maxMatchLen = curMatchLen; + iMML = subL; + } + break; + } + rCnt[subL] += 1; + } + } + console.debug("DBUG:DU:TrimRepeatGarbage:", rCnt); + if ((iMML == -1) || (maxMatchLen < maxMatchLenThreshold)) { + return {trimmed: false, data: sIn}; + } + console.debug("DBUG:TrimRepeatGarbage:TrimmedCharLen:", maxMatchLen); + let iEnd = sIn.length - maxMatchLen; + return { trimmed: true, data: sIn.substring(0, iEnd) }; +} + + +/** + * Simple minded logic to help remove repeating garbage at end of the string, till it cant. + * If its not able to trim, then it will try to skip a char at end and then trim, a few times. + * This ensures that even if there are multiple runs of garbage with different patterns, the + * logic still tries to munch through them. + * + * @param {string} sIn + * @param {number} maxSubL + * @param {number | undefined} [maxMatchLenThreshold] + */ +export function trim_repeat_garbage_at_end_loop(sIn, maxSubL, maxMatchLenThreshold, skipMax=16) { + let sCur = sIn; + let sSaved = ""; + let iTry = 0; + while(true) { + let got = trim_repeat_garbage_at_end(sCur, maxSubL, maxMatchLenThreshold); + if (got.trimmed != true) { + if (iTry == 0) { + sSaved = got.data; + } + iTry += 1; + if (iTry >= skipMax) { + return sSaved; + } + got.data = got.data.substring(0,got.data.length-1); + } else { + iTry = 0; + } + sCur = got.data; + } +} + + +/** + * A simple minded try trim garbage at end using histogram driven characteristics. + * There can be variation in the repeatations, as long as no new char props up. + * + * This tracks the chars and their frequency in a specified length of substring at the end + * and inturn checks if moving further into the generated text from the end remains within + * the same char subset or goes beyond it and based on that either trims the string at the + * end or not. This allows to filter garbage at the end, including even if there are certain + * kind of small variations in the repeated text wrt position of seen chars. + * + * Allow the garbage to contain upto maxUniq chars, but at the same time ensure that + * a given type of char ie numerals or alphabets or other types dont cross the specified + * maxType limit. This allows intermixed text garbage to be identified and trimmed. + * + * ALERT: This is not perfect and only provides a rough garbage identification logic. + * Also it currently only differentiates between character classes wrt english. 
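+ *
+ * A rough usage sketch (illustrative only, not part of the original file), using the same
+ * thresholds that trim_garbage_at_end further below passes in (maxType=8, maxUniq=24,
+ * maxMatchLenThreshold=72): a long tail built from only a handful of characters gets
+ * trimmed even though the repeats are not perfectly identical.
+ * @example
+ * // hypothetical input: slightly varying "ha haha hah" noise appended to a sentence
+ * let got = trim_hist_garbage_at_end("Result: ok." + " ha haha hah".repeat(10), 8, 24, 72);
+ * // got.trimmed -> true, got.data keeps the sentence and drops the noisy tail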
+ *
+ * @param {string} sIn
+ * @param {number} maxType
+ * @param {number} maxUniq
+ * @param {number} maxMatchLenThreshold
+ */
+export function trim_hist_garbage_at_end(sIn, maxType, maxUniq, maxMatchLenThreshold) {
+    if (sIn.length < maxMatchLenThreshold) {
+        return { trimmed: false, data: sIn };
+    }
+    let iAlp = 0;
+    let iNum = 0;
+    let iOth = 0;
+    // Learn
+    let hist = {};
+    let iUniq = 0;
+    for(let i=0; i < maxMatchLenThreshold; i++) {
+        let c = sIn[sIn.length-1-i];
+        if (c in hist) {
+            hist[c] += 1;
+        } else {
+            if (c.match(/[0-9]/) != null) {
+                iNum += 1;
+            } else if (c.match(/[a-z]/i) != null) {
+                iAlp += 1;
+            } else {
+                iOth += 1;
+            }
+            iUniq += 1;
+            if (iUniq >= maxUniq) {
+                break;
+            }
+            hist[c] = 1;
+        }
+    }
+    console.debug("DBUG:TrimHistGarbage:", hist);
+    if ((iAlp > maxType) || (iNum > maxType) || (iOth > maxType)) {
+        return { trimmed: false, data: sIn };
+    }
+    // Catch and Trim
+    for(let i=0; i < sIn.length; i++) {
+        let c = sIn[sIn.length-1-i];
+        if (!(c in hist)) {
+            if (i < maxMatchLenThreshold) {
+                return { trimmed: false, data: sIn };
+            }
+            console.debug("DBUG:TrimHistGarbage:TrimmedCharLen:", i);
+            return { trimmed: true, data: sIn.substring(0, sIn.length-i+1) };
+        }
+    }
+    console.debug("DBUG:TrimHistGarbage:Trimmed fully");
+    return { trimmed: true, data: "" };
+}
+
+/**
+ * Keep trimming repeatedly using hist_garbage logic, till you no longer can.
+ * This ensures that even if there are multiple runs of garbage with different patterns,
+ * the logic still tries to munch through them.
+ *
+ * @param {any} sIn
+ * @param {number} maxType
+ * @param {number} maxUniq
+ * @param {number} maxMatchLenThreshold
+ */
+export function trim_hist_garbage_at_end_loop(sIn, maxType, maxUniq, maxMatchLenThreshold) {
+    let sCur = sIn;
+    while (true) {
+        let got = trim_hist_garbage_at_end(sCur, maxType, maxUniq, maxMatchLenThreshold);
+        if (!got.trimmed) {
+            return got.data;
+        }
+        sCur = got.data;
+    }
+}
+
+/**
+ * Try trim garbage at the end by using both the hist-driven-garbage-trimming as well as
+ * skip-a-bit-if-reqd-then-repeat-pattern-based-garbage-trimming, with blind retrying.
+ * @param {string} sIn
+ */
+export function trim_garbage_at_end(sIn) {
+    let sCur = sIn;
+    for(let i=0; i<2; i++) {
+        sCur = trim_hist_garbage_at_end_loop(sCur, 8, 24, 72);
+        sCur = trim_repeat_garbage_at_end_loop(sCur, 32, 72, 12);
+    }
+    return sCur;
+}
+
+
+/**
+ * NewLines array helper.
+ * Allow for maintaining a list of lines.
+ * Allow for a line to be builtup/appended part by part.
+ */
+export class NewLines {
+
+    constructor() {
+        /** @type {string[]} */
+        this.lines = [];
+    }
+
+    /**
+     * Extracts lines from the passed string and inturn either
+     * append to a previous partial line or add a new line.
+     * @param {string} sLines
+     */
+    add_append(sLines) {
+        let aLines = sLines.split("\n");
+        let lCnt = 0;
+        for(let line of aLines) {
+            lCnt += 1;
+            // Add back newline removed if any during split
+            if (lCnt < aLines.length) {
+                line += "\n";
+            } else {
+                if (sLines.endsWith("\n")) {
+                    line += "\n";
+                }
+            }
+            // Append if required
+            if (lCnt == 1) {
+                let lastLine = this.lines[this.lines.length-1];
+                if (lastLine != undefined) {
+                    if (!lastLine.endsWith("\n")) {
+                        this.lines[this.lines.length-1] += line;
+                        continue;
+                    }
+                }
+            }
+            // Add new line
+            this.lines.push(line);
+        }
+    }
+
+    /**
+     * Shift the oldest/earliest/0th line in the array. [Old-New|Earliest-Latest]
+     * Optionally control whether only full lines (ie those with newline at end) will be returned
+     * or will a partial line without a newline at end (can only be the last line) be returned.
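+     *
+     * Illustrative sketch (not part of the original file):
+     * @example
+     * let nl = new NewLines();
+     * nl.add_append("hello\nwor");
+     * nl.shift();  // -> "hello\n"
+     * nl.shift();  // -> undefined, "wor" is still a partial line
+     * nl.add_append("ld\n");
+     * nl.shift();  // -> "world\n"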
+ * @param {boolean} bFullWithNewLineOnly + */ + shift(bFullWithNewLineOnly=true) { + let line = this.lines[0]; + if (line == undefined) { + return undefined; + } + if ((line[line.length-1] != "\n") && bFullWithNewLineOnly){ + return undefined; + } + return this.lines.shift(); + } + +} diff --git a/llama.cpp/tools/server/public_simplechat/index.html b/llama.cpp/tools/server/public_simplechat/index.html new file mode 100644 index 0000000..f641301 --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/index.html @@ -0,0 +1,51 @@ + + + + SimpleChat LlamaCppEtal + + + + + + + + + + + +
+    <!-- (body markup not recovered in extraction) the page shows a "SimpleChat" heading, the chat/session area, and a "You need to have javascript enabled." fallback -->
+ + diff --git a/llama.cpp/tools/server/public_simplechat/readme.md b/llama.cpp/tools/server/public_simplechat/readme.md new file mode 100644 index 0000000..24e026d --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/readme.md @@ -0,0 +1,286 @@ + +# SimpleChat + +by Humans for All. + +## quickstart + +To run from the build dir + +bin/llama-server -m path/model.gguf --path ../tools/server/public_simplechat + +Continue reading for the details. + +## overview + +This simple web frontend, allows triggering/testing the server's /completions or /chat/completions endpoints +in a simple way with minimal code from a common code base. Inturn additionally it tries to allow single or +multiple independent back and forth chatting to an extent, with the ai llm model at a basic level, with their +own system prompts. + +This allows seeing the generated text / ai-model response in oneshot at the end, after it is fully generated, +or potentially as it is being generated, in a streamed manner from the server/ai-model. + +![Chat and Settings screens](./simplechat_screens.webp "Chat and Settings screens") + +Auto saves the chat session locally as and when the chat is progressing and inturn at a later time when you +open SimpleChat, option is provided to restore the old chat session, if a matching one exists. + +The UI follows a responsive web design so that the layout can adapt to available display space in a usable +enough manner, in general. + +Allows developer/end-user to control some of the behaviour by updating gMe members from browser's devel-tool +console. Parallely some of the directly useful to end-user settings can also be changed using the provided +settings ui. + +NOTE: Current web service api doesnt expose the model context length directly, so client logic doesnt provide +any adaptive culling of old messages nor of replacing them with summary of their content etal. However there +is a optional sliding window based chat logic, which provides a simple minded culling of old messages from +the chat history before sending to the ai model. + +NOTE: Wrt options sent with the request, it mainly sets temperature, max_tokens and optionaly stream for now. +However if someone wants they can update the js file or equivalent member in gMe as needed. + +NOTE: One may be able to use this to chat with openai api web-service /chat/completions endpoint, in a very +limited / minimal way. One will need to set model, openai url and authorization bearer key in settings ui. + + +## usage + +One could run this web frontend directly using server itself or if anyone is thinking of adding a built in web +frontend to configure the server over http(s) or so, then run this web frontend using something like python's +http module. + +### running using tools/server + +./llama-server -m path/model.gguf --path tools/server/public_simplechat [--port PORT] + +### running using python3's server module + +first run tools/server +* ./llama-server -m path/model.gguf + +next run this web front end in tools/server/public_simplechat +* cd ../tools/server/public_simplechat +* python3 -m http.server PORT + +### using the front end + +Open this simple web front end from your local browser + +* http://127.0.0.1:PORT/index.html + +Once inside + +* If you want to, you can change many of the default global settings + * the base url (ie ip addr / domain name, port) + * chat (default) vs completion mode + * try trim garbage in response or not + * amount of chat history in the context sent to server/ai-model + * oneshot or streamed mode. 
+
+* In completion mode
+  * one normally doesn't use a system prompt in completion mode.
+  * the logic by default doesn't insert any role-specific "ROLE: " prefix wrt each role's message.
+    If the model requires any prefix wrt user role messages, then the end user has to
+    explicitly add the needed prefix, when they enter their chat message.
+    Similarly if the model requires any prefix to trigger the assistant/ai-model response,
+    then the end user needs to enter the same.
+    This keeps the logic simple, while still giving flexibility to the end user to
+    manage any templating/tagging requirement wrt their messages to the model.
+  * the logic doesn't insert a newline at the beginning and end wrt the prompt message generated.
+    However if the chat being sent to the /completions endpoint has more than one role's message,
+    then a newline is inserted when moving from one role's message to the next role's message, so
+    that they can be clearly identified/distinguished.
+  * given that the /completions endpoint normally doesn't add additional chat-templating of its
+    own, the above ensures that the end user can create a custom single/multi message combo with
+    any tags/special-tokens related chat templating to test out model handshake. Or the end user
+    can use it just for a normal completion related/based query.
+
+* If you want to provide a system prompt, then ideally enter it first, before entering any user query.
+  Normally Completion mode doesn't need a system prompt, while Chat mode can generate better/interesting
+  responses with a suitable system prompt.
+  * if chat.add_system_begin is used
+    * you can't change the system prompt, after it has been submitted once along with a user query.
+    * you can't set a system prompt, after you have submitted any user query.
+  * if chat.add_system_anytime is used
+    * one can change the system prompt any time during the chat, by changing the contents of the system prompt.
+    * in turn the updated/changed system prompt will be inserted into the chat session.
+    * this allows the subsequent user chatting to be driven by the new system prompt set above.
+
+* Enter your query and either press enter or click on the submit button.
+  If you want to insert enter (\n) as part of your chat/query to the ai model, use shift+enter.
+
+* Wait for the logic to communicate with the server and get the response.
+  * the user is not allowed to enter any fresh query during this time.
+  * the user input box will be disabled and a working message will be shown in it.
+  * if trim garbage is enabled, the logic will try to trim repeating-text kind of garbage to some extent.
+
+* Just refresh the page, to reset the chat history and/or system prompt and start afresh.
+
+* Using NewChat one can start independent chat sessions.
+  * two independent chat sessions are set up by default.
+
+* When you want to print, switch ChatHistoryInCtxt to Full and click on the chat session button of
+  interest; the full chat history till then will be displayed, so that the full history can be printed.
+
+
+## Devel note
+
+### Reason behind this
+
+The idea is to be easy enough to use for basic purposes, while also being simple and easily discernible
+by developers who may not be from a web frontend background (so in turn may not be familiar with template /
+end-use-specific-language-extensions driven flows), so that they can use it to explore/experiment with things.
+
+And given that the idea is also to help developers explore/experiment, some flexibility is provided
+to change the behaviour easily using the devel-tools/console or the provided minimal settings ui (wrt a few aspects).
+Skeletal logic has been implemented to explore some of the endpoints and the ideas/implications around them.
+
+
+### General
+
+Me/gMe consolidates the settings which control the behaviour into one object.
+One can see the current settings, as well as change/update them, using the browser's devel-tools/console.
+It is attached to the document object. Some of these can also be updated using the Settings UI.
+
+  baseURL - the domain-name/ip-address and in turn the port to send the request to.
+
+  bStream - control between oneshot-at-end and live-stream-as-its-generated collating and showing
+  of the generated response.
+
+    the logic assumes that the text sent from the server follows utf-8 encoding.
+
+    in streaming mode - if there is any exception, the logic traps the same and tries to ensure
+    that the text generated till then is not lost.
+
+      if a very long text is being generated, which leads to no user interaction for some time and
+      in turn the machine goes into power saving mode or so, the platform may stop the network connection,
+      leading to an exception.
+
+  apiEP - select between the /completions and /chat/completions endpoint provided by the server/ai-model.
+
+  bCompletionFreshChatAlways - whether Completion mode collates the complete/sliding-window history when
+  communicating with the server or only sends the latest user query/message.
+
+  bCompletionInsertStandardRolePrefix - whether Completion mode inserts a role related prefix wrt the
+  messages that get inserted into the prompt field wrt the /completions endpoint.
+
+  bTrimGarbage - whether garbage repetition at the end of the generated ai response should be
+  trimmed or left as is. If enabled, it will be trimmed so that it won't be sent back as part of
+  subsequent chat history. At the same time the actual trimmed text is shown to the user, once,
+  when it was generated, so the user can check if any useful info/data was there in the response.
+
+    One may be able to request the ai-model to continue (wrt the last response) (if chat-history
+    is enabled as part of the chat-history-in-context setting), and chances are the ai-model will
+    continue starting from the trimmed part, thus allowing a long response to be recovered/continued
+    indirectly, in many cases.
+
+    The histogram/freq based trimming logic is currently tuned for the english language wrt its
+    is-it-an-alphabetic|numeral-char regex match logic.
+
+  apiRequestOptions - maintains the list of options/fields to send along with the api request,
+  irrespective of whether the /chat/completions or /completions endpoint is used.
+
+    If you want to add additional options/fields to send to the server/ai-model, and/or
+    modify the existing options' values or remove them, for now you can update this global var
+    using the browser's development-tools/console.
+
+    For string, numeric and boolean fields in apiRequestOptions, including even those added by a
+    user at runtime by directly modifying gMe.apiRequestOptions, settings ui entries will be auto
+    created.
+
+    The cache_prompt option supported by the server is allowed to be controlled by the user, so that
+    any caching supported wrt the system-prompt and chat history, if usable, can get used. When the chat
+    history sliding window is enabled, the cache_prompt logic may or may not kick in at the backend
+    wrt the same, based on aspects related to the model, positional encoding, attention mechanism and so on.
+    However the system prompt should ideally get the benefit of caching.
+
+  headers - maintains the list of http headers sent when a request is made to the server. By default
+  Content-Type is set to application/json. Additionally an Authorization entry is provided, which can
+  be set if needed using the settings ui.
+
+  iRecentUserMsgCnt - a simple-minded sliding window to limit the context window load at the ai-model end.
+  This is disabled by default. However if enabled, then in addition to the latest system message, only
+  the last/latest iRecentUserMsgCnt user messages after the latest system prompt, and their responses
+  from the ai model, will be sent to the ai-model when querying for a new response. I.e. if enabled,
+  only user messages after the latest system message/prompt will be considered.
+
+    This specified sliding window user message count also includes the latest user query.
+    <0 : Send the entire chat history to the server
+     0 : Send only the system message, if any, to the server
+    >0 : Send the latest chat history from the latest system prompt, limited to the specified count.
+
+
+By using gMe's iRecentUserMsgCnt and apiRequestOptions.max_tokens/n_predict one can try to control
+the implications of loading the ai-model's context window with chat history, wrt the chat response, to
+some extent, in a simple crude way. You may also want to control the context size enabled when the
+server loads the ai-model, on the server end.
+
+
+Sometimes the browser may be stubborn with caching of the files, so your updates to html/css/js
+may not be visible. Also remember that just refreshing/reloading the page in the browser, or for that
+matter clearing site data, doesn't directly override site caching in all cases. Worst case you may
+have to change the port. Or in the dev tools of the browser, you may be able to disable caching fully.
+
+
+Currently the server to communicate with is maintained globally and not as part of a specific
+chat session. So if one changes the server ip/url in settings, then all chat sessions will auto
+switch to this new server, when you try using those sessions.
+
+
+By switching between chat.add_system_begin/anytime, one can control whether one can change
+the system prompt anytime during the conversation or only at the beginning.
+
+
+### Default setup
+
+By default things are set up to try and make the user experience a bit better, if possible.
+However a developer, when testing the server or an ai-model, may want to change these values.
+
+Using iRecentUserMsgCnt, the chat history context sent to the server/ai-model is reduced to
+just the system-prompt, prev-user-request-and-ai-response and cur-user-request, instead of the
+full chat history. This way if there is any response with garbage/repetition, it doesn't
+mess with things beyond the next question/request/query, in some ways. The trim garbage
+option also tries to help avoid issues with garbage in the context to an extent.
+
+max_tokens is set to 1024, so that a relatively large previous response doesn't eat up the space
+available wrt the next query-response. However don't forget that the server, when started, should
+also be started with a model context size of 1k or more, to be on the safe side.
+
+  The /completions endpoint of tools/server doesn't take max_tokens, instead it takes the
+  internal n_predict; for now the same is added here on the client side, maybe later max_tokens
+  will be added to the /completions endpoint handling code on the server side.
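+
+A rough sketch of the kind of devel-tools/console tweaking described above (the member names
+are the ones documented here, the values are only illustrative):
+
+    // paste into the browser's devel-tools console once the page has loaded
+    gMe.iRecentUserMsgCnt = 2;                   // sliding window: latest system prompt + last 2 user msgs
+    gMe.apiRequestOptions.max_tokens = 1024;     // also set n_predict for the /completions endpoint
+    gMe.apiRequestOptions.n_predict = 1024;
+    gMe.apiRequestOptions.temperature = 0.7;
+    gMe.headers["Authorization"] = "Bearer THE_OPENAI_API_KEY";   // only for authenticated services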
+ +NOTE: One may want to experiment with frequency/presence penalty fields in apiRequestOptions +wrt the set of fields sent to server along with the user query, to check how the model behaves +wrt repeatations in general in the generated text response. + +A end-user can change these behaviour by editing gMe from browser's devel-tool/console or by +using the provided settings ui (for settings exposed through the ui). + + +### OpenAi / Equivalent API WebService + +One may be abe to handshake with OpenAI/Equivalent api web service's /chat/completions endpoint +for a minimal chatting experimentation by setting the below. + +* the baseUrl in settings ui + * https://api.openai.com/v1 or similar + +* Wrt request body - gMe.apiRequestOptions + * model (settings ui) + * any additional fields if required in future + +* Wrt request headers - gMe.headers + * Authorization (available through settings ui) + * Bearer THE_OPENAI_API_KEY + * any additional optional header entries like "OpenAI-Organization", "OpenAI-Project" or so + +NOTE: Not tested, as there is no free tier api testing available. However logically this might +work. + + +## At the end + +Also a thank you to all open source and open model developers, who strive for the common good. diff --git a/llama.cpp/tools/server/public_simplechat/simplechat.css b/llama.cpp/tools/server/public_simplechat/simplechat.css new file mode 100644 index 0000000..13bfb80 --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/simplechat.css @@ -0,0 +1,79 @@ +/** + * the styling of the simplechat web frontend + * by Humans for All + */ + +#fullbody { + height: 98vh; +} + +.heading { + background-color: lightgray; +} + +.session-selected { + background-color: lightblue; +} + +.role-system { + background-color: lightblue; +} +.role-user { + background-color: lightgray; +} +.role-trim { + background-color: lightpink; +} + +.gridx2 { + display: grid; + grid-template-columns: repeat(2, 1fr); + border-bottom-style: dotted; + border-bottom-width: thin; + border-bottom-color: lightblue; +} + +.flex-grow { + flex-grow: 1; +} +.float-right { + float: right; +} + +#chat-div { + overflow: scroll; + flex-grow: 1; + flex-shrink: 1; + min-height: 40vh; +} +button { + min-width: 8vw; +} + +.sameline { + display: flex; + flex-direction: row; +} +.samecolumn { + display: flex; + flex-direction: column; +} + +.ul1 { + padding-inline-start: 2vw; +} +.ul2 { + padding-inline-start: 2vw; +} + +* { + margin: 0.6vmin; +} + +@media print { + + #fullbody { + height: auto; + } + +} diff --git a/llama.cpp/tools/server/public_simplechat/simplechat.js b/llama.cpp/tools/server/public_simplechat/simplechat.js new file mode 100644 index 0000000..2fcd24a --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/simplechat.js @@ -0,0 +1,929 @@ +// @ts-check +// A simple completions and chat/completions test related web front end logic +// by Humans for All + +import * as du from "./datautils.mjs"; +import * as ui from "./ui.mjs" + +class Roles { + static System = "system"; + static User = "user"; + static Assistant = "assistant"; +} + +class ApiEP { + static Type = { + Chat: "chat", + Completion: "completion", + } + static UrlSuffix = { + 'chat': `/chat/completions`, + 'completion': `/completions`, + } + + /** + * Build the url from given baseUrl and apiEp id. 
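+ *
+ * Illustrative sketch only (not part of the original file); the address is a placeholder:
+ * @example
+ * // ApiEP.Url("http://127.0.0.1:8080/", ApiEP.Type.Chat) -> "http://127.0.0.1:8080/chat/completions"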
+ * @param {string} baseUrl + * @param {string} apiEP + */ + static Url(baseUrl, apiEP) { + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.substring(0, baseUrl.length-1); + } + return `${baseUrl}${this.UrlSuffix[apiEP]}`; + } + +} + + +let gUsageMsg = ` +

+    <p class="role-system">Usage</p>
+    <ul class="ul1">
+    <li> System prompt above, to try control ai response characteristics.</li>
+    <ul class="ul2">
+    <li> Completion mode - no system prompt normally.</li>
+    </ul>
+    <li> Use shift+enter for inserting enter/newline.</li>
+    <li> Enter your query to ai assistant below.</li>
+    <li> Default ContextWindow = [System, Last Query+Resp, Cur Query].</li>
+    <ul class="ul2">
+    <li> ChatHistInCtxt, MaxTokens, ModelCtxt window to expand</li>
+    </ul>
+    </ul>
+`; + + +/** @typedef {{role: string, content: string}[]} ChatMessages */ + +/** @typedef {{iLastSys: number, xchat: ChatMessages}} SimpleChatODS */ + +class SimpleChat { + + /** + * @param {string} chatId + */ + constructor(chatId) { + this.chatId = chatId; + /** + * Maintain in a form suitable for common LLM web service chat/completions' messages entry + * @type {ChatMessages} + */ + this.xchat = []; + this.iLastSys = -1; + this.latestResponse = ""; + } + + clear() { + this.xchat = []; + this.iLastSys = -1; + } + + ods_key() { + return `SimpleChat-${this.chatId}` + } + + save() { + /** @type {SimpleChatODS} */ + let ods = {iLastSys: this.iLastSys, xchat: this.xchat}; + localStorage.setItem(this.ods_key(), JSON.stringify(ods)); + } + + load() { + let sods = localStorage.getItem(this.ods_key()); + if (sods == null) { + return; + } + /** @type {SimpleChatODS} */ + let ods = JSON.parse(sods); + this.iLastSys = ods.iLastSys; + this.xchat = ods.xchat; + } + + /** + * Recent chat messages. + * If iRecentUserMsgCnt < 0 + * Then return the full chat history + * Else + * Return chat messages from latest going back till the last/latest system prompt. + * While keeping track that the number of user queries/messages doesnt exceed iRecentUserMsgCnt. + * @param {number} iRecentUserMsgCnt + */ + recent_chat(iRecentUserMsgCnt) { + if (iRecentUserMsgCnt < 0) { + return this.xchat; + } + if (iRecentUserMsgCnt == 0) { + console.warn("WARN:SimpleChat:SC:RecentChat:iRecentUsermsgCnt of 0 means no user message/query sent"); + } + /** @type{ChatMessages} */ + let rchat = []; + let sysMsg = this.get_system_latest(); + if (sysMsg.length != 0) { + rchat.push({role: Roles.System, content: sysMsg}); + } + let iUserCnt = 0; + let iStart = this.xchat.length; + for(let i=this.xchat.length-1; i > this.iLastSys; i--) { + if (iUserCnt >= iRecentUserMsgCnt) { + break; + } + let msg = this.xchat[i]; + if (msg.role == Roles.User) { + iStart = i; + iUserCnt += 1; + } + } + for(let i = iStart; i < this.xchat.length; i++) { + let msg = this.xchat[i]; + if (msg.role == Roles.System) { + continue; + } + rchat.push({role: msg.role, content: msg.content}); + } + return rchat; + } + + /** + * Collate the latest response from the server/ai-model, as it is becoming available. + * This is mainly useful for the stream mode. + * @param {string} content + */ + append_response(content) { + this.latestResponse += content; + } + + /** + * Add an entry into xchat + * @param {string} role + * @param {string|undefined|null} content + */ + add(role, content) { + if ((content == undefined) || (content == null) || (content == "")) { + return false; + } + this.xchat.push( {role: role, content: content} ); + if (role == Roles.System) { + this.iLastSys = this.xchat.length - 1; + } + this.save(); + return true; + } + + /** + * Show the contents in the specified div + * @param {HTMLDivElement} div + * @param {boolean} bClear + */ + show(div, bClear=true) { + if (bClear) { + div.replaceChildren(); + } + let last = undefined; + for(const x of this.recent_chat(gMe.iRecentUserMsgCnt)) { + let entry = ui.el_create_append_p(`${x.role}: ${x.content}`, div); + entry.className = `role-${x.role}`; + last = entry; + } + if (last !== undefined) { + last.scrollIntoView(false); + } else { + if (bClear) { + div.innerHTML = gUsageMsg; + gMe.setup_load(div, this); + gMe.show_info(div); + } + } + return last; + } + + /** + * Setup the fetch headers. + * It picks the headers from gMe.headers. + * It inserts Authorization only if its non-empty. 
+ * @param {string} apiEP + */ + fetch_headers(apiEP) { + let headers = new Headers(); + for(let k in gMe.headers) { + let v = gMe.headers[k]; + if ((k == "Authorization") && (v.trim() == "")) { + continue; + } + headers.append(k, v); + } + return headers; + } + + /** + * Add needed fields wrt json object to be sent wrt LLM web services completions endpoint. + * The needed fields/options are picked from a global object. + * Add optional stream flag, if required. + * Convert the json into string. + * @param {Object} obj + */ + request_jsonstr_extend(obj) { + for(let k in gMe.apiRequestOptions) { + obj[k] = gMe.apiRequestOptions[k]; + } + if (gMe.bStream) { + obj["stream"] = true; + } + return JSON.stringify(obj); + } + + /** + * Return a string form of json object suitable for chat/completions + */ + request_messages_jsonstr() { + let req = { + messages: this.recent_chat(gMe.iRecentUserMsgCnt), + } + return this.request_jsonstr_extend(req); + } + + /** + * Return a string form of json object suitable for /completions + * @param {boolean} bInsertStandardRolePrefix Insert ": " as prefix wrt each role's message + */ + request_prompt_jsonstr(bInsertStandardRolePrefix) { + let prompt = ""; + let iCnt = 0; + for(const chat of this.recent_chat(gMe.iRecentUserMsgCnt)) { + iCnt += 1; + if (iCnt > 1) { + prompt += "\n"; + } + if (bInsertStandardRolePrefix) { + prompt += `${chat.role}: `; + } + prompt += `${chat.content}`; + } + let req = { + prompt: prompt, + } + return this.request_jsonstr_extend(req); + } + + /** + * Return a string form of json object suitable for specified api endpoint. + * @param {string} apiEP + */ + request_jsonstr(apiEP) { + if (apiEP == ApiEP.Type.Chat) { + return this.request_messages_jsonstr(); + } else { + return this.request_prompt_jsonstr(gMe.bCompletionInsertStandardRolePrefix); + } + } + + /** + * Extract the ai-model/assistant's response from the http response got. + * Optionally trim the message wrt any garbage at the end. + * @param {any} respBody + * @param {string} apiEP + */ + response_extract(respBody, apiEP) { + let assistant = ""; + if (apiEP == ApiEP.Type.Chat) { + assistant = respBody["choices"][0]["message"]["content"]; + } else { + try { + assistant = respBody["choices"][0]["text"]; + } catch { + assistant = respBody["content"]; + } + } + return assistant; + } + + /** + * Extract the ai-model/assistant's response from the http response got in streaming mode. + * @param {any} respBody + * @param {string} apiEP + */ + response_extract_stream(respBody, apiEP) { + let assistant = ""; + if (apiEP == ApiEP.Type.Chat) { + if (respBody["choices"][0]["finish_reason"] !== "stop") { + assistant = respBody["choices"][0]["delta"]["content"]; + } + } else { + try { + assistant = respBody["choices"][0]["text"]; + } catch { + assistant = respBody["content"]; + } + } + return assistant; + } + + /** + * Allow setting of system prompt, but only at begining. 
+ * @param {string} sysPrompt + * @param {string} msgTag + */ + add_system_begin(sysPrompt, msgTag) { + if (this.xchat.length == 0) { + if (sysPrompt.length > 0) { + return this.add(Roles.System, sysPrompt); + } + } else { + if (sysPrompt.length > 0) { + if (this.xchat[0].role !== Roles.System) { + console.error(`ERRR:SimpleChat:SC:${msgTag}:You need to specify system prompt before any user query, ignoring...`); + } else { + if (this.xchat[0].content !== sysPrompt) { + console.error(`ERRR:SimpleChat:SC:${msgTag}:You cant change system prompt, mid way through, ignoring...`); + } + } + } + } + return false; + } + + /** + * Allow setting of system prompt, at any time. + * @param {string} sysPrompt + * @param {string} msgTag + */ + add_system_anytime(sysPrompt, msgTag) { + if (sysPrompt.length <= 0) { + return false; + } + + if (this.iLastSys < 0) { + return this.add(Roles.System, sysPrompt); + } + + let lastSys = this.xchat[this.iLastSys].content; + if (lastSys !== sysPrompt) { + return this.add(Roles.System, sysPrompt); + } + return false; + } + + /** + * Retrieve the latest system prompt. + */ + get_system_latest() { + if (this.iLastSys == -1) { + return ""; + } + let sysPrompt = this.xchat[this.iLastSys].content; + return sysPrompt; + } + + + /** + * Handle the multipart response from server/ai-model + * @param {Response} resp + * @param {string} apiEP + * @param {HTMLDivElement} elDiv + */ + async handle_response_multipart(resp, apiEP, elDiv) { + let elP = ui.el_create_append_p("", elDiv); + if (!resp.body) { + throw Error("ERRR:SimpleChat:SC:HandleResponseMultiPart:No body..."); + } + let tdUtf8 = new TextDecoder("utf-8"); + let rr = resp.body.getReader(); + this.latestResponse = ""; + let xLines = new du.NewLines(); + while(true) { + let { value: cur, done: done } = await rr.read(); + if (cur) { + let curBody = tdUtf8.decode(cur, {stream: true}); + console.debug("DBUG:SC:PART:Str:", curBody); + xLines.add_append(curBody); + } + while(true) { + let curLine = xLines.shift(!done); + if (curLine == undefined) { + break; + } + if (curLine.trim() == "") { + continue; + } + if (curLine.startsWith("data:")) { + curLine = curLine.substring(5); + } + if (curLine.trim() === "[DONE]") { + break; + } + let curJson = JSON.parse(curLine); + console.debug("DBUG:SC:PART:Json:", curJson); + this.append_response(this.response_extract_stream(curJson, apiEP)); + } + elP.innerText = this.latestResponse; + elP.scrollIntoView(false); + if (done) { + break; + } + } + console.debug("DBUG:SC:PART:Full:", this.latestResponse); + return this.latestResponse; + } + + /** + * Handle the oneshot response from server/ai-model + * @param {Response} resp + * @param {string} apiEP + */ + async handle_response_oneshot(resp, apiEP) { + let respBody = await resp.json(); + console.debug(`DBUG:SimpleChat:SC:${this.chatId}:HandleUserSubmit:RespBody:${JSON.stringify(respBody)}`); + return this.response_extract(respBody, apiEP); + } + + /** + * Handle the response from the server be it in oneshot or multipart/stream mode. + * Also take care of the optional garbage trimming. 
+ * @param {Response} resp + * @param {string} apiEP + * @param {HTMLDivElement} elDiv + */ + async handle_response(resp, apiEP, elDiv) { + let theResp = { + assistant: "", + trimmed: "", + } + if (gMe.bStream) { + try { + theResp.assistant = await this.handle_response_multipart(resp, apiEP, elDiv); + this.latestResponse = ""; + } catch (error) { + theResp.assistant = this.latestResponse; + this.add(Roles.Assistant, theResp.assistant); + this.latestResponse = ""; + throw error; + } + } else { + theResp.assistant = await this.handle_response_oneshot(resp, apiEP); + } + if (gMe.bTrimGarbage) { + let origMsg = theResp.assistant; + theResp.assistant = du.trim_garbage_at_end(origMsg); + theResp.trimmed = origMsg.substring(theResp.assistant.length); + } + this.add(Roles.Assistant, theResp.assistant); + return theResp; + } + +} + + +class MultiChatUI { + + constructor() { + /** @type {Object} */ + this.simpleChats = {}; + /** @type {string} */ + this.curChatId = ""; + + // the ui elements + this.elInSystem = /** @type{HTMLInputElement} */(document.getElementById("system-in")); + this.elDivChat = /** @type{HTMLDivElement} */(document.getElementById("chat-div")); + this.elBtnUser = /** @type{HTMLButtonElement} */(document.getElementById("user-btn")); + this.elInUser = /** @type{HTMLInputElement} */(document.getElementById("user-in")); + this.elDivHeading = /** @type{HTMLSelectElement} */(document.getElementById("heading")); + this.elDivSessions = /** @type{HTMLDivElement} */(document.getElementById("sessions-div")); + this.elBtnSettings = /** @type{HTMLButtonElement} */(document.getElementById("settings")); + + this.validate_element(this.elInSystem, "system-in"); + this.validate_element(this.elDivChat, "chat-div"); + this.validate_element(this.elInUser, "user-in"); + this.validate_element(this.elDivHeading, "heading"); + this.validate_element(this.elDivChat, "sessions-div"); + this.validate_element(this.elBtnSettings, "settings"); + } + + /** + * Check if the element got + * @param {HTMLElement | null} el + * @param {string} msgTag + */ + validate_element(el, msgTag) { + if (el == null) { + throw Error(`ERRR:SimpleChat:MCUI:${msgTag} element missing in html...`); + } else { + console.debug(`INFO:SimpleChat:MCUI:${msgTag} Id[${el.id}] Name[${el["name"]}]`); + } + } + + /** + * Reset user input ui. + * * clear user input + * * enable user input + * * set focus to user input + */ + ui_reset_userinput() { + this.elInUser.value = ""; + this.elInUser.disabled = false; + this.elInUser.focus(); + } + + /** + * Setup the needed callbacks wrt UI, curChatId to defaultChatId and + * optionally switch to specified defaultChatId. + * @param {string} defaultChatId + * @param {boolean} bSwitchSession + */ + setup_ui(defaultChatId, bSwitchSession=false) { + + this.curChatId = defaultChatId; + if (bSwitchSession) { + this.handle_session_switch(this.curChatId); + } + + this.elBtnSettings.addEventListener("click", (ev)=>{ + this.elDivChat.replaceChildren(); + gMe.show_settings(this.elDivChat); + }); + + this.elBtnUser.addEventListener("click", (ev)=>{ + if (this.elInUser.disabled) { + return; + } + this.handle_user_submit(this.curChatId, gMe.apiEP).catch((/** @type{Error} */reason)=>{ + let msg = `ERRR:SimpleChat\nMCUI:HandleUserSubmit:${this.curChatId}\n${reason.name}:${reason.message}`; + console.error(msg.replace("\n", ":")); + alert(msg); + this.ui_reset_userinput(); + }); + }); + + this.elInUser.addEventListener("keyup", (ev)=> { + // allow user to insert enter into their message using shift+enter. 
+ // while just pressing enter key will lead to submitting. + if ((ev.key === "Enter") && (!ev.shiftKey)) { + let value = this.elInUser.value; + this.elInUser.value = value.substring(0,value.length-1); + this.elBtnUser.click(); + ev.preventDefault(); + } + }); + + this.elInSystem.addEventListener("keyup", (ev)=> { + // allow user to insert enter into the system prompt using shift+enter. + // while just pressing enter key will lead to setting the system prompt. + if ((ev.key === "Enter") && (!ev.shiftKey)) { + let value = this.elInSystem.value; + this.elInSystem.value = value.substring(0,value.length-1); + let chat = this.simpleChats[this.curChatId]; + chat.add_system_anytime(this.elInSystem.value, this.curChatId); + chat.show(this.elDivChat); + ev.preventDefault(); + } + }); + + } + + /** + * Setup a new chat session and optionally switch to it. + * @param {string} chatId + * @param {boolean} bSwitchSession + */ + new_chat_session(chatId, bSwitchSession=false) { + this.simpleChats[chatId] = new SimpleChat(chatId); + if (bSwitchSession) { + this.handle_session_switch(chatId); + } + } + + + /** + * Handle user query submit request, wrt specified chat session. + * @param {string} chatId + * @param {string} apiEP + */ + async handle_user_submit(chatId, apiEP) { + + let chat = this.simpleChats[chatId]; + + // In completion mode, if configured, clear any previous chat history. + // So if user wants to simulate a multi-chat based completion query, + // they will have to enter the full thing, as a suitable multiline + // user input/query. + if ((apiEP == ApiEP.Type.Completion) && (gMe.bCompletionFreshChatAlways)) { + chat.clear(); + } + + chat.add_system_anytime(this.elInSystem.value, chatId); + + let content = this.elInUser.value; + if (!chat.add(Roles.User, content)) { + console.debug(`WARN:SimpleChat:MCUI:${chatId}:HandleUserSubmit:Ignoring empty user input...`); + return; + } + chat.show(this.elDivChat); + + let theUrl = ApiEP.Url(gMe.baseURL, apiEP); + let theBody = chat.request_jsonstr(apiEP); + + this.elInUser.value = "working..."; + this.elInUser.disabled = true; + console.debug(`DBUG:SimpleChat:MCUI:${chatId}:HandleUserSubmit:${theUrl}:ReqBody:${theBody}`); + let theHeaders = chat.fetch_headers(apiEP); + let resp = await fetch(theUrl, { + method: "POST", + headers: theHeaders, + body: theBody, + }); + + let theResp = await chat.handle_response(resp, apiEP, this.elDivChat); + if (chatId == this.curChatId) { + chat.show(this.elDivChat); + if (theResp.trimmed.length > 0) { + let p = ui.el_create_append_p(`TRIMMED:${theResp.trimmed}`, this.elDivChat); + p.className="role-trim"; + } + } else { + console.debug(`DBUG:SimpleChat:MCUI:HandleUserSubmit:ChatId has changed:[${chatId}] [${this.curChatId}]`); + } + this.ui_reset_userinput(); + } + + /** + * Show buttons for NewChat and available chat sessions, in the passed elDiv. + * If elDiv is undefined/null, then use this.elDivSessions. + * Take care of highlighting the selected chat-session's btn. 
+ * @param {HTMLDivElement | undefined} elDiv + */ + show_sessions(elDiv=undefined) { + if (!elDiv) { + elDiv = this.elDivSessions; + } + elDiv.replaceChildren(); + // Btn for creating new chat session + let btnNew = ui.el_create_button("New CHAT", (ev)=> { + if (this.elInUser.disabled) { + console.error(`ERRR:SimpleChat:MCUI:NewChat:Current session [${this.curChatId}] awaiting response, ignoring request...`); + alert("ERRR:SimpleChat\nMCUI:NewChat\nWait for response to pending query, before starting new chat session"); + return; + } + let chatId = `Chat${Object.keys(this.simpleChats).length}`; + let chatIdGot = prompt("INFO:SimpleChat\nMCUI:NewChat\nEnter id for new chat session", chatId); + if (!chatIdGot) { + console.error("ERRR:SimpleChat:MCUI:NewChat:Skipping based on user request..."); + return; + } + this.new_chat_session(chatIdGot, true); + this.create_session_btn(elDiv, chatIdGot); + ui.el_children_config_class(elDiv, chatIdGot, "session-selected", ""); + }); + elDiv.appendChild(btnNew); + // Btns for existing chat sessions + let chatIds = Object.keys(this.simpleChats); + for(let cid of chatIds) { + let btn = this.create_session_btn(elDiv, cid); + if (cid == this.curChatId) { + btn.className = "session-selected"; + } + } + } + + create_session_btn(elDiv, cid) { + let btn = ui.el_create_button(cid, (ev)=>{ + let target = /** @type{HTMLButtonElement} */(ev.target); + console.debug(`DBUG:SimpleChat:MCUI:SessionClick:${target.id}`); + if (this.elInUser.disabled) { + console.error(`ERRR:SimpleChat:MCUI:SessionClick:${target.id}:Current session [${this.curChatId}] awaiting response, ignoring switch...`); + alert("ERRR:SimpleChat\nMCUI:SessionClick\nWait for response to pending query, before switching"); + return; + } + this.handle_session_switch(target.id); + ui.el_children_config_class(elDiv, target.id, "session-selected", ""); + }); + elDiv.appendChild(btn); + return btn; + } + + /** + * Switch ui to the specified chatId and set curChatId to same. + * @param {string} chatId + */ + async handle_session_switch(chatId) { + let chat = this.simpleChats[chatId]; + if (chat == undefined) { + console.error(`ERRR:SimpleChat:MCUI:HandleSessionSwitch:${chatId} missing...`); + return; + } + this.elInSystem.value = chat.get_system_latest(); + this.elInUser.value = ""; + chat.show(this.elDivChat); + this.elInUser.focus(); + this.curChatId = chatId; + console.log(`INFO:SimpleChat:MCUI:HandleSessionSwitch:${chatId} entered...`); + } + +} + + +class Me { + + constructor() { + this.baseURL = "http://127.0.0.1:8080"; + this.defaultChatIds = [ "Default", "Other" ]; + this.multiChat = new MultiChatUI(); + this.bStream = true; + this.bCompletionFreshChatAlways = true; + this.bCompletionInsertStandardRolePrefix = false; + this.bTrimGarbage = true; + this.iRecentUserMsgCnt = 2; + this.sRecentUserMsgCnt = { + "Full": -1, + "Last0": 1, + "Last1": 2, + "Last2": 3, + "Last4": 5, + }; + this.apiEP = ApiEP.Type.Chat; + this.headers = { + "Content-Type": "application/json", + "Authorization": "", // Authorization: Bearer OPENAI_API_KEY + } + // Add needed fields wrt json object to be sent wrt LLM web services completions endpoint. + this.apiRequestOptions = { + "model": "gpt-3.5-turbo", + "temperature": 0.7, + "max_tokens": 1024, + "n_predict": 1024, + "cache_prompt": false, + //"frequency_penalty": 1.2, + //"presence_penalty": 1.2, + }; + } + + /** + * Disable console.debug by mapping it to a empty function. 
+ */ + debug_disable() { + this.console_debug = console.debug; + console.debug = () => { + + }; + } + + /** + * Setup the load saved chat ui. + * @param {HTMLDivElement} div + * @param {SimpleChat} chat + */ + setup_load(div, chat) { + if (!(chat.ods_key() in localStorage)) { + return; + } + div.innerHTML += `

+            <p class="role-system">Restore</p>
+            <p>Load previously saved chat session, if available</p>
`; + let btn = ui.el_create_button(chat.ods_key(), (ev)=>{ + console.log("DBUG:SimpleChat:SC:Load", chat); + chat.load(); + queueMicrotask(()=>{ + chat.show(div); + this.multiChat.elInSystem.value = chat.get_system_latest(); + }); + }); + div.appendChild(btn); + } + + /** + * Show the configurable parameters info in the passed Div element. + * @param {HTMLDivElement} elDiv + * @param {boolean} bAll + */ + show_info(elDiv, bAll=false) { + + let p = ui.el_create_append_p("Settings (devel-tools-console document[gMe])", elDiv); + p.className = "role-system"; + + if (bAll) { + + ui.el_create_append_p(`baseURL:${this.baseURL}`, elDiv); + + ui.el_create_append_p(`Authorization:${this.headers["Authorization"]}`, elDiv); + + ui.el_create_append_p(`bStream:${this.bStream}`, elDiv); + + ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv); + + ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv); + + ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv); + + ui.el_create_append_p(`bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`, elDiv); + + ui.el_create_append_p(`bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`, elDiv); + + } + + ui.el_create_append_p(`apiRequestOptions:${JSON.stringify(this.apiRequestOptions, null, " - ")}`, elDiv); + ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv); + + } + + /** + * Auto create ui input elements for fields in apiRequestOptions + * Currently supports text and number field types. + * @param {HTMLDivElement} elDiv + */ + show_settings_apirequestoptions(elDiv) { + let typeDict = { + "string": "text", + "number": "number", + }; + let fs = document.createElement("fieldset"); + let legend = document.createElement("legend"); + legend.innerText = "ApiRequestOptions"; + fs.appendChild(legend); + elDiv.appendChild(fs); + for(const k in this.apiRequestOptions) { + let val = this.apiRequestOptions[k]; + let type = typeof(val); + if (((type == "string") || (type == "number"))) { + let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.apiRequestOptions[k], (val)=>{ + if (type == "number") { + val = Number(val); + } + this.apiRequestOptions[k] = val; + }); + fs.appendChild(inp.div); + } else if (type == "boolean") { + let bbtn = ui.el_creatediv_boolbutton(`Set{k}`, k, {true: "true", false: "false"}, val, (userVal)=>{ + this.apiRequestOptions[k] = userVal; + }); + fs.appendChild(bbtn.div); + } + } + } + + /** + * Show settings ui for configurable parameters, in the passed Div element. 
+ * @param {HTMLDivElement} elDiv + */ + show_settings(elDiv) { + + let inp = ui.el_creatediv_input("SetBaseURL", "BaseURL", "text", this.baseURL, (val)=>{ + this.baseURL = val; + }); + elDiv.appendChild(inp.div); + + inp = ui.el_creatediv_input("SetAuthorization", "Authorization", "text", this.headers["Authorization"], (val)=>{ + this.headers["Authorization"] = val; + }); + inp.el.placeholder = "Bearer OPENAI_API_KEY"; + elDiv.appendChild(inp.div); + + let bb = ui.el_creatediv_boolbutton("SetStream", "Stream", {true: "[+] yes stream", false: "[-] do oneshot"}, this.bStream, (val)=>{ + this.bStream = val; + }); + elDiv.appendChild(bb.div); + + bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{ + this.bTrimGarbage = val; + }); + elDiv.appendChild(bb.div); + + this.show_settings_apirequestoptions(elDiv); + + let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{ + this.apiEP = ApiEP.Type[val]; + }); + elDiv.appendChild(sel.div); + + sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{ + this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val]; + }); + elDiv.appendChild(sel.div); + + bb = ui.el_creatediv_boolbutton("SetCompletionFreshChatAlways", "CompletionFreshChatAlways", {true: "[+] yes fresh", false: "[-] no, with history"}, this.bCompletionFreshChatAlways, (val)=>{ + this.bCompletionFreshChatAlways = val; + }); + elDiv.appendChild(bb.div); + + bb = ui.el_creatediv_boolbutton("SetCompletionInsertStandardRolePrefix", "CompletionInsertStandardRolePrefix", {true: "[+] yes insert", false: "[-] dont insert"}, this.bCompletionInsertStandardRolePrefix, (val)=>{ + this.bCompletionInsertStandardRolePrefix = val; + }); + elDiv.appendChild(bb.div); + + } + +} + + +/** @type {Me} */ +let gMe; + +function startme() { + console.log("INFO:SimpleChat:StartMe:Starting..."); + gMe = new Me(); + gMe.debug_disable(); + document["gMe"] = gMe; + document["du"] = du; + for (let cid of gMe.defaultChatIds) { + gMe.multiChat.new_chat_session(cid); + } + gMe.multiChat.setup_ui(gMe.defaultChatIds[0], true); + gMe.multiChat.show_sessions(); +} + +document.addEventListener("DOMContentLoaded", startme); diff --git a/llama.cpp/tools/server/public_simplechat/simplechat_screens.webp b/llama.cpp/tools/server/public_simplechat/simplechat_screens.webp new file mode 100644 index 0000000..ccea443 Binary files /dev/null and b/llama.cpp/tools/server/public_simplechat/simplechat_screens.webp differ diff --git a/llama.cpp/tools/server/public_simplechat/ui.mjs b/llama.cpp/tools/server/public_simplechat/ui.mjs new file mode 100644 index 0000000..b2d5b9a --- /dev/null +++ b/llama.cpp/tools/server/public_simplechat/ui.mjs @@ -0,0 +1,211 @@ +//@ts-check +// Helpers to work with html elements +// by Humans for All +// + + +/** + * Set the class of the children, based on whether it is the idSelected or not. + * @param {HTMLDivElement} elBase + * @param {string} idSelected + * @param {string} classSelected + * @param {string} classUnSelected + */ +export function el_children_config_class(elBase, idSelected, classSelected, classUnSelected="") { + for(let child of elBase.children) { + if (child.id == idSelected) { + child.className = classSelected; + } else { + child.className = classUnSelected; + } + } +} + +/** + * Create button and set it up. 
+ * @param {string} id + * @param {(this: HTMLButtonElement, ev: MouseEvent) => any} callback + * @param {string | undefined} name + * @param {string | undefined} innerText + */ +export function el_create_button(id, callback, name=undefined, innerText=undefined) { + if (!name) { + name = id; + } + if (!innerText) { + innerText = id; + } + let btn = document.createElement("button"); + btn.id = id; + btn.name = name; + btn.innerText = innerText; + btn.addEventListener("click", callback); + return btn; +} + +/** + * Create a para and set it up. Optionaly append it to a passed parent. + * @param {string} text + * @param {HTMLElement | undefined} elParent + * @param {string | undefined} id + */ +export function el_create_append_p(text, elParent=undefined, id=undefined) { + let para = document.createElement("p"); + para.innerText = text; + if (id) { + para.id = id; + } + if (elParent) { + elParent.appendChild(para); + } + return para; +} + +/** + * Create a button which represents bool value using specified text wrt true and false. + * When ever user clicks the button, it will toggle the value and update the shown text. + * + * @param {string} id + * @param {{true: string, false: string}} texts + * @param {boolean} defaultValue + * @param {function(boolean):void} cb + */ +export function el_create_boolbutton(id, texts, defaultValue, cb) { + let el = document.createElement("button"); + el["xbool"] = defaultValue; + el["xtexts"] = structuredClone(texts); + el.innerText = el["xtexts"][String(defaultValue)]; + if (id) { + el.id = id; + } + el.addEventListener('click', (ev)=>{ + el["xbool"] = !el["xbool"]; + el.innerText = el["xtexts"][String(el["xbool"])]; + cb(el["xbool"]); + }) + return el; +} + +/** + * Create a div wrapped button which represents bool value using specified text wrt true and false. + * @param {string} id + * @param {string} label + * @param {{ true: string; false: string; }} texts + * @param {boolean} defaultValue + * @param {(arg0: boolean) => void} cb + * @param {string} className + */ +export function el_creatediv_boolbutton(id, label, texts, defaultValue, cb, className="gridx2") { + let div = document.createElement("div"); + div.className = className; + let lbl = document.createElement("label"); + lbl.setAttribute("for", id); + lbl.innerText = label; + div.appendChild(lbl); + let btn = el_create_boolbutton(id, texts, defaultValue, cb); + div.appendChild(btn); + return { div: div, el: btn }; +} + + +/** + * Create a select ui element, with a set of options to select from. + * * options: an object which contains name-value pairs + * * defaultOption: the value whose name should be choosen, by default. + * * cb : the call back returns the name string of the option selected. 
+ * + * @param {string} id + * @param {Object} options + * @param {*} defaultOption + * @param {function(string):void} cb + */ +export function el_create_select(id, options, defaultOption, cb) { + let el = document.createElement("select"); + el["xselected"] = defaultOption; + el["xoptions"] = structuredClone(options); + for(let cur of Object.keys(options)) { + let op = document.createElement("option"); + op.value = cur; + op.innerText = cur; + if (options[cur] == defaultOption) { + op.selected = true; + } + el.appendChild(op); + } + if (id) { + el.id = id; + el.name = id; + } + el.addEventListener('change', (ev)=>{ + let target = /** @type{HTMLSelectElement} */(ev.target); + console.log("DBUG:UI:Select:", id, ":", target.value); + cb(target.value); + }) + return el; +} + +/** + * Create a div wrapped select ui element, with a set of options to select from. + * + * @param {string} id + * @param {any} label + * @param {{ [x: string]: any; }} options + * @param {any} defaultOption + * @param {(arg0: string) => void} cb + * @param {string} className + */ +export function el_creatediv_select(id, label, options, defaultOption, cb, className="gridx2") { + let div = document.createElement("div"); + div.className = className; + let lbl = document.createElement("label"); + lbl.setAttribute("for", id); + lbl.innerText = label; + div.appendChild(lbl); + let sel = el_create_select(id, options,defaultOption, cb); + div.appendChild(sel); + return { div: div, el: sel }; +} + + +/** + * Create a input ui element. + * + * @param {string} id + * @param {string} type + * @param {any} defaultValue + * @param {function(any):void} cb + */ +export function el_create_input(id, type, defaultValue, cb) { + let el = document.createElement("input"); + el.type = type; + el.value = defaultValue; + if (id) { + el.id = id; + } + el.addEventListener('change', (ev)=>{ + cb(el.value); + }) + return el; +} + +/** + * Create a div wrapped input. 
+ * + * @param {string} id + * @param {string} label + * @param {string} type + * @param {any} defaultValue + * @param {function(any):void} cb + * @param {string} className + */ +export function el_creatediv_input(id, label, type, defaultValue, cb, className="gridx2") { + let div = document.createElement("div"); + div.className = className; + let lbl = document.createElement("label"); + lbl.setAttribute("for", id); + lbl.innerText = label; + div.appendChild(lbl); + let el = el_create_input(id, type, defaultValue, cb); + div.appendChild(el); + return { div: div, el: el }; +} diff --git a/llama.cpp/tools/server/server-common.cpp b/llama.cpp/tools/server/server-common.cpp new file mode 100644 index 0000000..a853f65 --- /dev/null +++ b/llama.cpp/tools/server/server-common.cpp @@ -0,0 +1,1980 @@ +#include "common.h" +#include "download.h" +#include "log.h" +#include "llama.h" +#include "mtmd.h" +#include "mtmd-helper.h" +#include "chat.h" +#include "base64.hpp" + +#include "server-common.h" + +#include +#include +#include + +json format_error_response(const std::string & message, const enum error_type type) { + std::string type_str; + int code = 500; + switch (type) { + case ERROR_TYPE_INVALID_REQUEST: + type_str = "invalid_request_error"; + code = 400; + break; + case ERROR_TYPE_AUTHENTICATION: + type_str = "authentication_error"; + code = 401; + break; + case ERROR_TYPE_NOT_FOUND: + type_str = "not_found_error"; + code = 404; + break; + case ERROR_TYPE_SERVER: + type_str = "server_error"; + code = 500; + break; + case ERROR_TYPE_PERMISSION: + type_str = "permission_error"; + code = 403; + break; + case ERROR_TYPE_NOT_SUPPORTED: + type_str = "not_supported_error"; + code = 501; + break; + case ERROR_TYPE_UNAVAILABLE: + type_str = "unavailable_error"; + code = 503; + break; + case ERROR_TYPE_EXCEED_CONTEXT_SIZE: + type_str = "exceed_context_size_error"; + code = 400; + break; + } + return json { + {"code", code}, + {"message", message}, + {"type", type_str}, + }; +} + +// +// random string / id +// + +std::string random_string() { + static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"); + + std::random_device rd; + std::mt19937 generator(rd()); + + std::string result(32, ' '); + + for (int i = 0; i < 32; ++i) { + result[i] = str[generator() % str.size()]; + } + + return result; +} + +std::string gen_chatcmplid() { + return "chatcmpl-" + random_string(); +} + +std::string gen_tool_call_id() { + return random_string(); +} + +// +// lora utils +// + +bool lora_all_alora(const std::vector & loras) { + bool found_alora = false; + for (const auto & lora : loras) { + if (lora.scale != 0) { + if (llama_adapter_get_alora_n_invocation_tokens(lora.ptr) == 0) { + return false; + } + found_alora = true; + } + } + return found_alora; +} + +bool lora_should_clear_cache( + const std::vector & current, + const std::vector & next) { + + // This should always be called after determining that the two sets are + // _not_ equal. This assert is therefore some slightly wasted work and + // should be safe to remove as long as this method is called correctly. 
+ GGML_ASSERT(!are_lora_equal(current, next)); + + return ( + !(lora_get_enabled_ids(current).empty() || lora_all_alora(current)) || + !lora_all_alora(next)); +} + +std::map parse_lora_request(const json & data) { + std::map lora; + + // set value + for (const auto & entry : data) { + int id = json_value(entry, "id", -1); + float scale = json_value(entry, "scale", 0.0f); + lora[id] = scale; + } + + return lora; +} + +bool are_lora_equal( + const std::vector & l1, + const std::vector & l2) { + if (l1.size() != l2.size()) { + return false; + } + for (size_t i = 0; i < l1.size(); ++i) { + // we don't check lora.path to reduce the time complexity + if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) { + return false; + } + } + return true; +} + +std::vector lora_get_enabled_ids(const std::vector & loras) { + std::vector enabled_ids; + for (size_t i = 0; i < loras.size(); ++i) { + if (loras[i].scale > 0) { + enabled_ids.push_back(i); + } + } + return enabled_ids; +} + +// +// base64 utils (TODO: use the base64::decode from base64.hpp) +// + +static const std::string base64_chars = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "0123456789+/"; + +static inline bool is_base64(uint8_t c) { + return (isalnum(c) || (c == '+') || (c == '/')); +} + +static inline raw_buffer base64_decode(const std::string & encoded_string) { + int i = 0; + int j = 0; + int in_ = 0; + + int in_len = encoded_string.size(); + + uint8_t char_array_4[4]; + uint8_t char_array_3[3]; + + raw_buffer ret; + + while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) { + char_array_4[i++] = encoded_string[in_]; in_++; + if (i == 4) { + for (i = 0; i < 4; i++) { + char_array_4[i] = base64_chars.find(char_array_4[i]); + } + + char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4); + char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); + char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3]; + + for (i = 0; (i < 3); i++) { + ret.push_back(char_array_3[i]); + } + + i = 0; + } + } + + if (i) { + for (j = i; j < 4; j++) { + char_array_4[j] = 0; + } + + for (j = 0; j < 4; j++) { + char_array_4[j] = base64_chars.find(char_array_4[j]); + } + + char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4); + char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); + char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3]; + + for (j = 0; j < i - 1; j++) { + ret.push_back(char_array_3[j]); + } + } + + return ret; +} + +// +// server_tokens implementation +// + +server_tokens::server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd) : has_mtmd(has_mtmd) { + for (size_t i = 0; i < mtmd_chunks.size(); ++i) { + push_back(mtmd_chunks[i]); + } +} + +server_tokens::server_tokens(const llama_tokens & tokens, bool has_mtmd) : has_mtmd(has_mtmd), tokens(tokens) { +} + +llama_pos server_tokens::pos_next() const { + if (!has_mtmd) { + return tokens.size(); + } + + llama_pos res = tokens.size(); + + for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ++it) { + const auto & chunk = it->second; + res += mtmd_input_chunk_get_n_pos(chunk.get()) - mtmd_input_chunk_get_n_tokens(chunk.get()); + } + + return res; +} + +std::string server_tokens::str() const { + std::ostringstream oss; + oss << "tokens: "; + for (size_t idx = 0; idx < tokens.size(); ++idx) { + llama_token t = tokens[idx]; + oss << "idx:" << idx << " "; + if (t == LLAMA_TOKEN_NULL) { + oss << " "; + } 
else { + oss << t << " "; + } + } + oss << "\n"; + oss << "image idx: "; + for (const auto & it : map_idx_to_media) { + oss << it.first << ", "; + } + return oss.str(); +} + +const mtmd::input_chunk_ptr & server_tokens::find_chunk(size_t idx) const { + auto it = map_idx_to_media.find(idx); + if (it != map_idx_to_media.end()) { + return it->second; + } + throw std::runtime_error("Chunk not found"); +} + +void server_tokens::push_back(llama_token tok) { + if (tok == LLAMA_TOKEN_NULL) { + throw std::runtime_error("Invalid token"); + } + tokens.emplace_back(tok); +} + +void server_tokens::push_back(const mtmd_input_chunk * chunk) { + auto type = mtmd_input_chunk_get_type(chunk); + if (type == MTMD_INPUT_CHUNK_TYPE_IMAGE || type == MTMD_INPUT_CHUNK_TYPE_AUDIO) { + GGML_ASSERT(has_mtmd); + const size_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk); + size_t start_idx = tokens.size(); + for (size_t i = 0; i < n_tokens; ++i) { + tokens.emplace_back(LLAMA_TOKEN_NULL); + } + mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk)); + map_idx_to_media[start_idx] = std::move(new_chunk); + } else if (type == MTMD_INPUT_CHUNK_TYPE_TEXT) { + size_t n_tokens; + const auto * text_tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens); + for (size_t i = 0; i < n_tokens; ++i) { + push_back(text_tokens[i]); + } + } else { + GGML_ABORT("Invalid chunk type"); + } +} + +void server_tokens::push_back(server_tokens & tokens) { + size_t start_idx = size(); + for (size_t i = 0; i < tokens.size(); i++) { + push_back(tokens[i]); + } + if (tokens.has_mtmd) { + // Assert if we are copying MTMD chunks to a server_tokens that does not have mtmd. + // We could also just check, but this will prevent silently dropping MTMD data. + GGML_ASSERT(has_mtmd); + for (auto it = tokens.map_idx_to_media.begin(); it != tokens.map_idx_to_media.end(); ) { + auto * chunk = tokens.map_idx_to_media[it->first].get(); + mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk)); + map_idx_to_media[start_idx + it->first] = std::move(new_chunk); + } + } +} + +void server_tokens::insert(const llama_tokens & inp_tokens) { + GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled + tokens.insert(tokens.end(), inp_tokens.begin(), inp_tokens.end()); +} + +const llama_tokens & server_tokens::get_text_tokens() const { + GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled + return tokens; +} + +void server_tokens::set_token(llama_pos pos, llama_token id) { + GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled + tokens[pos] = id; +} + +void server_tokens::keep_first(size_t n) { + GGML_ASSERT(n <= tokens.size()); + if (has_mtmd) { + if (n == tokens.size()) { + return; // nothing to do + } + // we throw an error if we try to remove a token in the middle of an image + // for ex. 
with input of 5 text tokens and 2 images: + // [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1] + // n 1 2 3 4 5 6 7 8 9 10 + // allowed to resize ^ ^ + // disallowed to resize ^ ^ ^ + if (n > 0) { + // make sure we never remove tokens in the middle of an image + // note that the case where we keep a full image at the end is allowed: + // tokens[n - 1] == LLAMA_TOKEN_NULL && tokens[n] != LLAMA_TOKEN_NULL + if (tokens[n - 1] == LLAMA_TOKEN_NULL && tokens[n] == LLAMA_TOKEN_NULL) { + find_chunk(n - 1); // will throw an error if the token is not begin-of-chunk + } + } + // remove all image chunks that are not used anymore + for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ) { + size_t idx = it->first; + if (idx >= n) { + it = map_idx_to_media.erase(it); + } else { + ++it; + } + } + } + tokens.resize(n); +} + +std::string server_tokens::detokenize(const llama_context * ctx, bool special) const { + llama_tokens text_tokens; + text_tokens.reserve(tokens.size()); + for (const auto & t : tokens) { + if (t != LLAMA_TOKEN_NULL) { + text_tokens.push_back(t); + } + } + return common_detokenize(ctx, text_tokens, special); +} + +size_t server_tokens::get_common_prefix(const server_tokens & b) const { + const size_t max_idx = std::min(tokens.size(), b.tokens.size()); + + if (!has_mtmd) { + for (size_t i = 0; i < max_idx; ++i) { + if (tokens[i] == b.tokens[i]) { + continue; + } + + return i; + } + + return max_idx; + } + + for (size_t i = 0; i < max_idx; ++i) { + const llama_token ai = tokens[i]; + const llama_token bi = b.tokens[i]; + + if (ai == LLAMA_TOKEN_NULL && bi == LLAMA_TOKEN_NULL) { + const auto & a_chunk = find_chunk(i); + const auto & b_chunk = b.find_chunk(i); + + GGML_ASSERT(a_chunk && b_chunk); + + const std::string id_ai = mtmd_input_chunk_get_id(a_chunk.get()); + const std::string id_bi = mtmd_input_chunk_get_id(b_chunk.get()); + + const size_t n_tok_a = mtmd_input_chunk_get_n_tokens(a_chunk.get()); + const size_t n_tok_b = mtmd_input_chunk_get_n_tokens(b_chunk.get()); + + if (id_ai == id_bi && n_tok_a == n_tok_b) { + GGML_ASSERT(n_tok_a > 0 && "Invalid media chunk"); // should never happen + i += n_tok_a - 1; // will be +1 by the for loop + continue; + } + + return i; + } + + if (ai == bi) { + continue; + } + + return i; + } + + return max_idx; // all tokens are equal +} + +bool server_tokens::validate(const struct llama_context * ctx) const { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + const int32_t n_vocab = llama_vocab_n_tokens(vocab); + + for (size_t i = 0; i < tokens.size(); ++i) { + const auto & t = tokens[i]; + if (t == LLAMA_TOKEN_NULL) { + try { + const auto & chunk = find_chunk(i); + size_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk.get()); + i += n_tokens - 1; // will be +1 by the for loop + } catch (const std::exception & e) { + return false; + } + } else if (t < 0 || t >= n_vocab) { + return false; + } + } + return true; +} + +int32_t server_tokens::process_chunk( + llama_context * ctx, + mtmd_context * mctx, + size_t idx, + llama_pos pos, + int32_t seq_id, + size_t & n_tokens_out) const { + const auto & chunk = find_chunk(idx); + const char * name = mtmd_input_chunk_get_type(chunk.get()) == MTMD_INPUT_CHUNK_TYPE_IMAGE + ? 
"image" : "audio"; + SRV_INF("processing %s...\n", name); + int32_t n_batch = llama_n_batch(ctx); + int64_t t0 = ggml_time_ms(); + llama_pos new_n_past; // unused for now + int32_t result = mtmd_helper_eval_chunk_single(mctx, ctx, + chunk.get(), + pos, + seq_id, + n_batch, + true, // logits last + &new_n_past); + SRV_INF("%s processed in %" PRId64 " ms\n", name, ggml_time_ms() - t0); + if (result != 0) { + LOG_ERR("mtmd_helper_eval failed with status %d", result); + n_tokens_out = 0; + return result; + } + n_tokens_out = mtmd_input_chunk_get_n_tokens(chunk.get()); + return 0; +} + +server_tokens server_tokens::clone() const { + server_tokens res; + res.has_mtmd = has_mtmd; + res.tokens = tokens; + for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ++it) { + size_t idx = it->first; + const mtmd::input_chunk_ptr & chunk = it->second; + res.map_idx_to_media[idx] = mtmd::input_chunk_ptr(mtmd_input_chunk_copy(chunk.get())); + } + return res; +} + +// +// tokenizer and input processing utils +// + +bool json_is_array_of_numbers(const json & data) { + if (data.is_array()) { + for (const auto & e : data) { + if (!e.is_number_integer()) { + return false; + } + } + return true; + } + return false; +} + +bool json_is_array_of_mixed_numbers_strings(const json & data) { + bool seen_string = false; + bool seen_number = false; + if (data.is_array()) { + for (const auto & e : data) { + seen_string |= e.is_string(); + seen_number |= e.is_number_integer(); + if (seen_number && seen_string) { + return true; + } + } + } + return false; +} + +bool json_is_array_and_contains_numbers(const json & data) { + if (data.is_array()) { + for (const auto & e : data) { + if (e.is_number_integer()) { + return true; + } + } + return false; + } + return false; +} + +json json_get_nested_values(const std::vector & paths, const json & js) { + json result = json::object(); + + for (const std::string & path : paths) { + json current = js; + const auto keys = string_split(path, /*separator*/ '/'); + bool valid_path = true; + for (const std::string & k : keys) { + if (valid_path && current.is_object() && current.contains(k)) { + current = current[k]; + } else { + valid_path = false; + } + } + if (valid_path) { + result[path] = current; + } + } + return result; +} + +llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) { + // If `add_bos` is true, we only add BOS, when json_prompt is a string, + // or the first element of the json_prompt array is a string. 
+ llama_tokens prompt_tokens; + + if (json_prompt.is_array()) { + bool first = true; + for (const auto & p : json_prompt) { + if (p.is_string()) { + auto s = p.template get(); + + llama_tokens p; + if (first) { + p = common_tokenize(vocab, s, add_special, parse_special); + first = false; + } else { + p = common_tokenize(vocab, s, false, parse_special); + } + + prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end()); + } else { + if (first) { + first = false; + } + + prompt_tokens.push_back(p.template get()); + } + } + } else { + auto s = json_prompt.template get(); + prompt_tokens = common_tokenize(vocab, s, add_special, parse_special); + } + + return prompt_tokens; +} + +size_t validate_utf8(const std::string& text) { + size_t len = text.size(); + if (len == 0) return 0; + + // Check the last few bytes to see if a multi-byte character is cut off + for (size_t i = 1; i <= 4 && i <= len; ++i) { + unsigned char c = text[len - i]; + // Check for start of a multi-byte sequence from the end + if ((c & 0xE0) == 0xC0) { + // 2-byte character start: 110xxxxx + // Needs at least 2 bytes + if (i < 2) return len - i; + } else if ((c & 0xF0) == 0xE0) { + // 3-byte character start: 1110xxxx + // Needs at least 3 bytes + if (i < 3) return len - i; + } else if ((c & 0xF8) == 0xF0) { + // 4-byte character start: 11110xxx + // Needs at least 4 bytes + if (i < 4) return len - i; + } + } + + // If no cut-off multi-byte character is found, return full length + return len; +} + +// Computes FNV-1a hash of the data +static std::string fnv_hash(const uint8_t * data, size_t len) { + const uint64_t fnv_prime = 0x100000001b3ULL; + uint64_t hash = 0xcbf29ce484222325ULL; + + for (size_t i = 0; i < len; ++i) { + hash ^= data[i]; + hash *= fnv_prime; + } + return std::to_string(hash); +} + +server_tokens process_mtmd_prompt(mtmd_context * mctx, std::string prompt, std::vector files) { + mtmd::bitmaps bitmaps; + for (auto & file : files) { + mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(mctx, file.data(), file.size())); + if (!bmp.ptr) { + throw std::runtime_error("Failed to load image or audio file"); + } + // calculate bitmap hash (for KV caching) + std::string hash = fnv_hash(bmp.data(), bmp.n_bytes()); + bmp.set_id(hash.c_str()); + bitmaps.entries.push_back(std::move(bmp)); + } + // process prompt + std::vector inputs; + // multimodal + mtmd_input_text inp_txt = { + prompt.c_str(), + /* add_special */ true, + /* parse_special */ true, + }; + mtmd::input_chunks chunks(mtmd_input_chunks_init()); + auto bitmaps_c_ptr = bitmaps.c_ptr(); + int32_t tokenized = mtmd_tokenize(mctx, + chunks.ptr.get(), + &inp_txt, + bitmaps_c_ptr.data(), + bitmaps_c_ptr.size()); + if (tokenized != 0) { + throw std::runtime_error("Failed to tokenize prompt"); + } + auto result = server_tokens(chunks, true); + return result; +} + +/** + * break the input "prompt" object into multiple prompt if needed, then tokenize them + * use tokenize_input_prompts() if the input could be an array. 
+ * this supports these cases: + * - "prompt": "string" + * - "prompt": [12, 34, 56] + * - "prompt": [12, 34, "string", 56, 78] + * - "prompt": { "prompt_string": "string", "multimodal_data": [ "base64" ] } + */ +static server_tokens tokenize_input_subprompt(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) { + constexpr char JSON_STRING_PROMPT_KEY[] = "prompt_string"; + constexpr char JSON_MTMD_DATA_KEY[] = "multimodal_data"; + const bool has_mtmd = mctx != nullptr; + if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) { + // string or mixed + llama_tokens tmp = tokenize_mixed(vocab, json_prompt, add_special, parse_special); + return server_tokens(tmp, false); + } else if (json_is_array_of_numbers(json_prompt)) { + // array of tokens + llama_tokens tmp = json_prompt.get(); + return server_tokens(tmp, false); + } else if (json_prompt.contains(JSON_STRING_PROMPT_KEY)) { + // JSON object with prompt key. + if (json_prompt.contains(JSON_MTMD_DATA_KEY)) { + if (!has_mtmd) + throw std::runtime_error("Multimodal data provided, but model does not support multimodal requests."); + + // JSON object with prompt and multimodal key. + std::vector files; + for (const auto & entry : json_prompt.at(JSON_MTMD_DATA_KEY)) { + files.push_back(base64_decode(entry)); + } + return process_mtmd_prompt(mctx, json_prompt.at(JSON_STRING_PROMPT_KEY), files); + } else { + // Not multimodal, but contains a subobject. + llama_tokens tmp = tokenize_mixed(vocab, json_prompt.at(JSON_STRING_PROMPT_KEY), add_special, parse_special); + return server_tokens(tmp, false); + } + } else { + throw std::runtime_error("\"prompt\" elements must be a string, a list of tokens, a JSON object containing a prompt string, or a list of mixed strings & tokens."); + } +} + +std::vector tokenize_input_prompts(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) { + std::vector result; + if (json_prompt.is_array() && !json_is_array_and_contains_numbers(json_prompt)) { + result.reserve(json_prompt.size()); + for (const auto & p : json_prompt) { + result.push_back(tokenize_input_subprompt(vocab, mctx, p,add_special, parse_special)); + } + } else { + result.push_back(tokenize_input_subprompt(vocab, mctx, json_prompt, add_special, parse_special)); + } + if (result.empty()) { + throw std::runtime_error("\"prompt\" must not be empty"); + } + return result; +} + +// +// OAI utils +// + +// used by /completions endpoint +json oaicompat_completion_params_parse(const json & body) { + json llama_params; + + if (!body.contains("prompt")) { + throw std::runtime_error("\"prompt\" is required"); + } + + // Handle "stop" field + if (body.contains("stop") && body.at("stop").is_string()) { + llama_params["stop"] = json::array({body.at("stop").get()}); + } else { + llama_params["stop"] = json_value(body, "stop", json::array()); + } + + // Handle "echo" field + if (json_value(body, "echo", false)) { + throw std::runtime_error("Only no echo is supported"); + } + + // Params supported by OAI but unsupported by llama.cpp + static const std::vector unsupported_params { "best_of", "suffix" }; + for (const auto & param : unsupported_params) { + if (body.contains(param)) { + throw std::runtime_error("Unsupported param: " + param); + } + } + + // Copy remaining properties to llama_params + for (const auto & item : body.items()) { + // Exception: if "n_predict" is present, we overwrite the value specified earlier by 
"max_tokens" + if (!llama_params.contains(item.key()) || item.key() == "n_predict") { + llama_params[item.key()] = item.value(); + } + } + + return llama_params; +} + +// media_path always end with '/', see arg.cpp +static void handle_media( + std::vector & out_files, + json & media_obj, + const std::string & media_path) { + std::string url = json_value(media_obj, "url", std::string()); + if (string_starts_with(url, "http")) { + // download remote image + // TODO @ngxson : maybe make these params configurable + common_remote_params params; + params.max_size = 1024 * 1024 * 10; // 10MB + params.timeout = 10; // seconds + SRV_INF("downloading image from '%s'\n", url.c_str()); + auto res = common_remote_get_content(url, params); + if (200 <= res.first && res.first < 300) { + SRV_INF("downloaded %zu bytes\n", res.second.size()); + raw_buffer data; + data.insert(data.end(), res.second.begin(), res.second.end()); + out_files.push_back(data); + } else { + throw std::runtime_error("Failed to download image"); + } + + } else if (string_starts_with(url, "file://")) { + if (media_path.empty()) { + throw std::invalid_argument("file:// URLs are not allowed unless --media-path is specified"); + } + // load local image file + std::string file_path = url.substr(7); // remove "file://" + raw_buffer data; + if (!fs_validate_filename(file_path, true)) { + throw std::invalid_argument("file path is not allowed: " + file_path); + } + SRV_INF("loading image from local file '%s'\n", (media_path + file_path).c_str()); + std::ifstream file(media_path + file_path, std::ios::binary); + if (!file) { + throw std::invalid_argument("file does not exist or cannot be opened: " + file_path); + } + data.assign((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + out_files.push_back(data); + + } else { + // try to decode base64 image + std::vector parts = string_split(url, /*separator*/ ','); + if (parts.size() != 2) { + throw std::runtime_error("Invalid url value"); + } else if (!string_starts_with(parts[0], "data:image/")) { + throw std::runtime_error("Invalid url format: " + parts[0]); + } else if (!string_ends_with(parts[0], "base64")) { + throw std::runtime_error("url must be base64 encoded"); + } else { + auto base64_data = parts[1]; + auto decoded_data = base64_decode(base64_data); + out_files.push_back(decoded_data); + } + } +} + +// used by /chat/completions endpoint +json oaicompat_chat_params_parse( + json & body, /* openai api json semantics */ + const server_chat_params & opt, + std::vector & out_files) +{ + json llama_params; + + auto tools = json_value(body, "tools", json()); + auto has_tools = tools.is_array() && !tools.empty(); + auto stream = json_value(body, "stream", false); + auto tool_choice = json_value(body, "tool_choice", std::string("auto")); + + if (!opt.use_jinja) { + if (has_tools) { + throw std::runtime_error("tools param requires --jinja flag"); + } + if (tool_choice != "auto") { + throw std::runtime_error("tool_choice param requires --jinja flag"); + } + } + + // Handle "stop" field + if (body.contains("stop") && body.at("stop").is_string()) { + llama_params["stop"] = json::array({body.at("stop").get()}); + } else { + llama_params["stop"] = json_value(body, "stop", json::array()); + } + + auto json_schema = json_value(body, "json_schema", json()); + auto grammar = json_value(body, "grammar", std::string()); + if (!json_schema.is_null() && !grammar.empty()) { + throw std::runtime_error("Cannot use both json_schema and grammar"); + } + + // Handle "response_format" field + if 
(body.contains("response_format")) { + json response_format = json_value(body, "response_format", json::object()); + std::string response_type = json_value(response_format, "type", std::string()); + if (response_type == "json_object") { + json_schema = json_value(response_format, "schema", json::object()); + } else if (response_type == "json_schema") { + auto schema_wrapper = json_value(response_format, "json_schema", json::object()); + json_schema = json_value(schema_wrapper, "schema", json::object()); + } else if (!response_type.empty() && response_type != "text") { + throw std::invalid_argument("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type); + } + } + + // get input files + if (!body.contains("messages")) { + throw std::invalid_argument("'messages' is required"); + } + json & messages = body.at("messages"); + if (!messages.is_array()) { + throw std::invalid_argument("Expected 'messages' to be an array"); + } + for (auto & msg : messages) { + std::string role = json_value(msg, "role", std::string()); + if (role != "assistant" && !msg.contains("content")) { + throw std::invalid_argument("All non-assistant messages must contain 'content'"); + } + if (role == "assistant") { + if (!msg.contains("content") && !msg.contains("tool_calls")) { + throw std::invalid_argument("Assistant message must contain either 'content' or 'tool_calls'!"); + } + if (!msg.contains("content")) { + continue; // avoid errors with no content + } + } + json & content = msg.at("content"); + if (content.is_string() || content.is_null()) { + continue; + } + + if (!content.is_array()) { + throw std::invalid_argument("Expected 'content' to be a string or an array"); + } + + for (auto & p : content) { + std::string type = json_value(p, "type", std::string()); + if (type == "image_url") { + if (!opt.allow_image) { + throw std::runtime_error("image input is not supported - hint: if this is unexpected, you may need to provide the mmproj"); + } + + json image_url = json_value(p, "image_url", json::object()); + handle_media(out_files, image_url, opt.media_path); + + // replace this chunk with a marker + p["type"] = "text"; + p["text"] = mtmd_default_marker(); + p.erase("image_url"); + + } else if (type == "input_audio") { + if (!opt.allow_audio) { + throw std::runtime_error("audio input is not supported - hint: if this is unexpected, you may need to provide the mmproj"); + } + + json input_audio = json_value(p, "input_audio", json::object()); + std::string data = json_value(input_audio, "data", std::string()); + std::string format = json_value(input_audio, "format", std::string()); + // while we also support flac, we don't allow it here so we matches the OAI spec + if (format != "wav" && format != "mp3") { + throw std::invalid_argument("input_audio.format must be either 'wav' or 'mp3'"); + } + auto decoded_data = base64_decode(data); // expected to be base64 encoded + out_files.push_back(decoded_data); + + // TODO: add audio_url support by reusing handle_media() + + // replace this chunk with a marker + p["type"] = "text"; + p["text"] = mtmd_default_marker(); + p.erase("input_audio"); + + } else if (type != "text") { + throw std::invalid_argument("unsupported content[].type"); + } + } + } + + common_chat_templates_inputs inputs; + inputs.messages = common_chat_msgs_parse_oaicompat(messages); + inputs.tools = common_chat_tools_parse_oaicompat(tools); + inputs.tool_choice = common_chat_tool_choice_parse_oaicompat(tool_choice); + inputs.json_schema = json_schema.is_null() ? 
"" : json_schema.dump(); + inputs.grammar = grammar; + inputs.use_jinja = opt.use_jinja; + inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); + inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true); + inputs.reasoning_format = opt.reasoning_format; + if (body.contains("reasoning_format")) { + inputs.reasoning_format = common_reasoning_format_from_name(body.at("reasoning_format").get()); + } + inputs.enable_thinking = opt.enable_thinking; + if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) { + if (body.contains("grammar")) { + throw std::invalid_argument("Cannot use custom grammar constraints with tools."); + } + llama_params["parse_tool_calls"] = true; + } + + // merge the template args provided from command line with the args provided in the user request + auto chat_template_kwargs_object = json_value(body, "chat_template_kwargs", json::object()); + inputs.chat_template_kwargs = opt.chat_template_kwargs; + for (const auto & item : chat_template_kwargs_object.items()) { + inputs.chat_template_kwargs[item.key()] = item.value().dump(); + } + + // parse the "enable_thinking" kwarg to override the default value + auto enable_thinking_kwarg = json_value(inputs.chat_template_kwargs, "enable_thinking", std::string("")); + if (enable_thinking_kwarg == "true") { + inputs.enable_thinking = true; + } else if (enable_thinking_kwarg == "false") { + inputs.enable_thinking = false; + } else if (!enable_thinking_kwarg.empty() && enable_thinking_kwarg[0] == '"') { + throw std::invalid_argument("invalid type for \"enable_thinking\" (expected boolean, got string)"); + } + + // if the assistant message appears at the end of list, we do not add end-of-turn token + // for ex. this can be useful to modify the reasoning process in reasoning models + bool prefill_assistant_message = !inputs.messages.empty() && inputs.messages.back().role == "assistant" && opt.prefill_assistant; + common_chat_msg last_message; + if (prefill_assistant_message) { + last_message = inputs.messages.back(); + inputs.messages.pop_back(); + + /* sanity check, max one assistant message at the end of the list */ + if (!inputs.messages.empty() && inputs.messages.back().role == "assistant"){ + throw std::invalid_argument("Cannot have 2 or more assistant messages at the end of the list."); + } + + /* TODO: test this properly */ + inputs.reasoning_format = COMMON_REASONING_FORMAT_NONE; + + if ( inputs.enable_thinking ) { + throw std::invalid_argument("Assistant response prefill is incompatible with enable_thinking."); + } + + inputs.add_generation_prompt = true; + } + + // Apply chat template to the list of messages + auto chat_params = common_chat_templates_apply(opt.tmpls.get(), inputs); + + /* Append assistant prefilled message */ + if (prefill_assistant_message) { + if (!last_message.content_parts.empty()) { + for (auto & p : last_message.content_parts) { + chat_params.prompt += p.text; + } + } else { + chat_params.prompt += last_message.content; + } + } + + llama_params["chat_format"] = static_cast(chat_params.format); + llama_params["prompt"] = chat_params.prompt; + if (!chat_params.grammar.empty()) { + llama_params["grammar"] = chat_params.grammar; + } + llama_params["grammar_lazy"] = chat_params.grammar_lazy; + auto grammar_triggers = json::array(); + for (const auto & trigger : chat_params.grammar_triggers) { + server_grammar_trigger ct(trigger); + grammar_triggers.push_back(ct.to_json()); + } + llama_params["grammar_triggers"] = grammar_triggers; + 
llama_params["preserved_tokens"] = chat_params.preserved_tokens; + llama_params["thinking_forced_open"] = chat_params.thinking_forced_open; + for (const auto & stop : chat_params.additional_stops) { + llama_params["stop"].push_back(stop); + } + if (!chat_params.parser.empty()) { + llama_params["chat_parser"] = chat_params.parser; + } + + // Handle "logprobs" field + // TODO: The response format of this option is not yet OAI-compatible, but seems like no one really using it; We may need to fix it in the future + if (json_value(body, "logprobs", false)) { + if (has_tools && stream) { + throw std::invalid_argument("logprobs is not supported with tools + stream"); + } + llama_params["n_probs"] = json_value(body, "top_logprobs", 20); + } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) { + throw std::invalid_argument("top_logprobs requires logprobs to be set to true"); + } + + // Copy remaining properties to llama_params + // This allows user to use llama.cpp-specific params like "mirostat", ... via OAI endpoint. + // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp + for (const auto & item : body.items()) { + // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens" + if (!llama_params.contains(item.key()) || item.key() == "n_predict") { + llama_params[item.key()] = item.value(); + } + } + + return llama_params; +} + +json convert_responses_to_chatcmpl(const json & response_body) { + if (!response_body.contains("input")) { + throw std::invalid_argument("'input' is required"); + } + if (!json_value(response_body, "previous_response_id", std::string{}).empty()) { + throw std::invalid_argument("llama.cpp does not support 'previous_response_id'."); + } + + const json input_value = response_body.at("input"); + json chatcmpl_body = response_body; + chatcmpl_body.erase("input"); + std::vector chatcmpl_messages; + + if (response_body.contains("instructions")) { + chatcmpl_messages.push_back({ + {"role", "system"}, + {"content", json_value(response_body, "instructions", std::string())}, + }); + chatcmpl_body.erase("instructions"); + } + + if (input_value.is_string()) { + // #responses_create-input-text_input + chatcmpl_messages.push_back({ + {"role", "user"}, + {"content", input_value}, + }); + } else if (input_value.is_array()) { + // #responses_create-input-input_item_list + + static auto exists_and_is_array = [](const json & j, const char * key) -> bool { + return j.contains(key) && j.at(key).is_array(); + }; + static auto exists_and_is_string = [](const json & j, const char * key) -> bool { + return j.contains(key) && j.at(key).is_string(); + }; + + for (json item : input_value) { + if (exists_and_is_string(item, "content")) { + // #responses_create-input-input_item_list-input_message-content-text_input + // Only "Input message" contains item["content"]::string + // After converting item["content"]::string to item["content"]::array, + // we can treat "Input message" as sum of "Item-Input message" and "Item-Output message" + item["content"] = json::array({ + json { + {"text", item.at("content")}, + {"type", "input_text"} + } + }); + } + + if (exists_and_is_array(item, "content") && + exists_and_is_string(item, "role") && + (item.at("role") == "user" || + item.at("role") == "system" || + item.at("role") == "developer") + ) { + // #responses_create-input-input_item_list-item-input_message + std::vector chatcmpl_content; + + for (const json & input_item : item.at("content")) { + const std::string type 
= json_value(input_item, "type", std::string()); + + if (type == "input_text") { + if (!input_item.contains("text")) { + throw std::invalid_argument("'Input text' requires 'text'"); + } + chatcmpl_content.push_back({ + {"text", input_item.at("text")}, + {"type", "text"}, + }); + } else if (type == "input_image") { + // While `detail` is marked as required, + // it has default value("auto") and can be omitted. + + if (!input_item.contains("image_url")) { + throw std::invalid_argument("'image_url' is required"); + } + chatcmpl_content.push_back({ + {"image_url", json { + {"url", input_item.at("image_url")} + }}, + {"type", "image_url"}, + }); + } else if (type == "input_file") { + throw std::invalid_argument("'input_file' is not supported by llamacpp at this moment"); + // if (input_item.contains("file_url")) { + // // chat completion API does not support file_url + // throw std::invalid_argument("'file_url' is not supported"); + // } + // if (!input_item.contains("file_data") || !input_item.contains("filename")) { + // throw std::invalid_argument("Both 'file_data' and 'filename' are required"); + // } + // chatcmpl_content.push_back({ + // {"file", json { + // {"file_data", input_item.at("file_data")}, + // {"filename", input_item.at("filename")}, + // }}, + // {"type", "file"}, + // }); + } else { + throw std::invalid_argument("'type' must be one of 'input_text', 'input_image', or 'input_file'"); + } + } + + if (item.contains("type")) { + item.erase("type"); + } + if (item.contains("status")) { + item.erase("status"); + } + item["content"] = chatcmpl_content; + + chatcmpl_messages.push_back(item); + } else if (exists_and_is_array(item, "content") && + exists_and_is_string(item, "role") && + item.at("role") == "assistant" && + // exists_and_is_string(item, "status") && + // (item.at("status") == "in_progress" || + // item.at("status") == "completed" || + // item.at("status") == "incomplete") && + // item["status"] not sent by codex-cli + exists_and_is_string(item, "type") && + item.at("type") == "message" + ) { + // #responses_create-input-input_item_list-item-output_message + std::vector chatcmpl_content; + + for (const auto & output_text : item.at("content")) { + const std::string type = json_value(output_text, "type", std::string()); + if (type != "output_text") { + throw std::invalid_argument("'type' must be 'output_text'"); + } + if (!exists_and_is_string(output_text, "text")) { + throw std::invalid_argument("'Output text' requires 'text'"); + } + // Ignore annotations and logprobs for now + chatcmpl_content.push_back({ + {"text", output_text.at("text")}, + {"type", "text"}, + }); + } + + item.erase("status"); + item.erase("type"); + item["content"] = chatcmpl_content; + chatcmpl_messages.push_back(item); + } else if (exists_and_is_string(item, "arguments") && + exists_and_is_string(item, "call_id") && + exists_and_is_string(item, "name") && + exists_and_is_string(item, "type") && + item.at("type") == "function_call" + ) { + // #responses_create-input-input_item_list-item-function_tool_call + json msg = json { + {"role", "assistant"}, + {"tool_calls", json::array({ json { + {"function", json { + {"arguments", item.at("arguments")}, + {"name", item.at("name")}, + }}, + {"id", item.at("call_id")}, + {"type", "function"}, + }})}, + }; + + if (!chatcmpl_messages.empty() && chatcmpl_messages.back().contains("reasoning_content")) { + // Move reasoning content from dummy message to tool call message + msg["reasoning_content"] = chatcmpl_messages.back().at("reasoning_content"); + 
chatcmpl_messages.pop_back(); + } + chatcmpl_messages.push_back(msg); + } else if (exists_and_is_string(item, "call_id") && + (exists_and_is_string(item, "output") || exists_and_is_array(item, "output")) && + exists_and_is_string(item, "type") && + item.at("type") == "function_call_output" + ) { + // #responses_create-input-input_item_list-item-function_tool_call_output + if (item.at("output").is_string()) { + chatcmpl_messages.push_back(json { + {"content", item.at("output")}, + {"role", "tool"}, + {"tool_call_id", item.at("call_id")}, + }); + } else { + json chatcmpl_outputs = item.at("output"); + for (json & chatcmpl_output : chatcmpl_outputs) { + if (!chatcmpl_output.contains("type") || chatcmpl_output.at("type") != "input_text") { + throw std::invalid_argument("Output of tool call should be 'Input text'"); + } + chatcmpl_output["type"] = "text"; + } + chatcmpl_messages.push_back(json { + {"content", chatcmpl_outputs}, + {"role", "tool"}, + {"tool_call_id", item.at("call_id")}, + }); + } + } else if (// exists_and_is_string(item, "id") && + // item["id"] not sent by codex-cli + exists_and_is_array(item, "summary") && + exists_and_is_string(item, "type") && + item.at("type") == "reasoning") { + // #responses_create-input-input_item_list-item-reasoning + + if (!exists_and_is_array(item, "content")) { + throw std::invalid_argument("item['content'] is not an array"); + } + if (item.at("content").empty()) { + throw std::invalid_argument("item['content'] is empty"); + } + if (!exists_and_is_string(item.at("content")[0], "text")) { + throw std::invalid_argument("item['content']['text'] is not a string"); + } + + // Pack reasoning content in dummy message + chatcmpl_messages.push_back(json { + {"role", "assistant"}, + {"content", json::array()}, + {"reasoning_content", item.at("content")[0].at("text")}, + }); + } else { + throw std::invalid_argument("Cannot determine type of 'item'"); + } + } + } else { + throw std::invalid_argument("'input' must be a string or array of objects"); + } + + // Remove unused dummy message which contains + // reasoning content not followed by tool call + chatcmpl_messages.erase(std::remove_if( + chatcmpl_messages.begin(), + chatcmpl_messages.end(), + [](const json & x){ return x.contains("role") && + x.at("role") == "assistant" && + x.contains("content") && + x.at("content") == json::array() && + x.contains("reasoning_content"); + }), + chatcmpl_messages.end() + ); + + chatcmpl_body["messages"] = chatcmpl_messages; + + if (response_body.contains("tools")) { + if (!response_body.at("tools").is_array()) { + throw std::invalid_argument("'tools' must be an array of objects"); + } + std::vector chatcmpl_tools; + for (json resp_tool : response_body.at("tools")) { + json chatcmpl_tool; + + if (json_value(resp_tool, "type", std::string()) != "function") { + throw std::invalid_argument("'type' of tool must be 'function'"); + } + resp_tool.erase("type"); + chatcmpl_tool["type"] = "function"; + + if (!resp_tool.contains("strict")) { + resp_tool["strict"] = true; + } + chatcmpl_tool["function"] = resp_tool; + chatcmpl_tools.push_back(chatcmpl_tool); + } + chatcmpl_body.erase("tools"); + chatcmpl_body["tools"] = chatcmpl_tools; + } + + if (response_body.contains("max_output_tokens")) { + chatcmpl_body.erase("max_output_tokens"); + chatcmpl_body["max_tokens"] = response_body["max_output_tokens"]; + } + + return chatcmpl_body; +} + +json convert_anthropic_to_oai(const json & body) { + json oai_body; + + // Convert system prompt + json oai_messages = json::array(); + auto 
system_param = json_value(body, "system", json()); + if (!system_param.is_null()) { + std::string system_content; + + if (system_param.is_string()) { + system_content = system_param.get(); + } else if (system_param.is_array()) { + for (const auto & block : system_param) { + if (json_value(block, "type", std::string()) == "text") { + system_content += json_value(block, "text", std::string()); + } + } + } + + oai_messages.push_back({ + {"role", "system"}, + {"content", system_content} + }); + } + + // Convert messages + if (!body.contains("messages")) { + throw std::runtime_error("'messages' is required"); + } + const json & messages = body.at("messages"); + if (messages.is_array()) { + for (const auto & msg : messages) { + std::string role = json_value(msg, "role", std::string()); + + if (!msg.contains("content")) { + if (role == "assistant") { + continue; + } + oai_messages.push_back(msg); + continue; + } + + const json & content = msg.at("content"); + + if (content.is_string()) { + oai_messages.push_back(msg); + continue; + } + + if (!content.is_array()) { + oai_messages.push_back(msg); + continue; + } + + json tool_calls = json::array(); + json converted_content = json::array(); + json tool_results = json::array(); + bool has_tool_calls = false; + + for (const auto & block : content) { + std::string type = json_value(block, "type", std::string()); + + if (type == "text") { + converted_content.push_back(block); + } else if (type == "image") { + json source = json_value(block, "source", json::object()); + std::string source_type = json_value(source, "type", std::string()); + + if (source_type == "base64") { + std::string media_type = json_value(source, "media_type", std::string("image/jpeg")); + std::string data = json_value(source, "data", std::string()); + std::ostringstream ss; + ss << "data:" << media_type << ";base64," << data; + + converted_content.push_back({ + {"type", "image_url"}, + {"image_url", { + {"url", ss.str()} + }} + }); + } else if (source_type == "url") { + std::string url = json_value(source, "url", std::string()); + converted_content.push_back({ + {"type", "image_url"}, + {"image_url", { + {"url", url} + }} + }); + } + } else if (type == "tool_use") { + tool_calls.push_back({ + {"id", json_value(block, "id", std::string())}, + {"type", "function"}, + {"function", { + {"name", json_value(block, "name", std::string())}, + {"arguments", json_value(block, "input", json::object()).dump()} + }} + }); + has_tool_calls = true; + } else if (type == "tool_result") { + std::string tool_use_id = json_value(block, "tool_use_id", std::string()); + + auto result_content = json_value(block, "content", json()); + std::string result_text; + if (result_content.is_string()) { + result_text = result_content.get(); + } else if (result_content.is_array()) { + for (const auto & c : result_content) { + if (json_value(c, "type", std::string()) == "text") { + result_text += json_value(c, "text", std::string()); + } + } + } + + tool_results.push_back({ + {"role", "tool"}, + {"tool_call_id", tool_use_id}, + {"content", result_text} + }); + } + } + + if (!converted_content.empty() || has_tool_calls) { + json new_msg = {{"role", role}}; + if (!converted_content.empty()) { + new_msg["content"] = converted_content; + } else if (has_tool_calls) { + new_msg["content"] = ""; + } + if (!tool_calls.empty()) { + new_msg["tool_calls"] = tool_calls; + } + oai_messages.push_back(new_msg); + } + + for (const auto & tool_msg : tool_results) { + oai_messages.push_back(tool_msg); + } + } + } + + 
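To make the mapping performed by the loop above concrete, the sketch below feeds a small, hypothetical Anthropic-style conversation with one tool call through convert_anthropic_to_oai() (declared in server-common.h in this patch) and prints the Chat Completions shaped result. It assumes the translation unit is linked against server-common.cpp; the tool name, id and values are made up.

```cpp
// Illustrative only: an Anthropic Messages payload with a tool_use / tool_result pair.
// The tool_use block becomes an OpenAI-style "tool_calls" entry on an assistant message,
// and the tool_result block becomes a separate {"role":"tool"} message.
#include "server-common.h"
#include <iostream>

int main() {
    json anthropic_body;
    anthropic_body["max_tokens"] = 256;
    anthropic_body["messages"]   = json::array({
        json{{"role", "user"}, {"content", "What is the weather in Ljubljana?"}},
        json{{"role", "assistant"}, {"content", json::array({
            json{{"type", "tool_use"}, {"id", "toolu_01"}, {"name", "get_weather"},
                 {"input", json{{"city", "Ljubljana"}}}},
        })}},
        json{{"role", "user"}, {"content", json::array({
            json{{"type", "tool_result"}, {"tool_use_id", "toolu_01"},
                 {"content", "12 degrees, cloudy"}},
        })}},
    });

    std::cout << convert_anthropic_to_oai(anthropic_body).dump(2) << std::endl;
    return 0;
}
```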
oai_body["messages"] = oai_messages; + + // Convert tools + if (body.contains("tools")) { + const json & tools = body.at("tools"); + if (tools.is_array()) { + json oai_tools = json::array(); + for (const auto & tool : tools) { + oai_tools.push_back({ + {"type", "function"}, + {"function", { + {"name", json_value(tool, "name", std::string())}, + {"description", json_value(tool, "description", std::string())}, + {"parameters", tool.contains("input_schema") ? tool.at("input_schema") : json::object()} + }} + }); + } + oai_body["tools"] = oai_tools; + } + } + + // Convert tool_choice + if (body.contains("tool_choice")) { + const json & tc = body.at("tool_choice"); + if (tc.is_object()) { + std::string type = json_value(tc, "type", std::string()); + if (type == "auto") { + oai_body["tool_choice"] = "auto"; + } else if (type == "any" || type == "tool") { + oai_body["tool_choice"] = "required"; + } + } + } + + // Convert stop_sequences to stop + if (body.contains("stop_sequences")) { + oai_body["stop"] = body.at("stop_sequences"); + } + + // Handle max_tokens (required in Anthropic, but we're permissive) + if (body.contains("max_tokens")) { + oai_body["max_tokens"] = body.at("max_tokens"); + } else { + oai_body["max_tokens"] = 4096; + } + + // Pass through common params + for (const auto & key : {"temperature", "top_p", "top_k", "stream"}) { + if (body.contains(key)) { + oai_body[key] = body.at(key); + } + } + + // Handle Anthropic-specific thinking param + if (body.contains("thinking")) { + json thinking = json_value(body, "thinking", json::object()); + std::string thinking_type = json_value(thinking, "type", std::string()); + if (thinking_type == "enabled") { + int budget_tokens = json_value(thinking, "budget_tokens", 10000); + oai_body["thinking_budget_tokens"] = budget_tokens; + } + } + + // Handle Anthropic-specific metadata param + if (body.contains("metadata")) { + json metadata = json_value(body, "metadata", json::object()); + std::string user_id = json_value(metadata, "user_id", std::string()); + if (!user_id.empty()) { + oai_body["__metadata_user_id"] = user_id; + } + } + + return oai_body; +} + +json format_embeddings_response_oaicompat( + const json & request, + const std::string & model_name, + const json & embeddings, + bool use_base64) { + json data = json::array(); + int32_t n_tokens = 0; + int i = 0; + for (const auto & elem : embeddings) { + json embedding_obj; + + if (use_base64) { + const auto& vec = json_value(elem, "embedding", json::array()).get>(); + const char* data_ptr = reinterpret_cast(vec.data()); + size_t data_size = vec.size() * sizeof(float); + embedding_obj = { + {"embedding", base64::encode(data_ptr, data_size)}, + {"index", i++}, + {"object", "embedding"}, + {"encoding_format", "base64"} + }; + } else { + embedding_obj = { + {"embedding", json_value(elem, "embedding", json::array())}, + {"index", i++}, + {"object", "embedding"} + }; + } + data.push_back(embedding_obj); + + n_tokens += json_value(elem, "tokens_evaluated", 0); + } + + json res = json { + {"model", json_value(request, "model", model_name)}, + {"object", "list"}, + {"usage", json { + {"prompt_tokens", n_tokens}, + {"total_tokens", n_tokens} + }}, + {"data", data} + }; + + return res; +} + +json format_response_rerank( + const json & request, + const std::string & model_name, + const json & ranks, + bool is_tei_format, + std::vector & texts, + int top_n) { + int32_t n_tokens = 0; + bool return_text = is_tei_format && json_value(request, "return_text", false); + std::vector elements; // Temporary 
vector to hold unsorted elements + std::string score_label = is_tei_format ? "score" : "relevance_score"; + for (const auto & rank : ranks) { + int index = json_value(rank, "index", 0); + json elem = json{ + {"index", index}, + {score_label, json_value(rank, "score", 0.0)}, + }; + n_tokens += json_value(rank, "tokens_evaluated", 0); + if (return_text) { + elem["text"] = std::move(texts[index]); + } + elements.push_back(elem); + } + + std::sort(elements.begin(), elements.end(), [score_label](const json& a, const json& b) { + return json_value(a, score_label, 0.0) > json_value(b, score_label, 0.0); + }); + + elements.resize(std::min(top_n, (int)elements.size())); + json results = elements; + + if (is_tei_format) return results; + + json res = json{ + {"model", json_value(request, "model", model_name)}, + {"object", "list"}, + {"usage", json{ + {"prompt_tokens", n_tokens}, + {"total_tokens", n_tokens} + }}, + {"results", results} + }; + + return res; +} + + +// +// other utils +// + +std::vector get_token_probabilities(llama_context * ctx, int idx) { + std::vector cur; + + const auto * logits = llama_get_logits_ith(ctx, idx); + const llama_token * sampled_ids = llama_get_sampled_candidates_ith(ctx, idx); + + const int n_logits = llama_get_sampled_logits_count_ith(ctx, idx); + + cur.resize(n_logits); + if (sampled_ids) { + for (int i = 0; i < n_logits; i++) { + cur[i] = llama_token_data{sampled_ids[i], logits[i], 0.0f}; + } + } else { + for (llama_token token_id = 0; token_id < n_logits; token_id++) { + cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f}; + } + } + + // sort tokens by logits + std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) { + return a.logit > b.logit; + }); + + // apply softmax + float max_l = cur[0].logit; + float cum_sum = 0.0f; + for (size_t i = 0; i < cur.size(); ++i) { + float p = expf(cur[i].logit - max_l); + cur[i].p = p; + cum_sum += p; + } + for (size_t i = 0; i < cur.size(); ++i) { + cur[i].p /= cum_sum; + } + + return cur; +} + +std::string safe_json_to_str(const json & data) { + return data.dump(-1, ' ', false, json::error_handler_t::replace); +} + +// TODO: reuse llama_detokenize +template +static std::string tokens_to_str(const llama_vocab * ctx, Iter begin, Iter end) { + std::string ret; + for (; begin != end; ++begin) { + ret += common_token_to_piece(ctx, *begin); + } + + return ret; +} + +std::string tokens_to_str(llama_context * ctx, const llama_tokens & tokens) { + auto model = llama_get_model(ctx); + return tokens_to_str(llama_model_get_vocab(model), tokens.begin(), tokens.end()); +} + +std::string tokens_to_str(const llama_vocab * vocab, const llama_tokens & tokens) { + return tokens_to_str(vocab, tokens.begin(), tokens.end()); +} + +// format incomplete utf-8 multibyte character for output +std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) { + std::string out = token == LLAMA_TOKEN_NULL ? 
"" : common_token_to_piece(ctx, token); + + // if the size is 1 and first bit is 1, meaning it's a partial character + // (size > 1 meaning it's already a known token) + if (out.size() == 1 && (out[0] & 0x80) == 0x80) { + std::stringstream ss; + ss << std::hex << (out[0] & 0xff); + std::string res(ss.str()); + out = "byte: \\x" + res; + } + + return out; +} + +// format server-sent event (SSE), return the formatted string to send +// note: if data is a json array, it will be sent as multiple events, one per item +std::string format_oai_sse(const json & data) { + std::ostringstream ss; + auto send_single = [&ss](const json & data) { + ss << "data: " << + safe_json_to_str(data) << + "\n\n"; // required by RFC 8895 - A message is terminated by a blank line (two line terminators in a row). + }; + + if (data.is_array()) { + for (const auto & item : data) { + send_single(item); + } + } else { + send_single(data); + } + + return ss.str(); +} + +std::string format_oai_resp_sse(const json & data) { + std::ostringstream ss; + auto send_single = [&ss](const json & event_obj) { + ss << "event: " << event_obj.at("event").get() << "\n"; + ss << "data: " << safe_json_to_str(event_obj.at("data")) << "\n\n"; + }; + + if (data.is_array()) { + for (const auto & item : data) { + send_single(item); + } + } else { + send_single(data); + } + + return ss.str(); +} + +std::string format_anthropic_sse(const json & data) { + std::ostringstream ss; + + auto send_event = [&ss](const json & event_obj) { + if (event_obj.contains("event") && event_obj.contains("data")) { + ss << "event: " << event_obj.at("event").get() << "\n"; + ss << "data: " << safe_json_to_str(event_obj.at("data")) << "\n\n"; + } else { + ss << "data: " << safe_json_to_str(event_obj) << "\n\n"; + } + }; + + if (data.is_array()) { + for (const auto & event : data) { + send_event(event); + } + } else { + send_event(data); + } + + return ss.str(); +} + +bool is_valid_utf8(const std::string & str) { + const unsigned char* bytes = reinterpret_cast(str.data()); + const unsigned char* end = bytes + str.length(); + + while (bytes < end) { + if (*bytes <= 0x7F) { + // 1-byte sequence (0xxxxxxx) + bytes++; + } else if ((*bytes & 0xE0) == 0xC0) { + // 2-byte sequence (110xxxxx 10xxxxxx) + if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80) + return false; + bytes += 2; + } else if ((*bytes & 0xF0) == 0xE0) { + // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx) + if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80) + return false; + bytes += 3; + } else if ((*bytes & 0xF8) == 0xF0) { + // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx) + if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 || + (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80) + return false; + bytes += 4; + } else { + // Invalid UTF-8 lead byte + return false; + } + } + + return true; +} + +llama_tokens format_prompt_infill( + const llama_vocab * vocab, + const json & input_prefix, + const json & input_suffix, + const json & input_extra, + const int n_batch, + const int n_predict, + const int n_ctx, + const bool spm_infill, + const llama_tokens & tokens_prompt + ) { + // TODO: optimize this block by reducing memory allocations and movement + + // use FIM repo-level pattern: + // ref: https://arxiv.org/pdf/2409.12186 + // + // [FIM_REP]myproject + // [FIM_SEP]filename0 + // extra chunk 0 + // [FIM_SEP]filename1 + // extra chunk 1 + // ... 
+ // [FIM_SEP]filename + // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt + // + llama_tokens extra_tokens; + extra_tokens.reserve(n_ctx); + + auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false); + auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false); + + if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) { + // TODO: make project name an input + static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false); + + extra_tokens.push_back(llama_vocab_fim_rep(vocab)); + extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end()); + } + for (const auto & chunk : input_extra) { + // { "text": string, "filename": string } + const std::string text = json_value(chunk, "text", std::string()); + const std::string filename = json_value(chunk, "filename", std::string("tmp")); + + if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) { + const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false); + + extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab)); + extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end()); + } else { + // chunk separator in binary form to avoid confusing the AI + static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00}; + static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false); + + extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end()); + } + + const auto chunk_tokens = common_tokenize(vocab, text, false, false); + extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end()); + } + + if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) { + // TODO: current filename + static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false); + + extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab)); + extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end()); + } + + // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?) + const int n_prefix_take = std::min(tokens_prefix.size(), 3*(n_batch/4)); + const int n_suffix_take = std::min(tokens_suffix.size(), std::max(0, (n_batch/4) - (2 + tokens_prompt.size()))); + + SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take)); + + // fill the rest of the context with extra chunks + const int n_extra_take = std::min(std::max(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size()); + + tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take); + tokens_suffix.resize(n_suffix_take); + + tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab)); + tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end()); + tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab)); + + auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix; + auto embd_end = spm_infill ? 
tokens_prefix : tokens_suffix; + + if (llama_vocab_get_add_bos(vocab)) { + embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab)); + } + + SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size()); + + // put the extra context before the FIM prefix + embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end()); + + embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); + embd_inp.push_back(llama_vocab_fim_mid(vocab)); + + return embd_inp; +} + +server_tokens format_prompt_rerank( + const struct llama_model * model, + const struct llama_vocab * vocab, + mtmd_context * mctx, + const std::string & query, + const std::string & doc) { + server_tokens result = {}; + + const char * rerank_prompt = llama_model_chat_template(model, "rerank"); + + if (rerank_prompt != nullptr) { + std::string prompt = rerank_prompt; + string_replace_all(prompt, "{query}" , query); + string_replace_all(prompt, "{document}", doc ); + server_tokens tokens = tokenize_input_subprompt(vocab, mctx, prompt, false, true); + result.push_back(tokens); + } else { + // Get EOS token - use SEP token as fallback if EOS is not available + server_tokens query_tokens = tokenize_input_subprompt(vocab, mctx, query, false, false); + server_tokens doc_tokens = tokenize_input_subprompt(vocab, mctx, doc, false, false); + llama_token eos_token = llama_vocab_eos(vocab); + if (eos_token == LLAMA_TOKEN_NULL) { + eos_token = llama_vocab_sep(vocab); + } + + if (llama_vocab_get_add_bos(vocab)) { + result.push_back(llama_vocab_bos(vocab)); + } + result.push_back(query_tokens); + if (llama_vocab_get_add_eos(vocab)) { + result.push_back(eos_token); + } + if (llama_vocab_get_add_sep(vocab)) { + result.push_back(llama_vocab_sep(vocab)); + } + result.push_back(doc_tokens); + if (llama_vocab_get_add_eos(vocab)) { + result.push_back(eos_token); + } + } + + return result; +} diff --git a/llama.cpp/tools/server/server-common.h b/llama.cpp/tools/server/server-common.h new file mode 100644 index 0000000..2629a6b --- /dev/null +++ b/llama.cpp/tools/server/server-common.h @@ -0,0 +1,366 @@ +#pragma once + +#include "common.h" +#include "log.h" +#include "llama.h" +#include "chat.h" +#include "mtmd.h" + +#define JSON_ASSERT GGML_ASSERT +#include + +#include +#include +#include + +using json = nlohmann::ordered_json; + +#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__) +#define SLT_CNT(slot, fmt, ...) LOG_CNT("" fmt, __VA_ARGS__) +#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__) +#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__) +#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__) + +#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define SRV_CNT(fmt, ...) LOG_CNT("" fmt, __VA_ARGS__) +#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define SRV_DBG(fmt, ...) 
LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__) + +using raw_buffer = std::vector; + +template +static T json_value(const json & body, const std::string & key, const T & default_value) { + // Fallback null to default value + if (body.contains(key) && !body.at(key).is_null()) { + try { + return body.at(key); + } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const & err) { + LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value: %s\n", key.c_str(), json(default_value).type_name(), err.what()); + return default_value; + } + } else { + return default_value; + } +} + +// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11 +enum error_type { + ERROR_TYPE_INVALID_REQUEST, + ERROR_TYPE_AUTHENTICATION, + ERROR_TYPE_SERVER, + ERROR_TYPE_NOT_FOUND, + ERROR_TYPE_PERMISSION, + ERROR_TYPE_UNAVAILABLE, // custom error + ERROR_TYPE_NOT_SUPPORTED, // custom error + ERROR_TYPE_EXCEED_CONTEXT_SIZE, // custom error +}; + +// thin wrapper around common_grammar_trigger with (de)serialization functions +struct server_grammar_trigger { + common_grammar_trigger value; + + server_grammar_trigger() = default; + server_grammar_trigger(const common_grammar_trigger & value) : value(value) {} + server_grammar_trigger(const json & in) { + value.type = (common_grammar_trigger_type) in.at("type").get(); + value.value = in.at("value").get(); + if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) { + value.token = (llama_token) in.at("token").get(); + } + } + + json to_json() const { + json out { + {"type", (int) value.type}, + {"value", value.value}, + }; + if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) { + out["token"] = (int) value.token; + } + return out; + } +}; + +json format_error_response(const std::string & message, const enum error_type type); + +// +// random string / id +// + +std::string random_string(); +std::string gen_chatcmplid(); +std::string gen_tool_call_id(); + +// +// lora utils +// + +// check whether the given lora set has only aloras activated (empty => false) +bool lora_all_alora(const std::vector & loras); + +// if the two sets of loras are different, they require a cache clear unless the +// change is only from aloras to aloras. +bool lora_should_clear_cache( + const std::vector & current, + const std::vector & next); + +std::map parse_lora_request(const json & data); + +bool are_lora_equal( + const std::vector & l1, + const std::vector & l2); + +// get the ids of all enabled loras +std::vector lora_get_enabled_ids(const std::vector & loras); + +// +// server_tokens +// + +/** + * server_tokens is a helper to manage the input tokens and image for the server. + * it is made this way to simplify the logic of KV cache management. + */ +struct server_tokens { + bool has_mtmd = false; + +private: // disallow accessing these members directly, risking out-of-sync + + // map a **start** index in tokens to the image chunk + // note: the order need to be in-sync with tokens + std::map map_idx_to_media; + + // list of tokens + // if the token is LLAMA_TOKEN_NULL, it indicates that this position is occupied by media chunk + // otherwise, it is a normal text token + // note: a non-text chunk can occupy multiple tokens (aka memory cells) in the token list + // note(2): for M-RoPE, an image can occupy different number of pos; do not assume 1-to-1 mapping tokens <-> pos + llama_tokens tokens; + + // for ex. 
with input of 5 text tokens and 2 images (each image occupies 3 tokens and 2 pos): + // [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1] [img1] + // idx 0 1 2 3 4 5 6 7 8 9 10 + // pos 0 1 2 3 4 5 5 5 7 7 7 + // map_idx_to_media will contain: {5, img0}, {8, img1} + +public: + server_tokens() = default; + ~server_tokens() = default; + + // Prevent copying + // TODO: server_tokens should be copyable - remove this: + server_tokens(const server_tokens&) = delete; + server_tokens& operator=(const server_tokens&) = delete; + + // Allow moving (usually implicitly generated if members are movable) + server_tokens(server_tokens&&) = default; + server_tokens& operator=(server_tokens&&) = default; + + // Allow accessing elements using [] operator + llama_token operator[](size_t index) { return tokens[index]; } + const llama_token& operator[](size_t index) const { return tokens[index]; } + + server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd); + server_tokens(const llama_tokens & tokens, bool has_mtmd); + + // for debugging + std::string str() const; + + llama_pos pos_next() const; + const mtmd::input_chunk_ptr & find_chunk(size_t idx) const; + + void push_back(llama_token tok); + + // will create a copy of the chunk if it contains non-text data + void push_back(const mtmd_input_chunk * chunk); + + // appends server tokens, updates the media map. copies media chunks. + void push_back(server_tokens & tokens); + + // for compatibility with context shift and prompt truncation + void insert(const llama_tokens & inp_tokens); + + // for compatibility with speculative decoding, ctx shift, slot save/load + const llama_tokens & get_text_tokens() const; + + // for compatibility with speculative decoding + void set_token(llama_pos pos, llama_token id); + + size_t size() const { return tokens.size(); } + + bool empty() const { return tokens.empty(); } + + void clear() { + map_idx_to_media.clear(); + tokens.clear(); + } + + void keep_first(size_t n); + + std::string detokenize(const llama_context * ctx, bool special) const; + + size_t get_common_prefix(const server_tokens & b) const; + + // make sure all text tokens are within the vocab range + bool validate(const struct llama_context * ctx) const; + + // encode and decode the image chunk + int32_t process_chunk( + llama_context * ctx, + mtmd_context * mctx, + size_t idx, + llama_pos pos, + int32_t seq_id, + size_t & n_tokens_out) const; + + server_tokens clone() const; +}; + + +// +// tokenizer and input processing utils +// + +bool json_is_array_of_numbers(const json & data); + +// is array having BOTH numbers & strings? +bool json_is_array_of_mixed_numbers_strings(const json & data); + +// does array have any individual integers/tokens? 
+bool json_is_array_and_contains_numbers(const json & data); + +// get value by path(key1 / key2) +json json_get_nested_values(const std::vector & paths, const json & js); + +/** + * this handles 2 cases: + * - only string, example: "string" + * - mixed string and tokens, example: [12, 34, "string", 56, 78] + */ +llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special); + +// return the last index of character that can form a valid string +// if the last character is potentially cut in half, return the index before the cut +// if validate_utf8(text) == text.size(), then the whole text is valid utf8 +size_t validate_utf8(const std::string& text); + +// process mtmd prompt, return the server_tokens containing both text tokens and media chunks +server_tokens process_mtmd_prompt(mtmd_context * mctx, std::string prompt, std::vector files); + +/** + * break the input "prompt" object into multiple prompt if needed, then tokenize them + * this supports these cases: + * - "prompt": "string" + * - "prompt": [12, 34, 56] + * - "prompt": [12, 34, "string", 56, 78] + * - "prompt": { "prompt_string": "string", "multimodal_data": [ "base64" ] } + * and multiple prompts (multi-tasks): + * - "prompt": ["string1", "string2"] + * - "prompt": ["string1", [12, 34, 56]] + * - "prompt": [[12, 34, 56], [78, 90, 12]] + * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56], { "prompt_string": "string", "multimodal_data": [ "base64" ]}] + */ +std::vector tokenize_input_prompts( + const llama_vocab * vocab, + mtmd_context * mctx, + const json & json_prompt, + bool add_special, + bool parse_special); + +// +// OAI utils +// + +// global server parameters for chat formatting / parsing +struct server_chat_params { + bool use_jinja; + bool prefill_assistant; + common_reasoning_format reasoning_format; + std::map chat_template_kwargs; // mapping key --> json value + common_chat_templates_ptr tmpls; + bool allow_image; + bool allow_audio; + bool enable_thinking = true; + std::string media_path; +}; + +// used by /completions endpoint +json oaicompat_completion_params_parse(const json & body); + +// used by /chat/completions endpoint +json oaicompat_chat_params_parse( + json & body, /* openai api json semantics */ + const server_chat_params & opt, + std::vector & out_files); + +// convert OpenAI Responses API format to OpenAI Chat Completions API format +json convert_responses_to_chatcmpl(const json & body); + +// convert Anthropic Messages API format to OpenAI Chat Completions API format +json convert_anthropic_to_oai(const json & body); + +// TODO: move it to server-task.cpp +json format_embeddings_response_oaicompat( + const json & request, + const std::string & model_name, + const json & embeddings, + bool use_base64 = false); + +// TODO: move it to server-task.cpp +json format_response_rerank( + const json & request, + const std::string & model_name, + const json & ranks, + bool is_tei_format, + std::vector & texts, + int top_n); + +// +// other utils +// + +std::vector get_token_probabilities(llama_context * ctx, int idx); + +std::string safe_json_to_str(const json & data); + +std::string tokens_to_str(llama_context * ctx, const llama_tokens & tokens); +std::string tokens_to_str(const llama_vocab * vocab, const llama_tokens & tokens); + +// format incomplete utf-8 multibyte character for output +std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token); + +// format server-sent event (SSE), return the formatted 
string to send +// note: if data is a json array, it will be sent as multiple events, one per item +std::string format_oai_sse(const json & data); + +std::string format_oai_resp_sse(const json & data); + +// format Anthropic-style SSE with event types +std::string format_anthropic_sse(const json & data); + +bool is_valid_utf8(const std::string & str); + +// +// formatting output responses +// TODO: move these to server-task.cpp +// + +llama_tokens format_prompt_infill( + const llama_vocab * vocab, + const json & input_prefix, + const json & input_suffix, + const json & input_extra, + const int n_batch, + const int n_predict, + const int n_ctx, + const bool spm_infill, + const llama_tokens & tokens_prompt); + +// format rerank task: [BOS]query[EOS][SEP]doc[EOS]. +server_tokens format_prompt_rerank( + const struct llama_model * model, + const struct llama_vocab * vocab, + mtmd_context * mctx, + const std::string & query, + const std::string & doc); diff --git a/llama.cpp/tools/server/server-context.cpp b/llama.cpp/tools/server/server-context.cpp new file mode 100644 index 0000000..ceafcac --- /dev/null +++ b/llama.cpp/tools/server/server-context.cpp @@ -0,0 +1,4105 @@ +#include "server-context.h" +#include "server-common.h" +#include "server-http.h" +#include "server-task.h" +#include "server-queue.h" + +#include "common.h" +#include "llama.h" +#include "log.h" +#include "sampling.h" +#include "speculative.h" +#include "mtmd.h" +#include "mtmd-helper.h" + +#include +#include +#include +#include + +// fix problem with std::min and std::max +#if defined(_WIN32) +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +# define NOMINMAX +#endif +#include +#endif + +using json = nlohmann::ordered_json; + +constexpr int HTTP_POLLING_SECONDS = 1; + +// state diagram: https://github.com/ggml-org/llama.cpp/pull/9283 +enum slot_state { + SLOT_STATE_IDLE, + SLOT_STATE_WAIT_OTHER, // after assigning a task, but waiting for parent slot to process prompt + SLOT_STATE_STARTED, // after assigning a task and about to process prompt + SLOT_STATE_PROCESSING_PROMPT, + SLOT_STATE_DONE_PROMPT, + SLOT_STATE_GENERATING, +}; + +enum server_state { + SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet + SERVER_STATE_READY, // Server is ready and model is loaded +}; + +struct server_slot { + int id; + + // TODO: change to unique_ptrs for consistency: + llama_context * ctx = nullptr; + + // multimodal + mtmd_context * mctx = nullptr; + + common_speculative * spec = nullptr; + + // TODO: move members that belong to the task (such as `generated_text`, `has_new_line`) to task_results_state + // see https://github.com/ggml-org/llama.cpp/pull/18283#issuecomment-3710175837 + std::unique_ptr task; + std::unique_ptr task_prev; // used for debugging + + // used to determine the slot that has been used the longest + int64_t t_last_used = -1; + + // generation props + int32_t n_ctx = 0; // context size per slot + int32_t n_keep = 0; + int32_t n_decoded = 0; + int32_t n_remaining = -1; + int32_t i_batch = -1; + + int32_t n_prompt_tokens_cache = 0; + int32_t n_prompt_tokens_processed = 0; + + size_t last_nl_pos = 0; + + std::string generated_text; + llama_tokens generated_tokens; + + // idx of draft tokens in the main batch + // non-empty if we went to evaluate draft tokens + // ref: https://github.com/ggml-org/llama.cpp/pull/17808 + std::vector i_batch_dft; + + std::vector generated_token_probs; + + bool has_next_token = true; + bool has_new_line = false; + bool truncated = false; + + stop_type stop; + + 
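The slot_state enum above, together with the linked state diagram, describes the lifecycle each server_slot walks through. The stand-alone sketch below just prints the typical path for an ordinary (non-child) request; the real transitions are driven elsewhere in server-context.cpp, and the enum name here is a placeholder rather than the one from this file.

```cpp
// Stand-alone sketch of the usual slot lifecycle for one completion request.
// Child slots additionally pass through a wait state (SLOT_STATE_WAIT_OTHER in the
// real enum) while their parent slot processes the shared prompt.
#include <cstdio>

enum slot_state_sketch { IDLE, STARTED, PROCESSING_PROMPT, DONE_PROMPT, GENERATING };

int main() {
    const slot_state_sketch path[] = { IDLE, STARTED, PROCESSING_PROMPT, DONE_PROMPT, GENERATING, IDLE };
    const char * name[]            = { "IDLE", "STARTED", "PROCESSING_PROMPT", "DONE_PROMPT", "GENERATING" };

    for (slot_state_sketch s : path) {
        std::printf("-> %s\n", name[s]);
    }
    return 0;
}
```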
std::string stopping_word; + + // state + slot_state state = SLOT_STATE_IDLE; + + server_prompt prompt; + + void prompt_save(server_prompt_cache & prompt_cache) const { + GGML_ASSERT(prompt.data.size() == 0); + + const size_t cur_size = llama_state_seq_get_size_ext(ctx, id, 0); + + SRV_WRN(" - saving prompt with length %d, total state size = %.3f MiB\n", + (int) prompt.tokens.size(), cur_size / (1024.0 * 1024.0)); + + auto * cur = prompt_cache.alloc(prompt, cur_size); + if (cur == nullptr) { + return; + } + + llama_state_seq_get_data_ext(ctx, cur->data.data(), cur_size, id, 0); + } + + bool prompt_load(server_prompt_cache & prompt_cache, const server_tokens & tokens) { + bool res = prompt_cache.load(prompt, tokens, ctx, id); + if (!res) { + SLT_WRN(*this, "%s", "failed to load prompt from cache\n"); + } + + return res; + } + + void prompt_clear(bool allow_processing) { + if (!allow_processing) { + GGML_ASSERT(!is_processing()); + } + + SLT_INF(*this, "clearing prompt with %zu tokens\n", prompt.tokens.size()); + + llama_memory_seq_rm(llama_get_memory(ctx), id, -1, -1); + prompt.tokens.clear(); + } + + std::vector lora; + int32_t alora_invocation_start = -1; + + // sampling + json json_schema; + + common_sampler_ptr smpl; + + llama_token sampled; // in speculative mode, this is the last accepted token + llama_tokens drafted; + + // stats + size_t n_sent_text = 0; // number of sent text character + + int64_t t_start_process_prompt; + int64_t t_start_generation; + + double t_prompt_processing; // ms + double t_token_generation; // ms + + std::function callback_on_release; + + // Speculative decoding stats + int32_t n_draft_total = 0; // Total draft tokens generated + int32_t n_draft_accepted = 0; // Draft tokens actually accepted + + void reset() { + SLT_DBG(*this, "%s", "\n"); + + n_prompt_tokens_cache = 0; + + last_nl_pos = 0; + generated_text = ""; + has_new_line = false; + truncated = false; + stop = STOP_TYPE_NONE; + stopping_word = ""; + n_sent_text = 0; + + drafted.clear(); + i_batch_dft.clear(); + generated_tokens.clear(); + generated_token_probs.clear(); + json_schema = json(); + + // clear speculative decoding stats + n_draft_total = 0; + n_draft_accepted = 0; + + task_prev = std::move(task); + task.reset(); + + llama_set_sampler(ctx, id, nullptr); + + // clear alora start + alora_invocation_start = -1; + } + + void init_sampler() const { + common_sampler_reset(smpl.get()); + + if (!task->need_sampling()) { + return; + } + + const int64_t t_start = ggml_time_us(); + + int n_text = 0; + + for (int i = 0; i < (int) prompt.tokens.size(); i++) { + const llama_token id = prompt.tokens[i]; + + if (id != LLAMA_TOKEN_NULL) { + common_sampler_accept(smpl.get(), id, false); + n_text++; + } + } + + SLT_INF(*this, "init sampler, took %0.2f ms, tokens: text = %d, total = %d\n", + (ggml_time_us() - t_start) / 1000.0, n_text, (int) prompt.tokens.size()); + } + + // if the context does not have a memory module then all embeddings have to be computed within a single ubatch + // also we cannot split if the pooling would require any past tokens + bool can_split() const { + GGML_ASSERT(task); + + return + !task->need_embd() || + (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); + } + + bool can_batch_with(server_slot & other_slot) const { + GGML_ASSERT(task); + + return task->type == other_slot.task->type && are_lora_equal(lora, other_slot.lora); + } + + bool has_budget(const common_params & global_params) { + GGML_ASSERT(task); + + if (task->params.n_predict == -1 && 
global_params.n_predict == -1) { + return true; // limitless + } + + n_remaining = -1; + + if (task->params.n_predict != -1) { + n_remaining = task->params.n_predict - n_decoded; + } else if (global_params.n_predict != -1) { + n_remaining = global_params.n_predict - n_decoded; + } + + return n_remaining > 0; // no budget + } + + bool is_processing() const { + return state != SLOT_STATE_IDLE; + } + + bool can_speculate() const { + return !!spec; + } + + void add_token(const completion_token_output & token) { + if (!is_processing()) { + SLT_WRN(*this, "%s", "slot is not processing\n"); + return; + } + + generated_token_probs.push_back(token); + } + + int get_n_draft_max() const { + GGML_ASSERT(task); + + if (!can_speculate()) { + return 0; + } + + // determine the max draft that fits the current slot state + int n_draft_max = task->params.speculative.n_max; + + // note: slot.prompt is not yet expanded with the `id` token sampled above + // also, need to leave space for 1 extra token to allow context shifts + n_draft_max = std::min(n_draft_max, n_ctx - prompt.n_tokens() - 2); + + if (n_remaining > 0) { + n_draft_max = std::min(n_draft_max, n_remaining - 1); + } + + SLT_DBG(*this, "max possible draft: %d\n", n_draft_max); + + if (n_draft_max < task->params.speculative.n_min) { + SLT_DBG(*this, "the max possible draft is too small: %d < %d - skipping speculative decoding\n", n_draft_max, task->params.speculative.n_min); + n_draft_max = 0; + } + + return n_draft_max; + } + + void release() { + if (is_processing()) { + GGML_ASSERT(task); + + SLT_INF(*this, "stop processing: n_tokens = %d, truncated = %d\n", prompt.n_tokens(), truncated); + + t_last_used = ggml_time_us(); + t_token_generation = (ggml_time_us() - t_start_generation) / 1e3; + + state = SLOT_STATE_IDLE; + + // do not keep context of the child slots - the parent's context is enough + if (task->is_child()) { + prompt_clear(false); + } + + reset(); + + callback_on_release(id); + } + } + + result_timings get_timings() const { + result_timings timings; + timings.cache_n = n_prompt_tokens_cache; + + timings.prompt_n = n_prompt_tokens_processed; + timings.prompt_ms = t_prompt_processing; + timings.prompt_per_token_ms = t_prompt_processing / n_prompt_tokens_processed; + timings.prompt_per_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed; + + timings.predicted_n = n_decoded; + timings.predicted_ms = t_token_generation; + timings.predicted_per_token_ms = t_token_generation / n_decoded; + timings.predicted_per_second = 1e3 / t_token_generation * n_decoded; + + // Add speculative metrics + if (n_draft_total > 0) { + timings.draft_n = n_draft_total; + timings.draft_n_accepted = n_draft_accepted; + } + + return timings; + } + + size_t find_stopping_strings(const std::string & text, const size_t last_token_size, bool is_full_stop) { + GGML_ASSERT(task); + + size_t stop_pos = std::string::npos; + + for (const std::string & word : task->params.antiprompt) { + size_t pos; + + if (is_full_stop) { + const size_t tmp = word.size() + last_token_size; + const size_t from_pos = text.size() > tmp ? 
text.size() - tmp : 0; + + pos = text.find(word, from_pos); + } else { + // otherwise, partial stop + pos = string_find_partial_stop(text, word); + } + + if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) { + if (is_full_stop) { + stop = STOP_TYPE_WORD; + stopping_word = word; + has_next_token = false; + } + stop_pos = pos; + } + } + + return stop_pos; + } + + void print_timings() const { + const double t_prompt = t_prompt_processing / n_prompt_tokens_processed; + const double n_prompt_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed; + + const double t_gen = t_token_generation / n_decoded; + const double n_gen_second = 1e3 / t_token_generation * n_decoded; + + SLT_INF(*this, + "\n" + "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" + " eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" + " total time = %10.2f ms / %5d tokens\n", + t_prompt_processing, n_prompt_tokens_processed, t_prompt, n_prompt_second, + t_token_generation, n_decoded, t_gen, n_gen_second, + t_prompt_processing + t_token_generation, n_prompt_tokens_processed + n_decoded); + + if (n_draft_total > 0) { + const float draft_ratio = (float) n_draft_accepted / n_draft_total; + SLT_CNT(*this, + "draft acceptance rate = %0.5f (%5d accepted / %5d generated)\n", + draft_ratio, n_draft_accepted, n_draft_total + ); + } + + common_speculative_print_stats(spec); + } + + json to_json(bool only_metrics = false) const { + json res; + + res = { + {"id", id}, + {"n_ctx", n_ctx}, + {"speculative", can_speculate()}, + {"is_processing", is_processing()}, + }; + + const auto & ptask = task ? task : task_prev; + + if (ptask) { + res["id_task"] = ptask->id; + res["params"] = ptask->params.to_json(only_metrics); + res["next_token"] = { + { + {"has_next_token", has_next_token}, + {"has_new_line", has_new_line}, + {"n_remain", n_remaining}, + {"n_decoded", n_decoded}, + } + }; + + if (!only_metrics) { + res["prompt"] = ptask->tokens.detokenize(ctx, true); + res["generated"] = generated_text; + } + } + + return res; + } + + void copy_state_to(server_slot & other) const { + GGML_ASSERT(state == SLOT_STATE_DONE_PROMPT); + + llama_memory_seq_rm(llama_get_memory(ctx), other.id, -1, -1); + llama_memory_seq_cp(llama_get_memory(ctx), id, other.id, -1, -1); + + other.n_decoded = n_decoded; + other.n_remaining = n_remaining; + other.i_batch = i_batch; + + other.t_start_process_prompt = t_start_process_prompt; + other.t_prompt_processing = t_prompt_processing; + other.n_prompt_tokens_cache = n_prompt_tokens_cache; + other.n_prompt_tokens_processed = n_prompt_tokens_processed; + + other.prompt = prompt.clone(); + other.init_sampler(); + } +}; + + + +// +// server_metrics +// + +struct server_metrics { + int64_t t_start = 0; + + uint64_t n_prompt_tokens_processed_total = 0; + uint64_t t_prompt_processing_total = 0; + uint64_t n_tokens_predicted_total = 0; + uint64_t t_tokens_generation_total = 0; + + uint64_t n_tokens_max = 0; + + uint64_t n_prompt_tokens_processed = 0; + uint64_t t_prompt_processing = 0; + + uint64_t n_tokens_predicted = 0; + uint64_t t_tokens_generation = 0; + + uint64_t n_decode_total = 0; + uint64_t n_busy_slots_total = 0; + + void init() { + t_start = ggml_time_us(); + } + + void on_prompt_eval(const server_slot & slot) { + n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed; + n_prompt_tokens_processed += slot.n_prompt_tokens_processed; + t_prompt_processing += slot.t_prompt_processing; + 
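The per-bucket counters accumulated here are what averaged throughput figures are typically derived from, e.g. for a metrics-style report. A minimal stand-alone illustration of that arithmetic follows; the metric names are made up, and the t_* fields are in milliseconds, matching the fields above.

```cpp
// Rough sketch (assumed metric names): turning the bucket counters into token rates.
#include <cstdint>
#include <cstdio>

struct bucket {
    uint64_t n_prompt_tokens_processed = 0;
    uint64_t t_prompt_processing       = 0; // ms
    uint64_t n_tokens_predicted        = 0;
    uint64_t t_tokens_generation       = 0; // ms
};

static double rate_per_second(uint64_t n_tokens, uint64_t t_ms) {
    return t_ms > 0 ? 1e3 * (double) n_tokens / (double) t_ms : 0.0;
}

int main() {
    const bucket b = {1500, 1200, 256, 4000}; // hypothetical values
    std::printf("prompt_tokens_seconds:    %.2f\n", rate_per_second(b.n_prompt_tokens_processed, b.t_prompt_processing));
    std::printf("predicted_tokens_seconds: %.2f\n", rate_per_second(b.n_tokens_predicted, b.t_tokens_generation));
    return 0;
}
```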
t_prompt_processing_total += slot.t_prompt_processing; + + n_tokens_max = std::max(n_tokens_max, (uint64_t) slot.prompt.n_tokens()); + } + + void on_prediction(const server_slot & slot) { + n_tokens_predicted_total += slot.n_decoded; + n_tokens_predicted += slot.n_decoded; + t_tokens_generation += slot.t_token_generation; + t_tokens_generation_total += slot.t_token_generation; + } + + void on_decoded(const std::vector & slots) { + n_decode_total++; + for (const auto & slot : slots) { + if (slot.is_processing()) { + n_busy_slots_total++; + } + n_tokens_max = std::max(n_tokens_max, (uint64_t) slot.prompt.n_tokens()); + } + } + + void reset_bucket() { + n_prompt_tokens_processed = 0; + t_prompt_processing = 0; + n_tokens_predicted = 0; + t_tokens_generation = 0; + } +}; + + +// +// server_context_impl (private implementation) +// + +struct server_context_impl { + friend struct server_context; + +public: + // only use these pointers outside of this class: + // - when not in sleeping state + // - and, with thread-safe APIs (e.g., tokenizer calls) + llama_model * model = nullptr; + mtmd_context * mctx = nullptr; + const llama_vocab * vocab = nullptr; + + server_queue queue_tasks; + server_response queue_results; + + // note: chat_params must not be refreshed upon existing sleeping state + server_chat_params chat_params; + + ~server_context_impl() { + if (!sleeping) { + // destroy() is already called when entering sleeping state + // we don't call it again here to avoid double free + destroy(); + } + } + +private: + // note: accessing these fields outside of this class is not thread-safe + // use server_context methods instead + + common_params params_base; + + // note: keep these alive - they determine the lifetime of the model, context, etc. + common_init_result_ptr llama_init; + + llama_context * ctx = nullptr; + + llama_batch batch {}; + + llama_model_ptr model_dft; + + bool add_bos_token = true; + + int32_t n_ctx; // total context for all clients / slots + + // slots / clients + std::vector slots; + + int slots_debug = 0; + + std::unique_ptr prompt_cache; + + server_metrics metrics; + + json json_webui_settings = json::object(); + + // Necessary similarity of prompt for slot selection + float slot_prompt_similarity = 0.0f; + + std::string model_name; // name of the loaded model, to be used by API + + bool sleeping = false; + + void destroy() { + llama_init.reset(); + ctx = nullptr; + model = nullptr; + + mtmd_free(mctx); + mctx = nullptr; + + // Clear any sampling context + for (server_slot & slot : slots) { + common_speculative_free(slot.spec); + slot.spec = nullptr; + } + + llama_batch_free(batch); + } + + void handle_sleeping_state(bool new_state) { + GGML_ASSERT(sleeping != new_state); + if (new_state) { + SRV_INF("%s", "server is entering sleeping state\n"); + destroy(); + } else { + SRV_INF("%s", "server is exiting sleeping state\n"); + if (!load_model(params_base)) { + GGML_ABORT("failed to reload model after sleeping"); + } + } + sleeping = new_state; + } + + // load the model and initialize llama_context + // this may also be called to resume from sleeping state + bool load_model(const common_params & params) { + bool is_resume = sleeping; + + SRV_INF("loading model '%s'\n", params.model.path.c_str()); + + params_base = params; + + llama_init = common_init_from_params(params_base); + + model = llama_init->model(); + ctx = llama_init->context(); + + if (model == nullptr) { + SRV_ERR("failed to load model, '%s'\n", params_base.model.path.c_str()); + return false; + } + + vocab = 
llama_model_get_vocab(model); + + n_ctx = llama_n_ctx(ctx); + + add_bos_token = llama_vocab_get_add_bos(vocab); + + if (params_base.speculative.has_dft()) { + SRV_INF("loading draft model '%s'\n", params_base.speculative.mparams_dft.path.c_str()); + + const auto & params_spec = params_base.speculative; + + auto params_dft = params_base; + + params_dft.n_parallel = 1; + params_dft.n_ctx = params_spec.n_ctx == 0 ? llama_n_ctx_seq(ctx) : params_spec.n_ctx; + params_dft.n_batch = llama_n_ctx_seq(ctx); + params_dft.devices = params_spec.devices; + params_dft.model = params_spec.mparams_dft; + params_dft.n_gpu_layers = params_spec.n_gpu_layers; + params_dft.cache_type_k = params_spec.cache_type_k; + params_dft.cache_type_v = params_spec.cache_type_v; + + if (params_spec.cpuparams.n_threads > 0) { + params_dft.cpuparams.n_threads = params_spec.cpuparams.n_threads; + params_dft.cpuparams_batch.n_threads = params_spec.cpuparams_batch.n_threads; + } + + params_dft.tensor_buft_overrides = params_spec.tensor_buft_overrides; + + auto mparams_dft = common_model_params_to_llama(params_dft); + + model_dft.reset(llama_model_load_from_file(params_dft.model.path.c_str(), mparams_dft)); + if (model_dft == nullptr) { + SRV_ERR("failed to load draft model, '%s'\n", params_dft.model.path.c_str()); + return false; + } + + params_base.speculative.model_dft = model_dft.get(); + params_base.speculative.cparams_dft = common_context_params_to_llama(params_dft); + } + + std::string & mmproj_path = params_base.mmproj.path; + if (!mmproj_path.empty()) { + if (!is_resume) { + mtmd_helper_log_set(common_log_default_callback, nullptr); + } + + mtmd_context_params mparams = mtmd_context_params_default(); + + mparams.use_gpu = params_base.mmproj_use_gpu; + mparams.print_timings = false; + mparams.n_threads = params_base.cpuparams.n_threads; + mparams.flash_attn_type = params_base.flash_attn_type; + mparams.warmup = params_base.warmup; + mparams.image_min_tokens = params_base.image_min_tokens; + mparams.image_max_tokens = params_base.image_max_tokens; + + mctx = mtmd_init_from_file(mmproj_path.c_str(), model, mparams); + if (mctx == nullptr) { + SRV_ERR("failed to load multimodal model, '%s'\n", mmproj_path.c_str()); + return false; + } + SRV_INF("loaded multimodal model, '%s'\n", mmproj_path.c_str()); + + if (params_base.ctx_shift) { + params_base.ctx_shift = false; + SRV_WRN("%s\n", "ctx_shift is not supported by multimodal, it will be disabled"); + } + + if (params_base.n_cache_reuse) { + params_base.n_cache_reuse = 0; + SRV_WRN("%s\n", "cache_reuse is not supported by multimodal, it will be disabled"); + } + + if (params_base.speculative.type != COMMON_SPECULATIVE_TYPE_NONE) { + params_base.speculative.type = COMMON_SPECULATIVE_TYPE_NONE; + SRV_WRN("%s\n", "speculative decoding is not supported by multimodal, it will be disabled"); + } + } + + if (!llama_memory_can_shift(llama_get_memory(ctx))) { + if (params_base.ctx_shift) { + params_base.ctx_shift = false; + SRV_WRN("%s\n", "ctx_shift is not supported by this context, it will be disabled"); + } + + if (params_base.n_cache_reuse) { + params_base.n_cache_reuse = 0; + SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled"); + } + } + + // Necessary similarity of prompt for slot selection + slot_prompt_similarity = params_base.slot_prompt_similarity; + + // setup slots + SRV_INF("initializing slots, n_slots = %d\n", params_base.n_parallel); + + const int n_ctx_train = llama_model_n_ctx_train(model); + + int n_ctx_slot = llama_n_ctx_seq(ctx); + 
if (n_ctx_slot > n_ctx_train) { + SRV_WRN("the slot context (%d) exceeds the training context of the model (%d) - capping\n", n_ctx_slot, n_ctx_train); + n_ctx_slot = n_ctx_train; + } + + slots.clear(); + + const bool can_spec = common_speculative_is_compat(ctx); + if (!can_spec) { + SRV_WRN("%s", "speculative decoding not supported by this context\n"); + } + + // initialize slots + for (int i = 0; i < params_base.n_parallel; i++) { + server_slot slot; + + slot.id = i; + slot.ctx = ctx; + slot.n_ctx = n_ctx_slot; + + slot.mctx = mctx; + slot.prompt.tokens.has_mtmd = mctx != nullptr; + + // try speculative decoding + if (can_spec) { + slot.spec = common_speculative_init(params_base.speculative, slot.ctx); + if (slot.spec) { + if (mctx) { + SRV_ERR("%s\n", "speculative decoding is not supported with multimodal"); + return false; + } + SLT_INF(slot, "%s", "speculative decoding context initialized\n"); + } else { + SLT_INF(slot, "%s", "speculative decoding context not initialized\n"); + } + } + + SLT_INF(slot, "new slot, n_ctx = %d\n", slot.n_ctx); + + slot.callback_on_release = [this](int id_slot) { + queue_tasks.pop_deferred_task(id_slot); + }; + + slot.reset(); + + slots.push_back(std::move(slot)); + } + + { + const char * LLAMA_SERVER_SLOTS_DEBUG = getenv("LLAMA_SERVER_SLOTS_DEBUG"); + slots_debug = LLAMA_SERVER_SLOTS_DEBUG ? atoi(LLAMA_SERVER_SLOTS_DEBUG) : 0; + + if (slots_debug) { + SRV_WRN("slots debug = %d\n", slots_debug); + } + } + + // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens + // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used) + { + const int32_t n_batch = llama_n_batch(ctx); + batch = llama_batch_init(std::max(n_batch, params_base.n_parallel), 0, 1); + } + + if (params_base.cache_ram_mib != 0) { + if (params_base.cache_ram_mib < 0) { + SRV_WRN("prompt cache is enabled, size limit: %s\n", "no limit"); + } else { + SRV_WRN("prompt cache is enabled, size limit: %d MiB\n", params_base.cache_ram_mib); + } + SRV_WRN("%s", "use `--cache-ram 0` to disable the prompt cache\n"); + + prompt_cache = std::make_unique(params_base.cache_ram_mib, n_ctx); + } else { + SRV_WRN("%s", "prompt cache is disabled - use `--cache-ram N` to enable it\n"); + } + SRV_WRN("%s", "for more info see https://github.com/ggml-org/llama.cpp/pull/16391\n"); + + if (!params_base.model_alias.empty()) { + // user explicitly specified model name + model_name = params_base.model_alias; + } else if (!params_base.model.name.empty()) { + // use model name in registry format (for models in cache) + model_name = params_base.model.name; + } else { + // fallback: derive model name from file name + auto model_path = std::filesystem::path(params_base.model.path); + model_name = model_path.filename().string(); + } + + if (!is_resume) { + return init(); + } + + return true; + } + + // unlike load_model(), this is only called once during initialization + bool init() { + GGML_ASSERT(ctx != nullptr); + GGML_ASSERT(model != nullptr); + GGML_ASSERT(!sleeping); + + // wiring up server queues + queue_tasks.on_new_task([this](server_task && task) { + process_single_task(std::move(task)); + }); + queue_tasks.on_update_slots([this]() { + update_slots(); + }); + queue_tasks.on_sleeping_state([this](bool sleeping) { + handle_sleeping_state(sleeping); + }); + + metrics.init(); + + // populate webui settings + { + if (!params_base.webui_config_json.empty()) { + try { + json_webui_settings = json::parse(params_base.webui_config_json); 
+ } catch (const std::exception & e) { + SRV_ERR("%s: failed to parse webui config: %s\n", __func__, e.what()); + return false; + } + } + } + + // populate chat template params + { + common_chat_templates_ptr chat_templates; + + try { + chat_templates = common_chat_templates_init(model, params_base.chat_template); + + LOG_INF("%s: chat template, example_format: '%s'\n", __func__, + common_chat_format_example(chat_templates.get(), params_base.use_jinja, params_base.default_template_kwargs).c_str()); + + } catch (const std::exception & e) { + SRV_ERR("%s: chat template parsing error: %s\n", __func__, e.what()); + SRV_ERR("%s: please consider disabling jinja via --no-jinja, or use a custom chat template via --chat-template\n", __func__); + SRV_ERR("%s: for example: --no-jinja --chat-template chatml\n", __func__); + return false; + } + + // thinking is enabled if: + // 1. It's not explicitly disabled (reasoning_budget == 0) + // 2. The chat template supports it + const bool enable_thinking = params_base.use_jinja && params_base.reasoning_budget != 0 && common_chat_templates_support_enable_thinking(chat_templates.get()); + SRV_INF("%s: chat template, thinking = %d\n", __func__, enable_thinking); + + chat_params = { + /* use_jinja */ params_base.use_jinja, + /* prefill_assistant */ params_base.prefill_assistant, + /* reasoning_format */ params_base.reasoning_format, + /* chat_template_kwargs */ params_base.default_template_kwargs, + /* tmpls */ std::move(chat_templates), + /* allow_image */ mctx ? mtmd_support_vision(mctx) : false, + /* allow_audio */ mctx ? mtmd_support_audio (mctx) : false, + /* enable_thinking */ enable_thinking, + /* media_path */ params_base.media_path, + }; + } + + return true; + } + + server_slot * get_slot_by_id(int id_slot) { + // note: allow id_slot to be out of bounds (wrap around) + id_slot = id_slot % slots.size(); + + for (server_slot & slot : slots) { + if (slot.id == id_slot) { + return &slot; + } + } + + return nullptr; + } + + server_slot * get_available_slot(const server_task & task) { + server_slot * ret = nullptr; + + bool update_cache = false; + + // find the slot that has at least n% prompt similarity + if (ret == nullptr && slot_prompt_similarity != 0.0f) { + float sim_best = 0; + + for (server_slot & slot : slots) { + // skip the slot if it is not available + if (slot.is_processing()) { + continue; + } + + const auto & tokens = slot.prompt.tokens; + + // skip the slot if it does not contains cached tokens + if (tokens.empty()) { + continue; + } + + // fraction of the Longest Common Prefix length with respect to the input prompt length + const float sim_cur = float(tokens.get_common_prefix(task.tokens)) / task.tokens.size(); + + // select the current slot if the criteria match + if (sim_cur > sim_best && sim_cur > slot_prompt_similarity) { + sim_best = sim_cur; + + ret = &slot; + } + } + + if (ret != nullptr) { + const float f_keep = (sim_best*task.tokens.size()) / ret->prompt.tokens.size(); + + SLT_INF(*ret, "selected slot by LCP similarity, sim_best = %.3f (> %.3f thold), f_keep = %.3f\n", + sim_best, slot_prompt_similarity, f_keep); + + // if we are about to lose a large portion of the existing context - save it in the prompt cache + if (f_keep < 0.5f) { + update_cache = true; + } + } + } + + // find the slot that has been least recently used + if (ret == nullptr) { + int64_t t_last = -1; + + for (server_slot & slot : slots) { + // skip the slot if it is not available + if (slot.is_processing()) { + continue; + } + + // select the current slot if the 
criteria match + if (!ret || slot.t_last_used <= t_last) { + t_last = slot.t_last_used; + ret = &slot; + } + } + + if (ret != nullptr) { + SLT_INF(*ret, "selected slot by LRU, t_last = %" PRId64 "\n", t_last); + + update_cache = true; + } + } + + if (ret) { + const auto & tokens = ret->prompt.tokens; + + update_cache = update_cache && prompt_cache; + + // cache prompts only for completion tasks + update_cache = update_cache && task.type == SERVER_TASK_TYPE_COMPLETION; + + // don't update the cache if the slot's context is empty + update_cache = update_cache && tokens.size() > 0; + + // TODO: mtmd does not support prompt cache + update_cache = update_cache && (ret->mctx == nullptr); + + if (update_cache) { + SRV_WRN("%s", "updating prompt cache\n"); + + const int64_t t_start = ggml_time_us(); + + ret->prompt_save(*prompt_cache); + + if (!ret->prompt_load(*prompt_cache, task.tokens)) { + ret->prompt_clear(false); + } + + prompt_cache->update(); + + SRV_WRN("prompt cache update took %.2f ms\n", (ggml_time_us() - t_start) / 1000.0); + } + } + + return ret; + } + + // return true if at least one slot has been cleared + // TODO: improve logic + // - smarter decision which slot to clear (LRU or longest prompt?) + // - move slot to level 2 cache instead of removing? + // - instead of purging, try to store and resume later? + bool try_clear_idle_slots() { + bool res = false; + + if (!params_base.kv_unified) { + return res; + } + + for (auto & slot : slots) { + if (slot.is_processing()) { + continue; + } + + if (slot.prompt.n_tokens() > 0) { + SRV_WRN("purging slot %d with %zu tokens\n", slot.id, slot.prompt.tokens.size()); + + slot.prompt_clear(false); + + res = true; + + // clear slots one by one + break; + } + } + + return res; + } + + std::vector construct_lora_list(const std::map & config) const { + std::vector output = params_base.lora_adapters; // copy + for (size_t i = 0; i < output.size(); ++i) { + auto it = config.find(i); + if (it != config.end()) { + output[i].scale = it->second; + } else { + output[i].scale = 0.0f; + } + } + return output; + } + + bool launch_slot_with_task(server_slot & slot, server_task && task) { + // process per-request lora adapters + if (!task.params.lora.empty()) { + auto task_loras = construct_lora_list(task.params.lora); + if (!are_lora_equal(task_loras, slot.lora)) { + // if lora has changed, check to see if the cache should be cleared + if (lora_should_clear_cache(slot.lora, task_loras)) { + SLT_INF(slot, "clearing cache for lora change. %zu loras -> %zu loras\n", slot.lora.size(), task.params.lora.size()); + slot.prompt.tokens.clear(); + } else { + SLT_INF(slot, "keeping cache for alora. %zu target loras\n", task_loras.size()); + } + slot.lora = task_loras; + } + } else { + slot.lora = params_base.lora_adapters; + } + + // if using alora, make sure it's only a single one requested and active + size_t alora_invocation_start = task.tokens.size(); + if (lora_all_alora(slot.lora)) { + const auto & enabled_ids = lora_get_enabled_ids(slot.lora); + // TODO: This will error out if a user requests two aloras, but only + // provides the activation string for one. We could, instead search + // for all requested alora activation strings and then either keep + // only the last one, or reject if multiple are found. 
+ if (enabled_ids.size() != 1) { + send_error(task, "Cannot run multiple aLoRAs in a single request", ERROR_TYPE_INVALID_REQUEST); + return false; + } + const auto & lora = slot.lora[enabled_ids[0]].ptr; + + // get the pointer and count for the invocation tokens + const uint64_t n_invocation_tokens = llama_adapter_get_alora_n_invocation_tokens(lora); + const llama_token * invocation_tokens = llama_adapter_get_alora_invocation_tokens (lora); + + // scan backwards through the prompt tokens to find the last + // occurrence of the invocation sequence + int match_idx = static_cast(n_invocation_tokens) - 1; + for (int i = task.tokens.size() - 1; i >= 0; --i) { + // the token in this position matches the next token to find in + // the invocation sequence + if (task.tokens[i] == invocation_tokens[match_idx]) { + // if it's a full match, we've found the start + if (match_idx == 0) { + alora_invocation_start = i; + break; + } + // otherwise, check the next token in the sequence + --match_idx; + } else { + // no match in this position, so start looking over again + match_idx = static_cast(n_invocation_tokens) - 1; + } + } + + // if the activation string is not found, disable the alora + if (alora_invocation_start == task.tokens.size()) { + SLT_DBG(slot, "alora %zu requested, but not found. deactivating\n", enabled_ids[0]); + slot.lora[enabled_ids[0]].scale = 0.0f; + } else { + SLT_DBG(slot, "alora %zu activated starting at %zu\n", enabled_ids[0], alora_invocation_start); + slot.alora_invocation_start = alora_invocation_start; + } + } + + if (!task.tokens.validate(ctx)) { + send_error(task, "Prompt contains invalid tokens", ERROR_TYPE_INVALID_REQUEST); + return false; + } + + SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); + + // initialize samplers + if (task.need_sampling()) { + slot.smpl.reset(common_sampler_init(model, task.params.sampling)); + + if (slot.smpl == nullptr) { + // for now, the only error that may happen here is invalid grammar + send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST); + return false; + } + + const bool need_logits = task.params.sampling.n_probs > 0; + + bool backend_sampling = true; + + backend_sampling &= task.params.sampling.backend_sampling; + + // TODO: speculative decoding requires multiple samples per batch - not supported yet + backend_sampling &= !(slot.spec && task.params.speculative.n_max > 0); + + // TODO: getting post/pre sampling logits is not yet supported with backend sampling + backend_sampling &= !need_logits; + + // TODO: tmp until backend sampling is fully implemented + if (backend_sampling) { + llama_set_sampler(ctx, slot.id, common_sampler_get(slot.smpl.get())); + } else { + llama_set_sampler(ctx, slot.id, nullptr); + } + + SLT_INF(slot, "sampler chain: %s\n", common_sampler_print(slot.smpl.get()).c_str()); + } else { + slot.smpl.reset(); + } + + slot.task = std::make_unique(std::move(task)); + + slot.state = slot.task->is_child() + ? 
SLOT_STATE_WAIT_OTHER // wait for the parent to process prompt
+            : SLOT_STATE_STARTED;
+
+        SLT_INF(slot, "processing task, is_child = %d\n", slot.task->is_child());
+        return true;
+    }
+
+    bool process_token(completion_token_output & result, server_slot & slot) {
+        // remember which tokens were sampled - used for repetition penalties during sampling
+        const std::string token_str = result.text_to_send;
+        slot.sampled = result.tok;
+
+        slot.generated_text += token_str;
+        if (slot.task->params.return_tokens) {
+            slot.generated_tokens.push_back(result.tok);
+        }
+        slot.has_next_token = true;
+
+        // check if there is an incomplete UTF-8 character at the end
+        bool incomplete = validate_utf8(slot.generated_text) < slot.generated_text.size();
+
+        // search for a stop word and delete it
+        if (!incomplete) {
+            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
+
+            const std::string str_test = slot.generated_text.substr(pos);
+            bool send_text = true;
+
+            size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), true);
+            if (stop_pos != std::string::npos) {
+                slot.generated_text.erase(
+                    slot.generated_text.begin() + pos + stop_pos,
+                    slot.generated_text.end());
+                pos = std::min(slot.n_sent_text, slot.generated_text.size());
+            } else if (slot.has_next_token && !llama_vocab_is_eog(vocab, result.tok)) {
+                stop_pos = slot.find_stopping_strings(str_test, token_str.size(), false);
+                send_text = stop_pos == std::string::npos;
+            }
+
+            // check if there is any token to predict
+            if (send_text) {
+                // do not send the stop word in the response
+                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
+                slot.n_sent_text += result.text_to_send.size();
+                // add the token to slot queue and cache
+            } else {
+                result.text_to_send = "";
+            }
+
+            slot.add_token(result);
+            if (slot.task->params.stream) {
+                send_partial_response(slot, result, false);
+            }
+        }
+
+        if (incomplete) {
+            slot.has_next_token = true;
+        }
+
+        // if context shifting is disabled, make sure that we don't run out of context
+        if (!params_base.ctx_shift && slot.prompt.n_tokens() + 1 >= slot.n_ctx) {
+            slot.truncated = true;
+            slot.stop = STOP_TYPE_LIMIT;
+            slot.has_next_token = false;
+
+            SLT_DBG(slot, "stopped due to running out of context capacity, prompt.n_tokens() = %d, task.n_tokens = %d, n_decoded = %d, n_ctx = %d\n",
+                    slot.prompt.n_tokens(), slot.task->n_tokens(), slot.n_decoded, slot.n_ctx);
+        }
+
+        // check the limits
+        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params_base)) {
+            slot.stop = STOP_TYPE_LIMIT;
+            slot.has_next_token = false;
+
+            SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.task->params.n_predict);
+        }
+
+        if (slot.has_new_line) {
+            // require that each new line has a whitespace prefix (i.e.
indentation) of at least slot.params.n_indent + if (slot.task->params.n_indent > 0) { + // check the current indentation + // TODO: improve by not doing it more than once for each new line + if (slot.last_nl_pos > 0) { + size_t pos = slot.last_nl_pos; + + int n_indent = 0; + while (pos < slot.generated_text.size() && (slot.generated_text[pos] == ' ' || slot.generated_text[pos] == '\t')) { + n_indent++; + pos++; + } + + if (pos < slot.generated_text.size() && n_indent < slot.task->params.n_indent) { + slot.stop = STOP_TYPE_LIMIT; + slot.has_next_token = false; + + // cut the last line + slot.generated_text.erase(pos, std::string::npos); + + SLT_DBG(slot, "stopped by indentation limit, n_decoded = %d, n_indent = %d\n", slot.n_decoded, n_indent); + } + } + + // find the next new line + { + const size_t pos = slot.generated_text.find('\n', slot.last_nl_pos); + + if (pos != std::string::npos) { + slot.last_nl_pos = pos + 1; + } + } + } + } + + // check if there is a new line in the generated text + if (result.text_to_send.find('\n') != std::string::npos) { + slot.has_new_line = true; + + // if we have seen a new line, we stop after a certain time limit, but only upon another new line + if (slot.task->params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.task->params.t_max_predict_ms)) { + slot.stop = STOP_TYPE_LIMIT; + slot.has_next_token = false; + + SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.task->params.t_max_predict_ms); + } + } + + if (llama_vocab_is_eog(vocab, result.tok)) { + slot.stop = STOP_TYPE_EOS; + slot.has_next_token = false; + + SLT_DBG(slot, "%s", "stopped by EOS\n"); + } + + SLT_DBG(slot, "n_decoded = %d, n_remaining = %d, next token: %5d '%s'\n", slot.n_decoded, slot.n_remaining, result.tok, token_str.c_str()); + + return slot.has_next_token; // continue + } + + void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) const { + const size_t n_probs_request = slot.task->params.sampling.n_probs; + + if (post_sampling) { + const auto * cur_p = common_sampler_get_candidates(slot.smpl.get(), true); + const size_t max_probs = cur_p->size; + const size_t n_probs = std::min(max_probs, n_probs_request); + + // set probability for sampled token + for (size_t i = 0; i < max_probs; i++) { + if (cur_p->data[i].id == result.tok) { + result.prob = cur_p->data[i].p; + break; + } + } + + // set probability for top n_probs tokens + result.probs.reserve(n_probs); + for (size_t i = 0; i < n_probs; i++) { + result.probs.push_back({ + cur_p->data[i].id, + common_token_to_piece(ctx, cur_p->data[i].id, special), + cur_p->data[i].p + }); + } + } else { + // TODO: optimize this with min-p optimization + std::vector cur = get_token_probabilities(ctx, idx); + const size_t max_probs = cur.size(); + const size_t n_probs = std::min(max_probs, n_probs_request); + + // set probability for sampled token + for (size_t i = 0; i < max_probs; i++) { + // set probability for sampled token + if (cur[i].id == result.tok) { + result.prob = cur[i].p; + break; + } + } + + // set probability for top n_probs tokens + result.probs.reserve(n_probs); + for (size_t i = 0; i < n_probs; i++) { + result.probs.push_back({ + cur[i].id, + common_token_to_piece(ctx, cur[i].id, special), + cur[i].p + }); + } + } + } + + void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) { + send_error(task.id, 
error, type); + } + + void send_error(const server_slot & slot, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) { + send_error(slot.task->id, error, type, slot.task->n_tokens(), slot.n_ctx); + } + + void send_error(const int id_task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER, const int32_t n_prompt_tokens = 0, const int32_t n_ctx = 0) { + SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str()); + + if (type == ERROR_TYPE_EXCEED_CONTEXT_SIZE) { + GGML_ASSERT(n_ctx > 0 && n_prompt_tokens > 0); + } + + auto res = std::make_unique(); + res->id = id_task; + res->err_type = type; + res->err_msg = error; + res->n_prompt_tokens = n_prompt_tokens; + res->n_ctx = n_ctx; + + queue_results.send(std::move(res)); + } + + // if multimodal is enabled, send an error and return false + bool check_no_mtmd(const int id_task) { + if (mctx) { + send_error(id_task, "This feature is not supported by multimodal", ERROR_TYPE_NOT_SUPPORTED); + return false; + } + return true; + } + + void send_partial_response(server_slot & slot, const completion_token_output & tkn, bool is_progress) { + auto res = std::make_unique(); + + res->id = slot.task->id; + res->index = slot.task->index; + + if (is_progress) { + res->is_progress = true; + res->progress.total = slot.task->n_tokens(); + res->progress.cache = slot.n_prompt_tokens_cache; + res->progress.processed = slot.prompt.tokens.size(); + res->progress.time_ms = (ggml_time_us() - slot.t_start_process_prompt) / 1000; + } else { + res->content = tkn.text_to_send; + res->tokens = { tkn.tok }; + } + + res->n_decoded = slot.n_decoded; + res->n_prompt_tokens = slot.task->n_tokens(); + res->post_sampling_probs = slot.task->params.post_sampling_probs; + + res->verbose = slot.task->params.verbose; + res->res_type = slot.task->params.res_type; + res->oaicompat_model = slot.task->params.oaicompat_model; + res->oaicompat_cmpl_id = slot.task->params.oaicompat_cmpl_id; + + // populate res.probs_output + if (slot.task->params.sampling.n_probs > 0) { + res->prob_output = tkn; // copy the token probs + } + + // populate timings if this is final response or timings_per_token is enabled + if (slot.stop != STOP_TYPE_NONE || slot.task->params.timings_per_token) { + res->timings = slot.get_timings(); + } + + queue_results.send(std::move(res)); + } + + void send_final_response(server_slot & slot) { + auto res = std::make_unique(); + + res->id = slot.task->id; + res->id_slot = slot.id; + + res->index = slot.task->index; + // in stream mode, content and tokens are already in last partial chunk + if (slot.task->params.stream) { + res->content = ""; + res->tokens = llama_tokens{}; + } else { + res->content = std::move(slot.generated_text); + res->tokens = std::move(slot.generated_tokens); + } + res->timings = slot.get_timings(); + res->prompt = slot.task->tokens.detokenize(ctx, true); + res->response_fields = std::move(slot.task->params.response_fields); + + res->truncated = slot.truncated; + res->n_decoded = slot.n_decoded; + res->n_prompt_tokens = slot.task->n_tokens(); + res->n_tokens_cached = slot.prompt.n_tokens(); + res->has_new_line = slot.has_new_line; + res->stopping_word = slot.stopping_word; + res->stop = slot.stop; + res->post_sampling_probs = slot.task->params.post_sampling_probs; + + res->verbose = slot.task->params.verbose; + res->stream = slot.task->params.stream; + res->include_usage = slot.task->params.include_usage; + res->res_type = slot.task->params.res_type; + res->oaicompat_model = 
slot.task->params.oaicompat_model; + res->oaicompat_cmpl_id = slot.task->params.oaicompat_cmpl_id; + + // populate res.probs_output + if (slot.task->params.sampling.n_probs > 0) { + if (!slot.task->params.stream && slot.stop == STOP_TYPE_WORD) { + const llama_tokens stop_word_toks = common_tokenize(ctx, slot.stopping_word, false); + + size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size()); + res->probs_output = std::vector( + slot.generated_token_probs.begin(), + slot.generated_token_probs.end() - safe_offset); + } else { + res->probs_output = std::vector( + slot.generated_token_probs.begin(), + slot.generated_token_probs.end()); + } + } + + res->generation_params = slot.task->params; // copy the parameters + + queue_results.send(std::move(res)); + } + + void send_embedding(const server_slot & slot, const llama_batch & batch) { + auto res = std::make_unique(); + res->id = slot.task->id; + res->index = slot.task->index; + res->n_tokens = slot.task->n_tokens(); + res->res_type = slot.task->params.res_type; + + const int n_embd_out = llama_model_n_embd_out(model); + + std::vector embd_res(n_embd_out, 0.0f); + + for (int i = 0; i < batch.n_tokens; ++i) { + if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) { + continue; + } + + const float * embd = nullptr; + if (llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE) { + embd = llama_get_embeddings_ith(ctx, i); + } else { + embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]); + } + + if (embd == nullptr) { + SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]); + + res->embedding.push_back(std::vector(n_embd_out, 0.0f)); + continue; + } + + // normalize only when there is pooling + if (llama_pooling_type(slot.ctx) != LLAMA_POOLING_TYPE_NONE) { + common_embd_normalize(embd, embd_res.data(), n_embd_out, slot.task->params.embd_normalize); + res->embedding.push_back(embd_res); + break; + } + + res->embedding.emplace_back(embd, embd + n_embd_out); + } + + SLT_DBG(slot, "%s", "sending embeddings\n"); + + queue_results.send(std::move(res)); + } + + void send_rerank(const server_slot & slot, const llama_batch & batch) { + auto res = std::make_unique(); + res->id = slot.task->id; + res->index = slot.task->index; + res->n_tokens = slot.task->n_tokens(); + + for (int i = 0; i < batch.n_tokens; ++i) { + if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) { + continue; + } + + const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]); + if (embd == NULL) { + embd = llama_get_embeddings_ith(ctx, i); + } + + if (embd == NULL) { + SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]); + + res->score = -1e6; + continue; + } + + res->score = embd[0]; + } + + SLT_DBG(slot, "sending rerank result, res.score = %f\n", res->score); + + queue_results.send(std::move(res)); + } + + // + // Functions to process the task + // + + // tokenize the input if it's set by CLI, return false on error + bool tokenize_cli_input(server_task & task) { + try { + auto & prompt = task.cli_prompt; + if (mctx != nullptr) { + task.tokens = process_mtmd_prompt(mctx, prompt, task.cli_files); + } else { + task.tokens = std::move(tokenize_input_prompts(vocab, mctx, prompt, true, true)[0]); + } + task.cli_prompt.clear(); + task.cli_files.clear(); + } catch (const std::exception & e) { + send_error(task, std::string("Failed to format input: ") + e.what(), ERROR_TYPE_INVALID_REQUEST); + return false; + } + return true; + } + + 
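For orientation, the slot-selection heuristic used by get_available_slot() above reduces to a longest-common-prefix ratio between the new prompt and a slot's cached tokens. The following is a minimal standalone sketch of that check, with simplified token types (plain ints instead of the server's token containers) and a hypothetical helper name; it is illustrative only, not part of the patch:

#include <cstddef>
#include <vector>

// fraction of the new prompt already covered by a slot's cached prefix
static float lcp_similarity(const std::vector<int> & cached, const std::vector<int> & prompt) {
    size_t n = 0;
    while (n < cached.size() && n < prompt.size() && cached[n] == prompt[n]) {
        n++;
    }
    return prompt.empty() ? 0.0f : float(n) / float(prompt.size());
}

// a slot is eligible when this ratio exceeds --slot-prompt-similarity; among eligible
// idle slots the best ratio wins, otherwise the least recently used idle slot is picked.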
std::vector get_free_slots(size_t n_slots_needed, int exclude_id_slot) { + std::vector free_slots; + for (auto & slot : slots) { + if (!slot.is_processing() && slot.id != exclude_id_slot) { + free_slots.push_back(&slot); + } + if (free_slots.size() >= n_slots_needed) { + break; + } + } + return free_slots; + } + + // launch multiple slots for parent + child tasks + bool launch_slots_with_parent_task(server_slot & parent_slot, std::vector & child_slots, server_task && parent_task) { + GGML_ASSERT(!parent_slot.is_processing()); + GGML_ASSERT(parent_task.is_parent()); + GGML_ASSERT(child_slots.size() == parent_task.child_tasks.size()); + + int id_parent = parent_task.id; + + SRV_INF("launching slots for parent task id_task = %d with %zu child tasks\n", id_parent, parent_task.child_tasks.size()); + + // to be called in case of failure to release all launched slots + auto release_slots = [this, id_parent]() { + for (auto & slot : slots) { + if (slot.is_processing() && ( + slot.task->id == id_parent || + slot.task->id_parent == id_parent + )) { + slot.release(); + } + } + }; + + // launch all child tasks first + size_t idx = 0; + GGML_ASSERT(child_slots.size() == parent_task.child_tasks.size()); + for (auto * slot : child_slots) { + int id_child = parent_task.child_tasks[idx].id; + if (!launch_slot_with_task(*slot, std::move(parent_task.child_tasks[idx]))) { + SRV_ERR("failed to launch slot with child task, id_task = %d\n", id_child); + release_slots(); + return false; + } + idx++; + } + + // finally, launch the parent task + if (!launch_slot_with_task(parent_slot, std::move(parent_task))) { + SRV_ERR("failed to launch slot with task, id_task = %d\n", id_parent); + release_slots(); + return false; + } + + return true; + } + + void process_single_task(server_task && task) { + switch (task.type) { + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + case SERVER_TASK_TYPE_EMBEDDING: + case SERVER_TASK_TYPE_RERANK: + { + // special case: if input is provided via CLI, tokenize it first + // otherwise, no need to tokenize as it's already done inside the HTTP thread + if (task.cli) { + if (!tokenize_cli_input(task)) { + break; + } + } + + const int id_slot = task.id_slot; + const int id_task = task.id; + + server_slot * slot = id_slot != -1 + ? 
get_slot_by_id(id_slot) + : get_available_slot(task); + + // + // slot scheduling logic + // + + if (slot == nullptr) { + // if no slot is available, we defer this task for processing later + SRV_DBG("no slot is available, defer task, id_task = %d\n", id_task); + queue_tasks.defer(std::move(task)); + break; + } + + if (slot->is_processing()) { + // if requested slot is unavailable, we defer this task for processing later + SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", id_task); + queue_tasks.defer(std::move(task)); + break; + } + + if (task.is_parent()) { + // try getting free slots for all child tasks + size_t n_child_tasks = task.child_tasks.size(); + std::vector child_slots = get_free_slots(n_child_tasks, slot->id); + if (child_slots.size() < n_child_tasks) { + SRV_DBG("not enough free slots for child tasks, n_free = %zu, n_children = %zu, defer task, id_task = %d\n", child_slots.size(), n_child_tasks, id_task); + queue_tasks.defer(std::move(task)); + break; + } + if (!launch_slots_with_parent_task(*slot, child_slots, std::move(task))) { + SRV_ERR("failed to launch slot with parent task, id_task = %d\n", id_task); + break; // drop the task + } + } else if (!launch_slot_with_task(*slot, std::move(task))) { + SRV_ERR("failed to launch slot with task, id_task = %d\n", id_task); + break; // drop the task + } + } break; + case SERVER_TASK_TYPE_CANCEL: + { + // release slot linked with the task id + for (auto & slot : slots) { + if (slot.task && slot.task->id == task.id_target) { + slot.release(); + break; + } + } + } break; + case SERVER_TASK_TYPE_NEXT_RESPONSE: + { + // do nothing + } break; + case SERVER_TASK_TYPE_METRICS: + { + json slots_data = json::array(); + + int n_idle_slots = 0; + int n_processing_slots = 0; + + for (server_slot & slot : slots) { + json slot_data = slot.to_json(slots_debug == 0); + + if (slot.is_processing()) { + n_processing_slots++; + } else { + n_idle_slots++; + } + + slots_data.push_back(slot_data); + } + SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots); + + auto res = std::make_unique(); + res->id = task.id; + res->slots_data = std::move(slots_data); + res->n_idle_slots = n_idle_slots; + res->n_processing_slots = n_processing_slots; + res->n_tasks_deferred = queue_tasks.queue_tasks_deferred_size(); + res->t_start = metrics.t_start; + + res->n_prompt_tokens_processed_total = metrics.n_prompt_tokens_processed_total; + res->t_prompt_processing_total = metrics.t_prompt_processing_total; + res->n_tokens_predicted_total = metrics.n_tokens_predicted_total; + res->t_tokens_generation_total = metrics.t_tokens_generation_total; + + res->n_tokens_max = metrics.n_tokens_max; + + res->n_prompt_tokens_processed = metrics.n_prompt_tokens_processed; + res->t_prompt_processing = metrics.t_prompt_processing; + res->n_tokens_predicted = metrics.n_tokens_predicted; + res->t_tokens_generation = metrics.t_tokens_generation; + + res->n_decode_total = metrics.n_decode_total; + res->n_busy_slots_total = metrics.n_busy_slots_total; + + if (task.metrics_reset_bucket) { + metrics.reset_bucket(); + } + queue_results.send(std::move(res)); + } break; + case SERVER_TASK_TYPE_SLOT_SAVE: + { + if (!check_no_mtmd(task.id)) { + break; + } + + const int id_slot = task.slot_action.id_slot; + server_slot * slot = get_slot_by_id(id_slot); + if (slot == nullptr) { + send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); + break; + } + if (slot->is_processing()) { + // if requested slot is unavailable, we defer this task for 
processing later
+                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
+                    queue_tasks.defer(std::move(task));
+                    break;
+                }
+
+                const size_t token_count = slot->prompt.tokens.size();
+                const int64_t t_start = ggml_time_us();
+
+                std::string filename = task.slot_action.filename;
+                std::string filepath = task.slot_action.filepath;
+
+                const llama_tokens & tokens = slot->prompt.tokens.get_text_tokens();
+                const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id, tokens.data(), token_count);
+
+                const int64_t t_end = ggml_time_us();
+                const double t_save_ms = (t_end - t_start) / 1000.0;
+
+                auto res = std::make_unique();
+                res->id = task.id;
+                res->id_slot = id_slot;
+                res->filename = filename;
+                res->is_save = true;
+                res->n_tokens = token_count;
+                res->n_bytes = nwrite;
+                res->t_ms = t_save_ms;
+                queue_results.send(std::move(res));
+            } break;
+            case SERVER_TASK_TYPE_SLOT_RESTORE:
+            {
+                if (!check_no_mtmd(task.id)) break;
+                const int id_slot = task.slot_action.id_slot;
+                server_slot * slot = get_slot_by_id(id_slot);
+                if (slot == nullptr) {
+                    send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
+                    break;
+                }
+                if (slot->is_processing()) {
+                    // if the requested slot is unavailable, we defer this task for processing later
+                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
+                    queue_tasks.defer(std::move(task));
+                    break;
+                }
+
+                const int64_t t_start = ggml_time_us();
+
+                std::string filename = task.slot_action.filename;
+                std::string filepath = task.slot_action.filepath;
+
+                llama_tokens tokens;
+                tokens.resize(slot->n_ctx);
+                size_t token_count = 0;
+                size_t nread = llama_state_seq_load_file(ctx, filepath.c_str(), slot->id, tokens.data(), tokens.size(), &token_count);
+                if (nread == 0) {
+                    slot->prompt.tokens.clear(); // the KV cache may have already been invalidated?
+ send_error(task, "Unable to restore slot, no available space in KV cache or invalid slot save file", ERROR_TYPE_INVALID_REQUEST); + break; + } + tokens.resize(token_count); + slot->prompt.tokens.clear(); + slot->prompt.tokens.insert(tokens); + + const int64_t t_end = ggml_time_us(); + const double t_restore_ms = (t_end - t_start) / 1000.0; + + auto res = std::make_unique(); + res->id = task.id; + res->id_slot = id_slot; + res->filename = filename; + res->is_save = false; + res->n_tokens = token_count; + res->n_bytes = nread; + res->t_ms = t_restore_ms; + queue_results.send(std::move(res)); + } break; + case SERVER_TASK_TYPE_SLOT_ERASE: + { + if (!check_no_mtmd(task.id)) { + break; + } + const int id_slot = task.slot_action.id_slot; + server_slot * slot = get_slot_by_id(id_slot); + if (slot == nullptr) { + send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); + break; + } + if (slot->is_processing()) { + // if requested slot is unavailable, we defer this task for processing later + SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id); + queue_tasks.defer(std::move(task)); + break; + } + + // Erase token cache + const size_t n_erased = slot->prompt.tokens.size(); + + slot->prompt_clear(false); + + auto res = std::make_unique(); + res->id = task.id; + res->id_slot = id_slot; + res->n_erased = n_erased; + queue_results.send(std::move(res)); + } break; + case SERVER_TASK_TYPE_GET_LORA: + { + // TODO @ngxson : make lora_adapters a dedicated member of server_context + auto & loras = params_base.lora_adapters; + auto res = std::make_unique(); + res->id = task.id; + for (size_t i = 0; i < loras.size(); ++i) { + auto & lora = loras[i]; + std::string alora_invocation_string = ""; + const uint64_t n_alora_tokens = llama_adapter_get_alora_n_invocation_tokens(lora.ptr); + llama_tokens alora_invocation_tokens; + if (n_alora_tokens) { + const llama_token * alora_tokens = llama_adapter_get_alora_invocation_tokens(lora.ptr); + for (uint64_t j = 0; j < n_alora_tokens; ++j) { + alora_invocation_string += common_token_to_piece(vocab, alora_tokens[j]); + alora_invocation_tokens.push_back(alora_tokens[j]); + } + } + res->loras.push_back(server_task_result_get_lora::lora{ + lora, + alora_invocation_string, + alora_invocation_tokens, + }); + } + queue_results.send(std::move(res)); + } break; + case SERVER_TASK_TYPE_SET_LORA: + { + auto new_loras = construct_lora_list(task.set_lora); + // logging + for (size_t i = 0; i < new_loras.size(); ++i) { + SRV_INF("set lora adapter idx=%zu scale=%f\n", i, new_loras[i].scale); + } + // TODO @ngxson : make lora_adapters a dedicated member of server_context + params_base.lora_adapters = new_loras; + auto res = std::make_unique(); + res->id = task.id; + queue_results.send(std::move(res)); + } break; + } + } + + void update_slots() { + // check if all slots are idle + { + bool all_idle = true; + + for (auto & slot : slots) { + if (slot.is_processing()) { + all_idle = false; + break; + } + } + + if (all_idle) { + SRV_INF("%s", "all slots are idle\n"); + + return; + } + } + + { + SRV_DBG("%s", "posting NEXT_RESPONSE\n"); + + server_task task(SERVER_TASK_TYPE_NEXT_RESPONSE); + task.id = queue_tasks.get_new_id(); + queue_tasks.post(std::move(task)); + } + + // apply context-shift if needed + // TODO: simplify and improve + for (server_slot & slot : slots) { + if (slot.state == SLOT_STATE_GENERATING && slot.prompt.n_tokens() + 1 >= slot.n_ctx) { + if (!params_base.ctx_shift) { + // this check is redundant (for good) + // we should never get here, 
because generation should already stopped in process_token() + send_error(slot, "context shift is disabled", ERROR_TYPE_SERVER); + slot.release(); + continue; + } + + if (mctx) { + // we should never reach this because params_base.ctx_shift is automatically disabled if mmproj is loaded + // we don't support ctx_shift because an image chunk may contains multiple tokens + GGML_ABORT("not supported by multimodal"); + } + + if (slot.task->is_parent() || slot.task->is_child()) { + send_error(slot, "context shift cannot be used for shared prompt", ERROR_TYPE_SERVER); + slot.release(); + continue; + } + + // Shift context + int n_keep = slot.task->params.n_keep < 0 ? slot.task->n_tokens() : slot.task->params.n_keep; + + if (add_bos_token) { + n_keep += 1; + } + + n_keep = std::min(slot.n_ctx - 4, n_keep); + + const int n_left = slot.prompt.n_tokens() - n_keep; + const int n_discard = slot.task->params.n_discard ? slot.task->params.n_discard : (n_left / 2); + + SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard); + + llama_memory_seq_rm (llama_get_memory(ctx), slot.id, n_keep , n_keep + n_discard); + llama_memory_seq_add(llama_get_memory(ctx), slot.id, n_keep + n_discard, slot.prompt.n_tokens(), -n_discard); + + // add generated tokens to cache + // ref: https://github.com/ggml-org/llama.cpp/pull/16818#discussion_r2473269481 + { + GGML_ASSERT(!slot.prompt.tokens.has_mtmd); + + llama_tokens new_tokens = slot.prompt.tokens.get_text_tokens(); // copy + for (size_t i = n_keep + n_discard; i < new_tokens.size(); i++) { + new_tokens[i - n_discard] = new_tokens[i]; + } + + new_tokens.resize(slot.prompt.tokens.size() - n_discard); + + slot.prompt.tokens.clear(); + slot.prompt.tokens.insert(new_tokens); + } + + slot.truncated = true; + } + } + + // start populating the batch for this iteration + common_batch_clear(batch); + + // track if given slot can be batched with slots already in the batch + server_slot * slot_batched = nullptr; + + auto accept_special_token = [&](server_slot & slot, llama_token token) { + return params_base.special || + slot.task->params.sampling.preserved_tokens.find(token) != slot.task->params.sampling.preserved_tokens.end(); + }; + + // first, add sampled tokens from any ongoing sequences + for (auto & slot : slots) { + if (slot.state != SLOT_STATE_GENERATING) { + continue; + } + + // check if we can batch this slot with the previous one + if (!slot_batched) { + slot_batched = &slot; + } else if (!slot_batched->can_batch_with(slot)) { + continue; + } + + // generate draft tokens in speculative decoding mode + // TODO: rework to have a single draft llama_context shared across all slots [TAG_SERVER_SPEC_REWORK] + // perform the speculative drafting for all sequences at the same time in a single batch + const int n_draft_max = slot.get_n_draft_max(); + if (n_draft_max > 0) { + if (mctx) { + // we should never reach this, as speculative is automatically disabled if mmproj is loaded + GGML_ABORT("not supported by multimodal"); + } + + const llama_tokens & cached_text_tokens = slot.prompt.tokens.get_text_tokens(); + + const auto & params_spec = slot.task->params.speculative; + + llama_tokens draft = common_speculative_draft(slot.spec, params_spec, cached_text_tokens, slot.sampled); + + if (draft.size() > (size_t) n_draft_max) { + SLT_WRN(slot, "draft size %d exceeds max %d, truncating\n", (int) draft.size(), n_draft_max); + draft.resize(n_draft_max); + } + + // add the sampled token to the batch + 
slot.i_batch_dft.push_back(batch.n_tokens); + common_batch_add(batch, slot.sampled, slot.prompt.tokens.pos_next(), { slot.id }, true); + slot.prompt.tokens.push_back(slot.sampled); + + if (slot.task->params.speculative.n_min > (int) draft.size()) { + SLT_DBG(slot, "ignoring small draft: %d < %d\n", (int) draft.size(), slot.task->params.speculative.n_min); + // fallback to normal decoding + slot.i_batch = slot.i_batch_dft[0]; + slot.drafted.clear(); + slot.i_batch_dft.clear(); + } else { + // keep track of total number of drafted tokens tested + slot.n_draft_total += draft.size(); + + // add all drafted tokens to the batch + for (size_t i = 0; i < draft.size(); i++) { + slot.i_batch_dft.push_back(batch.n_tokens); + common_batch_add(batch, draft[i], slot.prompt.tokens.pos_next(), { slot.id }, true); + slot.prompt.tokens.push_back(draft[i]); + } + slot.drafted = std::move(draft); + } + } else { + // no speculative decoding + slot.i_batch = batch.n_tokens; + + common_batch_add(batch, slot.sampled, slot.prompt.tokens.pos_next(), { slot.id }, true); + + slot.prompt.tokens.push_back(slot.sampled); + + SLT_DBG(slot, "slot decode token, n_ctx = %d, n_tokens = %d, truncated = %d\n", + slot.n_ctx, slot.prompt.n_tokens(), slot.truncated); + } + } + + // process in chunks of params.n_batch + int32_t n_batch = llama_n_batch(ctx); + int32_t n_ubatch = llama_n_ubatch(ctx); + + float alora_scale = -1.0f; + size_t alora_disabled_id = 0; + + // next, batch any pending prompts without exceeding n_batch + if (params_base.cont_batching || batch.n_tokens == 0) { + for (auto & slot : slots) { + if (!slot.is_processing()) { + continue; + } + + // check if we can batch this slot with the previous one + if (slot_batched && !slot_batched->can_batch_with(slot)) { + continue; + } + + // check if this is a child slot + if (slot.state == SLOT_STATE_WAIT_OTHER) { + SLT_DBG(slot, "%s", "waiting for parent slot to complete\n"); + continue; + } + + // this slot still has a prompt to be processed + if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) { + const auto & input_tokens = slot.task->tokens; + + // TODO: maybe move branch to outside of this loop in the future + if (slot.state == SLOT_STATE_STARTED) { + slot.t_start_process_prompt = ggml_time_us(); + slot.t_start_generation = 0; + + slot.state = SLOT_STATE_PROCESSING_PROMPT; + + SLT_INF(slot, "new prompt, n_ctx_slot = %d, n_keep = %d, task.n_tokens = %d\n", + slot.n_ctx, slot.task->params.n_keep, slot.task->n_tokens()); + + // print prompt tokens (for debugging) + /*if (1) { + // first 16 tokens (avoid flooding logs) + for (int i = 0; i < std::min(16, input_tokens.size()); i++) { + SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, input_tokens[i], common_token_to_piece(ctx, input_tokens[i]).c_str()); + } + } else { + // all + for (int i = 0; i < (int) input_tokens.size(); i++) { + SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, input_tokens[i], common_token_to_piece(ctx, input_tokens[i]).c_str()); + } + }*/ + + // keep track how many tokens we can reuse from the previous state + int n_past = 0; + + // empty prompt passed -> release the slot and send empty response + if (input_tokens.empty()) { + SLT_WRN(slot, "%s", "empty prompt - releasing slot\n"); + + slot.print_timings(); + send_final_response(slot); + slot.release(); + + continue; + } + + // TODO: support memory-less logits computation + if (slot.task->need_logits() && !llama_get_memory(ctx)) { + send_error(slot, "the current context does not logits computation. 
skipping", ERROR_TYPE_SERVER); + slot.release(); + continue; + } + + if (!slot.can_split()) { + if (slot.task->n_tokens() > n_ubatch) { + send_error(slot, + string_format( + "input (%d tokens) is too large to process. increase the physical batch " + "size (current batch size: %d)", + slot.task->n_tokens(), n_ubatch), + ERROR_TYPE_SERVER); + slot.release(); + continue; + } + + if (slot.task->n_tokens() > slot.n_ctx) { + send_error( + slot, + string_format( + "input (%d tokens) is larger than the max context size (%d tokens). skipping", + slot.task->n_tokens(), slot.n_ctx), + ERROR_TYPE_EXCEED_CONTEXT_SIZE); + slot.release(); + continue; + } + } else { + if (slot.task->n_tokens() >= slot.n_ctx) { + send_error(slot, + string_format("request (%d tokens) exceeds the available context size (%d " + "tokens), try increasing it", + slot.task->n_tokens(), slot.n_ctx), + ERROR_TYPE_EXCEED_CONTEXT_SIZE); + slot.release(); + continue; + } + + if (slot.task->params.cache_prompt) { + // reuse any previously computed tokens that are common with the new prompt + n_past = slot.prompt.tokens.get_common_prefix(input_tokens); + + // if there is an alora invoked, don't cache after the invocation start + if (slot.alora_invocation_start > 0) { + SLT_DBG(slot, "only caching to alora invocation start (n_past = %d, alora_invocation_start = %d)\n", n_past, slot.alora_invocation_start); + n_past = std::min(n_past, slot.alora_invocation_start - 1); + } + + const auto n_cache_reuse = slot.task->params.n_cache_reuse; + + const bool can_cache_reuse = + llama_memory_can_shift(llama_get_memory(ctx)) && + !slot.prompt.tokens.has_mtmd; + + if (!can_cache_reuse && n_cache_reuse > 0) { + SLT_WRN(slot, "cache reuse is not supported - ignoring n_cache_reuse = %d\n", n_cache_reuse); + } + + // reuse chunks from the cached prompt by shifting their KV cache in the new position + if (can_cache_reuse && n_cache_reuse > 0) { + GGML_ASSERT(!slot.prompt.tokens.has_mtmd); + + size_t head_c = n_past; // cache + size_t head_p = n_past; // current prompt + + if (mctx) { + // we should never reach this + GGML_ABORT("not supported by multimodal"); + } + + SLT_DBG(slot, "trying to reuse chunks with size > %d, n_past = %d\n", n_cache_reuse, n_past); + + while (head_c < slot.prompt.tokens.size() && + head_p < input_tokens.size()) { + + size_t n_match = 0; + while (head_c + n_match < slot.prompt.tokens.size() && + head_p + n_match < input_tokens.size() && + slot.prompt.tokens[head_c + n_match] == input_tokens[head_p + n_match]) { + n_match++; + } + + if (n_match >= (size_t) n_cache_reuse) { + SLT_INF(slot, "reusing chunk with size %zu, shifting KV cache [%zu, %zu) -> [%zu, %zu)\n", n_match, head_c, head_c + n_match, head_p, head_p + n_match); + //for (size_t i = head_p; i < head_p + n_match; i++) { + // SLT_DBG(slot, "cache token %3zu: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str()); + //} + + const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c; + + llama_memory_seq_rm (llama_get_memory(ctx), slot.id, head_p, head_c); + llama_memory_seq_add(llama_get_memory(ctx), slot.id, head_c, head_c + n_match, kv_shift); + + for (size_t i = 0; i < n_match; i++) { + slot.prompt.tokens.set_token(head_p + i, slot.prompt.tokens[head_c + i]); + n_past++; + } + + head_c += n_match; + head_p += n_match; + } else { + head_c += 1; + } + } + + SLT_DBG(slot, "after context reuse, new n_past = %d\n", n_past); + } + } else { + // if we don't cache the prompt, we have to remove all previous tokens + n_past = 0; + } + + // 
note: when n_swa == 0, the model does not use SWA, which is equivalent to a window of 1 + const auto n_swa = std::max(1, llama_model_n_swa(model)); + + // the largest pos_min required for a checkpoint to be useful + const auto pos_min_thold = std::max(0, n_past - n_swa); + + // note: disallow with mtmd contexts for now + // https://github.com/ggml-org/llama.cpp/issues/17043 + if (!mctx && n_past > 0 && n_past < slot.prompt.n_tokens()) { + const auto pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx), slot.id); + if (pos_min == -1) { + SLT_ERR(slot, "n_past = %d, slot.prompt.tokens.size() = %d, seq_id = %d, pos_min = %d\n", n_past, (int) slot.prompt.tokens.size(), slot.id, pos_min); + GGML_ABORT("pos_min == -1, but n_past > 0 - should not happen: https://github.com/ggml-org/llama.cpp/pull/13833#discussion_r2116181237"); + } + + // when the prompt prefix does not match, print the tokens around the mismatch + // this is useful for debugging prompt caching + if (slots_debug) { + const int np0 = std::max(n_past - 4, 0); + const int np1 = std::min(n_past + 6, std::min(slot.prompt.tokens.size(), slot.task->tokens.size())); + + std::stringstream ss0; + std::stringstream ss1; + + std::stringstream st0; + std::stringstream st1; + + ss0 << "old: ... "; + ss1 << "new: ... "; + + for (int i = np0; i < np1; i++) { + if (i == n_past) { + ss0 << " | "; + ss1 << " | "; + } + + { + const auto token = slot.prompt.tokens[i]; + const auto piece = token != LLAMA_TOKEN_NULL ? common_token_to_piece(ctx, token) : "[mtmd]"; + ss0 << piece; + st0 << std::setw(8) << token; + } + + { + const auto token = slot.task->tokens[i]; + const auto piece = token != LLAMA_TOKEN_NULL ? common_token_to_piece(ctx, token) : "[mtmd]"; + ss1 << piece; + st1 << std::setw(8) << token; + } + } + + SLT_WRN(slot, "%s\n", ss0.str().c_str()); + SLT_WRN(slot, "%s\n", ss1.str().c_str()); + + SLT_WRN(slot, "%s\n", st0.str().c_str()); + SLT_WRN(slot, "%s\n", st1.str().c_str()); + } + + if (pos_min > pos_min_thold) { + // TODO: support can be added in the future when corresponding vision models get released + GGML_ASSERT(!slot.prompt.tokens.has_mtmd); + + SLT_WRN(slot, "n_past = %d, slot.prompt.tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", n_past, (int) slot.prompt.tokens.size(), slot.id, pos_min, n_swa); + + // search for a context checkpoint + const auto it = std::find_if( + slot.prompt.checkpoints.rbegin(), + slot.prompt.checkpoints.rend(), + [&](const auto & cur) { + // guarantee that a checkpoint will result in at least one token being processed [TAG_PROMPT_LOGITS] + return cur.pos_min < pos_min_thold; + } + ); + + bool do_reset = it == slot.prompt.checkpoints.rend(); + + if (!do_reset) { + // restore the context checkpoint + const size_t checkpoint_size = it->data.size(); + const size_t n = llama_state_seq_set_data_ext(ctx, it->data.data(), checkpoint_size, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY); + + if (n != checkpoint_size) { + SLT_ERR(slot, "failed to restore context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", it->pos_min, it->pos_max, (float) checkpoint_size / 1024 / 1024); + do_reset = true; + //printf("[DEBUG] `do_reset` was set to `true` after failing to restore a checkpoint"); + } else { + n_past = std::min(n_past, std::max(it->pos_min + 1, it->pos_max)); + SLT_WRN(slot, "restored context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", it->pos_min, it->pos_max, (float) checkpoint_size / 1024 / 1024); + } + } + + if (do_reset) { + SLT_WRN(slot, "forcing full prompt 
re-processing due to lack of cache data (likely due to SWA or hybrid/recurrent memory, see %s)\n", + "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055"); + n_past = 0; + } + } + } + + { + // erase any checkpoints with pos_min > pos_min_thold + for (auto it = slot.prompt.checkpoints.begin(); it != slot.prompt.checkpoints.end();) { + const auto & cur = *it; + if (cur.pos_min > pos_min_thold) { + SLT_WRN(slot, "erased invalidated context checkpoint (pos_min = %d, pos_max = %d, n_swa = %d, size = %.3f MiB)\n", cur.pos_min, cur.pos_max, n_swa, (float) cur.data.size() / 1024 / 1024); + it = slot.prompt.checkpoints.erase(it); + } else { + ++it; + } + } + } + } + + // [TAG_PROMPT_LOGITS] + if (n_past == slot.task->n_tokens() && n_past > 0) { + SLT_WRN(slot, "need to evaluate at least 1 token for each active slot (n_past = %d, task.n_tokens() = %d)\n", n_past, slot.task->n_tokens()); + n_past--; + SLT_WRN(slot, "n_past was set to %d\n", n_past); + } + + slot.n_prompt_tokens_cache = n_past; + slot.n_prompt_tokens_processed = 0; + + slot.prompt.tokens.keep_first(n_past); + + // send initial 0% progress update if needed + // this is to signal the client that the request has started processing + if (slot.task->params.stream && slot.task->params.return_progress) { + send_partial_response(slot, {}, true); + } + } + + if (!slot.can_split()) { + // cannot fit the prompt in the current batch - will try next iter + if (batch.n_tokens + slot.task->n_tokens() > n_batch) { + continue; + } + } + + // truncate any tokens that are beyond n_past for this slot + const llama_pos p0 = slot.prompt.tokens.pos_next(); + + SLT_INF(slot, "n_tokens = %d, memory_seq_rm [%d, end)\n", slot.prompt.n_tokens(), p0); + + if (!llama_memory_seq_rm(llama_get_memory(ctx), slot.id, p0, -1)) { + SLT_WRN(slot, "failed to truncate tokens with position >= %d - clearing the memory\n", p0); + + slot.prompt_clear(true); + + // there is no common part left + slot.n_prompt_tokens_cache = 0; + } + + // check if we should process the image + if (slot.prompt.n_tokens() < slot.task->n_tokens() && input_tokens[slot.prompt.n_tokens()] == LLAMA_TOKEN_NULL) { + // process the image + size_t n_tokens_out = 0; + int32_t res = input_tokens.process_chunk(ctx, mctx, slot.prompt.n_tokens(), slot.prompt.tokens.pos_next(), slot.id, n_tokens_out); + if (res != 0) { + SLT_ERR(slot, "failed to process image, res = %d\n", res); + send_error(slot, "failed to process image", ERROR_TYPE_SERVER); + slot.release(); + continue; + } + + slot.n_prompt_tokens_processed += n_tokens_out; + + // add the image chunk to cache + { + const auto & chunk = input_tokens.find_chunk(slot.prompt.n_tokens()); + slot.prompt.tokens.push_back(chunk.get()); // copy + } + } + + // If using an alora, there may be uncached tokens that come + // before the invocation sequence. When this happens, the + // tokens before the invocation sequence need to be + // processed without the adapter in a separate batch, then + // the adapter needs to be enabled for the remaining tokens. 
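A minimal standalone sketch of the two-phase adapter handling described in the comment above; the struct, indices and scale here are hypothetical stand-ins, not the server's actual types or values:

#include <cstdio>
#include <vector>

struct adapter { float scale; };   // hypothetical stand-in for the server's adapter entry

int main() {
    std::vector<adapter> lora = { { 0.8f } };
    const int n_prompt               = 8;  // hypothetical prompt length
    const int alora_invocation_start = 5;  // hypothetical start of the invocation sequence

    // phase 1: decode [0, alora_invocation_start - 1) with the adapter disabled
    const float saved_scale = lora[0].scale;
    lora[0].scale = 0.0f;
    std::printf("tokens [0, %d): scale = %.1f\n", alora_invocation_start - 1, lora[0].scale);

    // phase 2: re-enable the adapter and decode the rest, including the invocation sequence
    lora[0].scale = saved_scale;
    std::printf("tokens [%d, %d): scale = %.1f\n", alora_invocation_start - 1, n_prompt, lora[0].scale);
}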
+ if (lora_all_alora(slot.lora) && slot.alora_invocation_start - 1 > slot.prompt.n_tokens()) { + SLT_DBG(slot, "processing pre-alora tokens without the adapter (n_tokens = %d, alora_invocation_start = %d)\n", slot.prompt.n_tokens(), slot.alora_invocation_start); + const auto & enabled_loras = lora_get_enabled_ids(slot.lora); + GGML_ASSERT(enabled_loras.size() == 1); + alora_scale = slot.lora[enabled_loras[0]].scale; + slot.lora[enabled_loras[0]].scale = 0.0f; + alora_disabled_id = enabled_loras[0]; + } + + bool do_checkpoint = params_base.n_ctx_checkpoints > 0; + + // make checkpoints only for completion tasks + do_checkpoint = do_checkpoint && slot.task->type == SERVER_TASK_TYPE_COMPLETION; + + // make a checkpoint of the parts of the memory that cannot be rolled back. + // checkpoints are created only if: + // - the model uses SWA and we are not using `swa_full` + // - the model architecture is marked as recurrent or hybrid + // + // TODO: try to make this conditional on the context or the memory module, instead of the model type + do_checkpoint = do_checkpoint && ( + llama_model_is_recurrent(model) || + llama_model_is_hybrid(model) || + (llama_model_n_swa(model) > 0 && !params_base.swa_full) + ); + + // add prompt tokens for processing in the current batch + while (slot.prompt.n_tokens() < slot.task->n_tokens() && batch.n_tokens < n_batch) { + // get next token to process + llama_token cur_tok = input_tokens[slot.prompt.n_tokens()]; + if (cur_tok == LLAMA_TOKEN_NULL) { + break; // end of text chunk + } + + // if this is an alora request with pre-invocation + // tokens that are not cached, we need to stop filling + // this batch at those pre-invocation tokens. + if (alora_scale > 0 && slot.prompt.n_tokens() == slot.alora_invocation_start - 1) { + SLT_DBG(slot, "stop prompt batch filling at (n_tokens = %d, alora_invocation_start = %d)\n", slot.prompt.n_tokens(), slot.alora_invocation_start); + break; + } + + // embedding requires all tokens in the batch to be output + common_batch_add(batch, + cur_tok, + slot.prompt.tokens.pos_next(), + { slot.id }, + slot.task->need_embd()); + slot.prompt.tokens.push_back(cur_tok); + + slot.n_prompt_tokens_processed++; + + // process the last few tokens of the prompt separately in order to allow for a checkpoint to be created. 
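The stopping rule in the loop below can be read as: when more than n_last prompt tokens remain, leave exactly n_last of them for a later batch so the tail of the prompt is decoded separately and a checkpoint can be taken first. A standalone restatement under that assumption (the real loop also stops when the batch is full or at a multimodal chunk):

#include <algorithm>
#include <cassert>

// how many prompt tokens to schedule now, given `n_remaining` tokens left, `n_batch_free`
// slots in the batch, and the wish to keep the final `n_last` tokens in their own batch
static int n_to_schedule(int n_remaining, int n_batch_free, int n_last) {
    int n = std::min(n_remaining, n_batch_free);
    if (n_remaining > n_last) {
        n = std::min(n, n_remaining - n_last);
    }
    return n;
}

int main() {
    assert(n_to_schedule(600, 2048, 512) ==  88); // leave the last 512 for the next batch
    assert(n_to_schedule(300, 2048, 512) == 300); // short prompts are not split
    assert(n_to_schedule(600,   64, 512) ==  64); // batch space is still the hard limit
}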
+ const int n_last = std::min(n_batch, 512); + if (do_checkpoint && slot.task->n_tokens() == slot.prompt.n_tokens() + n_last) { + break; + } + } + + // SLT_INF(slot, "new slot.prompt.tokens: %s\n", slot.slot.prompt.tokens.str().c_str()); + + SLT_INF(slot, "prompt processing progress, n_tokens = %d, batch.n_tokens = %d, progress = %f\n", slot.prompt.n_tokens(), batch.n_tokens, (float) slot.prompt.n_tokens() / slot.task->n_tokens()); + + // entire prompt has been processed + if (slot.prompt.n_tokens() == slot.task->n_tokens()) { + slot.state = SLOT_STATE_DONE_PROMPT; + + GGML_ASSERT(batch.n_tokens > 0); + + // extract the logits only for the last token + batch.logits[batch.n_tokens - 1] = true; + + slot.n_decoded = 0; + slot.i_batch = batch.n_tokens - 1; + + SLT_INF(slot, "prompt done, n_tokens = %d, batch.n_tokens = %d\n", slot.prompt.n_tokens(), batch.n_tokens); + + slot.init_sampler(); + + const auto pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx), slot.id); + const auto pos_max = llama_memory_seq_pos_max(llama_get_memory(ctx), slot.id); + + // no need for empty or small checkpoints + do_checkpoint = do_checkpoint && (pos_min >= 0 && pos_max >= 64); + + // no need to create checkpoints that are too close together + do_checkpoint = do_checkpoint && (slot.prompt.checkpoints.empty() || pos_max > slot.prompt.checkpoints.back().pos_max + 64); + + if (do_checkpoint) { + while (slot.prompt.checkpoints.size() >= (size_t) params_base.n_ctx_checkpoints) { + // make room for the new checkpoint, if needed + const auto & cur = slot.prompt.checkpoints.front(); + + SLT_WRN(slot, "erasing old context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", + cur.pos_min, cur.pos_max, (float) cur.data.size() / 1024 / 1024); + + slot.prompt.checkpoints.erase(slot.prompt.checkpoints.begin()); + } + + const size_t checkpoint_size = llama_state_seq_get_size_ext(ctx, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY); + + auto & cur = slot.prompt.checkpoints.emplace_back(server_prompt_checkpoint{ + /*.pos_min = */ pos_min, + /*.pos_max = */ pos_max, + /*.data = */ std::vector(checkpoint_size), + }); + + llama_state_seq_get_data_ext(ctx, cur.data.data(), checkpoint_size, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY); + + SLT_WRN(slot, "created context checkpoint %d of %d (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", + (int) slot.prompt.checkpoints.size(), params_base.n_ctx_checkpoints, cur.pos_min, cur.pos_max, (float) cur.data.size() / 1024 / 1024); + } + } + } + + if (!slot_batched) { + slot_batched = &slot; + } + + if (batch.n_tokens >= n_batch) { + break; + } + } + } + + SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens); + + if (slot_batched) { + // apply lora, only need to do it once per batch + common_set_adapter_lora(ctx, slot_batched->lora); + + // if the lora is temporarily disabled for an alora, re-enable it + // for next time + if (alora_scale > 0.0f) { + SRV_DBG("re-enabling alora with scale %f\n", alora_scale); + slot_batched->lora[alora_disabled_id].scale = alora_scale; + } + + llama_set_embeddings(ctx, slot_batched->task->need_embd()); + } + + if (batch.n_tokens == 0) { + SRV_WRN("%s", "no tokens to decode\n"); + } + + int32_t i_next = 0; + + // process the created batch of tokens + for (int32_t i = 0; i < batch.n_tokens; i = i_next) { + const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i); + + llama_batch batch_view = { + n_tokens, + batch.token + i, + nullptr, + batch.pos + i, + batch.n_seq_id + i, + batch.seq_id + i, + batch.logits + i, + }; + + const int ret = 
llama_decode(ctx, batch_view); + + metrics.on_decoded(slots); + + if (ret != 0) { + { + std::string err; + + if (n_batch == 1 && ret == 1) { + // TODO: try to terminate only the largest active slot/sequence and continue with the rest + // need to remove the tokens from the current batch too + err = "Context size has been exceeded."; + } + + if (ret == -1) { + err = "Invalid input batch."; + } + + if (ret < -1) { + // TODO: update slot state based on llama_memory_seq_pos_min() and llama_memory_seq_pos_max() + err = "Compute error."; + } + + // TODO: handle ret == 2 (abort) when we start aborting + + if (!err.empty()) { + SRV_ERR("%s i = %d, n_batch = %d, ret = %d\n", err.c_str(), i, n_batch, ret); + + for (auto & slot : slots) { + if (slot.is_processing()) { + send_error(slot, err); + slot.release(); + + // note: it's complicated to keep track of how much of the current batch has been + // processed before the error occurred, so we simply clear the entire context + slot.prompt_clear(false); + } + } + + break; + } + } + + // retry with half the batch size to try to find a free slot in the KV cache + if (!try_clear_idle_slots()) { + n_batch /= 2; + } + + SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret); + + continue; // continue loop of n_batch + } + + // move the head of the batch forward with the number of tokens we just processed + i_next = i + n_tokens; + + // on successful decode, restore the original batch size + n_batch = llama_n_batch(ctx); + + // handle `n_cmpl > 1` tasks - when the main prompt is processed, activate all child tasks too + for (auto & slot : slots) { + if (slot.state == SLOT_STATE_DONE_PROMPT && slot.task->is_parent()) { + std::vector children; + for (auto & other : slots) { + if (other.state == SLOT_STATE_WAIT_OTHER && slot.task->id == other.task->id_parent) { + children.push_back(&other); + } + } + + // all children slots should already launched by launch_slots_with_parent_task() + // copy state to the child slots + for (auto & child : children) { + SLT_INF(slot, " - copying state to child %d\n", child->id); + + GGML_ASSERT(child->state == SLOT_STATE_WAIT_OTHER); + + slot.copy_state_to(*child); + child->state = SLOT_STATE_DONE_PROMPT; + } + } + } + + for (auto & slot : slots) { + // optionally send prompt processing progress + if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_DONE_PROMPT) { + if (slot.task->params.stream && slot.task->params.return_progress) { + send_partial_response(slot, {}, true); + } + } + + if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) { + continue; // continue loop of slots + } + + if (slot.state == SLOT_STATE_DONE_PROMPT) { + if (slot.task->type == SERVER_TASK_TYPE_EMBEDDING) { + // prompt evaluated for embedding + send_embedding(slot, batch_view); + slot.release(); + slot.i_batch = -1; + continue; // continue loop of slots + } + + if (slot.task->type == SERVER_TASK_TYPE_RERANK) { + send_rerank(slot, batch_view); + slot.release(); + slot.i_batch = -1; + continue; // continue loop of slots + } + + GGML_ASSERT(slot.task->need_sampling()); + + // prompt evaluated for next-token prediction + slot.state = SLOT_STATE_GENERATING; + + if (slot.can_speculate()) { + common_speculative_begin(slot.spec, slot.prompt.tokens.get_text_tokens()); + } + } else if (slot.state != SLOT_STATE_GENERATING) { + continue; // continue loop of slots + } + + if (slot.i_batch_dft.size() > 0) { + continue; // sample using speculative 
decoding + } + + const int tok_idx = slot.i_batch - i; + + llama_token id = common_sampler_sample(slot.smpl.get(), ctx, tok_idx); + + slot.i_batch = -1; + + common_sampler_accept(slot.smpl.get(), id, true); + + // here we have synchronized the llama_context (due to the sampling above), so we can do time measurement + const int64_t t_current = ggml_time_us(); + + slot.n_decoded += 1; + + if (slot.n_decoded == 1) { + slot.t_start_generation = t_current; + slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3; + metrics.on_prompt_eval(slot); + } + + slot.t_token_generation = std::max(1, t_current - slot.t_start_generation) / 1e3; + + completion_token_output result; + result.tok = id; + result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok)); + result.prob = 1.0f; // TODO: set it here instead of doing inside populate_token_probs + + if (slot.task->params.sampling.n_probs > 0) { + populate_token_probs(slot, result, slot.task->params.post_sampling_probs, params_base.special, tok_idx); + } + + if (!process_token(result, slot)) { + // release slot because of stop condition + slot.print_timings(); + send_final_response(slot); + metrics.on_prediction(slot); + slot.release(); + + continue; + } + } + + // speculative decoding - main model sample and accept + for (auto & slot : slots) { + if (slot.state != SLOT_STATE_GENERATING || slot.i_batch_dft.empty()) { + continue; + } + + const size_t n_draft = slot.drafted.size(); + + // the accepted tokens from the speculation + const auto ids = common_sampler_sample_and_accept_n(slot.smpl.get(), ctx, slot.i_batch_dft, slot.drafted); + slot.i_batch_dft.clear(); + slot.drafted.clear(); + + const int64_t t_current = ggml_time_us(); + + slot.n_decoded += ids.size(); + + slot.t_token_generation = std::max(1, t_current - slot.t_start_generation) / 1e3; + + // update how many tokens out of those tested were accepted + slot.n_draft_accepted += ids.size() - 1; + + // inform the speculative decoding about the number of accepted tokens + common_speculative_accept(slot.spec, ids.size() - 1); + + // rollback to the state before sampling the draft tokens + slot.prompt.tokens.keep_first(slot.prompt.n_tokens() - n_draft); + + // add accepted tokens to the prompt + slot.prompt.tokens.insert({ids.begin(), ids.end() - 1}); + slot.sampled = ids.back(); // last accepted token + + llama_memory_seq_rm(llama_get_memory(ctx), slot.id, slot.prompt.n_tokens(), -1); + + for (size_t i = 0; i < ids.size(); ++i) { + completion_token_output result; + + result.tok = ids[i]; + result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok)); + result.prob = 1.0f; // set later + + // TODO: set result.probs + + if (!process_token(result, slot)) { + slot.print_timings(); + send_final_response(slot); + metrics.on_prediction(slot); + slot.release(); + + break; + } + } + + SLT_DBG(slot, "accepted %d/%d draft tokens, new n_tokens = %d\n", (int) ids.size() - 1, (int) n_draft, slot.prompt.n_tokens()); + } + } + + SRV_DBG("%s", "run slots completed\n"); + } + + int get_slot_n_ctx() { + return slots.back().n_ctx; + } + + server_response_reader get_response_reader() { + return server_response_reader(queue_tasks, queue_results, HTTP_POLLING_SECONDS); + } +}; + +// +// server_context (public API) +// + +server_context::server_context() : impl(new server_context_impl()) {} +server_context::~server_context() = default; + +bool server_context::load_model(const common_params & params) { + return 
impl->load_model(params); +} + +void server_context::start_loop() { + auto & params = impl->params_base; + impl->queue_tasks.start_loop(params.sleep_idle_seconds * 1000); +} + +void server_context::terminate() { + impl->queue_tasks.terminate(); +} + +llama_context * server_context::get_llama_context() const { + return impl->ctx; +} + +server_response_reader server_context::get_response_reader() { + return impl->get_response_reader(); +} + +server_context_meta server_context::get_meta() const { + auto bos_id = llama_vocab_bos(impl->vocab); + auto eos_id = llama_vocab_eos(impl->vocab); + auto bos_token_str = bos_id != LLAMA_TOKEN_NULL ? common_token_to_piece(impl->ctx, bos_id, true) : ""; + auto eos_token_str = eos_id != LLAMA_TOKEN_NULL ? common_token_to_piece(impl->ctx, eos_id, true) : ""; + + return server_context_meta { + /* build_info */ build_info, + /* model_name */ impl->model_name, + /* model_path */ impl->params_base.model.path, + /* has_mtmd */ impl->mctx != nullptr, + /* has_inp_image */ impl->chat_params.allow_image, + /* has_inp_audio */ impl->chat_params.allow_audio, + /* json_webui_settings */ impl->json_webui_settings, + /* slot_n_ctx */ impl->get_slot_n_ctx(), + /* pooling_type */ llama_pooling_type(impl->ctx), + + /* chat_params */ impl->chat_params, + /* chat_template_caps */ common_chat_templates_get_caps(impl->chat_params.tmpls.get()), + + /* bos_token_str */ bos_token_str, + /* eos_token_str */ eos_token_str, + /* fim_pre_token */ llama_vocab_fim_pre(impl->vocab), + /* fim_sub_token */ llama_vocab_fim_suf(impl->vocab), + /* fim_mid_token */ llama_vocab_fim_mid(impl->vocab), + + /* model_vocab_type */ llama_vocab_type(impl->vocab), + /* model_vocab_n_tokens */ llama_vocab_n_tokens(impl->vocab), + /* model_n_ctx_train */ llama_model_n_ctx_train(impl->model), + /* model_n_embd_inp */ llama_model_n_embd(impl->model), + /* model_n_params */ llama_model_n_params(impl->model), + /* model_size */ llama_model_size(impl->model), + }; +} + + + +// generator-like API for HTTP response generation +// may have bypass_sleep = true if the task does not use ctx_server +struct server_res_generator : server_http_res { + server_response_reader rd; + server_res_generator(server_queue & queue_tasks, server_response & queue_results, int sleep_idle_seconds, bool bypass_sleep = false) + : rd(queue_tasks, queue_results, HTTP_POLLING_SECONDS) { + // fast path in case sleeping is disabled + bypass_sleep |= sleep_idle_seconds < 0; + if (!bypass_sleep) { + queue_tasks.wait_until_no_sleep(); + } + } + void ok(const json & response_data) { + status = 200; + data = safe_json_to_str(response_data); + } + void error(const json & error_data) { + status = json_value(error_data, "code", 500); + data = safe_json_to_str({{ "error", error_data }}); + } +}; + + + +// +// server_routes +// + +std::unique_ptr server_routes::handle_completions_impl( + const server_http_req & req, + server_task_type type, + const json & data, + const std::vector & files, + task_response_type res_type) { + GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL); + + auto res = create_response(); + auto completion_id = gen_chatcmplid(); + auto & rd = res->rd; + + try { + std::vector tasks; + + const auto & prompt = data.at("prompt"); + // TODO: this log can become very long, put it behind a flag or think about a more compact format + //SRV_DBG("Prompt: %s\n", prompt.is_string() ? 
prompt.get().c_str() : prompt.dump(2).c_str()); + + // process prompt + std::vector inputs; + + if (res_type != TASK_RESPONSE_TYPE_NONE && ctx_server.mctx != nullptr) { + // This is the case used by OAI compatible chat path with MTMD. TODO It can be moved to the path below. + inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get(), files)); + } else { + // Everything else, including multimodal completions. + inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); + } + + // tasks.reserve(inputs.size()); // TODO: this is inaccurate due to child tasks + + for (size_t i = 0; i < inputs.size(); i++) { + server_task task = server_task(type); + + task.id = rd.get_new_id(); + + task.tokens = std::move(inputs[i]); + task.params = server_task::params_from_json_cmpl( + ctx_server.vocab, + params, + meta->slot_n_ctx, + data); + task.id_slot = json_value(data, "id_slot", -1); + + // OAI-compat + task.params.res_type = res_type; + task.params.oaicompat_cmpl_id = completion_id; + task.params.oaicompat_model = meta->model_name; + + // prepare child tasks + if (task.params.n_cmpl > 1) { + int n_children = task.params.n_cmpl - 1; + for (int j = 0; j < n_children; j++) { + task.add_child(task.id, rd.get_new_id()); + } + } + + tasks.push_back(std::move(task)); + } + + rd.post_tasks(std::move(tasks)); + } catch (const std::exception & e) { + res->error(format_error_response(e.what(), ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + bool stream = json_value(data, "stream", false); + + if (!stream) { + // non-stream, wait for the results + auto all_results = rd.wait_for_all(req.should_stop); + if (all_results.is_terminated) { + return res; // connection is closed + } else if (all_results.error) { + res->error(all_results.error->to_json()); + return res; + } else { + json arr = json::array(); + for (auto & res : all_results.results) { + GGML_ASSERT(dynamic_cast(res.get()) != nullptr); + arr.push_back(res->to_json()); + } + GGML_ASSERT(!arr.empty() && "empty results"); + if (arr.size() == 1) { + // if single request, return single object instead of array + res->ok(arr[0]); + } else if (res_type == TASK_RESPONSE_TYPE_OAI_CHAT || res_type == TASK_RESPONSE_TYPE_OAI_CMPL) { + // if multiple results in OAI format, we need to re-format them + json & choices = arr[0]["choices"]; + for (size_t i = 1; i < arr.size(); i++) { + choices.push_back(std::move(arr[i]["choices"][0])); + } + res->ok(arr[0]); + } else { + // multi-results, non-OAI compat + res->ok(arr); + } + } + } else { + // in streaming mode, the first error must be treated as non-stream response + // this is to match the OAI API behavior + // ref: https://github.com/ggml-org/llama.cpp/pull/16486#discussion_r2419657309 + auto first_result = rd.next(req.should_stop); + if (first_result == nullptr) { + GGML_ASSERT(req.should_stop()); + return res; // connection is closed + } + + if (first_result->is_error()) { + res->error(first_result->to_json()); + return res; + } + + GGML_ASSERT( + dynamic_cast(first_result.get()) != nullptr || + dynamic_cast (first_result.get()) != nullptr + ); + + // next responses are streamed + // to be sent immediately + json first_result_json = first_result->to_json(); + if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) { + res->data = format_anthropic_sse(first_result_json); + } else if (res_type == TASK_RESPONSE_TYPE_OAI_RESP) { + res->data = format_oai_resp_sse(first_result_json); + } else { + res->data = format_oai_sse(first_result_json); + } + res->status = 200; + res->content_type = 
"text/event-stream"; + res->next = [res_this = res.get(), res_type, &req](std::string & output) -> bool { + static auto format_error = [](task_response_type res_type, const json & res_json) { + if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) { + return format_anthropic_sse({ + {"event", "error"}, + {"data", res_json}, + }); + } else { + return format_oai_sse(json {{ "error", res_json }}); + } + }; + + try { + if (req.should_stop()) { + SRV_DBG("%s", "stopping streaming due to should_stop condition\n"); + return false; // should_stop condition met + } + + if (!res_this->data.empty()) { + // flush the first chunk + output = std::move(res_this->data); + res_this->data.clear(); + return true; + } + + server_response_reader & rd = res_this->rd; + + // check if there is more data + if (!rd.has_next()) { + switch (res_type) { + case TASK_RESPONSE_TYPE_NONE: + case TASK_RESPONSE_TYPE_OAI_RESP: + case TASK_RESPONSE_TYPE_ANTHROPIC: + output = ""; + break; + + default: + output = "data: [DONE]\n\n"; + break; + } + SRV_DBG("%s", "all results received, terminating stream\n"); + return false; // no more data, terminate + } + + // receive subsequent results + auto result = rd.next(req.should_stop); + if (result == nullptr) { + SRV_DBG("%s", "stopping streaming due to should_stop condition\n"); + GGML_ASSERT(req.should_stop()); + return false; // should_stop condition met + } + + // send the results + if (result->is_error()) { + json res_json = result->to_json(); + output = format_error(res_type, res_json); + SRV_DBG("%s", "error received during streaming, terminating stream\n"); + return false; // terminate on error + } else { + GGML_ASSERT( + dynamic_cast(result.get()) != nullptr + || dynamic_cast(result.get()) != nullptr + ); + json res_json = result->to_json(); + if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) { + output = format_anthropic_sse(res_json); + } else if (res_type == TASK_RESPONSE_TYPE_OAI_RESP) { + output = format_oai_resp_sse(res_json); + } else { + output = format_oai_sse(res_json); + } + } + + // has next data, continue + return true; + + } catch (const std::exception & e) { + json error_json = format_error_response(e.what(), ERROR_TYPE_SERVER); + output = format_error(res_type, error_json); + + // terminate on exception + return false; + } + }; + } + + return res; +} + +std::unique_ptr server_routes::create_response(bool bypass_sleep) { + return std::make_unique(queue_tasks, queue_results, params.sleep_idle_seconds, bypass_sleep); +} + +server_routes::server_routes(const common_params & params, server_context & ctx_server) + : params(params), + ctx_server(*ctx_server.impl), + queue_tasks(ctx_server.impl->queue_tasks), + queue_results(ctx_server.impl->queue_results) { + init_routes(); +} + +void server_routes::init_routes() { + // IMPORTANT: all lambda functions must start with create_response() + // this is to ensure that the server_res_generator can handle sleeping case correctly + + this->get_health = [this](const server_http_req &) { + // error and loading states are handled by middleware + auto res = create_response(true); + + // this endpoint can be accessed during sleeping + // the next LOC is to avoid someone accidentally use ctx_server + bool ctx_server; // do NOT delete this line + GGML_UNUSED(ctx_server); + + res->ok({{"status", "ok"}}); + return res; + }; + + this->get_metrics = [this](const server_http_req & req) { + auto res = create_response(); + if (!params.endpoint_metrics) { + res->error(format_error_response("This server does not support metrics endpoint. 
Start it with `--metrics`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + // request slots data using task queue + { + server_task task(SERVER_TASK_TYPE_METRICS); + task.id = res->rd.get_new_id(); + res->rd.post_task(std::move(task), true); // high-priority task + } + + // get the result + auto result = res->rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + // TODO: get rid of this dynamic_cast + auto res_task = dynamic_cast(result.get()); + GGML_ASSERT(res_task != nullptr); + + // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names + json all_metrics_def = json { + {"counter", {{ + {"name", "prompt_tokens_total"}, + {"help", "Number of prompt tokens processed."}, + {"value", (uint64_t) res_task->n_prompt_tokens_processed_total} + }, { + {"name", "prompt_seconds_total"}, + {"help", "Prompt process time"}, + {"value", (uint64_t) res_task->t_prompt_processing_total / 1.e3} + }, { + {"name", "tokens_predicted_total"}, + {"help", "Number of generation tokens processed."}, + {"value", (uint64_t) res_task->n_tokens_predicted_total} + }, { + {"name", "tokens_predicted_seconds_total"}, + {"help", "Predict process time"}, + {"value", (uint64_t) res_task->t_tokens_generation_total / 1.e3} + }, { + {"name", "n_decode_total"}, + {"help", "Total number of llama_decode() calls"}, + {"value", res_task->n_decode_total} + }, { + {"name", "n_tokens_max"}, + {"help", "Largest observed n_tokens."}, + {"value", res_task->n_tokens_max} + }, { + {"name", "n_busy_slots_per_decode"}, + {"help", "Average number of busy slots per llama_decode() call"}, + {"value", (float) res_task->n_busy_slots_total / std::max((float) res_task->n_decode_total, 1.f)} + }}}, + {"gauge", {{ + {"name", "prompt_tokens_seconds"}, + {"help", "Average prompt throughput in tokens/s."}, + {"value", res_task->n_prompt_tokens_processed ? 1.e3 / res_task->t_prompt_processing * res_task->n_prompt_tokens_processed : 0.} + },{ + {"name", "predicted_tokens_seconds"}, + {"help", "Average generation throughput in tokens/s."}, + {"value", res_task->n_tokens_predicted ? 1.e3 / res_task->t_tokens_generation * res_task->n_tokens_predicted : 0.} + },{ + {"name", "requests_processing"}, + {"help", "Number of requests processing."}, + {"value", (uint64_t) res_task->n_processing_slots} + },{ + {"name", "requests_deferred"}, + {"help", "Number of requests deferred."}, + {"value", (uint64_t) res_task->n_tasks_deferred} + }}} + }; + + std::stringstream prometheus; + + for (const auto & el : all_metrics_def.items()) { + const auto & type = el.key(); + const auto & metrics_def = el.value(); + + for (const auto & metric_def : metrics_def) { + const std::string name = metric_def.at("name"); + const std::string help = metric_def.at("help"); + + auto value = json_value(metric_def, "value", 0.); + prometheus << "# HELP llamacpp:" << name << " " << help << "\n" + << "# TYPE llamacpp:" << name << " " << type << "\n" + << "llamacpp:" << name << " " << value << "\n"; + } + } + + res->headers["Process-Start-Time-Unix"] = std::to_string(res_task->t_start); + res->content_type = "text/plain; version=0.0.4"; + res->status = 200; + res->data = prometheus.str(); + return res; + }; + + this->get_slots = [this](const server_http_req & req) { + auto res = create_response(); + if (!params.endpoint_slots) { + res->error(format_error_response("This server does not support slots endpoint. 
Start it with `--slots`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + // request slots data using task queue + { + server_task task(SERVER_TASK_TYPE_METRICS); + task.id = res->rd.get_new_id(); + res->rd.post_task(std::move(task), true); // high-priority task + } + + // get the result + auto result = res->rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + // TODO: get rid of this dynamic_cast + auto * res_task = dynamic_cast(result.get()); + GGML_ASSERT(res_task != nullptr); + + // optionally return "fail_on_no_slot" error + if (!req.get_param("fail_on_no_slot").empty()) { + if (res_task->n_idle_slots == 0) { + res->error(format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE)); + return res; + } + } + + res->ok(res_task->slots_data); + return res; + }; + + this->post_slots = [this](const server_http_req & req) { + auto res = create_response(); + if (params.slot_save_path.empty()) { + res->error(format_error_response("This server does not support slots action. Start it with `--slot-save-path`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + std::string id_slot_str = req.get_param("id_slot"); + + int id_slot; + try { + id_slot = std::stoi(id_slot_str); + } catch (const std::exception &) { + res->error(format_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + std::string action = req.get_param("action"); + + if (action == "save") { + return handle_slots_save(req, id_slot); + } + if (action == "restore") { + return handle_slots_restore(req, id_slot); + } + if (action == "erase") { + return handle_slots_erase(req, id_slot); + } + + res->error(format_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST)); + return res; + }; + + this->get_props = [this](const server_http_req &) { + auto res = create_response(true); + + // this endpoint can be accessed during sleeping + // the next LOC is to avoid someone accidentally use ctx_server + bool ctx_server; // do NOT delete this line + GGML_UNUSED(ctx_server); + + task_params tparams; + tparams.sampling = params.sampling; + json default_generation_settings_for_props = json { + { "params", tparams.to_json(true) }, + { "n_ctx", meta->slot_n_ctx }, + }; + + std::string tmpl_default = common_chat_templates_source(meta->chat_params.tmpls.get(), ""); + std::string tmpl_tools = common_chat_templates_source(meta->chat_params.tmpls.get(), "tool_use"); + + json props = { + { "default_generation_settings", default_generation_settings_for_props }, + { "total_slots", params.n_parallel }, + { "model_alias", meta->model_name }, + { "model_path", meta->model_path }, + { "modalities", json { + {"vision", meta->has_inp_image}, + {"audio", meta->has_inp_audio}, + } }, + { "endpoint_slots", params.endpoint_slots }, + { "endpoint_props", params.endpoint_props }, + { "endpoint_metrics", params.endpoint_metrics }, + { "webui", params.webui }, + { "webui_settings", meta->json_webui_settings }, + { "chat_template", tmpl_default }, + { "chat_template_caps", meta->chat_template_caps }, + { "bos_token", meta->bos_token_str }, + { "eos_token", meta->eos_token_str }, + { "build_info", meta->build_info }, + { "is_sleeping", queue_tasks.is_sleeping() }, + }; + if (params.use_jinja) { + if (!tmpl_tools.empty()) { + props["chat_template_tool_use"] = tmpl_tools; + } + } + res->ok(props); + return res; + }; + + this->post_props = [this](const server_http_req &) { + auto res 
= create_response(); + if (!params.endpoint_props) { + res->error(format_error_response("This server does not support changing global properties. Start it with `--props`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + // update any props here + + res->ok({{ "success", true }}); + return res; + }; + + this->get_api_show = [this](const server_http_req &) { + auto res = create_response(); + std::string tmpl_default = common_chat_templates_source(meta->chat_params.tmpls.get(), ""); + json data = { + { + "model_info", { + { "llama.context_length", meta->slot_n_ctx }, + } + }, + {"modelfile", ""}, + {"parameters", ""}, + {"template", tmpl_default}, + {"details", { + {"parent_model", ""}, + {"format", "gguf"}, + {"family", ""}, + {"families", {""}}, + {"parameter_size", ""}, + {"quantization_level", ""} + }}, + {"model_info", ""}, + {"capabilities", meta->has_mtmd ? json({"completion","multimodal"}) : json({"completion"})} + }; + + res->ok(data); + return res; + }; + + this->post_infill = [this](const server_http_req & req) { + auto res = create_response(); + // check model compatibility + std::string err; + if (llama_vocab_fim_pre(ctx_server.vocab) == LLAMA_TOKEN_NULL) { + err += "prefix token is missing. "; + } + if (llama_vocab_fim_suf(ctx_server.vocab) == LLAMA_TOKEN_NULL) { + err += "suffix token is missing. "; + } + if (llama_vocab_fim_mid(ctx_server.vocab) == LLAMA_TOKEN_NULL) { + err += "middle token is missing. "; + } + if (!err.empty()) { + res->error(format_error_response(string_format("Infill is not supported by this model: %s", err.c_str()), ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + // validate input + json data = json::parse(req.body); + if (data.contains("prompt") && !data.at("prompt").is_string()) { + // prompt is optional + res->error(format_error_response("\"prompt\" must be a string", ERROR_TYPE_INVALID_REQUEST)); + } + + if (!data.contains("input_prefix")) { + res->error(format_error_response("\"input_prefix\" is required", ERROR_TYPE_INVALID_REQUEST)); + } + + if (!data.contains("input_suffix")) { + res->error(format_error_response("\"input_suffix\" is required", ERROR_TYPE_INVALID_REQUEST)); + } + + if (data.contains("input_extra") && !data.at("input_extra").is_array()) { + // input_extra is optional + res->error(format_error_response("\"input_extra\" must be an array of {\"filename\": string, \"text\": string}", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + json input_extra = json_value(data, "input_extra", json::array()); + for (const auto & chunk : input_extra) { + // { "text": string, "filename": string } + if (!chunk.contains("text") || !chunk.at("text").is_string()) { + res->error(format_error_response("extra_context chunk must contain a \"text\" field with a string value", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + // filename is optional + if (chunk.contains("filename") && !chunk.at("filename").is_string()) { + res->error(format_error_response("extra_context chunk's \"filename\" field must be a string", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + } + data["input_extra"] = input_extra; // default to empty array if it's not exist + + std::string prompt = json_value(data, "prompt", std::string()); + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, false, true); + SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size()); + data["prompt"] = format_prompt_infill( + ctx_server.vocab, + data.at("input_prefix"), + data.at("input_suffix"), + data.at("input_extra"), + 
params.n_batch, + params.n_predict, + meta->slot_n_ctx, + params.spm_infill, + tokenized_prompts[0].get_text_tokens() // TODO: this could maybe be multimodal. + ); + + std::vector files; // dummy + return handle_completions_impl( + req, + SERVER_TASK_TYPE_INFILL, + data, + files, + TASK_RESPONSE_TYPE_NONE); // infill is not OAI compatible + }; + + this->post_completions = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; // dummy + const json body = json::parse(req.body); + return handle_completions_impl( + req, + SERVER_TASK_TYPE_COMPLETION, + body, + files, + TASK_RESPONSE_TYPE_NONE); + }; + + this->post_completions_oai = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; // dummy + const json body = json::parse(req.body); + return handle_completions_impl( + req, + SERVER_TASK_TYPE_COMPLETION, + body, + files, + TASK_RESPONSE_TYPE_OAI_CMPL); + }; + + this->post_chat_completions = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; + json body = json::parse(req.body); + json body_parsed = oaicompat_chat_params_parse( + body, + meta->chat_params, + files); + return handle_completions_impl( + req, + SERVER_TASK_TYPE_COMPLETION, + body_parsed, + files, + TASK_RESPONSE_TYPE_OAI_CHAT); + }; + + this->post_responses_oai = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; + json body = convert_responses_to_chatcmpl(json::parse(req.body)); + SRV_DBG("%s\n", "Request converted: OpenAI Responses -> OpenAI Chat Completions"); + SRV_DBG("converted request: %s\n", body.dump().c_str()); + json body_parsed = oaicompat_chat_params_parse( + body, + meta->chat_params, + files); + return handle_completions_impl( + req, + SERVER_TASK_TYPE_COMPLETION, + body_parsed, + files, + TASK_RESPONSE_TYPE_OAI_RESP); + }; + + this->post_anthropic_messages = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; + json body = convert_anthropic_to_oai(json::parse(req.body)); + SRV_DBG("%s\n", "Request converted: Anthropic -> OpenAI Chat Completions"); + SRV_DBG("converted request: %s\n", body.dump().c_str()); + json body_parsed = oaicompat_chat_params_parse( + body, + meta->chat_params, + files); + return handle_completions_impl( + req, + SERVER_TASK_TYPE_COMPLETION, + body_parsed, + files, + TASK_RESPONSE_TYPE_ANTHROPIC); + }; + + this->post_anthropic_count_tokens = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; + json body = convert_anthropic_to_oai(json::parse(req.body)); + SRV_DBG("%s\n", "Request converted: Anthropic -> OpenAI Chat Completions"); + SRV_DBG("converted request: %s\n", body.dump().c_str()); + json body_parsed = oaicompat_chat_params_parse( + body, + meta->chat_params, + files); + + json prompt = body_parsed.at("prompt"); + llama_tokens tokens = tokenize_mixed(ctx_server.vocab, prompt, true, true); + res->ok({{"input_tokens", static_cast(tokens.size())}}); + return res; + }; + + // same with handle_chat_completions, but without inference part + this->post_apply_template = [this](const server_http_req & req) { + auto res = create_response(); + std::vector files; // dummy, unused + json body = json::parse(req.body); + json data = oaicompat_chat_params_parse( + body, + meta->chat_params, + files); + res->ok({{ "prompt", std::move(data.at("prompt")) }}); + return res; + }; + + this->get_models = [this](const server_http_req &) { + auto res = create_response(true); + + // 
this endpoint can be accessed during sleeping + // the next LOC is to avoid someone accidentally use ctx_server + bool ctx_server; // do NOT delete this line + GGML_UNUSED(ctx_server); + + json models = { + {"models", { + { + {"name", meta->model_name}, + {"model", meta->model_name}, + {"modified_at", ""}, + {"size", ""}, + {"digest", ""}, // dummy value, llama.cpp does not support managing model file's hash + {"type", "model"}, + {"description", ""}, + {"tags", {""}}, + {"capabilities", meta->has_mtmd ? json({"completion","multimodal"}) : json({"completion"})}, + {"parameters", ""}, + {"details", { + {"parent_model", ""}, + {"format", "gguf"}, + {"family", ""}, + {"families", {""}}, + {"parameter_size", ""}, + {"quantization_level", ""} + }} + } + }}, + {"object", "list"}, + {"data", { + { + {"id", meta->model_name}, + {"object", "model"}, + {"created", std::time(0)}, + {"owned_by", "llamacpp"}, + {"meta", { + {"vocab_type", meta->model_vocab_type}, + {"n_vocab", meta->model_vocab_n_tokens}, + {"n_ctx_train", meta->model_n_ctx_train}, + {"n_embd", meta->model_n_embd_inp}, + {"n_params", meta->model_n_params}, + {"size", meta->model_size}, + }}, + }, + }} + }; + + res->ok(models); + return res; + }; + + this->post_tokenize = [this](const server_http_req & req) { + auto res = create_response(); + const json body = json::parse(req.body); + json tokens_response = json::array(); + if (body.count("content") != 0) { + const bool add_special = json_value(body, "add_special", false); + const bool parse_special = json_value(body, "parse_special", true); + const bool with_pieces = json_value(body, "with_pieces", false); + + llama_tokens tokens = tokenize_mixed(ctx_server.vocab, body.at("content"), add_special, parse_special); + + if (with_pieces) { + for (const auto& token : tokens) { + std::string piece = common_token_to_piece(ctx_server.vocab, token); + json piece_json; + + // Check if the piece is valid UTF-8 + if (is_valid_utf8(piece)) { + piece_json = piece; + } else { + // If not valid UTF-8, store as array of byte values + piece_json = json::array(); + for (unsigned char c : piece) { + piece_json.push_back(static_cast(c)); + } + } + + tokens_response.push_back({ + {"id", token}, + {"piece", piece_json} + }); + } + } else { + tokens_response = tokens; + } + } + + res->ok(json{{"tokens", std::move(tokens_response)}}); + return res; + }; + + this->post_detokenize = [this](const server_http_req & req) { + auto res = create_response(); + const json body = json::parse(req.body); + + std::string content; + if (body.count("tokens") != 0) { + const llama_tokens tokens = body.at("tokens"); + content = tokens_to_str(ctx_server.vocab, tokens); + } + + res->ok(json{{"content", std::move(content)}}); + return res; + }; + + this->post_embeddings = [this](const server_http_req & req) { + return handle_embeddings_impl(req, TASK_RESPONSE_TYPE_NONE); + }; + + this->post_embeddings_oai = [this](const server_http_req & req) { + return handle_embeddings_impl(req, TASK_RESPONSE_TYPE_OAI_EMBD); + }; + + this->post_rerank = [this](const server_http_req & req) { + auto res = create_response(); + if (!params.embedding || params.pooling_type != LLAMA_POOLING_TYPE_RANK) { + res->error(format_error_response("This server does not support reranking. 
Start it with `--reranking`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + const json body = json::parse(req.body); + + // if true, use TEI API format, otherwise use Jina API format + // Jina: https://jina.ai/reranker/ + // TEI: https://huggingface.github.io/text-embeddings-inference/#/Text%20Embeddings%20Inference/rerank + bool is_tei_format = body.contains("texts"); + + json query; + if (body.count("query") == 1) { + query = body.at("query"); + if (!query.is_string()) { + res->error(format_error_response("\"query\" must be a string", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + } else { + res->error(format_error_response("\"query\" must be provided", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + std::vector documents = json_value(body, "documents", + json_value(body, "texts", std::vector())); + if (documents.empty()) { + res->error(format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + int top_n = json_value(body, "top_n", (int)documents.size()); + + // create and queue the task + json responses = json::array(); + auto & rd = res->rd; + { + std::vector tasks; + tasks.reserve(documents.size()); + for (size_t i = 0; i < documents.size(); i++) { + auto tmp = format_prompt_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, query, documents[i]); + server_task task = server_task(SERVER_TASK_TYPE_RERANK); + task.id = rd.get_new_id(); + task.tokens = std::move(tmp); + tasks.push_back(std::move(task)); + } + rd.post_tasks(std::move(tasks)); + } + + // wait for the results + auto all_results = rd.wait_for_all(req.should_stop); + + // collect results + if (all_results.is_terminated) { + return res; // connection is closed + } else if (all_results.error) { + res->error(all_results.error->to_json()); + return res; + } else { + for (auto & res : all_results.results) { + GGML_ASSERT(dynamic_cast(res.get()) != nullptr); + responses.push_back(res->to_json()); + } + } + + // write JSON response + json root = format_response_rerank( + body, + meta->model_name, + responses, + is_tei_format, + documents, + top_n); + + res->ok(root); + return res; + }; + + this->get_lora_adapters = [this](const server_http_req & req) { + auto res = create_response(); + + auto & rd = res->rd; + { + server_task task(SERVER_TASK_TYPE_GET_LORA); + task.id = rd.get_new_id(); + rd.post_task(std::move(task)); + } + + // get the result + auto result = rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + GGML_ASSERT(dynamic_cast(result.get()) != nullptr); + res->ok(result->to_json()); + return res; + }; + + this->post_lora_adapters = [this](const server_http_req & req) { + auto res = create_response(); + const json body = json::parse(req.body); + if (!body.is_array()) { + res->error(format_error_response("Request body must be an array", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + auto & rd = res->rd; + { + server_task task(SERVER_TASK_TYPE_SET_LORA); + task.id = rd.get_new_id(); + task.set_lora = parse_lora_request(body); + rd.post_task(std::move(task)); + } + + // get the result + auto result = rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + GGML_ASSERT(dynamic_cast(result.get()) != nullptr); + res->ok(result->to_json()); + 
return res; + }; +} + +std::unique_ptr server_routes::handle_slots_save(const server_http_req & req, int id_slot) { + auto res = create_response(); + const json request_data = json::parse(req.body); + std::string filename = request_data.at("filename"); + if (!fs_validate_filename(filename)) { + res->error(format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + std::string filepath = params.slot_save_path + filename; + + auto & rd = res->rd; + { + server_task task(SERVER_TASK_TYPE_SLOT_SAVE); + task.id = rd.get_new_id(); + task.slot_action.id_slot = id_slot; + task.slot_action.filename = filename; + task.slot_action.filepath = filepath; + rd.post_task(std::move(task)); + } + + auto result = rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + res->ok(result->to_json()); + return res; +} + +std::unique_ptr server_routes::handle_slots_restore(const server_http_req & req, int id_slot) { + auto res = create_response(); + const json request_data = json::parse(req.body); + std::string filename = request_data.at("filename"); + if (!fs_validate_filename(filename)) { + res->error(format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + std::string filepath = params.slot_save_path + filename; + + auto & rd = res->rd; + { + server_task task(SERVER_TASK_TYPE_SLOT_RESTORE); + task.id = rd.get_new_id(); + task.slot_action.id_slot = id_slot; + task.slot_action.filename = filename; + task.slot_action.filepath = filepath; + rd.post_task(std::move(task)); + } + + auto result = rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + GGML_ASSERT(dynamic_cast(result.get()) != nullptr); + res->ok(result->to_json()); + return res; +} + +std::unique_ptr server_routes::handle_slots_erase(const server_http_req & req, int id_slot) { + auto res = create_response(); + auto & rd = res->rd; + { + server_task task(SERVER_TASK_TYPE_SLOT_ERASE); + task.id = rd.get_new_id(); + task.slot_action.id_slot = id_slot; + rd.post_task(std::move(task)); + } + + auto result = rd.next(req.should_stop); + if (!result) { + // connection was closed + GGML_ASSERT(req.should_stop()); + return res; + } + + if (result->is_error()) { + res->error(result->to_json()); + return res; + } + + GGML_ASSERT(dynamic_cast(result.get()) != nullptr); + res->ok(result->to_json()); + return res; +} + +std::unique_ptr server_routes::handle_embeddings_impl(const server_http_req & req, task_response_type res_type) { + auto res = create_response(); + if (!params.embedding) { + res->error(format_error_response("This server does not support embeddings. Start it with `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); + return res; + } + + if (res_type != TASK_RESPONSE_TYPE_NONE && meta->pooling_type == LLAMA_POOLING_TYPE_NONE) { + res->error(format_error_response("Pooling type 'none' is not OAI compatible. 
Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + const json body = json::parse(req.body); + + // for the shape of input/content, see tokenize_input_prompts() + json prompt; + if (body.count("input") != 0) { + prompt = body.at("input"); + } else if (body.contains("content")) { + res_type = TASK_RESPONSE_TYPE_NONE; // "content" field is not OAI compatible + prompt = body.at("content"); + } else { + res->error(format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + + bool use_base64 = false; + if (body.count("encoding_format") != 0) { + const std::string & format = body.at("encoding_format"); + if (format == "base64") { + use_base64 = true; + } else if (format != "float") { + res->error(format_error_response("The format to return the embeddings in. Can be either float or base64", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + } + + auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); + for (const auto & tokens : tokenized_prompts) { + // this check is necessary for models that do not add BOS token to the input + if (tokens.empty()) { + res->error(format_error_response("Input content cannot be empty", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + } + + int embd_normalize = 2; // default to Euclidean/L2 norm + if (body.count("embd_normalize") != 0) { + embd_normalize = body.at("embd_normalize"); + if (meta->pooling_type == LLAMA_POOLING_TYPE_NONE) { + SRV_DBG("embd_normalize is not supported by pooling type %d, ignoring it\n", meta->pooling_type); + } + } + + // create and queue the task + json responses = json::array(); + auto & rd = res->rd; + { + std::vector tasks; + for (size_t i = 0; i < tokenized_prompts.size(); i++) { + server_task task = server_task(SERVER_TASK_TYPE_EMBEDDING); + + task.id = rd.get_new_id(); + task.tokens = std::move(tokenized_prompts[i]); + + // OAI-compat + task.params.res_type = res_type; + task.params.embd_normalize = embd_normalize; + + tasks.push_back(std::move(task)); + } + rd.post_tasks(std::move(tasks)); + } + + // wait for the results + auto all_results = rd.wait_for_all(req.should_stop); + + // collect results + if (all_results.is_terminated) { + return res; // connection is closed + } else if (all_results.error) { + res->error(all_results.error->to_json()); + return res; + } else { + for (auto & res : all_results.results) { + GGML_ASSERT(dynamic_cast(res.get()) != nullptr); + responses.push_back(res->to_json()); + } + } + + // write JSON response + json root = res_type == TASK_RESPONSE_TYPE_OAI_EMBD + ? 
format_embeddings_response_oaicompat(body, meta->model_name, responses, use_base64) + : json(responses); + res->ok(root); + return res; +} diff --git a/llama.cpp/tools/server/server-context.h b/llama.cpp/tools/server/server-context.h new file mode 100644 index 0000000..c0b5d37 --- /dev/null +++ b/llama.cpp/tools/server/server-context.h @@ -0,0 +1,131 @@ +#include "server-http.h" +#include "server-task.h" +#include "server-queue.h" + +#include + +#include +#include + +struct server_context_impl; // private implementation + +struct server_context_meta { + std::string build_info; + std::string model_name; + std::string model_path; + bool has_mtmd; + bool has_inp_image; + bool has_inp_audio; + json json_webui_settings; + int slot_n_ctx; + enum llama_pooling_type pooling_type; + + // chat params + server_chat_params & chat_params; + std::map chat_template_caps; + + // tokens + std::string bos_token_str; + std::string eos_token_str; + llama_token fim_pre_token; + llama_token fim_sub_token; + llama_token fim_mid_token; + + // model meta + enum llama_vocab_type model_vocab_type; + int32_t model_vocab_n_tokens; + int32_t model_n_ctx_train; + int32_t model_n_embd_inp; + uint64_t model_n_params; + uint64_t model_size; +}; + +struct server_context { + std::unique_ptr impl; + + server_context(); + ~server_context(); + + // load the model and initialize llama_context + // returns true on success + bool load_model(const common_params & params); + + // this function will block main thread until termination + void start_loop(); + + // terminate main loop (will unblock start_loop) + void terminate(); + + // get the underlaying llama_context, can return nullptr if sleeping + // not thread-safe, should only be used from the main thread + llama_context * get_llama_context() const; + + // get a new response reader, used by CLI application + server_response_reader get_response_reader(); + + // get server metadata (read-only), can only be called after load_model() + // not thread-safe, should only be used from the main thread + server_context_meta get_meta() const; +}; + + +// forward declarations +struct server_res_generator; + +struct server_routes { + server_routes(const common_params & params, server_context & ctx_server); + + void init_routes(); + + // note: this is not thread-safe and can only when ctx_http.is_ready is false + void update_meta(const server_context & ctx_server) { + this->meta = std::make_unique(ctx_server.get_meta()); + } + + // handlers using lambda function, so that they can capture `this` without `std::bind` + // they won't be called until ctx_http.is_ready is set to true + server_http_context::handler_t get_health; + server_http_context::handler_t get_metrics; + server_http_context::handler_t get_slots; + server_http_context::handler_t post_slots; + server_http_context::handler_t get_props; + server_http_context::handler_t post_props; + server_http_context::handler_t get_api_show; + server_http_context::handler_t post_infill; + server_http_context::handler_t post_completions; + server_http_context::handler_t post_completions_oai; + server_http_context::handler_t post_chat_completions; + server_http_context::handler_t post_responses_oai; + server_http_context::handler_t post_anthropic_messages; + server_http_context::handler_t post_anthropic_count_tokens; + server_http_context::handler_t post_apply_template; + server_http_context::handler_t get_models; + server_http_context::handler_t post_tokenize; + server_http_context::handler_t post_detokenize; + server_http_context::handler_t 
post_embeddings; + server_http_context::handler_t post_embeddings_oai; + server_http_context::handler_t post_rerank; + server_http_context::handler_t get_lora_adapters; + server_http_context::handler_t post_lora_adapters; +private: + std::unique_ptr handle_completions_impl( + const server_http_req & req, + server_task_type type, + const json & data, + const std::vector & files, + task_response_type res_type); + std::unique_ptr handle_slots_save(const server_http_req & req, int id_slot); + std::unique_ptr handle_slots_restore(const server_http_req & req, int id_slot); + std::unique_ptr handle_slots_erase(const server_http_req &, int id_slot); + std::unique_ptr handle_embeddings_impl(const server_http_req & req, task_response_type res_type); + + // using unique_ptr to allow late initialization of const + std::unique_ptr meta; + + const common_params & params; + const server_context_impl & ctx_server; + + server_queue & queue_tasks; + server_response & queue_results; + std::unique_ptr create_response(bool bypass_sleep = false); +}; diff --git a/llama.cpp/tools/server/server-http.cpp b/llama.cpp/tools/server/server-http.cpp new file mode 100644 index 0000000..00897ee --- /dev/null +++ b/llama.cpp/tools/server/server-http.cpp @@ -0,0 +1,406 @@ +#include "common.h" +#include "server-http.h" +#include "server-common.h" + +#include + +#include +#include +#include + +// auto generated files (see README.md for details) +#include "index.html.gz.hpp" +#include "loading.html.hpp" + +// +// HTTP implementation using cpp-httplib +// + +class server_http_context::Impl { +public: + std::unique_ptr srv; +}; + +server_http_context::server_http_context() + : pimpl(std::make_unique()) +{} + +server_http_context::~server_http_context() = default; + +static void log_server_request(const httplib::Request & req, const httplib::Response & res) { + // skip logging requests that are regularly sent, to avoid log spam + if (req.path == "/health" + || req.path == "/v1/health" + || req.path == "/models" + || req.path == "/v1/models" + || req.path == "/props" + || req.path == "/metrics" + ) { + return; + } + + // reminder: this function is not covered by httplib's exception handler; if someone does more complicated stuff, think about wrapping it in try-catch + + SRV_INF("done request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status); + + SRV_DBG("request: %s\n", req.body.c_str()); + SRV_DBG("response: %s\n", res.body.c_str()); +} + +bool server_http_context::init(const common_params & params) { + path_prefix = params.api_prefix; + port = params.port; + hostname = params.hostname; + + auto & srv = pimpl->srv; + +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT + if (params.ssl_file_key != "" && params.ssl_file_cert != "") { + LOG_INF("Running with SSL: key = %s, cert = %s\n", params.ssl_file_key.c_str(), params.ssl_file_cert.c_str()); + srv.reset( + new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str()) + ); + } else { + LOG_INF("Running without SSL\n"); + srv.reset(new httplib::Server()); + } +#else + if (params.ssl_file_key != "" && params.ssl_file_cert != "") { + LOG_ERR("Server is built without SSL support\n"); + return false; + } + srv.reset(new httplib::Server()); +#endif + + srv->set_default_headers({{"Server", "llama.cpp"}}); + srv->set_logger(log_server_request); + srv->set_exception_handler([](const httplib::Request &, httplib::Response & res, const std::exception_ptr & ep) { + // this is fail-safe; exceptions should already handled by `ex_wrapper` + + 
std::string message; + try { + std::rethrow_exception(ep); + } catch (const std::exception & e) { + message = e.what(); + } catch (...) { + message = "Unknown Exception"; + } + + res.status = 500; + res.set_content(message, "text/plain"); + LOG_ERR("got exception: %s\n", message.c_str()); + }); + + srv->set_error_handler([](const httplib::Request &, httplib::Response & res) { + if (res.status == 404) { + res.set_content( + safe_json_to_str(json { + {"error", { + {"message", "File Not Found"}, + {"type", "not_found_error"}, + {"code", 404} + }} + }), + "application/json; charset=utf-8" + ); + } + // for other error codes, we skip processing here because it's already done by res->error() + }); + + // set timeouts and change hostname and port + srv->set_read_timeout (params.timeout_read); + srv->set_write_timeout(params.timeout_write); + + if (params.api_keys.size() == 1) { + auto key = params.api_keys[0]; + std::string substr = key.substr(std::max((int)(key.length() - 4), 0)); + LOG_INF("%s: api_keys: ****%s\n", __func__, substr.c_str()); + } else if (params.api_keys.size() > 1) { + LOG_INF("%s: api_keys: %zu keys loaded\n", __func__, params.api_keys.size()); + } + + // + // Middlewares + // + + auto middleware_validate_api_key = [api_keys = params.api_keys](const httplib::Request & req, httplib::Response & res) { + static const std::unordered_set public_endpoints = { + "/health", + "/v1/health", + "/models", + "/v1/models", + "/api/tags" + }; + + // If API key is not set, skip validation + if (api_keys.empty()) { + return true; + } + + // If path is public or is static file, skip validation + if (public_endpoints.find(req.path) != public_endpoints.end() || req.path == "/") { + return true; + } + + // Check for API key in the Authorization header + std::string req_api_key = req.get_header_value("Authorization"); + if (req_api_key.empty()) { + // retry with anthropic header + req_api_key = req.get_header_value("X-Api-Key"); + } + + // remove the "Bearer " prefix if needed + std::string prefix = "Bearer "; + if (req_api_key.substr(0, prefix.size()) == prefix) { + req_api_key = req_api_key.substr(prefix.size()); + } + + // validate the API key + if (std::find(api_keys.begin(), api_keys.end(), req_api_key) != api_keys.end()) { + return true; // API key is valid + } + + // API key is invalid or not provided + res.status = 401; + res.set_content( + safe_json_to_str(json { + {"error", { + {"message", "Invalid API Key"}, + {"type", "authentication_error"}, + {"code", 401} + }} + }), + "application/json; charset=utf-8" + ); + + LOG_WRN("Unauthorized: Invalid API Key\n"); + + return false; + }; + + auto middleware_server_state = [this](const httplib::Request & req, httplib::Response & res) { + bool ready = is_ready.load(); + if (!ready) { + auto tmp = string_split(req.path, '.'); + if (req.path == "/" || tmp.back() == "html") { + res.status = 503; + res.set_content(reinterpret_cast(loading_html), loading_html_len, "text/html; charset=utf-8"); + } else { + // no endpoints is allowed to be accessed when the server is not ready + // this is to prevent any data races or inconsistent states + res.status = 503; + res.set_content( + safe_json_to_str(json { + {"error", { + {"message", "Loading model"}, + {"type", "unavailable_error"}, + {"code", 503} + }} + }), + "application/json; charset=utf-8" + ); + } + return false; + } + return true; + }; + + // register server middlewares + srv->set_pre_routing_handler([middleware_validate_api_key, middleware_server_state](const httplib::Request & req, 
httplib::Response & res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); + // If this is OPTIONS request, skip validation because browsers don't include Authorization header + if (req.method == "OPTIONS") { + res.set_header("Access-Control-Allow-Credentials", "true"); + res.set_header("Access-Control-Allow-Methods", "GET, POST"); + res.set_header("Access-Control-Allow-Headers", "*"); + res.set_content("", "text/html"); // blank response, no data + return httplib::Server::HandlerResponse::Handled; // skip further processing + } + if (!middleware_server_state(req, res)) { + return httplib::Server::HandlerResponse::Handled; + } + if (!middleware_validate_api_key(req, res)) { + return httplib::Server::HandlerResponse::Handled; + } + return httplib::Server::HandlerResponse::Unhandled; + }); + + int n_threads_http = params.n_threads_http; + if (n_threads_http < 1) { + // +2 threads for monitoring endpoints + n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1); + } + LOG_INF("%s: using %d threads for HTTP server\n", __func__, n_threads_http); + srv->new_task_queue = [n_threads_http] { return new httplib::ThreadPool(n_threads_http); }; + + // + // Web UI setup + // + + if (!params.webui) { + LOG_INF("Web UI is disabled\n"); + } else { + // register static assets routes + if (!params.public_path.empty()) { + // Set the base directory for serving static files + bool is_found = srv->set_mount_point(params.api_prefix + "/", params.public_path); + if (!is_found) { + LOG_ERR("%s: static assets path not found: %s\n", __func__, params.public_path.c_str()); + return 1; + } + } else { + // using embedded static index.html + srv->Get(params.api_prefix + "/", [](const httplib::Request & req, httplib::Response & res) { + if (req.get_header_value("Accept-Encoding").find("gzip") == std::string::npos) { + res.set_content("Error: gzip is not supported by this browser", "text/plain"); + } else { + res.set_header("Content-Encoding", "gzip"); + // COEP and COOP headers, required by pyodide (python interpreter) + res.set_header("Cross-Origin-Embedder-Policy", "require-corp"); + res.set_header("Cross-Origin-Opener-Policy", "same-origin"); + res.set_content(reinterpret_cast(index_html_gz), index_html_gz_len, "text/html; charset=utf-8"); + } + return false; + }); + } + } + return true; +} + +bool server_http_context::start() { + // Bind and listen + + auto & srv = pimpl->srv; + bool was_bound = false; + bool is_sock = false; + if (string_ends_with(std::string(hostname), ".sock")) { + is_sock = true; + LOG_INF("%s: setting address family to AF_UNIX\n", __func__); + srv->set_address_family(AF_UNIX); + // bind_to_port requires a second arg, any value other than 0 should + // simply get ignored + was_bound = srv->bind_to_port(hostname, 8080); + } else { + LOG_INF("%s: binding port with default address family\n", __func__); + // bind HTTP listen port + if (port == 0) { + int bound_port = srv->bind_to_any_port(hostname); + was_bound = (bound_port >= 0); + if (was_bound) { + port = bound_port; + } + } else { + was_bound = srv->bind_to_port(hostname, port); + } + } + + if (!was_bound) { + LOG_ERR("%s: couldn't bind HTTP server socket, hostname: %s, port: %d\n", __func__, hostname.c_str(), port); + return false; + } + + // run the HTTP server in a thread + thread = std::thread([this]() { pimpl->srv->listen_after_bind(); }); + srv->wait_until_ready(); + + listening_address = is_sock ? 
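// --- editor's sketch (illustrative only, not part of this patch) ---
// Headers accepted by the API-key middleware above: the standard Authorization
// header (with or without the "Bearer " prefix) or the Anthropic-style
// X-Api-Key header, checked against the configured --api-key values.
// /health, /models, /api/tags and "/" bypass the check. The key value below
// is hypothetical.
std::map<std::string, std::string> example_auth_headers = {
    {"Authorization", "Bearer sk-example-key"},
    // or, equivalently:
    // {"X-Api-Key", "sk-example-key"},
};
// --------------------------------------------------------------------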
string_format("unix://%s", hostname.c_str()) + : string_format("http://%s:%d", hostname.c_str(), port); + return true; +} + +void server_http_context::stop() const { + if (pimpl->srv) { + pimpl->srv->stop(); + } +} + +static void set_headers(httplib::Response & res, const std::map & headers) { + for (const auto & [key, value] : headers) { + res.set_header(key, value); + } +} + +static std::map get_params(const httplib::Request & req) { + std::map params; + for (const auto & [key, value] : req.params) { + params[key] = value; + } + for (const auto & [key, value] : req.path_params) { + params[key] = value; + } + return params; +} + +static std::map get_headers(const httplib::Request & req) { + std::map headers; + for (const auto & [key, value] : req.headers) { + headers[key] = value; + } + return headers; +} + +// using unique_ptr for request to allow safe capturing in lambdas +using server_http_req_ptr = std::unique_ptr; + +static void process_handler_response(server_http_req_ptr && request, server_http_res_ptr & response, httplib::Response & res) { + if (response->is_stream()) { + res.status = response->status; + set_headers(res, response->headers); + std::string content_type = response->content_type; + // convert to shared_ptr as both chunked_content_provider() and on_complete() need to use it + std::shared_ptr q_ptr = std::move(request); + std::shared_ptr r_ptr = std::move(response); + const auto chunked_content_provider = [response = r_ptr](size_t, httplib::DataSink & sink) -> bool { + std::string chunk; + bool has_next = response->next(chunk); + if (!chunk.empty()) { + // TODO: maybe handle sink.write unsuccessful? for now, we rely on is_connection_closed() + sink.write(chunk.data(), chunk.size()); + SRV_DBG("http: streamed chunk: %s\n", chunk.c_str()); + } + if (!has_next) { + sink.done(); + SRV_DBG("%s", "http: stream ended\n"); + } + return has_next; + }; + const auto on_complete = [request = q_ptr, response = r_ptr](bool) mutable { + response.reset(); // trigger the destruction of the response object + request.reset(); // trigger the destruction of the request object + }; + res.set_chunked_content_provider(content_type, chunked_content_provider, on_complete); + } else { + res.status = response->status; + set_headers(res, response->headers); + res.set_content(response->data, response->content_type); + } +} + +void server_http_context::get(const std::string & path, const server_http_context::handler_t & handler) const { + pimpl->srv->Get(path_prefix + path, [handler](const httplib::Request & req, httplib::Response & res) { + server_http_req_ptr request = std::make_unique(server_http_req{ + get_params(req), + get_headers(req), + req.path, + req.body, + req.is_connection_closed + }); + server_http_res_ptr response = handler(*request); + process_handler_response(std::move(request), response, res); + }); +} + +void server_http_context::post(const std::string & path, const server_http_context::handler_t & handler) const { + pimpl->srv->Post(path_prefix + path, [handler](const httplib::Request & req, httplib::Response & res) { + server_http_req_ptr request = std::make_unique(server_http_req{ + get_params(req), + get_headers(req), + req.path, + req.body, + req.is_connection_closed + }); + server_http_res_ptr response = handler(*request); + process_handler_response(std::move(request), response, res); + }); +} + diff --git a/llama.cpp/tools/server/server-http.h b/llama.cpp/tools/server/server-http.h new file mode 100644 index 0000000..24c0b40 --- /dev/null +++ 
b/llama.cpp/tools/server/server-http.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include +#include + +struct common_params; + +// generator-like API for HTTP response generation +// this object response with one of the 2 modes: +// 1) normal response: `data` contains the full response body +// 2) streaming response: each call to next(output) generates the next chunk +// when next(output) returns false, no more data after the current chunk +// note: some chunks can be empty, in which case no data is sent for that chunk +struct server_http_res { + std::string content_type = "application/json; charset=utf-8"; + int status = 200; + std::string data; + std::map headers; + + // TODO: move this to a virtual function once we have proper polymorphism support + std::function next = nullptr; + bool is_stream() const { + return next != nullptr; + } + + virtual ~server_http_res() = default; +}; + +// unique pointer, used by set_chunked_content_provider +// httplib requires the stream provider to be stored in heap +using server_http_res_ptr = std::unique_ptr; + +struct server_http_req { + std::map params; // path_params + query_params + std::map headers; // reserved for future use + std::string path; // reserved for future use + std::string body; + const std::function & should_stop; + + std::string get_param(const std::string & key, const std::string & def = "") const { + auto it = params.find(key); + if (it != params.end()) { + return it->second; + } + return def; + } +}; + +struct server_http_context { + class Impl; + std::unique_ptr pimpl; + + std::thread thread; // server thread + std::atomic is_ready = false; + + std::string path_prefix; + std::string hostname; + int port; + + server_http_context(); + ~server_http_context(); + + bool init(const common_params & params); + bool start(); + void stop() const; + + // note: the handler should never throw exceptions + using handler_t = std::function; + + void get(const std::string & path, const handler_t & handler) const; + void post(const std::string & path, const handler_t & handler) const; + + // for debugging + std::string listening_address; +}; diff --git a/llama.cpp/tools/server/server-models.cpp b/llama.cpp/tools/server/server-models.cpp new file mode 100644 index 0000000..5765547 --- /dev/null +++ b/llama.cpp/tools/server/server-models.cpp @@ -0,0 +1,1092 @@ +#include "server-common.h" +#include "server-models.h" + +#include "preset.h" +#include "download.h" + +#include // TODO: remove this once we use HTTP client from download.h +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include +#else +#include +#include +#include +#include +extern char **environ; +#endif + +#if defined(__APPLE__) && defined(__MACH__) +// macOS: use _NSGetExecutablePath to get the executable path +#include +#include +#endif + +#define DEFAULT_STOP_TIMEOUT 10 // seconds + +#define CMD_ROUTER_TO_CHILD_EXIT "cmd_router_to_child:exit" +#define CMD_CHILD_TO_ROUTER_READY "cmd_child_to_router:ready" + +// address for child process, this is needed because router may run on 0.0.0.0 +// ref: https://github.com/ggml-org/llama.cpp/issues/17862 +#define CHILD_ADDR "127.0.0.1" + +static std::filesystem::path get_server_exec_path() { +#if defined(_WIN32) + wchar_t buf[32768] = { 0 }; // Large buffer to handle long paths + DWORD len = GetModuleFileNameW(nullptr, buf, _countof(buf)); + if (len == 0 || len >= _countof(buf)) { + throw 
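// --- editor's sketch (illustrative only, not part of this patch) ---
// How a handler can use the generator-like API declared in server-http.h above.
// Filling `data` produces a normal response; assigning `next` turns the response
// into a stream, where each call emits one chunk and returning false means no
// more data after the current chunk.
server_http_context::handler_t example_stream_handler = [](const server_http_req &) {
    auto res = std::make_unique<server_http_res>();
    res->status       = 200;
    res->content_type = "text/plain";
    auto remaining = std::make_shared<int>(2);           // emit two chunks in total
    res->next = [remaining](std::string & out) -> bool {
        out = "chunk\n";
        return --(*remaining) > 0;                        // false after the last chunk
    };
    return res;
};
// --------------------------------------------------------------------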
std::runtime_error("GetModuleFileNameW failed or path too long"); + } + return std::filesystem::path(buf); +#elif defined(__APPLE__) && defined(__MACH__) + char small_path[PATH_MAX]; + uint32_t size = sizeof(small_path); + + if (_NSGetExecutablePath(small_path, &size) == 0) { + // resolve any symlinks to get absolute path + try { + return std::filesystem::canonical(std::filesystem::path(small_path)); + } catch (...) { + return std::filesystem::path(small_path); + } + } else { + // buffer was too small, allocate required size and call again + std::vector buf(size); + if (_NSGetExecutablePath(buf.data(), &size) == 0) { + try { + return std::filesystem::canonical(std::filesystem::path(buf.data())); + } catch (...) { + return std::filesystem::path(buf.data()); + } + } + throw std::runtime_error("_NSGetExecutablePath failed after buffer resize"); + } +#else + char path[FILENAME_MAX]; + ssize_t count = readlink("/proc/self/exe", path, FILENAME_MAX); + if (count <= 0) { + throw std::runtime_error("failed to resolve /proc/self/exe"); + } + return std::filesystem::path(std::string(path, count)); +#endif +} + +static void unset_reserved_args(common_preset & preset, bool unset_model_args) { + preset.unset_option("LLAMA_ARG_SSL_KEY_FILE"); + preset.unset_option("LLAMA_ARG_SSL_CERT_FILE"); + preset.unset_option("LLAMA_API_KEY"); + preset.unset_option("LLAMA_ARG_MODELS_DIR"); + preset.unset_option("LLAMA_ARG_MODELS_MAX"); + preset.unset_option("LLAMA_ARG_MODELS_PRESET"); + preset.unset_option("LLAMA_ARG_MODELS_AUTOLOAD"); + if (unset_model_args) { + preset.unset_option("LLAMA_ARG_MODEL"); + preset.unset_option("LLAMA_ARG_MMPROJ"); + preset.unset_option("LLAMA_ARG_HF_REPO"); + } +} + +#ifdef _WIN32 +static std::string wide_to_utf8(const wchar_t * ws) { + if (!ws || !*ws) { + return {}; + } + + const int len = static_cast(std::wcslen(ws)); + const int bytes = WideCharToMultiByte(CP_UTF8, 0, ws, len, nullptr, 0, nullptr, nullptr); + if (bytes == 0) { + return {}; + } + + std::string utf8(bytes, '\0'); + WideCharToMultiByte(CP_UTF8, 0, ws, len, utf8.data(), bytes, nullptr, nullptr); + + return utf8; +} +#endif + +static std::vector get_environment() { + std::vector env; + +#ifdef _WIN32 + LPWCH env_block = GetEnvironmentStringsW(); + if (!env_block) { + return env; + } + for (LPWCH e = env_block; *e; e += wcslen(e) + 1) { + env.emplace_back(wide_to_utf8(e)); + } + FreeEnvironmentStringsW(env_block); +#else + if (environ == nullptr) { + return env; + } + for (char ** e = environ; *e != nullptr; e++) { + env.emplace_back(*e); + } +#endif + + return env; +} + +void server_model_meta::update_args(common_preset_context & ctx_preset, std::string bin_path) { + // update params + unset_reserved_args(preset, false); + preset.set_option(ctx_preset, "LLAMA_ARG_HOST", CHILD_ADDR); + preset.set_option(ctx_preset, "LLAMA_ARG_PORT", std::to_string(port)); + preset.set_option(ctx_preset, "LLAMA_ARG_ALIAS", name); + // TODO: maybe validate preset before rendering ? 
+ // render args + args = preset.to_args(bin_path); +} + +// +// server_models +// + +server_models::server_models( + const common_params & params, + int argc, + char ** argv) + : ctx_preset(LLAMA_EXAMPLE_SERVER), + base_params(params), + base_env(get_environment()), + base_preset(ctx_preset.load_from_args(argc, argv)) { + // clean up base preset + unset_reserved_args(base_preset, true); + // set binary path + try { + bin_path = get_server_exec_path().string(); + } catch (const std::exception & e) { + bin_path = argv[0]; + LOG_WRN("failed to get server executable path: %s\n", e.what()); + LOG_WRN("using original argv[0] as fallback: %s\n", argv[0]); + } + load_models(); +} + +void server_models::add_model(server_model_meta && meta) { + if (mapping.find(meta.name) != mapping.end()) { + throw std::runtime_error(string_format("model '%s' appears multiple times", meta.name.c_str())); + } + meta.update_args(ctx_preset, bin_path); // render args + std::string name = meta.name; + mapping[name] = instance_t{ + /* subproc */ std::make_shared(), + /* th */ std::thread(), + /* meta */ std::move(meta) + }; +} + +// TODO: allow refreshing cached model list +void server_models::load_models() { + // loading models from 3 sources: + // 1. cached models + common_presets cached_models = ctx_preset.load_from_cache(); + SRV_INF("Loaded %zu cached model presets\n", cached_models.size()); + // 2. local models from --models-dir + common_presets local_models; + if (!base_params.models_dir.empty()) { + local_models = ctx_preset.load_from_models_dir(base_params.models_dir); + SRV_INF("Loaded %zu local model presets from %s\n", local_models.size(), base_params.models_dir.c_str()); + } + // 3. custom-path models from presets + common_preset global = {}; + common_presets custom_presets = {}; + if (!base_params.models_preset.empty()) { + custom_presets = ctx_preset.load_from_ini(base_params.models_preset, global); + SRV_INF("Loaded %zu custom model presets from %s\n", custom_presets.size(), base_params.models_preset.c_str()); + } + + // cascade, apply global preset first + cached_models = ctx_preset.cascade(global, cached_models); + local_models = ctx_preset.cascade(global, local_models); + custom_presets = ctx_preset.cascade(global, custom_presets); + + // note: if a model exists in both cached and local, local takes precedence + common_presets final_presets; + for (const auto & [name, preset] : cached_models) { + final_presets[name] = preset; + } + for (const auto & [name, preset] : local_models) { + final_presets[name] = preset; + } + + // process custom presets from INI + for (const auto & [name, custom] : custom_presets) { + if (final_presets.find(name) != final_presets.end()) { + // apply custom config if exists + common_preset & target = final_presets[name]; + target.merge(custom); + } else { + // otherwise add directly + final_presets[name] = custom; + } + } + + // server base preset from CLI args take highest precedence + for (auto & [name, preset] : final_presets) { + preset.merge(base_preset); + } + + // convert presets to server_model_meta and add to mapping + for (const auto & preset : final_presets) { + server_model_meta meta{ + /* preset */ preset.second, + /* name */ preset.first, + /* port */ 0, + /* status */ SERVER_MODEL_STATUS_UNLOADED, + /* last_used */ 0, + /* args */ std::vector(), + /* exit_code */ 0, + /* stop_timeout */ DEFAULT_STOP_TIMEOUT, + }; + add_model(std::move(meta)); + } + + // log available models + { + std::unordered_set custom_names; + for (const auto & [name, preset] : 
custom_presets) { + custom_names.insert(name); + } + SRV_INF("Available models (%zu) (*: custom preset)\n", mapping.size()); + for (const auto & [name, inst] : mapping) { + bool has_custom = custom_names.find(name) != custom_names.end(); + SRV_INF(" %c %s\n", has_custom ? '*' : ' ', name.c_str()); + } + } + + // handle custom stop-timeout option + for (auto & [name, inst] : mapping) { + std::string val; + if (inst.meta.preset.get_option(COMMON_ARG_PRESET_STOP_TIMEOUT, val)) { + try { + inst.meta.stop_timeout = std::stoi(val); + } catch (...) { + SRV_WRN("invalid stop-timeout value '%s' for model '%s', using default %d seconds\n", + val.c_str(), name.c_str(), DEFAULT_STOP_TIMEOUT); + inst.meta.stop_timeout = DEFAULT_STOP_TIMEOUT; + } + } + } + + // load any autoload models + std::vector models_to_load; + for (const auto & [name, inst] : mapping) { + std::string val; + if (inst.meta.preset.get_option(COMMON_ARG_PRESET_LOAD_ON_STARTUP, val)) { + models_to_load.push_back(name); + } + } + if ((int)models_to_load.size() > base_params.models_max) { + throw std::runtime_error(string_format( + "number of models to load on startup (%zu) exceeds models_max (%d)", + models_to_load.size(), + base_params.models_max + )); + } + for (const auto & name : models_to_load) { + SRV_INF("(startup) loading model %s\n", name.c_str()); + load(name); + } +} + +void server_models::update_meta(const std::string & name, const server_model_meta & meta) { + std::lock_guard lk(mutex); + auto it = mapping.find(name); + if (it != mapping.end()) { + it->second.meta = meta; + } + cv.notify_all(); // notify wait_until_loaded +} + +bool server_models::has_model(const std::string & name) { + std::lock_guard lk(mutex); + return mapping.find(name) != mapping.end(); +} + +std::optional server_models::get_meta(const std::string & name) { + std::lock_guard lk(mutex); + auto it = mapping.find(name); + if (it != mapping.end()) { + return it->second.meta; + } + return std::nullopt; +} + +static int get_free_port() { +#ifdef _WIN32 + WSADATA wsaData; + if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) { + return -1; + } + typedef SOCKET native_socket_t; +#define INVALID_SOCKET_VAL INVALID_SOCKET +#define CLOSE_SOCKET(s) closesocket(s) +#else + typedef int native_socket_t; +#define INVALID_SOCKET_VAL -1 +#define CLOSE_SOCKET(s) close(s) +#endif + + native_socket_t sock = socket(AF_INET, SOCK_STREAM, 0); + if (sock == INVALID_SOCKET_VAL) { +#ifdef _WIN32 + WSACleanup(); +#endif + return -1; + } + + struct sockaddr_in serv_addr; + std::memset(&serv_addr, 0, sizeof(serv_addr)); + serv_addr.sin_family = AF_INET; + serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); + serv_addr.sin_port = htons(0); + + if (bind(sock, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) != 0) { + CLOSE_SOCKET(sock); +#ifdef _WIN32 + WSACleanup(); +#endif + return -1; + } + +#ifdef _WIN32 + int namelen = sizeof(serv_addr); +#else + socklen_t namelen = sizeof(serv_addr); +#endif + if (getsockname(sock, (struct sockaddr*)&serv_addr, &namelen) != 0) { + CLOSE_SOCKET(sock); +#ifdef _WIN32 + WSACleanup(); +#endif + return -1; + } + + int port = ntohs(serv_addr.sin_port); + + CLOSE_SOCKET(sock); +#ifdef _WIN32 + WSACleanup(); +#endif + + return port; +} + +// helper to convert vector to char ** +// pointers are only valid as long as the original vector is valid +static std::vector to_char_ptr_array(const std::vector & vec) { + std::vector result; + result.reserve(vec.size() + 1); + for (const auto & s : vec) { + result.push_back(const_cast(s.c_str())); + } + 
result.push_back(nullptr); + return result; +} + +std::vector server_models::get_all_meta() { + std::lock_guard lk(mutex); + std::vector result; + result.reserve(mapping.size()); + for (const auto & [name, inst] : mapping) { + result.push_back(inst.meta); + } + return result; +} + +void server_models::unload_lru() { + if (base_params.models_max <= 0) { + return; // no limit + } + // remove one of the servers if we passed the models_max (least recently used - LRU) + std::string lru_model_name = ""; + int64_t lru_last_used = ggml_time_ms(); + size_t count_active = 0; + { + std::unique_lock lk(mutex); + for (const auto & m : mapping) { + if (m.second.meta.is_active()) { + count_active++; + if (m.second.meta.last_used < lru_last_used) { + lru_model_name = m.first; + lru_last_used = m.second.meta.last_used; + } + } + } + } + if (!lru_model_name.empty() && count_active >= (size_t)base_params.models_max) { + SRV_INF("models_max limit reached, removing LRU name=%s\n", lru_model_name.c_str()); + unload(lru_model_name); + // wait for unload to complete + { + std::unique_lock lk(mutex); + cv.wait(lk, [this, &lru_model_name]() { + return mapping[lru_model_name].meta.status == SERVER_MODEL_STATUS_UNLOADED; + }); + } + } +} + +void server_models::load(const std::string & name) { + if (!has_model(name)) { + throw std::runtime_error("model name=" + name + " is not found"); + } + unload_lru(); + + std::lock_guard lk(mutex); + + auto meta = mapping[name].meta; + if (meta.status != SERVER_MODEL_STATUS_UNLOADED) { + SRV_INF("model %s is not ready\n", name.c_str()); + return; + } + + // prepare new instance info + instance_t inst; + inst.meta = meta; + inst.meta.port = get_free_port(); + inst.meta.status = SERVER_MODEL_STATUS_LOADING; + inst.meta.last_used = ggml_time_ms(); + + if (inst.meta.port <= 0) { + throw std::runtime_error("failed to get a port number"); + } + + inst.subproc = std::make_shared(); + { + SRV_INF("spawning server instance with name=%s on port %d\n", inst.meta.name.c_str(), inst.meta.port); + + inst.meta.update_args(ctx_preset, bin_path); // render args + + std::vector child_args = inst.meta.args; // copy + std::vector child_env = base_env; // copy + child_env.push_back("LLAMA_SERVER_ROUTER_PORT=" + std::to_string(base_params.port)); + + SRV_INF("%s", "spawning server instance with args:\n"); + for (const auto & arg : child_args) { + SRV_INF(" %s\n", arg.c_str()); + } + inst.meta.args = child_args; // save for debugging + + std::vector argv = to_char_ptr_array(child_args); + std::vector envp = to_char_ptr_array(child_env); + + // TODO @ngxson : maybe separate stdout and stderr in the future + // so that we can use stdout for commands and stderr for logging + int options = subprocess_option_no_window | subprocess_option_combined_stdout_stderr; + int result = subprocess_create_ex(argv.data(), options, envp.data(), inst.subproc.get()); + if (result != 0) { + throw std::runtime_error("failed to spawn server instance"); + } + + inst.stdin_file = subprocess_stdin(inst.subproc.get()); + } + + // start a thread to manage the child process + // captured variables are guaranteed to be destroyed only after the thread is joined + inst.th = std::thread([this, name, child_proc = inst.subproc, port = inst.meta.port, stop_timeout = inst.meta.stop_timeout]() { + FILE * stdin_file = subprocess_stdin(child_proc.get()); + FILE * stdout_file = subprocess_stdout(child_proc.get()); // combined stdout/stderr + + std::thread log_thread([&]() { + // read stdout/stderr and forward to main server log + // also handle 
status report from child process + bool state_received = false; // true if child state received + if (stdout_file) { + char buffer[4096]; + while (fgets(buffer, sizeof(buffer), stdout_file) != nullptr) { + LOG("[%5d] %s", port, buffer); + if (!state_received && std::strstr(buffer, CMD_CHILD_TO_ROUTER_READY) != nullptr) { + // child process is ready + this->update_status(name, SERVER_MODEL_STATUS_LOADED, 0); + state_received = true; + } + } + } else { + SRV_ERR("failed to get stdout/stderr of child process for name=%s\n", name.c_str()); + } + }); + + std::thread stopping_thread([&]() { + // thread to monitor stopping signal + auto is_stopping = [this, &name]() { + return this->stopping_models.find(name) != this->stopping_models.end(); + }; + { + std::unique_lock lk(this->mutex); + this->cv_stop.wait(lk, is_stopping); + } + SRV_INF("stopping model instance name=%s\n", name.c_str()); + // send interrupt to child process + fprintf(stdin_file, "%s\n", CMD_ROUTER_TO_CHILD_EXIT); + fflush(stdin_file); + // wait to stop gracefully or timeout + int64_t start_time = ggml_time_ms(); + while (true) { + std::unique_lock lk(this->mutex); + if (!is_stopping()) { + return; // already stopped + } + int64_t elapsed = ggml_time_ms() - start_time; + if (elapsed >= stop_timeout * 1000) { + // timeout, force kill + SRV_WRN("force-killing model instance name=%s after %d seconds timeout\n", name.c_str(), stop_timeout); + subprocess_terminate(child_proc.get()); + return; + } + this->cv_stop.wait_for(lk, std::chrono::seconds(1)); + } + }); + + // we reach here when the child process exits + // note: we cannot join() prior to this point because it will close stdin_file + if (log_thread.joinable()) { + log_thread.join(); + } + + // stop the timeout monitoring thread + { + std::lock_guard lk(this->mutex); + stopping_models.erase(name); + cv_stop.notify_all(); + } + if (stopping_thread.joinable()) { + stopping_thread.join(); + } + + // get the exit code + int exit_code = 0; + subprocess_join(child_proc.get(), &exit_code); + subprocess_destroy(child_proc.get()); + + // update status and exit code + this->update_status(name, SERVER_MODEL_STATUS_UNLOADED, exit_code); + SRV_INF("instance name=%s exited with status %d\n", name.c_str(), exit_code); + }); + + // clean up old process/thread if exists + { + auto & old_instance = mapping[name]; + // old process should have exited already, but just in case, we clean it up here + if (subprocess_alive(old_instance.subproc.get())) { + SRV_WRN("old process for model name=%s is still alive, this is unexpected\n", name.c_str()); + subprocess_terminate(old_instance.subproc.get()); // force kill + } + if (old_instance.th.joinable()) { + old_instance.th.join(); + } + } + + mapping[name] = std::move(inst); + cv.notify_all(); +} + +void server_models::unload(const std::string & name) { + std::lock_guard lk(mutex); + auto it = mapping.find(name); + if (it != mapping.end()) { + if (it->second.meta.is_active()) { + SRV_INF("unloading model instance name=%s\n", name.c_str()); + stopping_models.insert(name); + cv_stop.notify_all(); + // status change will be handled by the managing thread + } else { + SRV_WRN("model instance name=%s is not loaded\n", name.c_str()); + } + } +} + +void server_models::unload_all() { + std::vector to_join; + { + std::lock_guard lk(mutex); + for (auto & [name, inst] : mapping) { + if (inst.meta.is_active()) { + SRV_INF("unloading model instance name=%s\n", name.c_str()); + stopping_models.insert(name); + cv_stop.notify_all(); + // status change will be handled by the 
managing thread + } + // moving the thread to join list to avoid deadlock + to_join.push_back(std::move(inst.th)); + } + } + for (auto & th : to_join) { + if (th.joinable()) { + th.join(); + } + } +} + +void server_models::update_status(const std::string & name, server_model_status status, int exit_code) { + std::unique_lock lk(mutex); + auto it = mapping.find(name); + if (it != mapping.end()) { + auto & meta = it->second.meta; + meta.status = status; + meta.exit_code = exit_code; + } + cv.notify_all(); +} + +void server_models::wait_until_loaded(const std::string & name) { + std::unique_lock lk(mutex); + cv.wait(lk, [this, &name]() { + auto it = mapping.find(name); + if (it != mapping.end()) { + return it->second.meta.status != SERVER_MODEL_STATUS_LOADING; + } + return false; + }); +} + +bool server_models::ensure_model_loaded(const std::string & name) { + auto meta = get_meta(name); + if (!meta.has_value()) { + throw std::runtime_error("model name=" + name + " is not found"); + } + if (meta->status == SERVER_MODEL_STATUS_LOADED) { + return false; // already loaded + } + if (meta->status == SERVER_MODEL_STATUS_UNLOADED) { + SRV_INF("model name=%s is not loaded, loading...\n", name.c_str()); + load(name); + } + + // for loading state + SRV_INF("waiting until model name=%s is fully loaded...\n", name.c_str()); + wait_until_loaded(name); + + // check final status + meta = get_meta(name); + if (!meta.has_value() || meta->is_failed()) { + throw std::runtime_error("model name=" + name + " failed to load"); + } + + return true; +} + +server_http_res_ptr server_models::proxy_request(const server_http_req & req, const std::string & method, const std::string & name, bool update_last_used) { + auto meta = get_meta(name); + if (!meta.has_value()) { + throw std::runtime_error("model name=" + name + " is not found"); + } + if (meta->status != SERVER_MODEL_STATUS_LOADED) { + throw std::invalid_argument("model name=" + name + " is not loaded"); + } + if (update_last_used) { + std::unique_lock lk(mutex); + mapping[name].meta.last_used = ggml_time_ms(); + } + SRV_INF("proxying request to model %s on port %d\n", name.c_str(), meta->port); + auto proxy = std::make_unique( + method, + CHILD_ADDR, + meta->port, + req.path, + req.headers, + req.body, + req.should_stop, + base_params.timeout_read, + base_params.timeout_write + ); + return proxy; +} + +std::thread server_models::setup_child_server(const std::function & shutdown_handler) { + // send a notification to the router server that a model instance is ready + common_log_pause(common_log_main()); + fflush(stdout); + fprintf(stdout, "%s\n", CMD_CHILD_TO_ROUTER_READY); + fflush(stdout); + common_log_resume(common_log_main()); + + // setup thread for monitoring stdin + return std::thread([shutdown_handler]() { + // wait for EOF on stdin + SRV_INF("%s", "child server monitoring thread started, waiting for EOF on stdin...\n"); + bool eof = false; + while (true) { + std::string line; + if (!std::getline(std::cin, line)) { + // EOF detected, that means the router server is unexpectedly exit or killed + eof = true; + break; + } + if (line.find(CMD_ROUTER_TO_CHILD_EXIT) != std::string::npos) { + SRV_INF("%s", "exit command received, exiting...\n"); + shutdown_handler(0); + break; + } + } + if (eof) { + SRV_INF("%s", "EOF on stdin detected, forcing shutdown...\n"); + exit(1); + } + }); +} + + + +// +// server_models_routes +// + +static void res_ok(std::unique_ptr & res, const json & response_data) { + res->status = 200; + res->data = safe_json_to_str(response_data); 
+} + +static void res_err(std::unique_ptr & res, const json & error_data) { + res->status = json_value(error_data, "code", 500); + res->data = safe_json_to_str({{ "error", error_data }}); +} + +static bool router_validate_model(const std::string & name, server_models & models, bool models_autoload, std::unique_ptr & res) { + if (name.empty()) { + res_err(res, format_error_response("model name is missing from the request", ERROR_TYPE_INVALID_REQUEST)); + return false; + } + auto meta = models.get_meta(name); + if (!meta.has_value()) { + res_err(res, format_error_response(string_format("model '%s' not found", name.c_str()), ERROR_TYPE_INVALID_REQUEST)); + return false; + } + if (models_autoload) { + models.ensure_model_loaded(name); + } else { + if (meta->status != SERVER_MODEL_STATUS_LOADED) { + res_err(res, format_error_response("model is not loaded", ERROR_TYPE_INVALID_REQUEST)); + return false; + } + } + return true; +} + +static bool is_autoload(const common_params & params, const server_http_req & req) { + std::string autoload = req.get_param("autoload"); + if (autoload.empty()) { + return params.models_autoload; + } else { + return autoload == "true" || autoload == "1"; + } +} + +void server_models_routes::init_routes() { + this->get_router_props = [this](const server_http_req & req) { + std::string name = req.get_param("model"); + if (name.empty()) { + // main instance + auto res = std::make_unique(); + res_ok(res, { + // TODO: add support for this on web UI + {"role", "router"}, + {"max_instances", 4}, // dummy value for testing + // this is a dummy response to make sure webui doesn't break + {"model_alias", "llama-server"}, + {"model_path", "none"}, + {"default_generation_settings", { + {"params", json{}}, + {"n_ctx", 0}, + }}, + {"webui_settings", webui_settings}, + }); + return res; + } + return proxy_get(req); + }; + + this->proxy_get = [this](const server_http_req & req) { + std::string method = "GET"; + std::string name = req.get_param("model"); + bool autoload = is_autoload(params, req); + auto error_res = std::make_unique(); + if (!router_validate_model(name, models, autoload, error_res)) { + return error_res; + } + return models.proxy_request(req, method, name, false); + }; + + this->proxy_post = [this](const server_http_req & req) { + std::string method = "POST"; + json body = json::parse(req.body); + std::string name = json_value(body, "model", std::string()); + bool autoload = is_autoload(params, req); + auto error_res = std::make_unique(); + if (!router_validate_model(name, models, autoload, error_res)) { + return error_res; + } + return models.proxy_request(req, method, name, true); // update last usage for POST request only + }; + + this->post_router_models_load = [this](const server_http_req & req) { + auto res = std::make_unique(); + json body = json::parse(req.body); + std::string name = json_value(body, "model", std::string()); + auto model = models.get_meta(name); + if (!model.has_value()) { + res_err(res, format_error_response("model is not found", ERROR_TYPE_NOT_FOUND)); + return res; + } + if (model->status == SERVER_MODEL_STATUS_LOADED) { + res_err(res, format_error_response("model is already loaded", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + models.load(name); + res_ok(res, {{"success", true}}); + return res; + }; + + this->get_router_models = [this](const server_http_req &) { + auto res = std::make_unique(); + json models_json = json::array(); + auto all_models = models.get_all_meta(); + std::time_t t = std::time(0); + for (const auto & meta : 
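// --- editor's sketch (illustrative only, not part of this patch) ---
// Request bodies understood by the router routes above. Proxied POSTs and the
// explicit load route (and the matching unload route below) all select the
// target instance via a "model" field; for proxied requests the optional
// "autoload" query parameter ("true"/"1") overrides the --models-autoload
// default. The model name is hypothetical.
json example_load_body  = { {"model", "qwen3-8b"} };                    // /models/load, /models/unload
json example_proxy_body = { {"model", "qwen3-8b"}, {"stream", true} };  // any proxied POST, e.g. chat
// --------------------------------------------------------------------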
all_models) { + json status { + {"value", server_model_status_to_string(meta.status)}, + {"args", meta.args}, + }; + if (!meta.preset.name.empty()) { + common_preset preset_copy = meta.preset; + unset_reserved_args(preset_copy, false); + preset_copy.unset_option("LLAMA_ARG_HOST"); + preset_copy.unset_option("LLAMA_ARG_PORT"); + preset_copy.unset_option("LLAMA_ARG_ALIAS"); + status["preset"] = preset_copy.to_ini(); + } + if (meta.is_failed()) { + status["exit_code"] = meta.exit_code; + status["failed"] = true; + } + models_json.push_back(json { + {"id", meta.name}, + {"object", "model"}, // for OAI-compat + {"owned_by", "llamacpp"}, // for OAI-compat + {"created", t}, // for OAI-compat + {"status", status}, + // TODO: add other fields, may require reading GGUF metadata + }); + } + res_ok(res, { + {"data", models_json}, + {"object", "list"}, + }); + return res; + }; + + this->post_router_models_unload = [this](const server_http_req & req) { + auto res = std::make_unique(); + json body = json::parse(req.body); + std::string name = json_value(body, "model", std::string()); + auto model = models.get_meta(name); + if (!model.has_value()) { + res_err(res, format_error_response("model is not found", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + if (!model->is_active()) { + res_err(res, format_error_response("model is not loaded", ERROR_TYPE_INVALID_REQUEST)); + return res; + } + models.unload(name); + res_ok(res, {{"success", true}}); + return res; + }; +} + + + +// +// server_http_proxy +// + +// simple implementation of a pipe +// used for streaming data between threads +template +struct pipe_t { + std::mutex mutex; + std::condition_variable cv; + std::queue queue; + std::atomic writer_closed{false}; + std::atomic reader_closed{false}; + void close_write() { + writer_closed.store(true, std::memory_order_relaxed); + cv.notify_all(); + } + void close_read() { + reader_closed.store(true, std::memory_order_relaxed); + cv.notify_all(); + } + bool read(T & output, const std::function & should_stop) { + std::unique_lock lk(mutex); + constexpr auto poll_interval = std::chrono::milliseconds(500); + while (true) { + if (!queue.empty()) { + output = std::move(queue.front()); + queue.pop(); + return true; + } + if (writer_closed.load()) { + return false; // clean EOF + } + if (should_stop()) { + close_read(); // signal broken pipe to writer + return false; // cancelled / reader no longer alive + } + cv.wait_for(lk, poll_interval); + } + } + bool write(T && data) { + std::lock_guard lk(mutex); + if (reader_closed.load()) { + return false; // broken pipe + } + queue.push(std::move(data)); + cv.notify_one(); + return true; + } +}; + +static std::string to_lower_copy(const std::string & value) { + std::string lowered(value.size(), '\0'); + std::transform(value.begin(), value.end(), lowered.begin(), [](unsigned char c) { return std::tolower(c); }); + return lowered; +} + +static bool should_strip_proxy_header(const std::string & header_name) { + // Headers that get duplicated when router forwards child responses + if (header_name == "server" || + header_name == "transfer-encoding" || + header_name == "content-length" || // quick fix for https://github.com/ggml-org/llama.cpp/issues/17710 + header_name == "keep-alive") { + return true; + } + + // Router injects CORS, child also sends them: duplicate + if (header_name.rfind("access-control-", 0) == 0) { + return true; + } + + return false; +} + +server_http_proxy::server_http_proxy( + const std::string & method, + const std::string & host, + int port, + const 
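// --- editor's sketch (illustrative only, not part of this patch) ---
// The single-producer / single-consumer contract of pipe_t above: the writer
// pushes chunks and calls close_write() to signal EOF; the reader polls with a
// should_stop callback and gets false either on clean EOF or when it cancels
// (which marks the pipe broken for the writer).
pipe_t<std::string> p;
std::thread writer([&p]() {
    p.write("hello");
    p.close_write();                              // EOF: subsequent read() returns false
});
std::string chunk;
while (p.read(chunk, []() { return false; })) {   // never cancel in this sketch
    // consume chunk
}
writer.join();
// --------------------------------------------------------------------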
std::string & path, + const std::map & headers, + const std::string & body, + const std::function should_stop, + int32_t timeout_read, + int32_t timeout_write + ) { + // shared between reader and writer threads + auto cli = std::make_shared(host, port); + auto pipe = std::make_shared>(); + + // setup Client + cli->set_connection_timeout(0, 200000); // 200 milliseconds + cli->set_write_timeout(timeout_read, 0); // reversed for cli (client) vs srv (server) + cli->set_read_timeout(timeout_write, 0); + this->status = 500; // to be overwritten upon response + this->cleanup = [pipe]() { + pipe->close_read(); + pipe->close_write(); + }; + + // wire up the receive end of the pipe + this->next = [pipe, should_stop](std::string & out) -> bool { + msg_t msg; + bool has_next = pipe->read(msg, should_stop); + if (!msg.data.empty()) { + out = std::move(msg.data); + } + return has_next; // false if EOF or pipe broken + }; + + // wire up the HTTP client + // note: do NOT capture `this` pointer, as it may be destroyed before the thread ends + httplib::ResponseHandler response_handler = [pipe, cli](const httplib::Response & response) { + msg_t msg; + msg.status = response.status; + for (const auto & [key, value] : response.headers) { + const auto lowered = to_lower_copy(key); + if (should_strip_proxy_header(lowered)) { + continue; + } + if (lowered == "content-type") { + msg.content_type = value; + continue; + } + msg.headers[key] = value; + } + return pipe->write(std::move(msg)); // send headers first + }; + httplib::ContentReceiverWithProgress content_receiver = [pipe](const char * data, size_t data_length, size_t, size_t) { + // send data chunks + // returns false if pipe is closed / broken (signal to stop receiving) + return pipe->write({{}, 0, std::string(data, data_length), ""}); + }; + + // prepare the request to destination server + httplib::Request req; + { + req.method = method; + req.path = path; + for (const auto & [key, value] : headers) { + req.set_header(key, value); + } + req.body = body; + req.response_handler = response_handler; + req.content_receiver = content_receiver; + } + + // start the proxy thread + SRV_DBG("start proxy thread %s %s\n", req.method.c_str(), req.path.c_str()); + this->thread = std::thread([cli, pipe, req]() { + auto result = cli->send(std::move(req)); + if (result.error() != httplib::Error::Success) { + auto err_str = httplib::to_string(result.error()); + SRV_ERR("http client error: %s\n", err_str.c_str()); + pipe->write({{}, 500, "", ""}); // header + pipe->write({{}, 0, "proxy error: " + err_str, ""}); // body + } + pipe->close_write(); // signal EOF to reader + SRV_DBG("%s", "client request thread ended\n"); + }); + this->thread.detach(); + + // wait for the first chunk (headers) + { + msg_t header; + if (pipe->read(header, should_stop)) { + SRV_DBG("%s", "received response headers\n"); + this->status = header.status; + this->headers = std::move(header.headers); + if (!header.content_type.empty()) { + this->content_type = std::move(header.content_type); + } + } else { + SRV_DBG("%s", "no response headers received (request cancelled?)\n"); + } + } +} diff --git a/llama.cpp/tools/server/server-models.h b/llama.cpp/tools/server/server-models.h new file mode 100644 index 0000000..a397abd --- /dev/null +++ b/llama.cpp/tools/server/server-models.h @@ -0,0 +1,203 @@ +#pragma once + +#include "common.h" +#include "preset.h" +#include "server-common.h" +#include "server-http.h" + +#include +#include +#include +#include +#include + +/** + * state diagram: + * + * UNLOADED 
──► LOADING ──► LOADED + * ▲ │ │ + * └───failed───┘ │ + * ▲ │ + * └────────unloaded─────────┘ + */ +enum server_model_status { + // TODO: also add downloading state when the logic is added + SERVER_MODEL_STATUS_UNLOADED, + SERVER_MODEL_STATUS_LOADING, + SERVER_MODEL_STATUS_LOADED +}; + +static server_model_status server_model_status_from_string(const std::string & status_str) { + if (status_str == "unloaded") { + return SERVER_MODEL_STATUS_UNLOADED; + } + if (status_str == "loading") { + return SERVER_MODEL_STATUS_LOADING; + } + if (status_str == "loaded") { + return SERVER_MODEL_STATUS_LOADED; + } + throw std::runtime_error("invalid server model status"); +} + +static std::string server_model_status_to_string(server_model_status status) { + switch (status) { + case SERVER_MODEL_STATUS_UNLOADED: return "unloaded"; + case SERVER_MODEL_STATUS_LOADING: return "loading"; + case SERVER_MODEL_STATUS_LOADED: return "loaded"; + default: return "unknown"; + } +} + +struct server_model_meta { + common_preset preset; + std::string name; + int port = 0; + server_model_status status = SERVER_MODEL_STATUS_UNLOADED; + int64_t last_used = 0; // for LRU unloading + std::vector args; // args passed to the model instance, will be populated by render_args() + int exit_code = 0; // exit code of the model instance process (only valid if status == FAILED) + int stop_timeout = 0; // seconds to wait before force-killing the model instance during shutdown + + bool is_active() const { + return status == SERVER_MODEL_STATUS_LOADED || status == SERVER_MODEL_STATUS_LOADING; + } + + bool is_failed() const { + return status == SERVER_MODEL_STATUS_UNLOADED && exit_code != 0; + } + + void update_args(common_preset_context & ctx_presets, std::string bin_path); +}; + +struct subprocess_s; + +struct server_models { +private: + struct instance_t { + std::shared_ptr subproc; // shared between main thread and monitoring thread + std::thread th; + server_model_meta meta; + FILE * stdin_file = nullptr; + }; + + std::mutex mutex; + std::condition_variable cv; + std::map mapping; + + // for stopping models + std::condition_variable cv_stop; + std::set stopping_models; + + common_preset_context ctx_preset; + + common_params base_params; + std::string bin_path; + std::vector base_env; + common_preset base_preset; // base preset from llama-server CLI args + + void update_meta(const std::string & name, const server_model_meta & meta); + + // unload least recently used models if the limit is reached + void unload_lru(); + + // not thread-safe, caller must hold mutex + void add_model(server_model_meta && meta); + +public: + server_models(const common_params & params, int argc, char ** argv); + + void load_models(); + + // check if a model instance exists (thread-safe) + bool has_model(const std::string & name); + + // return a copy of model metadata (thread-safe) + std::optional get_meta(const std::string & name); + + // return a copy of all model metadata (thread-safe) + std::vector get_all_meta(); + + // load and unload model instances + // these functions are thread-safe + void load(const std::string & name); + void unload(const std::string & name); + void unload_all(); + + // update the status of a model instance (thread-safe) + void update_status(const std::string & name, server_model_status status, int exit_code); + + // wait until the model instance is fully loaded (thread-safe) + // return when the model is loaded or failed to load + void wait_until_loaded(const std::string & name); + + // load the model if not loaded, otherwise 
do nothing (thread-safe) + // return false if model is already loaded; return true otherwise (meta may need to be refreshed) + bool ensure_model_loaded(const std::string & name); + + // proxy an HTTP request to the model instance + server_http_res_ptr proxy_request(const server_http_req & req, const std::string & method, const std::string & name, bool update_last_used); + + // notify the router server that a model instance is ready + // return the monitoring thread (to be joined by the caller) + static std::thread setup_child_server(const std::function & shutdown_handler); +}; + +struct server_models_routes { + common_params params; + json webui_settings = json::object(); + server_models models; + server_models_routes(const common_params & params, int argc, char ** argv) + : params(params), models(params, argc, argv) { + if (!this->params.webui_config_json.empty()) { + try { + webui_settings = json::parse(this->params.webui_config_json); + } catch (const std::exception & e) { + LOG_ERR("%s: failed to parse webui config: %s\n", __func__, e.what()); + throw; + } + } + init_routes(); + } + + void init_routes(); + // handlers using lambda function, so that they can capture `this` without `std::bind` + server_http_context::handler_t get_router_props; + server_http_context::handler_t proxy_get; + server_http_context::handler_t proxy_post; + server_http_context::handler_t get_router_models; + server_http_context::handler_t post_router_models_load; + server_http_context::handler_t post_router_models_unload; +}; + +/** + * A simple HTTP proxy that forwards requests to another server + * and relays the responses back. + */ +struct server_http_proxy : server_http_res { + std::function cleanup = nullptr; +public: + server_http_proxy(const std::string & method, + const std::string & host, + int port, + const std::string & path, + const std::map & headers, + const std::string & body, + const std::function should_stop, + int32_t timeout_read, + int32_t timeout_write + ); + ~server_http_proxy() { + if (cleanup) { + cleanup(); + } + } +private: + std::thread thread; + struct msg_t { + std::map headers; + int status = 0; + std::string data; + std::string content_type; + }; +}; diff --git a/llama.cpp/tools/server/server-queue.cpp b/llama.cpp/tools/server/server-queue.cpp new file mode 100644 index 0000000..a2a026a --- /dev/null +++ b/llama.cpp/tools/server/server-queue.cpp @@ -0,0 +1,450 @@ +#include "server-task.h" +#include "server-queue.h" + +#include "log.h" + +#include + +#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) + +#define RES_INF(fmt, ...) LOG_INF("res %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define RES_WRN(fmt, ...) LOG_WRN("res %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define RES_ERR(fmt, ...) LOG_ERR("res %12.*s: " fmt, 12, __func__, __VA_ARGS__) +#define RES_DBG(fmt, ...) 
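// --- editor's sketch (illustrative only, not part of this patch) ---
// Typical router-side use of the server_models API declared above: make sure the
// requested instance is running, then forward the HTTP request to it. This
// mirrors what router_validate_model() and proxy_post do in server-models.cpp.
// The model name is hypothetical.
void example_forward(server_models & models, const server_http_req & req) {
    const std::string name = "qwen3-8b";
    if (!models.has_model(name)) {
        return;                                  // unknown model
    }
    models.ensure_model_loaded(name);            // blocks until loaded, throws on failure
    auto res = models.proxy_request(req, "POST", name, /*update_last_used=*/true);
    (void) res;                                  // in real code, returned to the HTTP layer
}
// --------------------------------------------------------------------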
LOG_DBG("res %12.*s: " fmt, 12, __func__, __VA_ARGS__) + +// +// server_queue +// + +int server_queue::post(server_task && task, bool front) { + std::unique_lock lock(mutex_tasks); + GGML_ASSERT(task.id != -1); + // if this is cancel task make sure to clean up pending tasks + if (task.type == SERVER_TASK_TYPE_CANCEL) { + cleanup_pending_task(task.id_target); + } + const int task_id = task.id; + QUE_DBG("new task, id = %d, front = %d\n", task_id, front); + if (front) { + queue_tasks.push_front(std::move(task)); + } else { + queue_tasks.push_back(std::move(task)); + } + time_last_task = ggml_time_ms(); + condition_tasks.notify_one(); + return task_id; +} + +int server_queue::post(std::vector && tasks, bool front) { + std::unique_lock lock(mutex_tasks); + for (auto & task : tasks) { + if (task.id == -1) { + task.id = id++; + } + // if this is cancel task make sure to clean up pending tasks + if (task.type == SERVER_TASK_TYPE_CANCEL) { + cleanup_pending_task(task.id_target); + } + QUE_DBG("new task, id = %d/%d, front = %d\n", task.id, (int) tasks.size(), front); + if (front) { + queue_tasks.push_front(std::move(task)); + } else { + queue_tasks.push_back(std::move(task)); + } + } + time_last_task = ggml_time_ms(); + condition_tasks.notify_one(); + return 0; +} + +void server_queue::defer(server_task && task) { + std::unique_lock lock(mutex_tasks); + QUE_DBG("defer task, id = %d\n", task.id); + queue_tasks_deferred.push_back(std::move(task)); + time_last_task = ggml_time_ms(); + condition_tasks.notify_one(); +} + +int server_queue::get_new_id() { + std::unique_lock lock(mutex_tasks); + int new_id = id++; + return new_id; +} + +void server_queue::pop_deferred_task(int id_slot) { + std::unique_lock lock(mutex_tasks); + if (!queue_tasks_deferred.empty()) { + // try to find a task that uses the specified slot + bool found = false; + for (auto it = queue_tasks_deferred.begin(); it != queue_tasks_deferred.end(); ++it) { + if (it->id_slot == id_slot) { + QUE_DBG("pop deferred task (use slot %d), id_task = %d\n", id_slot, it->id); + queue_tasks.emplace_front(std::move(*it)); + queue_tasks_deferred.erase(it); + found = true; + break; + } + } + // if not tasks found using the slot, just pop the first deferred task (default behavior) + if (!found) { + QUE_DBG("pop deferred task, id_task = %d\n", queue_tasks_deferred.front().id); + queue_tasks.emplace_front(std::move(queue_tasks_deferred.front())); + queue_tasks_deferred.pop_front(); + } + } + time_last_task = ggml_time_ms(); + condition_tasks.notify_one(); +} + +void server_queue::wait_until_no_sleep() { + std::unique_lock lock(mutex_tasks); + if (!sleeping) { + return; + } else { + if (!req_stop_sleeping) { + QUE_DBG("%s", "requesting to stop sleeping\n"); + req_stop_sleeping = true; + condition_tasks.notify_one(); // only main thread is waiting on this + } + QUE_DBG("%s", "waiting until no sleep\n"); + condition_tasks.wait(lock, [&]{ + return !sleeping; + }); + } +} + +void server_queue::terminate() { + std::unique_lock lock(mutex_tasks); + running = false; + condition_tasks.notify_all(); +} + +void server_queue::start_loop(int64_t idle_sleep_ms) { + running = true; + time_last_task = ggml_time_ms(); + + constexpr auto max_wait_time = std::chrono::seconds(1); + auto should_sleep = [&]() -> bool { + // caller must hold mutex_tasks + if (idle_sleep_ms < 0) { + return false; + } + int64_t now = ggml_time_ms(); + return (now - time_last_task) >= idle_sleep_ms; + }; + + while (true) { + QUE_DBG("%s", "processing new tasks\n"); + + while (true) { + 
std::unique_lock<std::mutex> lock(mutex_tasks); + if (!running) { + QUE_DBG("%s", "terminate\n"); + return; + } + if (queue_tasks.empty()) { + lock.unlock(); + break; + } + server_task task = std::move(queue_tasks.front()); + queue_tasks.pop_front(); + lock.unlock(); + + QUE_DBG("processing task, id = %d\n", task.id); + callback_new_task(std::move(task)); + } + // all tasks in the current loop are processed, slots data is now ready + QUE_DBG("%s", "update slots\n"); + + // this will run the main inference process for all slots + callback_update_slots(); + { + // update_slots() may take a while to finish, we need to make sure it's not counted as idle + std::unique_lock<std::mutex> lock(mutex_tasks); + time_last_task = ggml_time_ms(); + } + + QUE_DBG("%s", "waiting for new tasks\n"); + while (true) { + std::unique_lock<std::mutex> lock(mutex_tasks); + if (!running || !queue_tasks.empty()) { + break; // go back to process new tasks or terminate + } + + // no tasks, check for sleeping state + if (should_sleep()) { + QUE_INF("%s", "entering sleeping state\n"); + sleeping = true; + callback_sleeping_state(true); + req_stop_sleeping = false; + // wait until we are requested to exit sleeping state + condition_tasks.wait(lock, [&]{ + return (!running || req_stop_sleeping); + }); + if (!running) { // may have changed during sleep + break; // terminate + } + QUE_INF("%s", "exiting sleeping state\n"); + req_stop_sleeping = false; + callback_sleeping_state(false); + sleeping = false; + time_last_task = ggml_time_ms(); + condition_tasks.notify_all(); // notify wait_until_no_sleep() + break; // process new tasks + } else { + // wait for new tasks or timeout for checking sleeping condition + bool res = condition_tasks.wait_for(lock, max_wait_time, [&]{ + return (!queue_tasks.empty() || !running); + }); + if (res) { + break; // new task arrived or terminate + } + // otherwise, loop again to check sleeping condition + } + } + } +} + +void server_queue::cleanup_pending_task(int id_target) { + // no lock needed because this is called exclusively by post() + auto rm_func = [id_target](const server_task & task) { + return task.id == id_target; + }; + queue_tasks.erase( + std::remove_if(queue_tasks.begin(), queue_tasks.end(), rm_func), + queue_tasks.end()); + queue_tasks_deferred.erase( + std::remove_if(queue_tasks_deferred.begin(), queue_tasks_deferred.end(), rm_func), + queue_tasks_deferred.end()); +} + +// +// server_response +// + +void server_response::add_waiting_task_id(int id_task) { + RES_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size()); + + std::unique_lock<std::mutex> lock(mutex_results); + waiting_task_ids.insert(id_task); +} + +void server_response::add_waiting_task_ids(const std::unordered_set<int> & id_tasks) { + std::unique_lock<std::mutex> lock(mutex_results); + + for (const auto & id_task : id_tasks) { + RES_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size()); + waiting_task_ids.insert(id_task); + } +} + +void server_response::remove_waiting_task_id(int id_task) { + RES_DBG("remove task %d from waiting list. 
current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size()); + + std::unique_lock lock(mutex_results); + waiting_task_ids.erase(id_task); + // make sure to clean up all pending results + queue_results.erase( + std::remove_if(queue_results.begin(), queue_results.end(), [id_task](const server_task_result_ptr & res) { + return res->id == id_task; + }), + queue_results.end()); +} + +void server_response::remove_waiting_task_ids(const std::unordered_set & id_tasks) { + std::unique_lock lock(mutex_results); + + for (const auto & id_task : id_tasks) { + RES_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size()); + waiting_task_ids.erase(id_task); + } +} + +server_task_result_ptr server_response::recv(const std::unordered_set & id_tasks) { + while (true) { + std::unique_lock lock(mutex_results); + condition_results.wait(lock, [&]{ + if (!running) { + RES_DBG("%s : queue result stop\n", "recv"); + std::terminate(); // we cannot return here since the caller is HTTP code + } + return !queue_results.empty(); + }); + + for (size_t i = 0; i < queue_results.size(); i++) { + if (id_tasks.find(queue_results[i]->id) != id_tasks.end()) { + server_task_result_ptr res = std::move(queue_results[i]); + queue_results.erase(queue_results.begin() + i); + return res; + } + } + } + + // should never reach here +} + +server_task_result_ptr server_response::recv_with_timeout(const std::unordered_set & id_tasks, int timeout) { + while (true) { + std::unique_lock lock(mutex_results); + + for (int i = 0; i < (int) queue_results.size(); i++) { + if (id_tasks.find(queue_results[i]->id) != id_tasks.end()) { + server_task_result_ptr res = std::move(queue_results[i]); + queue_results.erase(queue_results.begin() + i); + return res; + } + } + + std::cv_status cr_res = condition_results.wait_for(lock, std::chrono::seconds(timeout)); + if (!running) { + RES_DBG("%s : queue result stop\n", __func__); + std::terminate(); // we cannot return here since the caller is HTTP code + } + if (cr_res == std::cv_status::timeout) { + return nullptr; + } + } + + // should never reach here +} + +server_task_result_ptr server_response::recv(int id_task) { + std::unordered_set id_tasks = {id_task}; + return recv(id_tasks); +} + +void server_response::send(server_task_result_ptr && result) { + RES_DBG("sending result for task id = %d\n", result->id); + + std::unique_lock lock(mutex_results); + for (const auto & id_task : waiting_task_ids) { + if (result->id == id_task) { + RES_DBG("task id = %d pushed to result queue\n", result->id); + + queue_results.emplace_back(std::move(result)); + condition_results.notify_all(); + return; + } + } +} + +void server_response::terminate() { + running = false; + condition_results.notify_all(); +} + +// +// server_response_reader +// + +void server_response_reader::post_task(server_task && task, bool front) { + GGML_ASSERT(id_tasks.empty() && "post_task() can only be called once per reader"); + GGML_ASSERT(!task.is_parent() && "not supported, use post_tasks() instead"); + task.index = 0; + id_tasks.insert(task.id); + states.push_back(task.create_state()); + queue_results.add_waiting_task_id(task.id); + queue_tasks.post(std::move(task), front); +} + +void server_response_reader::post_tasks(std::vector && tasks, bool front) { + GGML_ASSERT(id_tasks.empty() && "post_tasks() can only be called once per reader"); + id_tasks = server_task::get_list_id(tasks); + states.reserve(tasks.size()); + size_t index = 0; + for (auto & task : 
tasks) { + task.index = index++; + states.push_back(task.create_state()); + // for child tasks + for (auto & child_task : task.child_tasks) { + child_task.index = index++; + states.push_back(child_task.create_state()); + } + } + GGML_ASSERT(states.size() == id_tasks.size()); + queue_results.add_waiting_task_ids(id_tasks); + queue_tasks.post(std::move(tasks), front); +} + +bool server_response_reader::has_next() const { + return !cancelled && received_count < id_tasks.size(); +} + +// return nullptr if should_stop() is true before receiving a result +// note: if one error is received, it will stop further processing and return error result +server_task_result_ptr server_response_reader::next(const std::function & should_stop) { + while (true) { + server_task_result_ptr result = queue_results.recv_with_timeout(id_tasks, polling_interval_seconds); + if (result == nullptr) { + // timeout, check stop condition + if (should_stop()) { + SRV_DBG("%s", "stopping wait for next result due to should_stop condition\n"); + return nullptr; + } + } else { + if (result->is_error()) { + stop(); // cancel remaining tasks + SRV_DBG("%s", "received error result, stopping further processing\n"); + return result; + } + if (!states.empty()) { + // update the generation state if needed + const size_t idx = result->index; + GGML_ASSERT(idx < states.size()); + result->update(states[idx]); + } + if (result->is_stop()) { + received_count++; + } + return result; + } + } + + // should not reach here +} + +server_response_reader::batch_response server_response_reader::wait_for_all(const std::function & should_stop) { + batch_response batch_res; + batch_res.results.clear(); + batch_res.results.resize(id_tasks.size()); + while (has_next()) { + auto res = next(should_stop); + if (res == nullptr) { + batch_res.is_terminated = true; + return batch_res; + } + if (res->is_error()) { + batch_res.error = std::move(res); + return batch_res; + } + const size_t idx = res->index; + GGML_ASSERT(idx < batch_res.results.size() && "index out of range"); + GGML_ASSERT(batch_res.results[idx] == nullptr && "duplicate result received"); + batch_res.results[idx] = std::move(res); + } + return batch_res; +} + +void server_response_reader::stop() { + queue_results.remove_waiting_task_ids(id_tasks); + if (has_next() && !cancelled) { + // if tasks is not finished yet, cancel them + cancelled = true; + std::vector cancel_tasks; + cancel_tasks.reserve(id_tasks.size()); + for (const auto & id_task : id_tasks) { + SRV_WRN("cancel task, id_task = %d\n", id_task); + server_task task(SERVER_TASK_TYPE_CANCEL); + task.id_target = id_task; + queue_results.remove_waiting_task_id(id_task); + cancel_tasks.push_back(std::move(task)); + } + // push to beginning of the queue, so it has highest priority + queue_tasks.post(std::move(cancel_tasks), true); + } else { + SRV_DBG("%s", "all tasks already finished, no need to cancel\n"); + } +} diff --git a/llama.cpp/tools/server/server-queue.h b/llama.cpp/tools/server/server-queue.h new file mode 100644 index 0000000..164f09b --- /dev/null +++ b/llama.cpp/tools/server/server-queue.h @@ -0,0 +1,197 @@ +#pragma once + +#include "server-task.h" + +#include +#include +#include +#include +#include + +// struct for managing server tasks +// in most cases, use server_response_reader to post new tasks and retrieve results +struct server_queue { +private: + int id = 0; + bool running = false; + bool sleeping = false; + bool req_stop_sleeping = false; + int64_t time_last_task = 0; + + // queues + std::deque queue_tasks; + 
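+ // tasks in queue_tasks_deferred are waiting for a free slot; pop_deferred_task() moves one of them back to the front of queue_tasks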
std::deque queue_tasks_deferred; + + std::mutex mutex_tasks; + std::condition_variable condition_tasks; + + // callback functions + std::function callback_new_task; + std::function callback_update_slots; + std::function callback_sleeping_state; + +public: + // Add a new task to the end of the queue + int post(server_task && task, bool front = false); + + // multi-task version of post() + int post(std::vector && tasks, bool front = false); + + // Add a new task, but defer until one slot is available + void defer(server_task && task); + + // Get the next id for creating a new task + int get_new_id(); + + // Call when the state of one slot is changed, it will move one task from deferred to main queue + // prioritize tasks that use the specified slot (otherwise, pop the first deferred task) + void pop_deferred_task(int id_slot); + + // if sleeping, request exiting sleep state and wait until it is done + // returns immediately if not sleeping + void wait_until_no_sleep(); + + bool is_sleeping() { + std::unique_lock lock(mutex_tasks); + return sleeping; + } + + // end the start_loop routine + void terminate(); + + /** + * Main loop consists of these steps: + * - Wait until a new task arrives + * - Process the task (i.e. maybe copy data into slot) + * - Check if multitask is finished + * - Update all slots + * + * Sleeping procedure (disabled if idle_sleep_ms < 0): + * - If there is no task after idle_sleep_ms, enter sleeping state + * - Call callback_sleeping_state(true) + * - Wait until req_stop_sleeping is set to true + * - Call callback_sleeping_state(false) + * - Exit sleeping state + */ + void start_loop(int64_t idle_sleep_ms = -1); + + // for metrics + size_t queue_tasks_deferred_size() { + std::unique_lock lock(mutex_tasks); + return queue_tasks_deferred.size(); + } + + // + // Functions below are not thread-safe, must only be used before start_loop() is called + // + + // Register function to process a new task + void on_new_task(std::function callback) { + callback_new_task = std::move(callback); + } + + // Register the function to be called when all slots data is ready to be processed + void on_update_slots(std::function callback) { + callback_update_slots = std::move(callback); + } + + // Register callback for sleeping state change + // note: when entering sleeping state, the callback is called AFTER sleeping is set to true + // when leaving sleeping state, the callback is called BEFORE sleeping is set to false + void on_sleeping_state(std::function callback) { + callback_sleeping_state = std::move(callback); + } + +private: + void cleanup_pending_task(int id_target); +}; + +// struct for managing server responses +// in most cases, use server_response_reader to retrieve results +struct server_response { +private: + bool running = true; + + // for keeping track of all tasks waiting for the result + std::unordered_set waiting_task_ids; + + // the main result queue (using ptr for polymorphism) + std::vector queue_results; + + std::mutex mutex_results; + std::condition_variable condition_results; + +public: + // add the id_task to the list of tasks waiting for response + void add_waiting_task_id(int id_task); + + void add_waiting_task_ids(const std::unordered_set & id_tasks); + + // when the request is finished, we can remove task associated with it + void remove_waiting_task_id(int id_task); + + // remove multiple tasks from waiting list + void remove_waiting_task_ids(const std::unordered_set & id_tasks); + + // This function blocks the thread until there is a response for one of the 
id_tasks + server_task_result_ptr recv(const std::unordered_set & id_tasks); + + // same as recv(), but have timeout in seconds + // if timeout is reached, nullptr is returned + server_task_result_ptr recv_with_timeout(const std::unordered_set & id_tasks, int timeout); + + // single-task version of recv() + server_task_result_ptr recv(int id_task); + + // Send a new result to a waiting id_task + void send(server_task_result_ptr && result); + + // terminate the waiting loop + void terminate(); +}; + +// utility class to make working with server_queue and server_response easier +// it provides a generator-like API for server responses +// support pooling connection state and aggregating multiple results +struct server_response_reader { + std::unordered_set id_tasks; + server_queue & queue_tasks; + server_response & queue_results; + size_t received_count = 0; + bool cancelled = false; + int polling_interval_seconds; + + // tracking generation state and partial tool calls + // only used by streaming completions + std::vector states; + + // should_stop function will be called each polling_interval_seconds + server_response_reader(server_queue & queue_tasks, server_response & queue_results, int polling_interval_seconds) + : queue_tasks(queue_tasks), queue_results(queue_results), polling_interval_seconds(polling_interval_seconds) {} + ~server_response_reader() { + stop(); + } + + int get_new_id() { + return queue_tasks.get_new_id(); + } + + // if front = true, the task will be posted to the front of the queue (high priority) + void post_task(server_task && task, bool front = false); + void post_tasks(std::vector && tasks, bool front = false); + bool has_next() const; + + // return nullptr if should_stop() is true before receiving a result + // note: if one error is received, it will stop further processing and return error result + server_task_result_ptr next(const std::function & should_stop); + + struct batch_response { + bool is_terminated = false; // if true, indicates that processing was stopped before all results were received + std::vector results; + server_task_result_ptr error; // nullptr if no error + }; + // aggregate multiple results + batch_response wait_for_all(const std::function & should_stop); + + void stop(); +}; diff --git a/llama.cpp/tools/server/server-task.cpp b/llama.cpp/tools/server/server-task.cpp new file mode 100644 index 0000000..a137427 --- /dev/null +++ b/llama.cpp/tools/server/server-task.cpp @@ -0,0 +1,2005 @@ +#include "server-common.h" +#include "server-task.h" + +#include "common.h" +#include "llama.h" +#include "chat.h" +#include "sampling.h" +#include "speculative.h" +#include "json-schema-to-grammar.h" + +using json = nlohmann::ordered_json; + +// +// task_params +// + +json task_params::format_logit_bias(const std::vector & logit_bias) const { + json data = json::array(); + for (const auto & lb : logit_bias) { + data.push_back(json{ + {"bias", lb.bias}, + {"token", lb.token}, + }); + } + return data; +} + +json task_params::to_json(bool only_metrics) const { + std::vector samplers; + samplers.reserve(sampling.samplers.size()); + for (const auto & sampler : sampling.samplers) { + samplers.emplace_back(common_sampler_type_to_str(sampler)); + } + + json lora = json::array(); + for (auto & it : this->lora) { + lora.push_back({{"id", it.first}, {"scale", it.second}}); + } + + if (only_metrics) { + return json { + {"seed", sampling.seed}, + {"temperature", sampling.temp}, + {"dynatemp_range", sampling.dynatemp_range}, + {"dynatemp_exponent", 
sampling.dynatemp_exponent}, + {"top_k", sampling.top_k}, + {"top_p", sampling.top_p}, + {"min_p", sampling.min_p}, + {"top_n_sigma", sampling.top_n_sigma}, + {"xtc_probability", sampling.xtc_probability}, + {"xtc_threshold", sampling.xtc_threshold}, + {"typical_p", sampling.typ_p}, + {"repeat_last_n", sampling.penalty_last_n}, + {"repeat_penalty", sampling.penalty_repeat}, + {"presence_penalty", sampling.penalty_present}, + {"frequency_penalty", sampling.penalty_freq}, + {"dry_multiplier", sampling.dry_multiplier}, + {"dry_base", sampling.dry_base}, + {"dry_allowed_length", sampling.dry_allowed_length}, + {"dry_penalty_last_n", sampling.dry_penalty_last_n}, + {"mirostat", sampling.mirostat}, + {"mirostat_tau", sampling.mirostat_tau}, + {"mirostat_eta", sampling.mirostat_eta}, + {"max_tokens", n_predict}, + {"n_predict", n_predict}, // TODO: deduplicate? + {"n_keep", n_keep}, + {"n_discard", n_discard}, + {"ignore_eos", sampling.ignore_eos}, + {"stream", stream}, + {"n_probs", sampling.n_probs}, + {"min_keep", sampling.min_keep}, + {"chat_format", common_chat_format_name(chat_parser_params.format)}, + {"reasoning_format", common_reasoning_format_name(chat_parser_params.reasoning_format)}, + {"reasoning_in_content", chat_parser_params.reasoning_in_content}, + {"thinking_forced_open", chat_parser_params.thinking_forced_open}, + {"samplers", samplers}, + {"speculative.n_max", speculative.n_max}, + {"speculative.n_min", speculative.n_min}, + {"speculative.p_min", speculative.p_min}, + {"speculative.type", common_speculative_type_to_str(speculative.type)}, + {"speculative.ngram_size_n", speculative.ngram_size_n}, + {"speculative.ngram_size_m", speculative.ngram_size_m}, + {"speculative.ngram_m_hits", speculative.ngram_min_hits}, + {"timings_per_token", timings_per_token}, + {"post_sampling_probs", post_sampling_probs}, + {"backend_sampling", sampling.backend_sampling}, + {"lora", lora}, + }; + } + + auto grammar_triggers = json::array(); + for (const auto & trigger : sampling.grammar_triggers) { + server_grammar_trigger ct(trigger); + grammar_triggers.push_back(ct.to_json()); + } + + return json { + {"seed", sampling.seed}, + {"temperature", sampling.temp}, + {"dynatemp_range", sampling.dynatemp_range}, + {"dynatemp_exponent", sampling.dynatemp_exponent}, + {"top_k", sampling.top_k}, + {"top_p", sampling.top_p}, + {"min_p", sampling.min_p}, + {"top_n_sigma", sampling.top_n_sigma}, + {"xtc_probability", sampling.xtc_probability}, + {"xtc_threshold", sampling.xtc_threshold}, + {"typical_p", sampling.typ_p}, + {"repeat_last_n", sampling.penalty_last_n}, + {"repeat_penalty", sampling.penalty_repeat}, + {"presence_penalty", sampling.penalty_present}, + {"frequency_penalty", sampling.penalty_freq}, + {"dry_multiplier", sampling.dry_multiplier}, + {"dry_base", sampling.dry_base}, + {"dry_allowed_length", sampling.dry_allowed_length}, + {"dry_penalty_last_n", sampling.dry_penalty_last_n}, + {"dry_sequence_breakers", sampling.dry_sequence_breakers}, + {"mirostat", sampling.mirostat}, + {"mirostat_tau", sampling.mirostat_tau}, + {"mirostat_eta", sampling.mirostat_eta}, + {"stop", antiprompt}, + {"max_tokens", n_predict}, + {"n_predict", n_predict}, // TODO: deduplicate? 
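+ // "max_tokens" is accepted as an OpenAI-compatible alias of "n_predict" on input, so both are reported here with the same value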
+ {"n_keep", n_keep}, + {"n_discard", n_discard}, + {"ignore_eos", sampling.ignore_eos}, + {"stream", stream}, + {"logit_bias", format_logit_bias(sampling.logit_bias)}, + {"n_probs", sampling.n_probs}, + {"min_keep", sampling.min_keep}, + {"grammar", sampling.grammar}, + {"grammar_lazy", sampling.grammar_lazy}, + {"grammar_triggers", grammar_triggers}, + {"preserved_tokens", sampling.preserved_tokens}, + {"chat_format", common_chat_format_name(chat_parser_params.format)}, + {"reasoning_format", common_reasoning_format_name(chat_parser_params.reasoning_format)}, + {"reasoning_in_content", chat_parser_params.reasoning_in_content}, + {"thinking_forced_open", chat_parser_params.thinking_forced_open}, + {"samplers", samplers}, + {"speculative.n_max", speculative.n_max}, + {"speculative.n_min", speculative.n_min}, + {"speculative.p_min", speculative.p_min}, + {"speculative.type", common_speculative_type_to_str(speculative.type)}, + {"speculative.ngram_size_n", speculative.ngram_size_n}, + {"speculative.ngram_size_m", speculative.ngram_size_m}, + {"speculative.ngram_m_hits", speculative.ngram_min_hits}, + {"timings_per_token", timings_per_token}, + {"post_sampling_probs", post_sampling_probs}, + {"backend_sampling", sampling.backend_sampling}, + {"lora", lora}, + }; +} + +// +// task_result_state +// +common_chat_msg task_result_state::update_chat_msg( + const std::string & text_added, + bool is_partial, + std::vector & diffs) { + generated_text += text_added; + auto msg_prv_copy = chat_msg; + SRV_DBG("Parsing chat message: %s\n", generated_text.c_str()); + auto new_msg = common_chat_parse( + generated_text, + is_partial, + chat_parser_params); + if (!new_msg.empty()) { + new_msg.set_tool_call_ids(generated_tool_call_ids, gen_tool_call_id); + chat_msg = new_msg; + diffs = common_chat_msg_diff::compute_diffs(msg_prv_copy, new_msg.empty() ? 
msg_prv_copy : new_msg); + } + return chat_msg; +} + +// +// server_task +// + +task_params server_task::params_from_json_cmpl( + const llama_vocab * vocab, + const common_params & params_base, + const int n_ctx_slot, + const json & data) { + task_params params; + + // Sampling parameter defaults are loaded from the global server context (but individual requests can still them) + task_params defaults; + defaults.sampling = params_base.sampling; + defaults.speculative = params_base.speculative; + defaults.n_keep = params_base.n_keep; + defaults.n_predict = params_base.n_predict; + defaults.n_cache_reuse = params_base.n_cache_reuse; + defaults.cache_prompt = params_base.cache_prompt; + defaults.antiprompt = params_base.antiprompt; + + // enabling this will output extra debug information in the HTTP responses from the server + params.verbose = params_base.verbosity > 9; + params.timings_per_token = json_value(data, "timings_per_token", false); + + params.stream = json_value(data, "stream", false); + auto stream_opt = json_value(data, "stream_options", json::object()); + params.include_usage = json_value(stream_opt, "include_usage", false); + params.cache_prompt = json_value(data, "cache_prompt", defaults.cache_prompt); + params.return_tokens = json_value(data, "return_tokens", false); + params.return_progress = json_value(data, "return_progress", false); + params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict)); + params.n_indent = json_value(data, "n_indent", defaults.n_indent); + params.n_keep = json_value(data, "n_keep", defaults.n_keep); + params.n_discard = json_value(data, "n_discard", defaults.n_discard); + params.n_cmpl = json_value(data, "n_cmpl", json_value(data, "n", 1)); + params.n_cache_reuse = json_value(data, "n_cache_reuse", defaults.n_cache_reuse); + //params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", defaults.t_max_prompt_ms); // TODO: implement + params.t_max_predict_ms = json_value(data, "t_max_predict_ms", defaults.t_max_predict_ms); + params.response_fields = json_value(data, "response_fields", std::vector()); + + params.sampling.top_k = json_value(data, "top_k", defaults.sampling.top_k); + params.sampling.top_p = json_value(data, "top_p", defaults.sampling.top_p); + params.sampling.min_p = json_value(data, "min_p", defaults.sampling.min_p); + params.sampling.top_n_sigma = json_value(data, "top_n_sigma", defaults.sampling.top_n_sigma); + params.sampling.xtc_probability = json_value(data, "xtc_probability", defaults.sampling.xtc_probability); + params.sampling.xtc_threshold = json_value(data, "xtc_threshold", defaults.sampling.xtc_threshold); + params.sampling.typ_p = json_value(data, "typical_p", defaults.sampling.typ_p); + params.sampling.temp = json_value(data, "temperature", defaults.sampling.temp); + params.sampling.dynatemp_range = json_value(data, "dynatemp_range", defaults.sampling.dynatemp_range); + params.sampling.dynatemp_exponent = json_value(data, "dynatemp_exponent", defaults.sampling.dynatemp_exponent); + params.sampling.penalty_last_n = json_value(data, "repeat_last_n", defaults.sampling.penalty_last_n); + params.sampling.penalty_repeat = json_value(data, "repeat_penalty", defaults.sampling.penalty_repeat); + params.sampling.penalty_freq = json_value(data, "frequency_penalty", defaults.sampling.penalty_freq); + params.sampling.penalty_present = json_value(data, "presence_penalty", defaults.sampling.penalty_present); + params.sampling.dry_multiplier = json_value(data, "dry_multiplier", 
defaults.sampling.dry_multiplier); + params.sampling.dry_base = json_value(data, "dry_base", defaults.sampling.dry_base); + params.sampling.dry_allowed_length = json_value(data, "dry_allowed_length", defaults.sampling.dry_allowed_length); + params.sampling.dry_penalty_last_n = json_value(data, "dry_penalty_last_n", defaults.sampling.dry_penalty_last_n); + params.sampling.mirostat = json_value(data, "mirostat", defaults.sampling.mirostat); + params.sampling.mirostat_tau = json_value(data, "mirostat_tau", defaults.sampling.mirostat_tau); + params.sampling.mirostat_eta = json_value(data, "mirostat_eta", defaults.sampling.mirostat_eta); + params.sampling.adaptive_target = json_value(data, "adaptive_target", defaults.sampling.adaptive_target); + params.sampling.adaptive_decay = json_value(data, "adaptive_decay", defaults.sampling.adaptive_decay); + params.sampling.seed = json_value(data, "seed", defaults.sampling.seed); + params.sampling.n_probs = json_value(data, "n_probs", defaults.sampling.n_probs); + params.sampling.min_keep = json_value(data, "min_keep", defaults.sampling.min_keep); + params.sampling.backend_sampling = json_value(data, "backend_sampling", defaults.sampling.backend_sampling); + params.post_sampling_probs = json_value(data, "post_sampling_probs", defaults.post_sampling_probs); + + params.speculative.n_min = json_value(data, "speculative.n_min", defaults.speculative.n_min); + params.speculative.n_max = json_value(data, "speculative.n_max", defaults.speculative.n_max); + params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min); + + params.speculative.n_min = std::min(params.speculative.n_max, params.speculative.n_min); + params.speculative.n_min = std::max(params.speculative.n_min, 0); + params.speculative.n_max = std::max(params.speculative.n_max, 0); + + params.speculative.type = common_speculative_type_from_name(json_value(data, "speculative.type", common_speculative_type_to_str(defaults.speculative.type))); + + params.speculative.ngram_size_n = json_value(data, "speculative.ngram_size_n", defaults.speculative.ngram_size_n); + params.speculative.ngram_size_m = json_value(data, "speculative.ngram_size_m", defaults.speculative.ngram_size_m); + params.speculative.ngram_min_hits = json_value(data, "speculative.ngram_m_hits", defaults.speculative.ngram_min_hits); + + params.speculative.ngram_size_n = std::max(std::min(1, (int) params.speculative.ngram_size_n), 1024); + params.speculative.ngram_size_m = std::max(std::min(1, (int) params.speculative.ngram_size_m), 1024); + params.speculative.ngram_min_hits = std::max(std::min(1, (int) params.speculative.ngram_min_hits), 1024); + + // Use OpenAI API logprobs only if n_probs wasn't provided + if (data.contains("logprobs") && params.sampling.n_probs == defaults.sampling.n_probs){ + params.sampling.n_probs = json_value(data, "logprobs", defaults.sampling.n_probs); + } + + if (data.contains("lora")) { + if (data.at("lora").is_array()) { + params.lora = parse_lora_request(data.at("lora")); + } else { + throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields"); + } + } else { + params.lora = {}; + } + + // TODO: add more sanity checks for the input parameters + + if (params.sampling.penalty_last_n < -1) { + throw std::runtime_error("Error: repeat_last_n must be >= -1"); + } + + if (params.sampling.dry_penalty_last_n < -1) { + throw std::runtime_error("Error: dry_penalty_last_n must be >= -1"); + } + + if (params.sampling.penalty_last_n == -1) { + // note: should 
be the slot's context and not the full context, but it's ok + params.sampling.penalty_last_n = n_ctx_slot; + } + + if (params.sampling.dry_penalty_last_n == -1) { + params.sampling.dry_penalty_last_n = n_ctx_slot; + } + + if (params.sampling.dry_base < 1.0f) { + params.sampling.dry_base = defaults.sampling.dry_base; + } + + // sequence breakers for DRY + { + // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format + // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39 + + if (data.contains("dry_sequence_breakers")) { + params.sampling.dry_sequence_breakers = json_value(data, "dry_sequence_breakers", std::vector()); + if (params.sampling.dry_sequence_breakers.empty()) { + throw std::runtime_error("Error: dry_sequence_breakers must be a non-empty array of strings"); + } + } + } + + // process "json_schema" and "grammar" + if (data.contains("json_schema") && !data.contains("grammar")) { + try { + auto schema = json_value(data, "json_schema", json::object()); + SRV_DBG("JSON schema: %s\n", schema.dump(2).c_str()); + params.sampling.grammar = json_schema_to_grammar(schema); + SRV_DBG("Converted grammar: %s\n", params.sampling.grammar.c_str()); + } catch (const std::exception & e) { + throw std::runtime_error(std::string("\"json_schema\": ") + e.what()); + } + } else { + params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar); + SRV_DBG("Grammar: %s\n", params.sampling.grammar.c_str()); + params.sampling.grammar_lazy = json_value(data, "grammar_lazy", defaults.sampling.grammar_lazy); + SRV_DBG("Grammar lazy: %s\n", params.sampling.grammar_lazy ? "true" : "false"); + } + + { + auto it = data.find("chat_format"); + if (it != data.end()) { + params.chat_parser_params.format = static_cast(it->get()); + SRV_INF("Chat format: %s\n", common_chat_format_name(params.chat_parser_params.format)); + } else { + params.chat_parser_params.format = defaults.chat_parser_params.format; + } + common_reasoning_format reasoning_format = params_base.reasoning_format; + if (data.contains("reasoning_format")) { + reasoning_format = common_reasoning_format_from_name(data.at("reasoning_format").get()); + } + params.chat_parser_params.reasoning_format = reasoning_format; + params.chat_parser_params.reasoning_in_content = params.stream && (reasoning_format == COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY); + params.chat_parser_params.thinking_forced_open = json_value(data, "thinking_forced_open", false); + params.chat_parser_params.parse_tool_calls = json_value(data, "parse_tool_calls", false); + if (data.contains("chat_parser")) { + params.chat_parser_params.parser.load(data.at("chat_parser").get()); + } + } + + { + const auto preserved_tokens = data.find("preserved_tokens"); + if (preserved_tokens != data.end()) { + for (const auto & t : *preserved_tokens) { + auto ids = common_tokenize(vocab, t.get(), /* add_special= */ false, /* parse_special= */ true); + if (ids.size() == 1) { + SRV_DBG("Preserved token: %d\n", ids[0]); + params.sampling.preserved_tokens.insert(ids[0]); + } else { + // This may happen when using a tool call style meant for a model with special tokens to preserve on a model without said tokens. 
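+ // (e.g. preserving a marker such as "<|tool_call|>" on a model whose tokenizer splits it into several pieces)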
+ SRV_DBG("Not preserved because more than 1 token: %s\n", t.get().c_str()); + } + } + } + const auto grammar_triggers = data.find("grammar_triggers"); + if (grammar_triggers != data.end()) { + for (const auto & t : *grammar_triggers) { + server_grammar_trigger ct(t); + if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_WORD) { + const auto & word = ct.value.value; + auto ids = common_tokenize(vocab, word, /* add_special= */ false, /* parse_special= */ true); + if (ids.size() == 1) { + auto token = ids[0]; + if (std::find(params.sampling.preserved_tokens.begin(), params.sampling.preserved_tokens.end(), (llama_token) token) == params.sampling.preserved_tokens.end()) { + throw std::runtime_error("Grammar trigger word should be marked as preserved token: " + word); + } + SRV_DBG("Grammar trigger token: %d (`%s`)\n", token, word.c_str()); + common_grammar_trigger trigger; + trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN; + trigger.value = word; + trigger.token = token; + params.sampling.grammar_triggers.push_back(std::move(trigger)); + } else { + SRV_DBG("Grammar trigger word: `%s`\n", word.c_str()); + params.sampling.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, word}); + } + } else { + if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN) { + SRV_DBG("Grammar trigger pattern: `%s`\n", ct.value.value.c_str()); + } else if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL) { + SRV_DBG("Grammar trigger pattern full: `%s`\n", ct.value.value.c_str()); + } else { + throw std::runtime_error("Unknown grammar trigger type"); + } + params.sampling.grammar_triggers.emplace_back(std::move(ct.value)); + } + } + } + if (params.sampling.grammar_lazy && params.sampling.grammar_triggers.empty()) { + throw std::runtime_error("Error: no triggers set for lazy grammar!"); + } + } + + { + params.sampling.logit_bias.clear(); + + const auto & logit_bias = data.find("logit_bias"); + if (logit_bias != data.end() && logit_bias->is_array()) { + const int n_vocab = llama_vocab_n_tokens(vocab); + for (const auto & el : *logit_bias) { + // TODO: we may want to throw errors here, in case "el" is incorrect + if (el.is_array() && el.size() == 2) { + float bias; + if (el[1].is_number()) { + bias = el[1].get(); + } else if (el[1].is_boolean() && !el[1].get()) { + bias = -INFINITY; + } else { + continue; + } + + if (el[0].is_number_integer()) { + llama_token tok = el[0].get(); + if (tok >= 0 && tok < n_vocab) { + params.sampling.logit_bias.push_back({tok, bias}); + } + } else if (el[0].is_string()) { + auto toks = common_tokenize(vocab, el[0].get(), false); + for (auto tok : toks) { + params.sampling.logit_bias.push_back({tok, bias}); + } + } + } + } + } else if (logit_bias != data.end() && logit_bias->is_object()) { + const int n_vocab = llama_vocab_n_tokens(vocab); + for (const auto & el : logit_bias->items()) { + float bias; + const auto & key = el.key(); + const auto & value = el.value(); + if (value.is_number()) { + bias = value.get(); + } else if (value.is_boolean() && !value.get()) { + bias = -INFINITY; + } else { + continue; + } + + char *end; + llama_token tok = strtol(key.c_str(), &end, 10); + if (*end == 0) { + if (tok >= 0 && tok < n_vocab) { + params.sampling.logit_bias.push_back({tok, bias}); + } + } else { + auto toks = common_tokenize(vocab, key, false); + for (auto tok : toks) { + params.sampling.logit_bias.push_back({tok, bias}); + } + } + } + } + + params.sampling.ignore_eos = json_value(data, "ignore_eos", params_base.sampling.ignore_eos); + if (params.sampling.ignore_eos) { + 
params.sampling.logit_bias.insert( + params.sampling.logit_bias.end(), + defaults.sampling.logit_bias_eog.begin(), defaults.sampling.logit_bias_eog.end()); + } + } + + { + params.antiprompt.clear(); + + const auto & stop = data.find("stop"); + if (stop != data.end() && stop->is_array()) { + for (const auto & word : *stop) { + if (!word.empty()) { + params.antiprompt.push_back(word); + } + } + } + // set reverse prompt from cli args if not set in the request + if (params.antiprompt.empty()) { + params.antiprompt = defaults.antiprompt; + } + } + + { + const auto samplers = data.find("samplers"); + if (samplers != data.end()) { + if (samplers->is_array()) { + params.sampling.samplers = common_sampler_types_from_names(*samplers, false); + } else if (samplers->is_string()){ + params.sampling.samplers = common_sampler_types_from_chars(samplers->get()); + } + } else { + params.sampling.samplers = defaults.sampling.samplers; + } + } + + if (params.n_cmpl > params_base.n_parallel) { + throw std::runtime_error("n_cmpl cannot be greater than the number of slots, please increase -np"); + } + + return params; +} + +// +// result_timings +// + +json result_timings::to_json() const { + json base = { + {"cache_n", cache_n}, + + {"prompt_n", prompt_n}, + {"prompt_ms", prompt_ms}, + {"prompt_per_token_ms", prompt_per_token_ms}, + {"prompt_per_second", prompt_per_second}, + + {"predicted_n", predicted_n}, + {"predicted_ms", predicted_ms}, + {"predicted_per_token_ms", predicted_per_token_ms}, + {"predicted_per_second", predicted_per_second}, + }; + + if (draft_n > 0) { + base["draft_n"] = draft_n; + base["draft_n_accepted"] = draft_n_accepted; + } + + return base; +} + +// +// result_prompt_progress +// +json result_prompt_progress::to_json() const { + return json { + {"total", total}, + {"cache", cache}, + {"processed", processed}, + {"time_ms", time_ms}, + }; +} + +static inline std::string stop_type_to_str(stop_type type) { + switch (type) { + case STOP_TYPE_EOS: return "eos"; + case STOP_TYPE_WORD: return "word"; + case STOP_TYPE_LIMIT: return "limit"; + default: return "none"; + } +} + +// +// completion_token_output +// + +json completion_token_output::to_json(bool post_sampling_probs) const { + json probs_for_token = json::array(); + for (const auto & p : probs) { + std::string txt(p.txt); + txt.resize(validate_utf8(txt)); + probs_for_token.push_back(json { + {"id", p.tok}, + {"token", txt}, + {"bytes", str_to_bytes(p.txt)}, + { + post_sampling_probs ? "prob" : "logprob", + post_sampling_probs ? p.prob : logarithm(p.prob) + }, + }); + } + return probs_for_token; +} + +json completion_token_output::probs_vector_to_json(const std::vector & probs, bool post_sampling_probs) { + json out = json::array(); + for (const auto & p : probs) { + std::string txt(p.text_to_send); + txt.resize(validate_utf8(txt)); + out.push_back(json { + {"id", p.tok}, + {"token", txt}, + {"bytes", str_to_bytes(p.text_to_send)}, + { + post_sampling_probs ? "prob" : "logprob", + post_sampling_probs ? p.prob : logarithm(p.prob) + }, + { + post_sampling_probs ? "top_probs" : "top_logprobs", + p.to_json(post_sampling_probs) + }, + }); + } + return out; +} + +float completion_token_output::logarithm(float x) { + // nlohmann::json converts -inf to null, so we need to prevent that + return x == 0.0f ? 
std::numeric_limits::lowest() : std::log(x); +} + +std::vector completion_token_output::str_to_bytes(const std::string & str) { + std::vector bytes; + for (unsigned char c : str) { + bytes.push_back(c); + } + return bytes; +} + +// +// server_task_result_cmpl_final +// +json server_task_result_cmpl_final::to_json() { + GGML_ASSERT(is_updated && "update() must be called before to_json()"); + switch (res_type) { + case TASK_RESPONSE_TYPE_NONE: + return to_json_non_oaicompat(); + case TASK_RESPONSE_TYPE_OAI_CMPL: + return to_json_oaicompat(); + case TASK_RESPONSE_TYPE_OAI_CHAT: + return stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat(); + case TASK_RESPONSE_TYPE_OAI_RESP: + return stream ? to_json_oaicompat_resp_stream() : to_json_oaicompat_resp(); + case TASK_RESPONSE_TYPE_ANTHROPIC: + return stream ? to_json_anthropic_stream() : to_json_anthropic(); + default: + GGML_ASSERT(false && "Invalid task_response_type"); + } +} + +json server_task_result_cmpl_final::to_json_non_oaicompat() { + json res = json { + {"index", index}, + {"content", content}, + {"tokens", tokens}, + {"id_slot", id_slot}, + {"stop", true}, + {"model", oaicompat_model}, + {"tokens_predicted", n_decoded}, + {"tokens_evaluated", n_prompt_tokens}, + {"generation_settings", generation_params.to_json()}, + {"prompt", prompt}, + {"has_new_line", has_new_line}, + {"truncated", truncated}, + {"stop_type", stop_type_to_str(stop)}, + {"stopping_word", stopping_word}, + {"tokens_cached", n_tokens_cached}, + {"timings", timings.to_json()}, + }; + if (!stream && !probs_output.empty()) { + res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs); + } + return response_fields.empty() ? res : json_get_nested_values(response_fields, res); +} + +json server_task_result_cmpl_final::to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (!stream && probs_output.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)}, + }; + } + json finish_reason = "length"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + finish_reason = "stop"; + } + json res = json { + {"choices", json::array({ + json{ + {"text", content}, + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", finish_reason}, + } + })}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"usage", json { + {"completion_tokens", n_decoded}, + {"prompt_tokens", n_prompt_tokens}, + {"total_tokens", n_decoded + n_prompt_tokens} + }}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; +} + +json server_task_result_cmpl_final::to_json_oaicompat_chat() { + std::string finish_reason = "length"; + common_chat_msg msg; + if (!oaicompat_msg.empty()) { + msg = oaicompat_msg; + } else { + msg.role = "assistant"; + msg.content = content; + } + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + finish_reason = msg.tool_calls.empty() ? 
"stop" : "tool_calls"; + } + + json choice { + {"finish_reason", finish_reason}, + {"index", index}, + {"message", msg.to_json_oaicompat()}, + }; + + if (!stream && probs_output.size() > 0) { + choice["logprobs"] = json{ + {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)}, + }; + } + + std::time_t t = std::time(0); + + json res = json { + {"choices", json::array({choice})}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion"}, + {"usage", json { + {"completion_tokens", n_decoded}, + {"prompt_tokens", n_prompt_tokens}, + {"total_tokens", n_decoded + n_prompt_tokens} + }}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; +} + +json server_task_result_cmpl_final::to_json_oaicompat_chat_stream() { + std::time_t t = std::time(0); + std::string finish_reason = "length"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + finish_reason = oaicompat_msg.tool_calls.empty() ? "stop" : "tool_calls"; + } + + json deltas = json::array(); + for (const auto & diff : oaicompat_msg_diffs) { + deltas.push_back({ + {"choices", json::array({ + json { + {"finish_reason", nullptr}, + {"index", 0}, + {"delta", common_chat_msg_diff_to_json_oaicompat(diff)}, + }, + })}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"}, + }); + } + + deltas.push_back({ + {"choices", json::array({ + json { + {"finish_reason", finish_reason}, + {"index", 0}, + {"delta", json::object()}, + }, + })}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"}, + }); + + if (include_usage) { + // OpenAI API spec for chat.completion.chunks specifies an empty `choices` array for the last chunk when including usage + // https://platform.openai.com/docs/api-reference/chat_streaming/streaming#chat_streaming/streaming-choices + deltas.push_back({ + {"choices", json::array()}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"}, + {"usage", json { + {"completion_tokens", n_decoded}, + {"prompt_tokens", n_prompt_tokens}, + {"total_tokens", n_decoded + n_prompt_tokens}, + }}, + }); + } + + if (timings.prompt_n >= 0) { + deltas.back().push_back({"timings", timings.to_json()}); + } + + // extra fields for debugging purposes + if (verbose && !deltas.empty()) { + deltas.front()["__verbose"] = to_json_non_oaicompat(); + } + + return deltas; +} + +json server_task_result_cmpl_final::to_json_oaicompat_resp() { + common_chat_msg msg; + if (!oaicompat_msg.empty()) { + msg = oaicompat_msg; + } else { + msg.role = "assistant"; + msg.content = content; + } + + std::vector output; + + if (msg.reasoning_content != "") { + output.push_back(json { + {"id", "rs_" + random_string()}, + {"summary", json::array()}, + {"type", "reasoning"}, + {"content", json::array({ json { + {"text", msg.reasoning_content}, + {"type", "reasoning_text"}, + }})}, + {"encrypted_content", ""}, + {"status", "completed"}, + }); + } + + if (msg.content != "") { + output.push_back(json { + {"content", json::array({ json { + {"type", "output_text"}, + {"annotations", json::array()}, + 
{"logprobs", json::array()}, + {"text", msg.content}, + }})}, + {"id", "msg_" + random_string()}, + {"role", msg.role}, + {"status", "completed"}, + {"type", "message"}, + }); + } + + for (const common_chat_tool_call & tool_call : oaicompat_msg.tool_calls) { + output.push_back(json { + {"type", "function_call"}, + {"status", "completed"}, + {"arguments", tool_call.arguments}, + {"call_id", "fc_" + tool_call.id}, + {"name", tool_call.name}, + }); + } + + std::time_t t = std::time(0); + json res = { + {"completed_at", t}, + {"created_at", t}, + {"id", oai_resp_id}, + {"model", oaicompat_model}, + {"object", "response"}, + {"output", output}, + {"status", "completed"}, + {"usage", json { + {"input_tokens", n_prompt_tokens}, + {"output_tokens", n_decoded}, + {"total_tokens", n_decoded + n_prompt_tokens}, + }}, + }; + + return res; +} + +json server_task_result_cmpl_final::to_json_oaicompat_resp_stream() { + std::vector server_sent_events; + std::vector output; + + if (oaicompat_msg.reasoning_content != "") { + const json output_item = json { + {"id", oai_resp_reasoning_id}, + {"summary", json::array()}, + {"type", "reasoning"}, + {"content", json::array({ json { + {"text", oaicompat_msg.reasoning_content}, + {"type", "reasoning_text"}, + }})}, + {"encrypted_content", ""}, + }; + + server_sent_events.push_back(json { + {"event", "response.output_item.done"}, + {"data", json { + {"type", "response.output_item.done"}, + {"item", output_item} + }} + }); + output.push_back(output_item); + } + + if (oaicompat_msg.content != "") { + server_sent_events.push_back(json { + {"event", "response.output_text.done"}, + {"data", json { + {"type", "response.output_text.done"}, + {"item_id", oai_resp_message_id}, + {"text", oaicompat_msg.content} + }} + }); + + const json content_part = { + {"type", "output_text"}, + {"annotations", json::array()}, + {"logprobs", json::array()}, + {"text", oaicompat_msg.content} + }; + + server_sent_events.push_back(json { + {"event", "response.content_part.done"}, + {"data", json { + {"type", "response.content_part.done"}, + {"item_id", oai_resp_message_id}, + {"part", content_part} + }} + }); + const json output_item = { + {"type", "message"}, + {"status", "completed"}, + {"id", oai_resp_message_id}, + {"content", json::array({content_part})}, + {"role", "assistant"} + }; + + server_sent_events.push_back(json { + {"event", "response.output_item.done"}, + {"data", json { + {"type", "response.output_item.done"}, + {"item", output_item} + }} + }); + output.push_back(output_item); + } + + for (const common_chat_tool_call & tool_call : oaicompat_msg.tool_calls) { + const json output_item = { + {"type", "function_call"}, + {"status", "completed"}, + {"arguments", tool_call.arguments}, + {"call_id", "fc_" + tool_call.id}, + {"name", tool_call.name} + }; + server_sent_events.push_back(json { + {"event", "response.output_item.done"}, + {"data", json { + {"type", "response.output_item.done"}, + {"item", output_item} + }} + }); + output.push_back(output_item); + } + + std::time_t t = std::time(0); + server_sent_events.push_back(json { + {"event", "response.completed"}, + {"data", json { + {"type", "response.completed"}, + {"response", json { + {"id", oai_resp_id}, + {"object", "response"}, + {"created_at", t}, + {"status", "completed"}, + {"model", oaicompat_model}, + {"output", output}, + {"usage", json { + {"input_tokens", n_prompt_tokens}, + {"output_tokens", n_decoded}, + {"total_tokens", n_decoded + n_prompt_tokens} + }} + }}, + }} + }); + + return server_sent_events; +} + +json 
server_task_result_cmpl_final::to_json_anthropic() { + std::string stop_reason = "max_tokens"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + stop_reason = oaicompat_msg.tool_calls.empty() ? "end_turn" : "tool_use"; + } + + json content_blocks = json::array(); + + common_chat_msg msg; + if (!oaicompat_msg.empty()) { + msg = oaicompat_msg; + } else { + msg.role = "assistant"; + msg.content = content; + } + + // thinking block comes first (Anthropic extended thinking format) + if (!msg.reasoning_content.empty()) { + content_blocks.push_back({ + {"type", "thinking"}, + {"thinking", msg.reasoning_content}, + {"signature", ""} // empty signature for local models (no cryptographic verification) + }); + } + + if (!msg.content.empty()) { + content_blocks.push_back({ + {"type", "text"}, + {"text", msg.content} + }); + } + + for (const auto & tool_call : msg.tool_calls) { + json tool_use_block = { + {"type", "tool_use"}, + {"id", tool_call.id}, + {"name", tool_call.name} + }; + + try { + tool_use_block["input"] = json::parse(tool_call.arguments); + } catch (const std::exception &) { + tool_use_block["input"] = json::object(); + } + + content_blocks.push_back(tool_use_block); + } + + json res = { + {"id", oaicompat_cmpl_id}, + {"type", "message"}, + {"role", "assistant"}, + {"content", content_blocks}, + {"model", oaicompat_model}, + {"stop_reason", stop_reason}, + {"stop_sequence", stopping_word.empty() ? nullptr : json(stopping_word)}, + {"usage", { + {"input_tokens", n_prompt_tokens}, + {"output_tokens", n_decoded} + }} + }; + + return res; +} + +json server_task_result_cmpl_final::to_json_anthropic_stream() { + json events = json::array(); + + std::string stop_reason = "max_tokens"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + stop_reason = oaicompat_msg.tool_calls.empty() ? "end_turn" : "tool_use"; + } + + bool has_thinking = !oaicompat_msg.reasoning_content.empty(); + bool has_text = !oaicompat_msg.content.empty(); + size_t num_tool_calls = oaicompat_msg.tool_calls.size(); + + // content block indices: thinking (0) -> text (0 or 1) -> tool_use (n+) + size_t thinking_block_index = 0; + size_t text_block_index = has_thinking ? 
1 : 0; + + bool thinking_block_started = false; + bool text_block_started = false; + std::unordered_set tool_calls_started; + + for (const auto & diff : oaicompat_msg_diffs) { + // handle thinking/reasoning content + if (!diff.reasoning_content_delta.empty()) { + if (!thinking_block_started) { + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", thinking_block_index}, + {"content_block", { + {"type", "thinking"}, + {"thinking", ""} + }} + }} + }); + thinking_block_started = true; + } + + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", thinking_block_index}, + {"delta", { + {"type", "thinking_delta"}, + {"thinking", diff.reasoning_content_delta} + }} + }} + }); + } + + // handle regular text content + if (!diff.content_delta.empty()) { + if (!text_block_started) { + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", text_block_index}, + {"content_block", { + {"type", "text"}, + {"text", ""} + }} + }} + }); + text_block_started = true; + } + + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", text_block_index}, + {"delta", { + {"type", "text_delta"}, + {"text", diff.content_delta} + }} + }} + }); + } + + // handle tool calls + if (diff.tool_call_index != std::string::npos) { + size_t content_block_index = (has_thinking ? 1 : 0) + (has_text ? 1 : 0) + diff.tool_call_index; + + if (tool_calls_started.find(diff.tool_call_index) == tool_calls_started.end()) { + const auto & full_tool_call = oaicompat_msg.tool_calls[diff.tool_call_index]; + + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", content_block_index}, + {"content_block", { + {"type", "tool_use"}, + {"id", full_tool_call.id}, + {"name", full_tool_call.name} + }} + }} + }); + tool_calls_started.insert(diff.tool_call_index); + } + + if (!diff.tool_call_delta.arguments.empty()) { + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", content_block_index}, + {"delta", { + {"type", "input_json_delta"}, + {"partial_json", diff.tool_call_delta.arguments} + }} + }} + }); + } + } + } + + // close content blocks in order + if (has_thinking) { + // Anthropic API requires a signature_delta before closing thinking blocks + // We use an empty signature since we can't generate a cryptographic signature for local models + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", thinking_block_index}, + {"delta", { + {"type", "signature_delta"}, + {"signature", ""} + }} + }} + }); + events.push_back({ + {"event", "content_block_stop"}, + {"data", { + {"type", "content_block_stop"}, + {"index", thinking_block_index} + }} + }); + } + + if (has_text) { + events.push_back({ + {"event", "content_block_stop"}, + {"data", { + {"type", "content_block_stop"}, + {"index", text_block_index} + }} + }); + } + + for (size_t i = 0; i < num_tool_calls; i++) { + size_t content_block_index = (has_thinking ? 1 : 0) + (has_text ? 
1 : 0) + i; + events.push_back({ + {"event", "content_block_stop"}, + {"data", { + {"type", "content_block_stop"}, + {"index", content_block_index} + }} + }); + } + + events.push_back({ + {"event", "message_delta"}, + {"data", { + {"type", "message_delta"}, + {"delta", { + {"stop_reason", stop_reason}, + {"stop_sequence", stopping_word.empty() ? nullptr : json(stopping_word)} + }}, + {"usage", { + {"output_tokens", n_decoded} + }} + }} + }); + + events.push_back({ + {"event", "message_stop"}, + {"data", { + {"type", "message_stop"} + }} + }); + + return events; +} + +// +// server_task_result_cmpl_partial +// +void server_task_result_cmpl_partial::update(task_result_state & state) { + is_updated = true; + state.update_chat_msg(content, true, oaicompat_msg_diffs); + + // Copy current state for use in to_json_*() (reflects state BEFORE this chunk) + thinking_block_started = state.thinking_block_started; + text_block_started = state.text_block_started; + + oai_resp_id = state.oai_resp_id; + oai_resp_reasoning_id = state.oai_resp_reasoning_id; + oai_resp_message_id = state.oai_resp_message_id; + oai_resp_fc_id = state.oai_resp_fc_id; + + // track if the accumulated message has any reasoning content + anthropic_has_reasoning = !state.chat_msg.reasoning_content.empty(); + + // Pre-compute state updates based on diffs (for next chunk) + for (const common_chat_msg_diff & diff : oaicompat_msg_diffs) { + if (!diff.reasoning_content_delta.empty() && !state.thinking_block_started) { + state.thinking_block_started = true; + } + if (!diff.content_delta.empty() && !state.text_block_started) { + state.text_block_started = true; + } + if (!diff.tool_call_delta.name.empty()) { + state.oai_resp_fc_id = diff.tool_call_delta.id; + } + } +} + +json server_task_result_cmpl_partial::to_json() { + GGML_ASSERT(is_updated && "update() must be called before to_json()"); + switch (res_type) { + case TASK_RESPONSE_TYPE_NONE: + return to_json_non_oaicompat(); + case TASK_RESPONSE_TYPE_OAI_CMPL: + return to_json_oaicompat(); + case TASK_RESPONSE_TYPE_OAI_CHAT: + return to_json_oaicompat_chat(); + case TASK_RESPONSE_TYPE_OAI_RESP: + return to_json_oaicompat_resp(); + case TASK_RESPONSE_TYPE_ANTHROPIC: + return to_json_anthropic(); + default: + GGML_ASSERT(false && "Invalid task_response_type"); + } +} + +json server_task_result_cmpl_partial::to_json_non_oaicompat() { + // non-OAI-compat JSON + json res = json { + {"index", index}, + {"content", content}, + {"tokens", tokens}, + {"stop", false}, + {"id_slot", id_slot}, + {"tokens_predicted", n_decoded}, + {"tokens_evaluated", n_prompt_tokens}, + }; + // populate the timings object when needed (usually for the last response or with timings_per_token enabled) + if (timings.prompt_n > 0) { + res.push_back({"timings", timings.to_json()}); + } + if (is_progress) { + res.push_back({"prompt_progress", progress.to_json()}); + } + if (!prob_output.probs.empty()) { + res["completion_probabilities"] = completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs); + } + return res; +} + +json server_task_result_cmpl_partial::to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (prob_output.probs.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)}, + }; + } + json res = json { + {"choices", json::array({ + json{ + {"text", content}, + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", nullptr}, + } + })}, + 
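+ // each streamed text_completion chunk carries a single choice; finish_reason stays null until the final chunk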
{"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + if (is_progress) { + res.push_back({"prompt_progress", progress.to_json()}); + } + + return res; +} + +json server_task_result_cmpl_partial::to_json_oaicompat_chat() { + bool first = n_decoded == 1; + std::time_t t = std::time(0); + json choices; + + std::vector deltas; + auto add_delta = [&](const json & delta) { + deltas.push_back({ + {"choices", json::array({ + json { + {"finish_reason", nullptr}, + {"index", index}, + {"delta", delta}, + }, + })}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"}, + }); + }; + // We have to send an initial update to conform to openai behavior + if (first || is_progress) { + add_delta({ + {"role", "assistant"}, + {"content", nullptr}, + }); + } + + for (const auto & diff : oaicompat_msg_diffs) { + add_delta(common_chat_msg_diff_to_json_oaicompat(diff)); + } + + if (!deltas.empty()) { + auto & last_json = deltas[deltas.size() - 1]; + GGML_ASSERT(last_json.at("choices").size() >= 1); + + if (prob_output.probs.size() > 0) { + last_json.at("choices").at(0)["logprobs"] = json { + {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)}, + }; + } + + if (timings.prompt_n >= 0) { + last_json.push_back({"timings", timings.to_json()}); + } + if (is_progress) { + last_json.push_back({"prompt_progress", progress.to_json()}); + } + } + + return deltas; +} + +json server_task_result_cmpl_partial::to_json_oaicompat_resp() { + std::vector events; + + if (n_decoded == 1) { + events.push_back(json { + {"event", "response.created"}, + {"data", json { + {"type", "response.created"}, + {"response", json { + {"id", oai_resp_id}, + {"object", "response"}, + {"status", "in_progress"}, + }}, + }}, + }); + events.push_back(json { + {"event", "response.in_progress"}, + {"data", json { + {"type", "response.in_progress"}, + {"response", json { + {"id", oai_resp_id}, + {"object", "response"}, + {"status", "in_progress"}, + }}, + }}, + }); + } + + for (const common_chat_msg_diff & diff : oaicompat_msg_diffs) { + if (!diff.reasoning_content_delta.empty()) { + if (!thinking_block_started) { + events.push_back(json { + {"event", "response.output_item.added"}, + {"data", json { + {"type", "response.output_item.added"}, + {"item", json { + {"id", oai_resp_reasoning_id}, + {"summary", json::array()}, + {"type", "reasoning"}, + {"content", json::array()}, + {"encrypted_content", ""}, + {"status", "in_progress"}, + }}, + }}, + }); + thinking_block_started = true; + } + events.push_back(json { + {"event", "response.reasoning_text.delta"}, + {"data", json { + {"type", "response.reasoning_text.delta"}, + {"delta", diff.reasoning_content_delta}, + {"item_id", oai_resp_reasoning_id}, + }}, + }); + } + + if (!diff.content_delta.empty()) { + if (!text_block_started) { + events.push_back(json { + {"event", "response.output_item.added"}, + {"data", json { + {"type", "response.output_item.added"}, + {"item", json { + {"content", json::array()}, + {"id", oai_resp_message_id}, + {"role", "assistant"}, + {"status", "in_progress"}, + {"type", "message"}, + }}, + }}, + }); + events.push_back(json { + {"event", 
"response.content_part.added"}, + {"data", json { + {"type", "response.content_part.added"}, + {"item_id", oai_resp_message_id}, + {"part", json { + {"type", "output_text"}, + {"text", ""}, + }}, + }}, + }); + text_block_started = true; + } + events.push_back(json { + {"event", "response.output_text.delta"}, + {"data", json { + {"type", "response.output_text.delta"}, + {"item_id", oai_resp_message_id}, + {"delta", diff.content_delta}, + }}, + }); + } + + if (!diff.tool_call_delta.name.empty()) { + events.push_back(json { + {"event", "response.output_item.added"}, + {"data", json { + {"type", "response.output_item.added"}, + {"item", json { + {"arguments", ""}, + {"call_id", "fc_" + diff.tool_call_delta.id}, + {"name", diff.tool_call_delta.name}, + {"type", "function_call"}, + {"status", "in_progress"}, + }}, + }}, + }); + oai_resp_fc_id = diff.tool_call_delta.id; + } + + if (!diff.tool_call_delta.arguments.empty()) { + events.push_back(json { + {"event", "response.function_call_arguments.delta"}, + {"data", json { + {"type", "response.function_call_arguments.delta"}, + {"delta", diff.tool_call_delta.arguments}, + {"item_id", "fc_" + oai_resp_fc_id}, + }}, + }); + } + } + return events; +} + +json server_task_result_cmpl_partial::to_json_anthropic() { + json events = json::array(); + bool first = (n_decoded == 1); + // use member variables to track block state across streaming calls + // (anthropic_thinking_block_started, anthropic_text_block_started) + + if (first) { + events.push_back({ + {"event", "message_start"}, + {"data", { + {"type", "message_start"}, + {"message", { + {"id", oaicompat_cmpl_id}, + {"type", "message"}, + {"role", "assistant"}, + {"content", json::array()}, + {"model", oaicompat_model}, + {"stop_reason", nullptr}, + {"stop_sequence", nullptr}, + {"usage", { + {"input_tokens", n_prompt_tokens}, + {"output_tokens", 0} + }} + }} + }} + }); + } + + // content block indices: thinking (0) -> text (0 or 1) -> tool_use (n+) + size_t thinking_block_index = 0; + // use anthropic_has_reasoning (set in update()) to know if ANY reasoning was generated + size_t text_block_index = anthropic_has_reasoning ? 
1 : 0; + + // use local copies of streaming state (copied from task_result_state in update()) + // these reflect the state BEFORE this chunk was processed + bool thinking_started = thinking_block_started; + bool text_started = text_block_started; + + for (const auto & diff : oaicompat_msg_diffs) { + // handle thinking/reasoning content + if (!diff.reasoning_content_delta.empty()) { + if (!thinking_started) { + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", thinking_block_index}, + {"content_block", { + {"type", "thinking"}, + {"thinking", ""} + }} + }} + }); + thinking_started = true; + } + + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", thinking_block_index}, + {"delta", { + {"type", "thinking_delta"}, + {"thinking", diff.reasoning_content_delta} + }} + }} + }); + } + + // handle regular text content + if (!diff.content_delta.empty()) { + if (!text_started) { + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", text_block_index}, + {"content_block", { + {"type", "text"}, + {"text", ""} + }} + }} + }); + text_started = true; + } + + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", text_block_index}, + {"delta", { + {"type", "text_delta"}, + {"text", diff.content_delta} + }} + }} + }); + } + + // handle tool calls + if (diff.tool_call_index != std::string::npos) { + // use anthropic_has_reasoning for thinking block count (persists across calls) + size_t content_block_index = (anthropic_has_reasoning ? 1 : 0) + (text_started ? 1 : 0) + diff.tool_call_index; + + if (!diff.tool_call_delta.name.empty()) { + events.push_back({ + {"event", "content_block_start"}, + {"data", { + {"type", "content_block_start"}, + {"index", content_block_index}, + {"content_block", { + {"type", "tool_use"}, + {"id", diff.tool_call_delta.id}, + {"name", diff.tool_call_delta.name} + }} + }} + }); + } + + if (!diff.tool_call_delta.arguments.empty()) { + events.push_back({ + {"event", "content_block_delta"}, + {"data", { + {"type", "content_block_delta"}, + {"index", content_block_index}, + {"delta", { + {"type", "input_json_delta"}, + {"partial_json", diff.tool_call_delta.arguments} + }} + }} + }); + } + } + } + + return events; +} + +// +// server_task_result_embd +// +json server_task_result_embd::to_json() { + return res_type == TASK_RESPONSE_TYPE_OAI_EMBD + ? 
to_json_oaicompat() + : to_json_non_oaicompat(); +} + +json server_task_result_embd::to_json_non_oaicompat() { + return json { + {"index", index}, + {"embedding", embedding}, + }; +} + +json server_task_result_embd::to_json_oaicompat() { + return json { + {"index", index}, + {"embedding", embedding[0]}, + {"tokens_evaluated", n_tokens}, + }; +} + +// +// server_task_result_rerank +// +json server_task_result_rerank::to_json() { + return json { + {"index", index}, + {"score", score}, + {"tokens_evaluated", n_tokens}, + }; +} + +// +// server_task_result_error +// +json server_task_result_error::to_json() { + json res = format_error_response(err_msg, err_type); + if (err_type == ERROR_TYPE_EXCEED_CONTEXT_SIZE) { + res["n_prompt_tokens"] = n_prompt_tokens; + res["n_ctx"] = n_ctx; + } + return res; +} + +// +// server_task_result_metrics +// +json server_task_result_metrics::to_json() { + return json { + { "idle", n_idle_slots }, + { "processing", n_processing_slots }, + { "deferred", n_tasks_deferred }, + { "t_start", t_start }, + + { "n_prompt_tokens_processed_total", n_prompt_tokens_processed_total }, + { "t_tokens_generation_total", t_tokens_generation_total }, + { "n_tokens_predicted_total", n_tokens_predicted_total }, + { "t_prompt_processing_total", t_prompt_processing_total }, + + { "n_tokens_max", n_tokens_max }, + + { "n_prompt_tokens_processed", n_prompt_tokens_processed }, + { "t_prompt_processing", t_prompt_processing }, + { "n_tokens_predicted", n_tokens_predicted }, + { "t_tokens_generation", t_tokens_generation }, + + { "n_decode_total", n_decode_total }, + { "n_busy_slots_total", n_busy_slots_total }, + + { "slots", slots_data }, + }; +} + +// +// server_task_result_slot_save_load +// +json server_task_result_slot_save_load::to_json() { + if (is_save) { + return json { + { "id_slot", id_slot }, + { "filename", filename }, + { "n_saved", n_tokens }, + { "n_written", n_bytes }, + { "timings", { + { "save_ms", t_ms } + }}, + }; + } + + return json { + { "id_slot", id_slot }, + { "filename", filename }, + { "n_restored", n_tokens }, + { "n_read", n_bytes }, + { "timings", { + { "restore_ms", t_ms } + }}, + }; +} + +// +// server_task_result_slot_erase +// +json server_task_result_slot_erase::to_json() { + return json { + { "id_slot", id_slot }, + { "n_erased", n_erased }, + }; +} + +// +// server_task_result_get_lora +// + +json server_task_result_get_lora::to_json() { + json result = json::array(); + for (size_t i = 0; i < loras.size(); ++i) { + auto & lora = loras[i]; + json entry = { + {"id", i}, + {"path", lora.info.path}, + {"scale", lora.info.scale}, + {"task_name", lora.info.task_name}, + {"prompt_prefix", lora.info.prompt_prefix}, + }; + if (!lora.alora_invocation_tokens.empty()) { + entry["alora_invocation_string"] = lora.alora_invocation_string; + entry["alora_invocation_tokens"] = lora.alora_invocation_tokens; + } + result.push_back(std::move(entry)); + } + return result; +} + +// +// server_task_result_apply_lora +// + +json server_task_result_apply_lora::to_json() { + return json {{ "success", true }}; +} + +// +// server_prompt_cache +// +size_t server_prompt_cache::size() const { + size_t res = 0; + + for (const auto & state : states) { + res += state.size(); + } + + return res; +} + +size_t server_prompt_cache::n_tokens() const { + size_t res = 0; + + for (const auto & state : states) { + res += state.n_tokens(); + } + + return res; +} + +server_prompt * server_prompt_cache::alloc(const server_prompt & prompt, size_t state_size) { + // first check if the current 
state is contained fully in the cache + for (auto it = states.begin(); it != states.end(); ++it) { + const int cur_lcp_len = it->tokens.get_common_prefix(prompt.tokens); + + if (cur_lcp_len == (int) prompt.tokens.size()) { + SRV_WRN("%s", " - prompt is already in the cache, skipping\n"); + return nullptr; + } + } + + // next, remove any cached prompts that are fully contained in the current prompt + for (auto it = states.begin(); it != states.end();) { + const int len = it->tokens.get_common_prefix(prompt.tokens); + + if (len == (int) it->tokens.size()) { + SRV_WRN(" - removing obsolete cached prompt with length %d\n", len); + + it = states.erase(it); + } else { + ++it; + } + } + + std::vector state_data; + + // check if we can allocate enough memory for the new state + try { + state_data.resize(state_size); + } catch (const std::bad_alloc & e) { + SRV_ERR("failed to allocate memory for prompt cache state: %s\n", e.what()); + + limit_size = std::max(1, 0.4*size()); + + SRV_WRN(" - cache size limit reduced to %.3f MiB\n", limit_size / (1024.0 * 1024.0)); + + update(); + + return nullptr; + } + + // TODO: for some reason we can't copy server_tokens, so we have to do this workaround + auto & cur = states.emplace_back(); + cur = { + /*.tokens =*/ server_tokens(prompt.tokens.get_text_tokens(), false), + /*.data =*/ std::move(state_data), + /*.checkpoints =*/ prompt.checkpoints, + }; + + return &cur; +} + +bool server_prompt_cache::load(server_prompt & prompt, const server_tokens & tokens_new, llama_context * ctx, int32_t id_slot) { + const int lcp_best = prompt.tokens.get_common_prefix(tokens_new); + + float f_keep_best = float(lcp_best) / prompt.tokens.size(); + float sim_best = float(lcp_best) / tokens_new.size(); + + SRV_WRN(" - looking for better prompt, base f_keep = %.3f, sim = %.3f\n", f_keep_best, sim_best); + + auto it_best = states.end(); + + // find the most similar cached prompt, that would also preserve the most context + for (auto it = states.begin(); it != states.end(); ++it) { + const int lcp_cur = it->tokens.get_common_prefix(tokens_new); + + const float f_keep_cur = float(lcp_cur) / it->tokens.size(); + const float sim_cur = float(lcp_cur) / tokens_new.size(); + + // don't trash large prompts + if (f_keep_cur < 0.25f) { + continue; + } + + if (f_keep_best < f_keep_cur && sim_best < sim_cur) { + f_keep_best = f_keep_cur; + sim_best = sim_cur; + + it_best = it; + } + } + + if (it_best != states.end()) { + SRV_WRN(" - found better prompt with f_keep = %.3f, sim = %.3f\n", f_keep_best, sim_best); + + const size_t size = it_best->data.size(); + const size_t n = llama_state_seq_set_data_ext(ctx, it_best->data.data(), size, id_slot, 0); + if (n != size) { + SRV_WRN("failed to restore state with size %zu\n", size); + + return false; + } + + it_best->data.clear(); + it_best->data.shrink_to_fit(); + + prompt = std::move(*it_best); + + states.erase(it_best); + } + + return true; +} + +void server_prompt_cache::update() { + if (limit_size > 0) { + // always keep at least one state, regardless of the limits + while (states.size() > 1 && size() > limit_size) { + if (states.empty()) { + break; + } + + SRV_WRN(" - cache size limit reached, removing oldest entry (size = %.3f MiB)\n", states.front().size() / (1024.0 * 1024.0)); + + states.pop_front(); + } + } + + // average size per token + const float size_per_token = std::max(1.0f, float(size()) / (std::max(1, n_tokens()))); + + // dynamically increase the token limit if it can fit in the memory limit + const size_t limit_tokens_cur = 
limit_size > 0 ? std::max(limit_tokens, limit_size/size_per_token) : limit_tokens; + + if (limit_tokens > 0) { + while (states.size() > 1 && n_tokens() > limit_tokens_cur) { + if (states.empty()) { + break; + } + + SRV_WRN(" - cache token limit (%zu, est: %zu) reached, removing oldest entry (size = %.3f MiB)\n", + limit_tokens, limit_tokens_cur, states.front().size() / (1024.0 * 1024.0)); + + states.pop_front(); + } + } + + SRV_WRN(" - cache state: %zu prompts, %.3f MiB (limits: %.3f MiB, %zu tokens, %zu est)\n", + states.size(), size() / (1024.0 * 1024.0), limit_size / (1024.0 * 1024.0), limit_tokens, limit_tokens_cur); + + for (const auto & state : states) { + SRV_WRN(" - prompt %p: %7d tokens, checkpoints: %2zu, %9.3f MiB\n", + (const void *)&state, state.n_tokens(), state.checkpoints.size(), state.size() / (1024.0 * 1024.0)); + } +} diff --git a/llama.cpp/tools/server/server-task.h b/llama.cpp/tools/server/server-task.h new file mode 100644 index 0000000..a69e8f1 --- /dev/null +++ b/llama.cpp/tools/server/server-task.h @@ -0,0 +1,620 @@ +#pragma once + +#include "common.h" +#include "llama.h" + +#include +#include +#include +#include + +// TODO: prevent including the whole server-common.h as we only use server_tokens +#include "server-common.h" + +using json = nlohmann::ordered_json; + +enum server_task_type { + SERVER_TASK_TYPE_COMPLETION, + SERVER_TASK_TYPE_EMBEDDING, + SERVER_TASK_TYPE_RERANK, + SERVER_TASK_TYPE_INFILL, + SERVER_TASK_TYPE_CANCEL, + SERVER_TASK_TYPE_NEXT_RESPONSE, + SERVER_TASK_TYPE_METRICS, + SERVER_TASK_TYPE_SLOT_SAVE, + SERVER_TASK_TYPE_SLOT_RESTORE, + SERVER_TASK_TYPE_SLOT_ERASE, + SERVER_TASK_TYPE_GET_LORA, + SERVER_TASK_TYPE_SET_LORA, +}; + +// TODO: change this to more generic "response_format" to replace the "format_response_*" in server-common +enum task_response_type { + TASK_RESPONSE_TYPE_NONE, // llama.cpp native format + TASK_RESPONSE_TYPE_OAI_CHAT, + TASK_RESPONSE_TYPE_OAI_CMPL, + TASK_RESPONSE_TYPE_OAI_RESP, + TASK_RESPONSE_TYPE_OAI_EMBD, + TASK_RESPONSE_TYPE_ANTHROPIC, +}; + +enum stop_type { + STOP_TYPE_NONE, + STOP_TYPE_EOS, + STOP_TYPE_WORD, + STOP_TYPE_LIMIT, +}; + +struct task_params { + bool stream = true; + bool include_usage = false; + bool cache_prompt = true; // remember the prompt to avoid reprocessing all prompt + bool return_tokens = false; + bool return_progress = false; + + int32_t n_keep = 0; // number of tokens to keep from initial prompt + int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half + int32_t n_predict = -1; // new tokens to predict + int32_t n_indent = 0; // minimum line indentation for the generated text in number of whitespace characters + int32_t n_cmpl = 1; // number of completions to generate from this prompt + + int32_t n_cache_reuse = 0; // min chunk size to attempt reusing from the cache via KV shifting (0 = disabled) + + int64_t t_max_prompt_ms = -1; // TODO: implement + int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit + + std::map lora; // mapping adapter ID -> scale + + std::vector antiprompt; + std::vector response_fields; + + bool timings_per_token = false; + bool post_sampling_probs = false; + + struct common_params_sampling sampling; + struct common_params_speculative speculative; + + // response formatting + bool verbose = false; + task_response_type res_type = TASK_RESPONSE_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + + // per-request parameters for chat parsing + 
common_chat_parser_params chat_parser_params; + + // Embeddings + int32_t embd_normalize = 2; // (-1=none, 0=max absolute int16, 1=taxicab, 2=Euclidean/L2, >2=p-norm) + + json format_logit_bias(const std::vector & logit_bias) const; + json to_json(bool only_metrics = false) const; +}; + +// struct for tracking the state of a task (e.g., for streaming) +struct task_result_state { + // tracking diffs for partial tool calls + std::vector diffs; + common_chat_parser_params chat_parser_params; + common_chat_msg chat_msg; + std::string generated_text; // append new chunks of generated text here + std::vector generated_tool_call_ids; + + // for OpenAI Responses and Anthropic streaming API: + // track output item / content block state across chunks + bool thinking_block_started = false; + bool text_block_started = false; + + // for OpenAI Responses streaming API + const std::string oai_resp_id; + const std::string oai_resp_reasoning_id; + const std::string oai_resp_message_id; + std::string oai_resp_fc_id; // function call ID for current args delta + + task_result_state(const common_chat_parser_params & chat_parser_params) + : chat_parser_params(chat_parser_params) + , oai_resp_id("resp_" + random_string()) + , oai_resp_reasoning_id("rs_" + random_string()) + , oai_resp_message_id("msg_" + random_string()) {} + + // parse partial tool calls and update the internal state + common_chat_msg update_chat_msg( + const std::string & text_added, + bool is_partial, + std::vector & diffs); +}; + +struct server_task { + int id = -1; // to be filled by server_queue + + // TODO @ngxson : remove this field and implement a mapping task_id -> idx in the response_reader + size_t index = 0; // used when there are multiple prompts (batch request) + + // used by SERVER_TASK_TYPE_CANCEL + int id_target = -1; + int id_slot = -1; + + // used by parallel sampling (multiple completions from same prompt) + int id_parent = -1; + // temporary store of child tasks for scheduling + // note: accessing to elements is invalid after the task is moved to server_slot + std::vector child_tasks; + + // used by SERVER_TASK_TYPE_INFERENCE + task_params params; + server_tokens tokens; + + // only used by CLI, this allow tokenizing CLI inputs on server side + // we need this because mtmd_context and vocab are not accessible outside of server_context + bool cli = false; + std::string cli_prompt; + std::vector cli_files; + + server_task_type type; + + // used by SERVER_TASK_TYPE_SLOT_SAVE, SERVER_TASK_TYPE_SLOT_RESTORE, SERVER_TASK_TYPE_SLOT_ERASE + struct slot_action { + int id_slot; + std::string filename; + std::string filepath; + }; + slot_action slot_action; + + // used by SERVER_TASK_TYPE_METRICS + bool metrics_reset_bucket = false; + + // used by SERVER_TASK_TYPE_SET_LORA + std::map set_lora; // mapping adapter ID -> scale + + server_task() = default; + + server_task(server_task_type type) : type(type) {} + + int32_t n_tokens() const { + return tokens.size(); + } + + bool need_embd() const { + switch (type) { + case SERVER_TASK_TYPE_EMBEDDING: + case SERVER_TASK_TYPE_RERANK: + return true; + default: + return false; + } + } + + bool need_logits() const { + switch (type) { + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + return true; + default: + return false; + } + } + + bool need_sampling() const { + switch (type) { + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + return true; + default: + return false; + } + } + + static task_params params_from_json_cmpl( + const llama_vocab * vocab, + 
const common_params & params_base, + const int n_ctx_slot, + const json & data); + + // utility function + static std::unordered_set get_list_id(const std::vector & tasks) { + std::unordered_set ids(tasks.size()); + for (size_t i = 0; i < tasks.size(); i++) { + ids.insert(tasks[i].id); + for (auto & child : tasks[i].child_tasks) { + ids.insert(child.id); + } + } + return ids; + } + + void add_child(int id_parent, int id_child) { + server_task copy; + + copy.id = id_child; + copy.id_parent = id_parent; + copy.params = params; + copy.type = type; + copy.tokens = tokens.clone(); + copy.id_slot = -1; // child tasks cannot specify slot + + // use different sampling seed for each child + // note: https://github.com/ggml-org/llama.cpp/pull/18700#discussion_r2675115723 + if (copy.params.sampling.seed != LLAMA_DEFAULT_SEED) { + copy.params.sampling.seed += (uint32_t)child_tasks.size() + 1; + } + + child_tasks.push_back(std::move(copy)); + } + + // the task will be moved into queue, then onto slots + // however, the state must be kept by caller (e.g., HTTP thread) + task_result_state create_state() const { + return task_result_state(params.chat_parser_params); + } + + bool is_parent() const { + return child_tasks.size() > 0; + } + + bool is_child() const { + return id_parent != -1; + } +}; + +struct result_timings { + int32_t cache_n = -1; + + int32_t prompt_n = -1; + double prompt_ms; + double prompt_per_token_ms; + double prompt_per_second; + + int32_t predicted_n = -1; + double predicted_ms; + double predicted_per_token_ms; + double predicted_per_second; + + // Optional speculative metrics - only included when > 0 + int32_t draft_n = 0; + int32_t draft_n_accepted = 0; + + json to_json() const; +}; + +struct result_prompt_progress { + int32_t total = 0; + int32_t cache = 0; + int32_t processed = 0; + int64_t time_ms = 0; + + json to_json() const; +}; + +struct server_task_result { + int id = -1; + int id_slot = -1; + + // TODO @ngxson : remove this field and implement a mapping task_id -> idx in the response_reader + size_t index = 0; // to be used for batched tasks + + virtual bool is_error() { + // only used by server_task_result_error + return false; + } + virtual bool is_stop() { + // only used by server_task_result_cmpl_* + return true; + } + virtual void update(task_result_state &) { + // only used by server_task_result_cmpl_* + } + virtual json to_json() = 0; + virtual ~server_task_result() = default; +}; + +// using shared_ptr for polymorphism of server_task_result +using server_task_result_ptr = std::unique_ptr; + +struct completion_token_output { + llama_token tok; + float prob; + std::string text_to_send; + struct prob_info { + llama_token tok; + std::string txt; + float prob; + }; + std::vector probs; + + json to_json(bool post_sampling_probs) const; + + static json probs_vector_to_json(const std::vector & probs, bool post_sampling_probs); + + static float logarithm(float x); + + static std::vector str_to_bytes(const std::string & str); + +}; + +struct server_task_result_cmpl_final : server_task_result { + std::string content; + llama_tokens tokens; + + bool stream; + bool include_usage; + result_timings timings; + std::string prompt; + + bool truncated; + int32_t n_decoded; + int32_t n_prompt_tokens; + int32_t n_tokens_cached; + bool has_new_line; + std::string stopping_word; + stop_type stop = STOP_TYPE_NONE; + + bool post_sampling_probs; + std::vector probs_output; + std::vector response_fields; + + task_params generation_params; + + // response formatting + bool verbose = false; + 
task_response_type res_type = TASK_RESPONSE_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + common_chat_msg oaicompat_msg; // to be populated by update() + + std::vector oaicompat_msg_diffs; // to be populated by update() + bool is_updated = false; + + // for OpenAI Responses API + std::string oai_resp_id; + std::string oai_resp_reasoning_id; + std::string oai_resp_message_id; + + virtual bool is_stop() override { + return true; // in stream mode, final responses are considered stop + } + + virtual json to_json() override; + + virtual void update(task_result_state & state) override { + is_updated = true; + oaicompat_msg = state.update_chat_msg(content, false, oaicompat_msg_diffs); + + oai_resp_id = state.oai_resp_id; + oai_resp_reasoning_id = state.oai_resp_reasoning_id; + oai_resp_message_id = state.oai_resp_message_id; + } + + json to_json_non_oaicompat(); + + json to_json_oaicompat(); + + json to_json_oaicompat_chat(); + + json to_json_oaicompat_chat_stream(); + + json to_json_oaicompat_resp(); + + json to_json_oaicompat_resp_stream(); + + json to_json_anthropic(); + + json to_json_anthropic_stream(); +}; + +struct server_task_result_cmpl_partial : server_task_result { + std::string content; + llama_tokens tokens; + + int32_t n_decoded; + int32_t n_prompt_tokens; + + bool post_sampling_probs; + bool is_progress = false; + completion_token_output prob_output; + result_timings timings; + result_prompt_progress progress; + + // response formatting + bool verbose = false; + task_response_type res_type = TASK_RESPONSE_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + std::vector oaicompat_msg_diffs; // to be populated by update() + bool is_updated = false; + + // Streaming state copied from task_result_state for this chunk + bool thinking_block_started = false; + bool text_block_started = false; + + // for OpenAI Responses API + std::string oai_resp_id; + std::string oai_resp_reasoning_id; + std::string oai_resp_message_id; + std::string oai_resp_fc_id; + + // for Anthropic API: track if any reasoning content has been generated + bool anthropic_has_reasoning = false; + + virtual bool is_stop() override { + return false; // in stream mode, partial responses are not considered stop + } + + virtual void update(task_result_state & state) override; + + virtual json to_json() override; + + json to_json_non_oaicompat(); + + json to_json_oaicompat(); + + json to_json_oaicompat_chat(); + + json to_json_oaicompat_resp(); + + json to_json_anthropic(); +}; + +struct server_task_result_embd : server_task_result { + std::vector> embedding; + + int32_t n_tokens; + + // response formatting + task_response_type res_type = TASK_RESPONSE_TYPE_NONE; + + virtual json to_json() override; + + json to_json_non_oaicompat(); + + json to_json_oaicompat(); +}; + +struct server_task_result_rerank : server_task_result { + float score = -1e6; + + int32_t n_tokens; + + virtual json to_json() override; +}; + +struct server_task_result_error : server_task_result { + error_type err_type = ERROR_TYPE_SERVER; + std::string err_msg; + + // for ERROR_TYPE_EXCEED_CONTEXT_SIZE + int32_t n_prompt_tokens = 0; + int32_t n_ctx = 0; + + virtual bool is_error() override { + return true; + } + + virtual json to_json() override; +}; + +struct server_task_result_metrics : server_task_result { + int n_idle_slots; + int n_processing_slots; + int n_tasks_deferred; + int64_t t_start; + + // TODO: somehow reuse server_metrics in the future, instead of duplicating the fields + uint64_t 
n_prompt_tokens_processed_total = 0; + uint64_t t_prompt_processing_total = 0; + uint64_t n_tokens_predicted_total = 0; + uint64_t t_tokens_generation_total = 0; + + uint64_t n_tokens_max = 0; + + uint64_t n_prompt_tokens_processed = 0; + uint64_t t_prompt_processing = 0; + + uint64_t n_tokens_predicted = 0; + uint64_t t_tokens_generation = 0; + + uint64_t n_decode_total = 0; + uint64_t n_busy_slots_total = 0; + + // while we can also use std::vector this requires copying the slot object which can be quite messy + // therefore, we use json to temporarily store the slot.to_json() result + json slots_data = json::array(); + + virtual json to_json() override; +}; + +struct server_task_result_slot_save_load : server_task_result { + std::string filename; + bool is_save; // true = save, false = load + + size_t n_tokens; + size_t n_bytes; + double t_ms; + + virtual json to_json() override; +}; + +struct server_task_result_slot_erase : server_task_result { + size_t n_erased; + + virtual json to_json() override; +}; + +struct server_task_result_get_lora : server_task_result { + struct lora { + common_adapter_lora_info info; + std::string alora_invocation_string; + llama_tokens alora_invocation_tokens; + }; + std::vector loras; + + virtual json to_json() override; +}; + +struct server_task_result_apply_lora : server_task_result { + virtual json to_json() override; +}; + +struct server_prompt_checkpoint { + llama_pos pos_min; + llama_pos pos_max; + + std::vector data; + + size_t size() const { + return data.size(); + } +}; + +struct server_prompt { + server_tokens tokens; + + std::vector data; + + std::list checkpoints; + + size_t size() const { + size_t res = data.size(); + + for (const auto & checkpoint : checkpoints) { + res += checkpoint.size(); + } + + return res; + } + + int n_tokens() const { + return tokens.size(); + } + + server_prompt clone() const { + return server_prompt { + tokens.clone(), + data, + checkpoints + }; + } +}; + +struct server_prompt_cache { + server_prompt_cache(int32_t limit_size_mib, size_t limit_tokens) { + this->limit_size = 1024ull*1024ull*(limit_size_mib < 0 ? 
0 : limit_size_mib); + this->limit_tokens = limit_tokens; + } + + std::list states; + + // in bytes, 0 = no limit + size_t limit_size = 0; + + // in tokens, 0 = no limit + size_t limit_tokens = 0; + + size_t size() const; + + size_t n_tokens() const; + + server_prompt * alloc(const server_prompt & prompt, size_t state_size); + + bool load(server_prompt & prompt, const server_tokens & tokens_new, llama_context * ctx, int32_t id_slot); + + void update(); +}; diff --git a/llama.cpp/tools/server/server.cpp b/llama.cpp/tools/server/server.cpp new file mode 100644 index 0000000..d3d4316 --- /dev/null +++ b/llama.cpp/tools/server/server.cpp @@ -0,0 +1,322 @@ +#include "server-context.h" +#include "server-http.h" +#include "server-models.h" + +#include "arg.h" +#include "common.h" +#include "llama.h" +#include "log.h" + +#include +#include +#include +#include // for std::thread::hardware_concurrency + +#if defined(_WIN32) +#include +#endif + +static std::function shutdown_handler; +static std::atomic_flag is_terminating = ATOMIC_FLAG_INIT; + +static inline void signal_handler(int signal) { + if (is_terminating.test_and_set()) { + // in case it hangs, we can force terminate the server by hitting Ctrl+C twice + // this is for better developer experience, we can remove when the server is stable enough + fprintf(stderr, "Received second interrupt, terminating immediately.\n"); + exit(1); + } + + shutdown_handler(signal); +} + +// wrapper function that handles exceptions and logs errors +// this is to make sure handler_t never throws exceptions; instead, it returns an error response +static server_http_context::handler_t ex_wrapper(server_http_context::handler_t func) { + return [func = std::move(func)](const server_http_req & req) -> server_http_res_ptr { + std::string message; + error_type error; + try { + return func(req); + } catch (const std::invalid_argument & e) { + // treat invalid_argument as invalid request (400) + error = ERROR_TYPE_INVALID_REQUEST; + message = e.what(); + } catch (const std::exception & e) { + // treat other exceptions as server error (500) + error = ERROR_TYPE_SERVER; + message = e.what(); + } catch (...) 
{ + error = ERROR_TYPE_SERVER; + message = "unknown error"; + } + + auto res = std::make_unique(); + res->status = 500; + try { + json error_data = format_error_response(message, error); + res->status = json_value(error_data, "code", 500); + res->data = safe_json_to_str({{ "error", error_data }}); + SRV_WRN("got exception: %s\n", res->data.c_str()); + } catch (const std::exception & e) { + SRV_ERR("got another exception: %s | while handling exception: %s\n", e.what(), message.c_str()); + res->data = "Internal Server Error"; + } + return res; + }; +} + +int main(int argc, char ** argv) { + // own arguments required by this example + common_params params; + + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) { + return 1; + } + + // validate batch size for embeddings + // embeddings require all tokens to be processed in a single ubatch + // see https://github.com/ggml-org/llama.cpp/issues/12836 + if (params.embedding && params.n_batch > params.n_ubatch) { + LOG_WRN("%s: embeddings enabled with n_batch (%d) > n_ubatch (%d)\n", __func__, params.n_batch, params.n_ubatch); + LOG_WRN("%s: setting n_batch = n_ubatch = %d to avoid assertion failure\n", __func__, params.n_ubatch); + params.n_batch = params.n_ubatch; + } + + if (params.n_parallel < 0) { + LOG_INF("%s: n_parallel is set to auto, using n_parallel = 4 and kv_unified = true\n", __func__); + + params.n_parallel = 4; + params.kv_unified = true; + } + + // for consistency between server router mode and single-model mode, we set the same model name as alias + if (params.model_alias.empty() && !params.model.name.empty()) { + params.model_alias = params.model.name; + } + + common_init(); + + // struct that contains llama context and inference + server_context ctx_server; + + llama_backend_init(); + llama_numa_init(params.numa); + + LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency()); + LOG_INF("\n"); + LOG_INF("%s\n", common_params_get_system_info(params).c_str()); + LOG_INF("\n"); + + server_http_context ctx_http; + if (!ctx_http.init(params)) { + LOG_ERR("%s: failed to initialize HTTP server\n", __func__); + return 1; + } + + // + // Router + // + + // register API routes + server_routes routes(params, ctx_server); + + bool is_router_server = params.model.path.empty(); + std::optional models_routes{}; + if (is_router_server) { + // setup server instances manager + try { + models_routes.emplace(params, argc, argv); + } catch (const std::exception & e) { + LOG_ERR("%s: failed to initialize router models: %s\n", __func__, e.what()); + return 1; + } + + // proxy handlers + // note: routes.get_health stays the same + routes.get_metrics = models_routes->proxy_get; + routes.post_props = models_routes->proxy_post; + routes.get_api_show = models_routes->proxy_get; + routes.post_completions = models_routes->proxy_post; + routes.post_completions_oai = models_routes->proxy_post; + routes.post_chat_completions = models_routes->proxy_post; + routes.post_responses_oai = models_routes->proxy_post; + routes.post_anthropic_messages = models_routes->proxy_post; + routes.post_anthropic_count_tokens = models_routes->proxy_post; + routes.post_infill = models_routes->proxy_post; + routes.post_embeddings = models_routes->proxy_post; + routes.post_embeddings_oai = models_routes->proxy_post; + routes.post_rerank = models_routes->proxy_post; + routes.post_tokenize = models_routes->proxy_post; + routes.post_detokenize = 
models_routes->proxy_post; + routes.post_apply_template = models_routes->proxy_post; + routes.get_lora_adapters = models_routes->proxy_get; + routes.post_lora_adapters = models_routes->proxy_post; + routes.get_slots = models_routes->proxy_get; + routes.post_slots = models_routes->proxy_post; + + // custom routes for router + routes.get_props = models_routes->get_router_props; + routes.get_models = models_routes->get_router_models; + ctx_http.post("/models/load", ex_wrapper(models_routes->post_router_models_load)); + ctx_http.post("/models/unload", ex_wrapper(models_routes->post_router_models_unload)); + } + + ctx_http.get ("/health", ex_wrapper(routes.get_health)); // public endpoint (no API key check) + ctx_http.get ("/v1/health", ex_wrapper(routes.get_health)); // public endpoint (no API key check) + ctx_http.get ("/metrics", ex_wrapper(routes.get_metrics)); + ctx_http.get ("/props", ex_wrapper(routes.get_props)); + ctx_http.post("/props", ex_wrapper(routes.post_props)); + ctx_http.post("/api/show", ex_wrapper(routes.get_api_show)); + ctx_http.get ("/models", ex_wrapper(routes.get_models)); // public endpoint (no API key check) + ctx_http.get ("/v1/models", ex_wrapper(routes.get_models)); // public endpoint (no API key check) + ctx_http.get ("/api/tags", ex_wrapper(routes.get_models)); // ollama specific endpoint. public endpoint (no API key check) + ctx_http.post("/completion", ex_wrapper(routes.post_completions)); // legacy + ctx_http.post("/completions", ex_wrapper(routes.post_completions)); + ctx_http.post("/v1/completions", ex_wrapper(routes.post_completions_oai)); + ctx_http.post("/chat/completions", ex_wrapper(routes.post_chat_completions)); + ctx_http.post("/v1/chat/completions", ex_wrapper(routes.post_chat_completions)); + ctx_http.post("/api/chat", ex_wrapper(routes.post_chat_completions)); // ollama specific endpoint + ctx_http.post("/v1/responses", ex_wrapper(routes.post_responses_oai)); + ctx_http.post("/v1/messages", ex_wrapper(routes.post_anthropic_messages)); // anthropic messages API + ctx_http.post("/v1/messages/count_tokens", ex_wrapper(routes.post_anthropic_count_tokens)); // anthropic token counting + ctx_http.post("/infill", ex_wrapper(routes.post_infill)); + ctx_http.post("/embedding", ex_wrapper(routes.post_embeddings)); // legacy + ctx_http.post("/embeddings", ex_wrapper(routes.post_embeddings)); + ctx_http.post("/v1/embeddings", ex_wrapper(routes.post_embeddings_oai)); + ctx_http.post("/rerank", ex_wrapper(routes.post_rerank)); + ctx_http.post("/reranking", ex_wrapper(routes.post_rerank)); + ctx_http.post("/v1/rerank", ex_wrapper(routes.post_rerank)); + ctx_http.post("/v1/reranking", ex_wrapper(routes.post_rerank)); + ctx_http.post("/tokenize", ex_wrapper(routes.post_tokenize)); + ctx_http.post("/detokenize", ex_wrapper(routes.post_detokenize)); + ctx_http.post("/apply-template", ex_wrapper(routes.post_apply_template)); + // LoRA adapters hotswap + ctx_http.get ("/lora-adapters", ex_wrapper(routes.get_lora_adapters)); + ctx_http.post("/lora-adapters", ex_wrapper(routes.post_lora_adapters)); + // Save & load slots + ctx_http.get ("/slots", ex_wrapper(routes.get_slots)); + ctx_http.post("/slots/:id_slot", ex_wrapper(routes.post_slots)); + + // + // Start the server + // + + std::function clean_up; + + if (is_router_server) { + LOG_INF("%s: starting router server, no model will be loaded in this process\n", __func__); + + clean_up = [&models_routes]() { + SRV_INF("%s: cleaning up before exit...\n", __func__); + if (models_routes.has_value()) { + 
models_routes->models.unload_all(); + } + llama_backend_free(); + }; + + if (!ctx_http.start()) { + clean_up(); + LOG_ERR("%s: exiting due to HTTP server error\n", __func__); + return 1; + } + ctx_http.is_ready.store(true); + + shutdown_handler = [&](int) { + ctx_http.stop(); + }; + + } else { + // setup clean up function, to be called before exit + clean_up = [&ctx_http, &ctx_server]() { + SRV_INF("%s: cleaning up before exit...\n", __func__); + ctx_http.stop(); + ctx_server.terminate(); + llama_backend_free(); + }; + + // start the HTTP server before loading the model to be able to serve /health requests + if (!ctx_http.start()) { + clean_up(); + LOG_ERR("%s: exiting due to HTTP server error\n", __func__); + return 1; + } + + // load the model + LOG_INF("%s: loading model\n", __func__); + + if (!ctx_server.load_model(params)) { + clean_up(); + if (ctx_http.thread.joinable()) { + ctx_http.thread.join(); + } + LOG_ERR("%s: exiting due to model loading error\n", __func__); + return 1; + } + + routes.update_meta(ctx_server); + ctx_http.is_ready.store(true); + + LOG_INF("%s: model loaded\n", __func__); + + shutdown_handler = [&](int) { + // this will unblock start_loop() + ctx_server.terminate(); + }; + } + + // TODO: refactor in common/console +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = signal_handler; + sigemptyset (&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); + sigaction(SIGTERM, &sigint_action, NULL); +#elif defined (_WIN32) + auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { + return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false; + }; + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); +#endif + + if (is_router_server) { + LOG_INF("%s: router server is listening on %s\n", __func__, ctx_http.listening_address.c_str()); + LOG_INF("%s: NOTE: router mode is experimental\n", __func__); + LOG_INF("%s: it is not recommended to use this mode in untrusted environments\n", __func__); + if (ctx_http.thread.joinable()) { + ctx_http.thread.join(); // keep the main thread alive + } + + // when the HTTP server stops, clean up and exit + clean_up(); + } else { + LOG_INF("%s: server is listening on %s\n", __func__, ctx_http.listening_address.c_str()); + LOG_INF("%s: starting the main loop...\n", __func__); + + // optionally, notify router server that this instance is ready + const char * router_port = std::getenv("LLAMA_SERVER_ROUTER_PORT"); + std::thread monitor_thread; + if (router_port != nullptr) { + monitor_thread = server_models::setup_child_server(shutdown_handler); + } + + // this call blocks the main thread until queue_tasks.terminate() is called + ctx_server.start_loop(); + + clean_up(); + if (ctx_http.thread.joinable()) { + ctx_http.thread.join(); + } + if (monitor_thread.joinable()) { + monitor_thread.join(); + } + + auto * ll_ctx = ctx_server.get_llama_context(); + if (ll_ctx != nullptr) { + llama_memory_breakdown_print(ll_ctx); + } + } + + return 0; +} diff --git a/llama.cpp/tools/server/tests/.gitignore b/llama.cpp/tools/server/tests/.gitignore new file mode 100644 index 0000000..90ee7fe --- /dev/null +++ b/llama.cpp/tools/server/tests/.gitignore @@ -0,0 +1,2 @@ +.venv +tmp diff --git a/llama.cpp/tools/server/tests/README.md b/llama.cpp/tools/server/tests/README.md new file mode 100644 index 0000000..a60d3f8 --- /dev/null +++ b/llama.cpp/tools/server/tests/README.md @@ -0,0 +1,96 @@ +# Server tests 
+
+Python-based server test scenarios using [pytest](https://docs.pytest.org/en/stable/).
+
+The tests target the GitHub workflow job runners, which have 4 vCPUs.
+
+Note: if inference on the host is faster than on the GitHub runners, the parallel scenarios may fail randomly.
+To mitigate this, increase the values of `n_predict` and `kv_size`.
+
+### Install dependencies
+
+`pip install -r requirements.txt`
+
+### Run tests
+
+1. Build the server
+
+```shell
+cd ../../..
+cmake -B build
+cmake --build build --target llama-server
+```
+
+2. Start the tests: `./tests.sh`
+
+Some scenario step values can be overridden with environment variables:
+
+| variable | description |
+|--------------------------|------------------------------------------------------------------------------------------------|
+| `PORT` | `context.server_port` to set the listening port of the server during the scenario, default: `8080` |
+| `LLAMA_SERVER_BIN_PATH` | to change the server binary path, default: `../../../build/bin/llama-server` |
+| `DEBUG` | to enable steps and server verbose mode `--verbose` |
+| `N_GPU_LAYERS` | number of model layers to offload to VRAM (`-ngl`, `--n-gpu-layers`) |
+| `LLAMA_CACHE` | by default server tests re-download models to the `tmp` subfolder. Set this to your cache (e.g. `$HOME/Library/Caches/llama.cpp` on Mac or `$HOME/.cache/llama.cpp` on Unix) to avoid this |
+
+To run the slow tests (these download many models, so set `LLAMA_CACHE` if needed):
+
+```shell
+SLOW_TESTS=1 ./tests.sh
+```
+
+To run with stdout/stderr displayed in real time (verbose output, but useful for debugging):
+
+```shell
+DEBUG=1 ./tests.sh -s -v -x
+```
+
+To run all the tests in a file:
+
+```shell
+./tests.sh unit/test_chat_completion.py -v -x
+```
+
+To run a single test:
+
+```shell
+./tests.sh unit/test_chat_completion.py::test_invalid_chat_completion_req
+```
+
+Hint: you can compile and run the tests in a single command, which is useful for local development:
+
+```shell
+cmake --build build -j --target llama-server && ./tools/server/tests/tests.sh
+```
+
+To see all available arguments, please refer to the [pytest documentation](https://docs.pytest.org/en/stable/how-to/usage.html).
+
+### Debugging an external llama-server
+It can sometimes be useful to run the server in a debugger when investigating test
+failures. To do this, set the environment variable `DEBUG_EXTERNAL=1`, which causes
+the tests to skip starting a llama-server themselves. Instead, the server can be
+started in a debugger.
+
+Example using `gdb`:
+```console
+$ gdb --args ../../../build/bin/llama-server \
+    --host 127.0.0.1 --port 8080 \
+    --temp 0.8 --seed 42 \
+    --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf \
+    --batch-size 32 --no-slots --alias tinyllama-2 --ctx-size 512 \
+    --parallel 2 --n-predict 64
+```
+A breakpoint can then be set before running:
+```console
+(gdb) br server.cpp:4604
+(gdb) r
+main: server is listening on http://127.0.0.1:8080 - starting the main loop
+srv update_slots: all slots are idle
+```
+
+The test in question can then be run in another terminal:
+```console
+(venv) $ env DEBUG_EXTERNAL=1 ./tests.sh unit/test_chat_completion.py -v -x
+```
+This should trigger the breakpoint and allow inspection of the server state
+in the debugger terminal.
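+
+As a further illustration of combining the environment variables from the table above with a single-test invocation (the cache path below is only an example; adjust it to your setup), the following reuses a local model cache and runs one fast test with verbose output:
+
+```shell
+# reuse an existing model cache and show verbose output while running a single test
+LLAMA_CACHE=$HOME/.cache/llama.cpp DEBUG=1 ./tests.sh unit/test_basic.py::test_server_start_simple -v -x
+```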
diff --git a/llama.cpp/tools/server/tests/conftest.py b/llama.cpp/tools/server/tests/conftest.py new file mode 100644 index 0000000..c7ed775 --- /dev/null +++ b/llama.cpp/tools/server/tests/conftest.py @@ -0,0 +1,21 @@ +import pytest +from utils import * + + +# ref: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test +@pytest.fixture(autouse=True) +def stop_server_after_each_test(): + # do nothing before each test + yield + # stop all servers after each test + instances = set( + server_instances + ) # copy the set to prevent 'Set changed size during iteration' + for server in instances: + server.stop() + + +@pytest.fixture(scope="module", autouse=True) +def do_something(): + # this will be run once per test session, before any tests + ServerPreset.load_all() diff --git a/llama.cpp/tools/server/tests/pytest.ini b/llama.cpp/tools/server/tests/pytest.ini new file mode 100644 index 0000000..6df308d --- /dev/null +++ b/llama.cpp/tools/server/tests/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + serial diff --git a/llama.cpp/tools/server/tests/requirements.txt b/llama.cpp/tools/server/tests/requirements.txt new file mode 100644 index 0000000..ca79d02 --- /dev/null +++ b/llama.cpp/tools/server/tests/requirements.txt @@ -0,0 +1,8 @@ +aiohttp~=3.9.3 +pytest~=8.3.3 +huggingface_hub>=0.34.0,<1.0 +numpy~=1.26.4 +openai~=2.14.0 +prometheus-client~=0.20.0 +requests~=2.32.3 +wget~=3.2 diff --git a/llama.cpp/tools/server/tests/tests.sh b/llama.cpp/tools/server/tests/tests.sh new file mode 100755 index 0000000..709b584 --- /dev/null +++ b/llama.cpp/tools/server/tests/tests.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# make sure we are in the right directory +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cd $SCRIPT_DIR + +set -eu + +if [[ "${SLOW_TESTS:-0}" == 1 ]]; then + # Slow tests for tool calls need quite a few models ahead of time to avoid timing out. 
+ python $SCRIPT_DIR/../../../scripts/fetch_server_test_models.py +fi + +if [ $# -lt 1 ] +then + if [[ "${SLOW_TESTS:-0}" == 1 ]]; then + pytest -v -x + else + pytest -v -x -m "not slow" + fi +else + pytest "$@" +fi diff --git a/llama.cpp/tools/server/tests/unit/test_basic.py b/llama.cpp/tools/server/tests/unit/test_basic.py new file mode 100644 index 0000000..3405be3 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_basic.py @@ -0,0 +1,96 @@ +import pytest +import requests +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + +def test_server_start_simple(): + global server + server.start() + res = server.make_request("GET", "/health") + assert res.status_code == 200 + + +def test_server_props(): + global server + server.start() + res = server.make_request("GET", "/props") + assert res.status_code == 200 + assert ".gguf" in res.body["model_path"] + assert res.body["total_slots"] == server.n_slots + default_val = res.body["default_generation_settings"] + assert server.n_ctx is not None and server.n_slots is not None + assert default_val["n_ctx"] == server.n_ctx / server.n_slots + assert default_val["params"]["seed"] == server.seed + + +def test_server_models(): + global server + server.start() + res = server.make_request("GET", "/models") + assert res.status_code == 200 + assert len(res.body["data"]) == 1 + assert res.body["data"][0]["id"] == server.model_alias + + +def test_server_slots(): + global server + + # without slots endpoint enabled, this should return error + server.server_slots = False + server.start() + res = server.make_request("GET", "/slots") + assert res.status_code == 501 # ERROR_TYPE_NOT_SUPPORTED + assert "error" in res.body + server.stop() + + # with slots endpoint enabled, this should return slots info + server.server_slots = True + server.n_slots = 2 + server.start() + res = server.make_request("GET", "/slots") + assert res.status_code == 200 + assert len(res.body) == server.n_slots + assert server.n_ctx is not None and server.n_slots is not None + assert res.body[0]["n_ctx"] == server.n_ctx / server.n_slots + assert "params" not in res.body[0] + + +def test_load_split_model(): + global server + server.offline = False + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "tinyllamas/split/stories15M-q8_0-00001-of-00003.gguf" + server.model_alias = "tinyllama-split" + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 16, + "prompt": "Hello", + "temperature": 0.0, + }) + assert res.status_code == 200 + assert match_regex("(little|girl)+", res.body["content"]) + + +def test_no_webui(): + global server + # default: webui enabled + server.start() + url = f"http://{server.server_host}:{server.server_port}" + res = requests.get(url) + assert res.status_code == 200 + assert "" in res.text + server.stop() + + # with --no-webui + server.no_webui = True + server.start() + res = requests.get(url) + assert res.status_code == 404 diff --git a/llama.cpp/tools/server/tests/unit/test_chat_completion.py b/llama.cpp/tools/server/tests/unit/test_chat_completion.py new file mode 100644 index 0000000..d56a930 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_chat_completion.py @@ -0,0 +1,512 @@ +import pytest +from openai import OpenAI +from utils import * + +server: ServerProcess + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + 
+@pytest.mark.parametrize( + "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason,jinja,chat_template", + [ + (None, "Book", "Hey", 8, "But she couldn't", 69, 8, "length", False, None), + (None, "Book", "Hey", 8, "But she couldn't", 69, 8, "length", True, None), + (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", False, None), + (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, None), + (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, 'chatml'), + (None, "Book", "What is the best book", 8, "^ blue", 23, 8, "length", True, "This is not a chat template, it is"), + ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length", False, None), + ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length", True, None), + (None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", False, None), + (None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", True, None), + ] +) +def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template): + global server + server.jinja = jinja + server.chat_template = chat_template + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "model": model, + "max_tokens": max_tokens, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + }) + assert res.status_code == 200 + assert "cmpl" in res.body["id"] # make sure the completion id has the expected format + assert res.body["system_fingerprint"].startswith("b") + # we no longer reflect back the model name, see https://github.com/ggml-org/llama.cpp/pull/17668 + # assert res.body["model"] == model if model is not None else server.model_alias + assert res.body["usage"]["prompt_tokens"] == n_prompt + assert res.body["usage"]["completion_tokens"] == n_predicted + choice = res.body["choices"][0] + assert "assistant" == choice["message"]["role"] + assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}' + assert choice["finish_reason"] == finish_reason + + +@pytest.mark.parametrize( + "system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason", + [ + ("Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length"), + ("You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 128, "length"), + ] +) +def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason): + global server + server.model_alias = "llama-test-model" + server.start() + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": max_tokens, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + "stream": True, + }) + content = "" + last_cmpl_id = None + for i, data in enumerate(res): + if data["choices"]: + choice = data["choices"][0] + if i == 0: + # Check first role message for stream=True + assert choice["delta"]["content"] is None + 
assert choice["delta"]["role"] == "assistant" + else: + assert "role" not in choice["delta"] + assert data["system_fingerprint"].startswith("b") + assert data["model"] == "llama-test-model" + if last_cmpl_id is None: + last_cmpl_id = data["id"] + assert last_cmpl_id == data["id"] # make sure the completion id is the same for all events in the stream + if choice["finish_reason"] in ["stop", "length"]: + assert "content" not in choice["delta"] + assert match_regex(re_content, content) + assert choice["finish_reason"] == finish_reason + else: + assert choice["finish_reason"] is None + content += choice["delta"]["content"] or '' + else: + assert data["usage"]["prompt_tokens"] == n_prompt + assert data["usage"]["completion_tokens"] == n_predicted + + +def test_chat_completion_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=8, + seed=42, + temperature=0.8, + ) + assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b") + assert res.choices[0].finish_reason == "length" + assert res.choices[0].message.content is not None + assert match_regex("(Suddenly)+", res.choices[0].message.content) + + +def test_chat_template(): + global server + server.chat_template = "llama3" + server.debug = True # to get the "__verbose" object in the response + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 8, + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ] + }) + assert res.status_code == 200 + assert "__verbose" in res.body + assert res.body["__verbose"]["prompt"] == " <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" + + +@pytest.mark.parametrize("prefill,re_prefill", [ + ("Whill", "Whill"), + ([{"type": "text", "text": "Wh"}, {"type": "text", "text": "ill"}], "Whill"), +]) +def test_chat_template_assistant_prefill(prefill, re_prefill): + global server + server.chat_template = "llama3" + server.debug = True # to get the "__verbose" object in the response + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 8, + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + {"role": "assistant", "content": prefill}, + ] + }) + assert res.status_code == 200 + assert "__verbose" in res.body + assert res.body["__verbose"]["prompt"] == f" <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{re_prefill}" + + +def test_apply_chat_template(): + global server + server.chat_template = "command-r" + server.start() + res = server.make_request("POST", "/apply-template", data={ + "messages": [ + {"role": "system", "content": "You are a test."}, + {"role": "user", "content":"Hi there"}, + ] + }) + assert res.status_code == 200 + assert "prompt" in res.body + assert res.body["prompt"] == "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>You are a test.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hi 
there<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" + + +@pytest.mark.parametrize("response_format,n_predicted,re_content", [ + ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""), + ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"), + ({"type": "json_schema", "json_schema": {"schema": {"const": "foooooo"}}}, 10, "\"foooooo\""), + ({"type": "json_object"}, 10, "(\\{|John)+"), + ({"type": "sound"}, 0, None), + # invalid response format (expected to fail) + ({"type": "json_object", "schema": 123}, 0, None), + ({"type": "json_object", "schema": {"type": 123}}, 0, None), + ({"type": "json_object", "schema": {"type": "hiccup"}}, 0, None), +]) +def test_completion_with_response_format(response_format: dict, n_predicted: int, re_content: str | None): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predicted, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "response_format": response_format, + }) + if re_content is not None: + assert res.status_code == 200 + choice = res.body["choices"][0] + assert match_regex(re_content, choice["message"]["content"]) + else: + assert res.status_code == 400 + assert "error" in res.body + + +@pytest.mark.parametrize("jinja,json_schema,n_predicted,re_content", [ + (False, {"const": "42"}, 6, "\"42\""), + (True, {"const": "42"}, 6, "\"42\""), +]) +def test_completion_with_json_schema(jinja: bool, json_schema: dict, n_predicted: int, re_content: str): + global server + server.jinja = jinja + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predicted, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "json_schema": json_schema, + }) + assert res.status_code == 200, f'Expected 200, got {res.status_code}' + choice = res.body["choices"][0] + assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}' + + +@pytest.mark.parametrize("jinja,grammar,n_predicted,re_content", [ + (False, 'root ::= "a"{5,5}', 6, "a{5,5}"), + (True, 'root ::= "a"{5,5}', 6, "a{5,5}"), +]) +def test_completion_with_grammar(jinja: bool, grammar: str, n_predicted: int, re_content: str): + global server + server.jinja = jinja + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predicted, + "messages": [ + {"role": "user", "content": "Does not matter what I say, does it?"}, + ], + "grammar": grammar, + }) + assert res.status_code == 200, res.body + choice = res.body["choices"][0] + assert match_regex(re_content, choice["message"]["content"]), choice["message"]["content"] + + +@pytest.mark.parametrize("messages", [ + None, + "string", + [123], + [{}], + [{"role": 123}], + [{"role": "system", "content": 123}], + # [{"content": "hello"}], # TODO: should not be a valid case + [{"role": "system", "content": "test"}, {}], + [{"role": "user", "content": "test"}, {"role": "assistant", "content": "test"}, {"role": "assistant", "content": "test"}], +]) +def test_invalid_chat_completion_req(messages): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "messages": messages, + }) + assert res.status_code == 400 or res.status_code == 500 + assert "error" in res.body + + +def 
test_chat_completion_with_timings_per_token(): + global server + server.start() + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 10, + "messages": [{"role": "user", "content": "test"}], + "stream": True, + "stream_options": {"include_usage": True}, + "timings_per_token": True, + }) + stats_received = False + for i, data in enumerate(res): + if i == 0: + # Check first role message for stream=True + assert data["choices"][0]["delta"]["content"] is None + assert data["choices"][0]["delta"]["role"] == "assistant" + assert "timings" not in data, f'First event should not have timings: {data}' + else: + if data["choices"]: + assert "role" not in data["choices"][0]["delta"] + else: + assert "timings" in data + assert "prompt_per_second" in data["timings"] + assert "predicted_per_second" in data["timings"] + assert "predicted_n" in data["timings"] + assert data["timings"]["predicted_n"] <= 10 + stats_received = True + assert stats_received + + +def test_logprobs(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + temperature=0.0, + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=5, + logprobs=True, + top_logprobs=10, + ) + output_text = res.choices[0].message.content + aggregated_text = '' + assert res.choices[0].logprobs is not None + assert res.choices[0].logprobs.content is not None + for token in res.choices[0].logprobs.content: + aggregated_text += token.token + assert token.logprob <= 0.0 + assert token.bytes is not None + assert len(token.top_logprobs) > 0 + assert aggregated_text == output_text + + +def test_logprobs_stream(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + temperature=0.0, + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=5, + logprobs=True, + top_logprobs=10, + stream=True, + ) + output_text = '' + aggregated_text = '' + for i, data in enumerate(res): + if data.choices: + choice = data.choices[0] + if i == 0: + # Check first role message for stream=True + assert choice.delta.content is None + assert choice.delta.role == "assistant" + else: + assert choice.delta.role is None + if choice.finish_reason is None: + if choice.delta.content: + output_text += choice.delta.content + assert choice.logprobs is not None + assert choice.logprobs.content is not None + for token in choice.logprobs.content: + aggregated_text += token.token + assert token.logprob <= 0.0 + assert token.bytes is not None + assert token.top_logprobs is not None + assert len(token.top_logprobs) > 0 + assert aggregated_text == output_text + + +def test_logit_bias(): + global server + server.start() + + exclude = ["i", "I", "the", "The", "to", "a", "an", "be", "is", "was", "but", "But", "and", "And", "so", "So", "you", "You", "he", "He", "she", "She", "we", "We", "they", "They", "it", "It", "his", "His", "her", "Her", "book", "Book"] + + res = server.make_request("POST", "/tokenize", data={ + "content": " " + " ".join(exclude) + " ", + }) + assert res.status_code == 200 + tokens = res.body["tokens"] + logit_bias = {tok: -100 for tok in tokens} + + client = OpenAI(api_key="dummy", 
base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + temperature=0.0, + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=64, + logit_bias=logit_bias + ) + output_text = res.choices[0].message.content + assert output_text + assert all(output_text.find(" " + tok + " ") == -1 for tok in exclude) + +def test_context_size_exceeded(): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ] * 100, # make the prompt too long + }) + assert res.status_code == 400 + assert "error" in res.body + assert res.body["error"]["type"] == "exceed_context_size_error" + assert res.body["error"]["n_prompt_tokens"] > 0 + assert server.n_ctx is not None + assert server.n_slots is not None + assert res.body["error"]["n_ctx"] == server.n_ctx // server.n_slots + + +def test_context_size_exceeded_stream(): + global server + server.start() + try: + for _ in server.make_stream_request("POST", "/chat/completions", data={ + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ] * 100, # make the prompt too long + "stream": True}): + pass + assert False, "Should have failed" + except ServerError as e: + assert e.code == 400 + assert "error" in e.body + assert e.body["error"]["type"] == "exceed_context_size_error" + assert e.body["error"]["n_prompt_tokens"] > 0 + assert server.n_ctx is not None + assert server.n_slots is not None + assert e.body["error"]["n_ctx"] == server.n_ctx // server.n_slots + + +@pytest.mark.parametrize( + "n_batch,batch_count,reuse_cache", + [ + (64, 4, False), + (64, 2, True), + ] +) +def test_return_progress(n_batch, batch_count, reuse_cache): + global server + server.n_batch = n_batch + server.n_ctx = 256 + server.n_slots = 1 + server.start() + def make_cmpl_request(): + return server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 10, + "messages": [ + {"role": "user", "content": "This is a test" * 10}, + ], + "stream": True, + "return_progress": True, + }) + if reuse_cache: + # make a first request to populate the cache + res0 = make_cmpl_request() + for _ in res0: + pass # discard the output + + res = make_cmpl_request() + last_progress = None + total_batch_count = 0 + + for data in res: + cur_progress = data.get("prompt_progress", None) + if cur_progress is None: + continue + if total_batch_count == 0: + # first progress report must have n_cache == n_processed + assert cur_progress["total"] > 0 + assert cur_progress["cache"] == cur_progress["processed"] + if reuse_cache: + # when reusing cache, we expect some cached tokens + assert cur_progress["cache"] > 0 + if last_progress is not None: + assert cur_progress["total"] == last_progress["total"] + assert cur_progress["cache"] == last_progress["cache"] + assert cur_progress["processed"] > last_progress["processed"] + total_batch_count += 1 + last_progress = cur_progress + + # last progress should indicate completion (all tokens processed) + assert last_progress is not None + assert last_progress["total"] > 0 + assert last_progress["processed"] == last_progress["total"] + assert total_batch_count == batch_count + + +def test_chat_completions_multiple_choices(): + global server + server.start() + # make sure cache can be reused across multiple 
choices and multiple requests + # ref: https://github.com/ggml-org/llama.cpp/pull/18663 + for _ in range(2): + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 8, + "n": 2, + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + # test forcing the same slot to be used + # the scheduler should not be locked up in this case + "id_slot": 0, + }) + assert res.status_code == 200 + assert len(res.body["choices"]) == 2 + for choice in res.body["choices"]: + assert "assistant" == choice["message"]["role"] + assert choice["finish_reason"] == "length" diff --git a/llama.cpp/tools/server/tests/unit/test_compat_anthropic.py b/llama.cpp/tools/server/tests/unit/test_compat_anthropic.py new file mode 100644 index 0000000..e16e023 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_compat_anthropic.py @@ -0,0 +1,896 @@ +#!/usr/bin/env python3 +import pytest +import base64 +import requests + +from utils import * + +server: ServerProcess + + +def get_test_image_base64() -> str: + """Get a test image in base64 format""" + # Use the same test image as test_vision_api.py + IMG_URL = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png" + response = requests.get(IMG_URL) + response.raise_for_status() + return base64.b64encode(response.content).decode("utf-8") + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.model_alias = "tinyllama-2-anthropic" + server.server_port = 8082 + server.n_slots = 1 + server.n_ctx = 8192 + server.n_batch = 2048 + + +@pytest.fixture +def vision_server(): + """Separate fixture for vision tests that require multimodal support""" + global server + server = ServerPreset.tinygemma3() + server.offline = False # Allow downloading the model + server.model_alias = "tinygemma3-anthropic" + server.server_port = 8083 # Different port to avoid conflicts + server.n_slots = 1 + return server + + +# Basic message tests + +def test_anthropic_messages_basic(): + """Test basic Anthropic messages endpoint""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "messages": [ + {"role": "user", "content": "Say hello"} + ] + }) + + assert res.status_code == 200, f"Expected 200, got {res.status_code}" + assert res.body["type"] == "message", f"Expected type 'message', got {res.body.get('type')}" + assert res.body["role"] == "assistant", f"Expected role 'assistant', got {res.body.get('role')}" + assert "content" in res.body, "Missing 'content' field" + assert isinstance(res.body["content"], list), "Content should be an array" + assert len(res.body["content"]) > 0, "Content array should not be empty" + assert res.body["content"][0]["type"] == "text", "First content block should be text" + assert "text" in res.body["content"][0], "Text content block missing 'text' field" + assert res.body["stop_reason"] in ["end_turn", "max_tokens"], f"Invalid stop_reason: {res.body.get('stop_reason')}" + assert "usage" in res.body, "Missing 'usage' field" + assert "input_tokens" in res.body["usage"], "Missing usage.input_tokens" + assert "output_tokens" in res.body["usage"], "Missing usage.output_tokens" + assert isinstance(res.body["usage"]["input_tokens"], int), "input_tokens should be integer" + assert isinstance(res.body["usage"]["output_tokens"], int), "output_tokens should be integer" + assert res.body["usage"]["output_tokens"] > 0, "Should have generated some 
tokens" + # Anthropic API should NOT include timings + assert "timings" not in res.body, "Anthropic API should not include timings field" + + +def test_anthropic_messages_with_system(): + """Test messages with system prompt""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "system": "You are a helpful assistant.", + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + assert len(res.body["content"]) > 0 + + +def test_anthropic_messages_multipart_content(): + """Test messages with multipart content blocks""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "messages": [ + { + "role": "user", + "content": [ + {"type": "text", "text": "What is"}, + {"type": "text", "text": " the answer?"} + ] + } + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_messages_conversation(): + """Test multi-turn conversation""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "messages": [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + {"role": "user", "content": "How are you?"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +# Streaming tests + +def test_anthropic_messages_streaming(): + """Test streaming messages""" + server.start() + + res = server.make_stream_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 30, + "messages": [ + {"role": "user", "content": "Say hello"} + ], + "stream": True + }) + + events = [] + for data in res: + # Each event should have type and other fields + assert "type" in data, f"Missing 'type' in event: {data}" + events.append(data) + + # Verify event sequence + event_types = [e["type"] for e in events] + assert "message_start" in event_types, "Missing message_start event" + assert "content_block_start" in event_types, "Missing content_block_start event" + assert "content_block_delta" in event_types, "Missing content_block_delta event" + assert "content_block_stop" in event_types, "Missing content_block_stop event" + assert "message_delta" in event_types, "Missing message_delta event" + assert "message_stop" in event_types, "Missing message_stop event" + + # Check message_start structure + message_start = next(e for e in events if e["type"] == "message_start") + assert "message" in message_start, "message_start missing 'message' field" + assert message_start["message"]["type"] == "message" + assert message_start["message"]["role"] == "assistant" + assert message_start["message"]["content"] == [] + assert "usage" in message_start["message"] + assert message_start["message"]["usage"]["input_tokens"] > 0 + + # Check content_block_start + block_start = next(e for e in events if e["type"] == "content_block_start") + assert "index" in block_start, "content_block_start missing 'index'" + assert block_start["index"] == 0, "First content block should be at index 0" + assert "content_block" in block_start + assert block_start["content_block"]["type"] == "text" + + # Check content_block_delta + deltas = [e for e in events if e["type"] == "content_block_delta"] + assert len(deltas) > 0, "Should have at least one content_block_delta" + for delta in deltas: + assert "index" in delta + assert "delta" in delta + assert delta["delta"]["type"] == 
"text_delta" + assert "text" in delta["delta"] + + # Check content_block_stop + block_stop = next(e for e in events if e["type"] == "content_block_stop") + assert "index" in block_stop + assert block_stop["index"] == 0 + + # Check message_delta + message_delta = next(e for e in events if e["type"] == "message_delta") + assert "delta" in message_delta + assert "stop_reason" in message_delta["delta"] + assert message_delta["delta"]["stop_reason"] in ["end_turn", "max_tokens"] + assert "usage" in message_delta + assert message_delta["usage"]["output_tokens"] > 0 + + # Check message_stop + message_stop = next(e for e in events if e["type"] == "message_stop") + # message_stop should NOT have timings for Anthropic API + assert "timings" not in message_stop, "Anthropic streaming should not include timings" + + +# Token counting tests + +def test_anthropic_count_tokens(): + """Test token counting endpoint""" + server.start() + + res = server.make_request("POST", "/v1/messages/count_tokens", data={ + "model": "test", + "messages": [ + {"role": "user", "content": "Hello world"} + ] + }) + + assert res.status_code == 200 + assert "input_tokens" in res.body + assert isinstance(res.body["input_tokens"], int) + assert res.body["input_tokens"] > 0 + # Should only have input_tokens, no other fields + assert "output_tokens" not in res.body + + +def test_anthropic_count_tokens_with_system(): + """Test token counting with system prompt""" + server.start() + + res = server.make_request("POST", "/v1/messages/count_tokens", data={ + "model": "test", + "system": "You are a helpful assistant.", + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert res.body["input_tokens"] > 0 + + +def test_anthropic_count_tokens_no_max_tokens(): + """Test that count_tokens doesn't require max_tokens""" + server.start() + + # max_tokens is NOT required for count_tokens + res = server.make_request("POST", "/v1/messages/count_tokens", data={ + "model": "test", + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert "input_tokens" in res.body + + +# Tool use tests + +def test_anthropic_tool_use_basic(): + """Test basic tool use""" + server.jinja = True + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 200, + "tools": [{ + "name": "get_weather", + "description": "Get the current weather in a location", + "input_schema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City name" + } + }, + "required": ["location"] + } + }], + "messages": [ + {"role": "user", "content": "What's the weather in Paris?"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + assert len(res.body["content"]) > 0 + + # Check if model used the tool (it might not always, depending on the model) + content_types = [block.get("type") for block in res.body["content"]] + + if "tool_use" in content_types: + # Model used the tool + assert res.body["stop_reason"] == "tool_use" + + # Find the tool_use block + tool_block = next(b for b in res.body["content"] if b.get("type") == "tool_use") + assert "id" in tool_block + assert "name" in tool_block + assert tool_block["name"] == "get_weather" + assert "input" in tool_block + assert isinstance(tool_block["input"], dict) + + +def test_anthropic_tool_result(): + """Test sending tool results back + + This test verifies that tool_result blocks are properly converted to + role="tool" 
messages internally. Without proper conversion, this would + fail with a 500 error: "unsupported content[].type" because tool_result + blocks would remain in the user message content array. + """ + server.jinja = True + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 100, + "messages": [ + {"role": "user", "content": "What's the weather?"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "test123", + "name": "get_weather", + "input": {"location": "Paris"} + } + ] + }, + { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "test123", + "content": "The weather is sunny, 25°C" + } + ] + } + ] + }) + + # This would be 500 with the old bug where tool_result blocks weren't converted + assert res.status_code == 200 + assert res.body["type"] == "message" + # Model should respond to the tool result + assert len(res.body["content"]) > 0 + assert res.body["content"][0]["type"] == "text" + + +def test_anthropic_tool_result_with_text(): + """Test tool result mixed with text content + + This tests the edge case where a user message contains both text and + tool_result blocks. The server must properly split these into separate + messages: a user message with text, followed by tool messages. + Without proper handling, this would fail with 500: "unsupported content[].type" + """ + server.jinja = True + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 100, + "messages": [ + {"role": "user", "content": "What's the weather?"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "tool_1", + "name": "get_weather", + "input": {"location": "Paris"} + } + ] + }, + { + "role": "user", + "content": [ + {"type": "text", "text": "Here are the results:"}, + { + "type": "tool_result", + "tool_use_id": "tool_1", + "content": "Sunny, 25°C" + } + ] + } + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + assert len(res.body["content"]) > 0 + + +def test_anthropic_tool_result_error(): + """Test tool result with error flag""" + server.jinja = True + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 100, + "messages": [ + {"role": "user", "content": "Get the weather"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "test123", + "name": "get_weather", + "input": {"location": "InvalidCity"} + } + ] + }, + { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "test123", + "is_error": True, + "content": "City not found" + } + ] + } + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_tool_streaming(): + """Test streaming with tool use""" + server.jinja = True + server.start() + + res = server.make_stream_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 200, + "stream": True, + "tools": [{ + "name": "calculator", + "description": "Calculate math", + "input_schema": { + "type": "object", + "properties": { + "expression": {"type": "string"} + }, + "required": ["expression"] + } + }], + "messages": [ + {"role": "user", "content": "Calculate 2+2"} + ] + }) + + events = [] + for data in res: + events.append(data) + + event_types = [e["type"] for e in events] + + # Should have basic events + assert "message_start" in event_types + assert "message_stop" in event_types + + # If tool was used, check for proper tool 
streaming + if any(e.get("type") == "content_block_start" and + e.get("content_block", {}).get("type") == "tool_use" + for e in events): + # Find tool use block start + tool_starts = [e for e in events if + e.get("type") == "content_block_start" and + e.get("content_block", {}).get("type") == "tool_use"] + + assert len(tool_starts) > 0, "Should have tool_use content_block_start" + + # Check index is correct (should be 0 if no text, 1 if there's text) + tool_start = tool_starts[0] + assert "index" in tool_start + assert tool_start["content_block"]["type"] == "tool_use" + assert "name" in tool_start["content_block"] + + +# Vision/multimodal tests + +def test_anthropic_vision_format_accepted(): + """Test that Anthropic vision format is accepted (format validation only)""" + server.start() + + # Small 1x1 red PNG image in base64 + red_pixel_png = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg==" + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 10, + "messages": [ + { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": red_pixel_png + } + }, + { + "type": "text", + "text": "What is this?" + } + ] + } + ] + }) + + # Server accepts the format but tinyllama doesn't support images + # So it should return 500 with clear error message about missing mmproj + assert res.status_code == 500 + assert "image input is not supported" in res.body.get("error", {}).get("message", "").lower() + + +def test_anthropic_vision_base64_with_multimodal_model(vision_server): + """Test vision with base64 image using Anthropic format with multimodal model""" + global server + server = vision_server + server.start() + + # Get test image in base64 format + image_base64 = get_test_image_base64() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 10, + "messages": [ + { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": image_base64 + } + }, + { + "type": "text", + "text": "What is this:\n" + } + ] + } + ] + }) + + assert res.status_code == 200, f"Expected 200, got {res.status_code}: {res.body}" + assert res.body["type"] == "message" + assert len(res.body["content"]) > 0 + assert res.body["content"][0]["type"] == "text" + # The model should generate some response about the image + assert len(res.body["content"][0]["text"]) > 0 + + +# Parameter tests + +def test_anthropic_stop_sequences(): + """Test stop_sequences parameter""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 100, + "stop_sequences": ["\n", "END"], + "messages": [ + {"role": "user", "content": "Count to 10"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_temperature(): + """Test temperature parameter""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "temperature": 0.5, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_top_p(): + """Test top_p parameter""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "top_p": 0.9, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert 
res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_top_k(): + """Test top_k parameter (llama.cpp specific)""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "top_k": 40, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +# Error handling tests + +def test_anthropic_missing_messages(): + """Test error when messages are missing""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50 + # missing "messages" field + }) + + # Should return an error (400 or 500) + assert res.status_code >= 400 + + +def test_anthropic_empty_messages(): + """Test permissive handling of empty messages array""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "messages": [] + }) + + # Server is permissive and accepts empty messages (provides defaults) + # This matches the permissive validation design choice + assert res.status_code == 200 + assert res.body["type"] == "message" + + +# Content block index tests + +def test_anthropic_streaming_content_block_indices(): + """Test that content block indices are correct in streaming""" + server.jinja = True + server.start() + + # Request that might produce both text and tool use + res = server.make_stream_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 400, + "stream": True, + "tools": [{ + "name": "test_tool", + "description": "A test tool", + "input_schema": { + "type": "object", + "properties": { + "param": {"type": "string"} + }, + "required": ["param"] + } + }], + "messages": [ + {"role": "user", "content": "Use the test tool"} + ] + }) + + events = [] + for data in res: + events.append(data) + + # Check content_block_start events have sequential indices + block_starts = [e for e in events if e.get("type") == "content_block_start"] + if len(block_starts) > 1: + # If there are multiple blocks, indices should be sequential + indices = [e["index"] for e in block_starts] + expected_indices = list(range(len(block_starts))) + assert indices == expected_indices, f"Expected indices {expected_indices}, got {indices}" + + # Check content_block_stop events match the starts + block_stops = [e for e in events if e.get("type") == "content_block_stop"] + start_indices = set(e["index"] for e in block_starts) + stop_indices = set(e["index"] for e in block_stops) + assert start_indices == stop_indices, "content_block_stop indices should match content_block_start indices" + + +# Extended features tests + +def test_anthropic_thinking(): + """Test extended thinking parameter""" + server.jinja = True + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 100, + "thinking": { + "type": "enabled", + "budget_tokens": 50 + }, + "messages": [ + {"role": "user", "content": "What is 2+2?"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +def test_anthropic_metadata(): + """Test metadata parameter""" + server.start() + + res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "metadata": { + "user_id": "test_user_123" + }, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + +# Compatibility tests + +def 
test_anthropic_vs_openai_different_response_format(): + """Verify Anthropic format is different from OpenAI format""" + server.start() + + # Make OpenAI request + openai_res = server.make_request("POST", "/v1/chat/completions", data={ + "model": "test", + "max_tokens": 50, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + # Make Anthropic request + anthropic_res = server.make_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 50, + "messages": [ + {"role": "user", "content": "Hello"} + ] + }) + + assert openai_res.status_code == 200 + assert anthropic_res.status_code == 200 + + # OpenAI has "object", Anthropic has "type" + assert "object" in openai_res.body + assert "type" in anthropic_res.body + assert openai_res.body["object"] == "chat.completion" + assert anthropic_res.body["type"] == "message" + + # OpenAI has "choices", Anthropic has "content" + assert "choices" in openai_res.body + assert "content" in anthropic_res.body + + # Different usage field names + assert "prompt_tokens" in openai_res.body["usage"] + assert "input_tokens" in anthropic_res.body["usage"] + assert "completion_tokens" in openai_res.body["usage"] + assert "output_tokens" in anthropic_res.body["usage"] + + +# Extended thinking tests with reasoning models + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [False, True]) +def test_anthropic_thinking_with_reasoning_model(stream): + """Test that thinking content blocks are properly returned for reasoning models""" + global server + server = ServerProcess() + server.model_hf_repo = "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF" + server.model_hf_file = "DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf" + server.reasoning_format = "deepseek" + server.jinja = True + server.n_ctx = 8192 + server.n_predict = 1024 + server.server_port = 8084 + server.start(timeout_seconds=600) # large model needs time to download + + if stream: + res = server.make_stream_request("POST", "/v1/messages", data={ + "model": "test", + "max_tokens": 1024, + "thinking": { + "type": "enabled", + "budget_tokens": 500 + }, + "messages": [ + {"role": "user", "content": "What is 2+2?"} + ], + "stream": True + }) + + events = list(res) + + # should have thinking content block events + thinking_starts = [e for e in events if + e.get("type") == "content_block_start" and + e.get("content_block", {}).get("type") == "thinking"] + assert len(thinking_starts) > 0, "Should have thinking content_block_start event" + assert thinking_starts[0]["index"] == 0, "Thinking block should be at index 0" + + # should have thinking_delta events + thinking_deltas = [e for e in events if + e.get("type") == "content_block_delta" and + e.get("delta", {}).get("type") == "thinking_delta"] + assert len(thinking_deltas) > 0, "Should have thinking_delta events" + + # should have signature_delta event before thinking block closes (Anthropic API requirement) + signature_deltas = [e for e in events if + e.get("type") == "content_block_delta" and + e.get("delta", {}).get("type") == "signature_delta"] + assert len(signature_deltas) > 0, "Should have signature_delta event for thinking block" + + # should have text block after thinking + text_starts = [e for e in events if + e.get("type") == "content_block_start" and + e.get("content_block", {}).get("type") == "text"] + assert len(text_starts) > 0, "Should have text content_block_start event" + assert text_starts[0]["index"] == 1, "Text block should be at index 1 (after thinking)" + else: + res = server.make_request("POST", "/v1/messages", data={ + 
"model": "test", + "max_tokens": 1024, + "thinking": { + "type": "enabled", + "budget_tokens": 500 + }, + "messages": [ + {"role": "user", "content": "What is 2+2?"} + ] + }) + + assert res.status_code == 200 + assert res.body["type"] == "message" + + content = res.body["content"] + assert len(content) >= 2, "Should have at least thinking and text blocks" + + # first block should be thinking + thinking_blocks = [b for b in content if b.get("type") == "thinking"] + assert len(thinking_blocks) > 0, "Should have thinking content block" + assert "thinking" in thinking_blocks[0], "Thinking block should have 'thinking' field" + assert len(thinking_blocks[0]["thinking"]) > 0, "Thinking content should not be empty" + assert "signature" in thinking_blocks[0], "Thinking block should have 'signature' field (Anthropic API requirement)" + + # should also have text block + text_blocks = [b for b in content if b.get("type") == "text"] + assert len(text_blocks) > 0, "Should have text content block" diff --git a/llama.cpp/tools/server/tests/unit/test_compat_oai_responses.py b/llama.cpp/tools/server/tests/unit/test_compat_oai_responses.py new file mode 100644 index 0000000..7aab4a8 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_compat_oai_responses.py @@ -0,0 +1,73 @@ +import pytest +from openai import OpenAI +from utils import * + +server: ServerProcess + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + +def test_responses_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.responses.create( + model="gpt-4.1", + input=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_output_tokens=8, + temperature=0.8, + ) + assert res.id.startswith("resp_") + assert res.output[0].id is not None + assert res.output[0].id.startswith("msg_") + assert match_regex("(Suddenly)+", res.output_text) + +def test_responses_stream_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + stream = client.responses.create( + model="gpt-4.1", + input=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_output_tokens=8, + temperature=0.8, + stream=True, + ) + + gathered_text = '' + resp_id = '' + msg_id = '' + for r in stream: + if r.type == "response.created": + assert r.response.id.startswith("resp_") + resp_id = r.response.id + if r.type == "response.in_progress": + assert r.response.id == resp_id + if r.type == "response.output_item.added": + assert r.item.id is not None + assert r.item.id.startswith("msg_") + msg_id = r.item.id + if (r.type == "response.content_part.added" or + r.type == "response.output_text.delta" or + r.type == "response.output_text.done" or + r.type == "response.content_part.done"): + assert r.item_id == msg_id + if r.type == "response.output_item.done": + assert r.item.id == msg_id + + if r.type == "response.output_text.delta": + gathered_text += r.delta + if r.type == "response.completed": + assert r.response.id.startswith("resp_") + assert r.response.output[0].id is not None + assert r.response.output[0].id.startswith("msg_") + assert gathered_text == r.response.output_text + assert match_regex("(Suddenly)+", r.response.output_text) diff --git a/llama.cpp/tools/server/tests/unit/test_completion.py 
b/llama.cpp/tools/server/tests/unit/test_completion.py new file mode 100644 index 0000000..2a98060 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_completion.py @@ -0,0 +1,608 @@ +import pytest +import requests +import time +import random + +from openai import OpenAI +from utils import * + +server = ServerPreset.tinyllama2() + +JSON_MULTIMODAL_KEY = "multimodal_data" +JSON_PROMPT_STRING_KEY = "prompt_string" + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + +@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated,return_tokens", [ + ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False, False), + ("Write a joke about AI from a very long prompt which will not be truncated", 64, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False, True), +]) +def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool, return_tokens: bool): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": prompt, + "return_tokens": return_tokens, + }) + assert res.status_code == 200 + assert res.body["timings"]["prompt_n"] == n_prompt + assert res.body["timings"]["predicted_n"] == n_predicted + assert res.body["truncated"] == truncated + assert type(res.body["has_new_line"]) == bool + assert match_regex(re_content, res.body["content"]) + if return_tokens: + assert len(res.body["tokens"]) > 0 + assert all(type(tok) == int for tok in res.body["tokens"]) + else: + assert res.body["tokens"] == [] + + +@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [ + ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False), + ("Write a joke about AI from a very long prompt which will not be truncated", 64, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False), +]) +def test_completion_stream(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool): + global server + server.start() + res = server.make_stream_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": prompt, + "stream": True, + }) + content = "" + for data in res: + assert "stop" in data and type(data["stop"]) == bool + if data["stop"]: + assert data["timings"]["prompt_n"] == n_prompt + assert data["timings"]["predicted_n"] == n_predicted + assert data["truncated"] == truncated + assert data["stop_type"] == "limit" + assert type(data["has_new_line"]) == bool + assert "generation_settings" in data + assert server.n_predict is not None + assert data["generation_settings"]["n_predict"] == min(n_predict, server.n_predict) + assert data["generation_settings"]["seed"] == server.seed + assert match_regex(re_content, content) + else: + assert len(data["tokens"]) > 0 + assert all(type(tok) == int for tok in data["tokens"]) + content += data["content"] + + +def test_completion_stream_vs_non_stream(): + global server + server.start() + res_stream = server.make_stream_request("POST", "/completion", data={ + "n_predict": 8, + "prompt": "I believe the meaning of life is", + "stream": True, + }) + res_non_stream = server.make_request("POST", "/completion", data={ + "n_predict": 8, + "prompt": "I believe the meaning of life is", + }) + content_stream = "" + for data in res_stream: + content_stream += data["content"] + assert content_stream == res_non_stream.body["content"] + + +def test_completion_with_openai_library(): + global 
server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + ) + assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b") + assert res.choices[0].finish_reason == "length" + assert res.choices[0].text is not None + assert match_regex("(going|bed)+", res.choices[0].text) + + +def test_completion_stream_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + stream=True, + ) + output_text = '' + for data in res: + choice = data.choices[0] + if choice.finish_reason is None: + assert choice.text is not None + output_text += choice.text + assert match_regex("(going|bed)+", output_text) + + +# Test case from https://github.com/ggml-org/llama.cpp/issues/13780 +@pytest.mark.slow +def test_completion_stream_with_openai_library_stops(): + global server + server.model_hf_repo = "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M" + server.model_hf_file = None + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="System: You are helpfull assistant.\nAssistant:\nHey! How could I help?\nUser:\nTell me a joke.\nAssistant:\n", + stop=["User:\n", "Assistant:\n"], + max_tokens=200, + stream=True, + ) + output_text = '' + for data in res: + choice = data.choices[0] + if choice.finish_reason is None: + assert choice.text is not None + output_text += choice.text + assert match_regex("Sure, here's one for[\\s\\S]*", output_text), f'Unexpected output: {output_text}' + + +@pytest.mark.parametrize("n_slots", [1, 2]) +def test_consistent_result_same_seed(n_slots: int): + global server + server.n_slots = n_slots + server.start() + last_res = None + for _ in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 0.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] == last_res.body["content"] + last_res = res + + +@pytest.mark.parametrize("n_slots", [1, 2]) +def test_different_result_different_seed(n_slots: int): + global server + server.n_slots = n_slots + server.start() + last_res = None + for seed in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": seed, + "temperature": 1.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] != last_res.body["content"] + last_res = res + +# TODO: figure out why it doesn't work with temperature = 1 +# @pytest.mark.parametrize("temperature", [0.0, 1.0]) +@pytest.mark.parametrize("n_batch", [16, 32]) +@pytest.mark.parametrize("temperature", [0.0]) +def test_consistent_result_different_batch_size(n_batch: int, temperature: float): + global server + server.n_batch = n_batch + server.start() + last_res = None + for _ in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": temperature, + 
"cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] == last_res.body["content"] + last_res = res + + +@pytest.mark.skip(reason="This test fails on linux, need to be fixed") +def test_cache_vs_nocache_prompt(): + global server + server.start() + res_cache = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 1.0, + "cache_prompt": True, + }) + res_no_cache = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, + }) + assert res_cache.body["content"] == res_no_cache.body["content"] + + +def test_nocache_long_input_prompt(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is"*32, + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, + }) + assert res.status_code == 400 + +def test_json_prompt_no_mtmd(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": { JSON_PROMPT_STRING_KEY: "I believe the meaning of life is" }, + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, + }) + assert res.status_code == 200 + +def test_json_prompt_mtm_error_when_not_supported(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": { JSON_PROMPT_STRING_KEY: "I believe the meaning of life is <__media__>", JSON_MULTIMODAL_KEY: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=" }, + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, + }) + # MTMD is disabled on this model, so this should fail. 
+ assert res.status_code != 200 + +def test_completion_with_tokens_input(): + global server + server.temperature = 0.0 + server.start() + prompt_str = "I believe the meaning of life is" + res = server.make_request("POST", "/tokenize", data={ + "content": prompt_str, + "add_special": True, + }) + assert res.status_code == 200 + tokens = res.body["tokens"] + + # single completion + res = server.make_request("POST", "/completion", data={ + "prompt": tokens, + }) + assert res.status_code == 200 + assert type(res.body["content"]) == str + + # batch completion + res = server.make_request("POST", "/completion", data={ + "prompt": [tokens, tokens], + }) + assert res.status_code == 200 + assert type(res.body) == list + assert len(res.body) == 2 + assert res.body[0]["content"] == res.body[1]["content"] + + # mixed string and tokens + res = server.make_request("POST", "/completion", data={ + "prompt": [tokens, prompt_str], + }) + assert res.status_code == 200 + assert type(res.body) == list + assert len(res.body) == 2 + assert res.body[0]["content"] == res.body[1]["content"] + + # mixed JSON and tokens + res = server.make_request("POST", "/completion", data={ + "prompt": [ + tokens, + { + JSON_PROMPT_STRING_KEY: "I believe the meaning of life is", + }, + ], + }) + assert res.status_code == 200 + assert type(res.body) == list + assert len(res.body) == 2 + assert res.body[0]["content"] == res.body[1]["content"] + + # mixed string and tokens in one sequence + res = server.make_request("POST", "/completion", data={ + "prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str], + }) + assert res.status_code == 200 + assert type(res.body["content"]) == str + + +@pytest.mark.parametrize("n_slots,n_requests", [ + (1, 3), + (2, 2), + (2, 4), + (4, 2), # some slots must be idle + (4, 6), +]) +def test_completion_parallel_slots(n_slots: int, n_requests: int): + global server + server.n_slots = n_slots + server.temperature = 0.0 + server.start() + + PROMPTS = [ + ("Write a very long book.", "(very|special|big)+"), + ("Write another a poem.", "(small|house)+"), + ("What is LLM?", "(Dad|said)+"), + ("The sky is blue and I love it.", "(climb|leaf)+"), + ("Write another very long music lyrics.", "(friends|step|sky)+"), + ("Write a very long joke.", "(cat|Whiskers)+"), + ] + def check_slots_status(): + should_all_slots_busy = n_requests >= n_slots + time.sleep(0.1) + res = server.make_request("GET", "/slots") + n_busy = sum([1 for slot in res.body if slot["is_processing"]]) + if should_all_slots_busy: + assert n_busy == n_slots + else: + assert n_busy <= n_slots + + tasks = [] + for i in range(n_requests): + prompt, re_content = PROMPTS[i % len(PROMPTS)] + tasks.append((server.make_request, ("POST", "/completion", { + "prompt": prompt, + "seed": 42, + "temperature": 1.0, + }))) + tasks.append((check_slots_status, ())) + results = parallel_function_calls(tasks) + + # check results + for i in range(n_requests): + prompt, re_content = PROMPTS[i % len(PROMPTS)] + res = results[i] + assert res.status_code == 200 + assert type(res.body["content"]) == str + assert len(res.body["content"]) > 10 + # FIXME: the result is not deterministic when using other slot than slot 0 + # assert match_regex(re_content, res.body["content"]) + + +@pytest.mark.parametrize( + "n_ctx,n_slots,n_predict_vals,expected_success", + [ + (256, 4, [80, 40, 80, 80], [True, True, True, True]), + (256, 4, [70, 70, 70, 70], [False, False, False, False]), + (256, 4, [90, 90, 40, 90], [False, False, True, False]), + (256, 4, [90, 90, 40, 75], [True, 
True, True, True]), + ], +) +def test_completion_unified(n_ctx, n_slots, n_predict_vals, expected_success): + global server + server.n_slots = n_slots + server.kv_unified = True + server.n_ctx = n_ctx + server.start() + prompt = "A" + tasks = [] + for n_predict in n_predict_vals: + tasks.append((server.make_request, ("POST", "/completion", {"prompt": prompt, "n_predict": n_predict}))) + results = parallel_function_calls(tasks) + for res, n_predict, expect_ok in zip(results, n_predict_vals, expected_success): + if expect_ok: + assert res.status_code == 200 + + # note: https://github.com/ggml-org/llama.cpp/pull/18700#issuecomment-3728695581 + if res.status_code == 200: + assert "content" in res.body + if "timings" in res.body: + assert res.body["timings"]["predicted_n"] == n_predict + + +@pytest.mark.parametrize( + "prompt,n_predict,response_fields", + [ + ("I believe the meaning of life is", 8, []), + ("I believe the meaning of life is", 32, ["content", "generation_settings/n_predict", "prompt"]), + ], +) +def test_completion_response_fields( + prompt: str, n_predict: int, response_fields: list[str] +): + global server + server.start() + res = server.make_request( + "POST", + "/completion", + data={ + "n_predict": n_predict, + "prompt": prompt, + "response_fields": response_fields, + }, + ) + assert res.status_code == 200 + assert "content" in res.body + assert len(res.body["content"]) + if len(response_fields): + assert res.body["generation_settings/n_predict"] == n_predict + assert res.body["prompt"] == " " + prompt + assert isinstance(res.body["content"], str) + assert len(res.body) == len(response_fields) + else: + assert len(res.body) + assert "generation_settings" in res.body + + +def test_n_probs(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + }) + assert res.status_code == 200 + assert "completion_probabilities" in res.body + assert len(res.body["completion_probabilities"]) == 5 + for tok in res.body["completion_probabilities"]: + assert "id" in tok and tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "logprob" in tok and tok["logprob"] <= 0.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_logprobs"]) == 10 + for prob in tok["top_logprobs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "logprob" in prob and prob["logprob"] <= 0.0 + assert "bytes" in prob and type(prob["bytes"]) == list + + +def test_n_probs_stream(): + global server + server.start() + res = server.make_stream_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + "stream": True, + }) + for data in res: + if data["stop"] == False: + assert "completion_probabilities" in data + assert len(data["completion_probabilities"]) == 1 + for tok in data["completion_probabilities"]: + assert "id" in tok and tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "logprob" in tok and tok["logprob"] <= 0.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_logprobs"]) == 10 + for prob in tok["top_logprobs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "logprob" in prob and prob["logprob"] <= 0.0 + assert "bytes" in prob and type(prob["bytes"]) == list + + +def 
test_n_probs_post_sampling(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + "post_sampling_probs": True, + }) + assert res.status_code == 200 + assert "completion_probabilities" in res.body + assert len(res.body["completion_probabilities"]) == 5 + for tok in res.body["completion_probabilities"]: + assert "id" in tok and tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "prob" in tok and 0.0 < tok["prob"] <= 1.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_probs"]) == 10 + for prob in tok["top_probs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "prob" in prob and 0.0 <= prob["prob"] <= 1.0 + assert "bytes" in prob and type(prob["bytes"]) == list + # because the test model usually outputs tokens with either 100% or 0% probability, we need to check all the top_probs + assert any(prob["prob"] == 1.0 for prob in tok["top_probs"]) + + +@pytest.mark.parametrize("tokenize,openai_style", [(False, False), (False, True), (True, False), (True, True)]) +def test_logit_bias(tokenize, openai_style): + global server + server.start() + + exclude = ["i", "I", "the", "The", "to", "a", "an", "be", "is", "was", "but", "But", "and", "And", "so", "So", "you", "You", "he", "He", "she", "She", "we", "We", "they", "They", "it", "It", "his", "His", "her", "Her", "book", "Book"] + + logit_bias = [] + if tokenize: + res = server.make_request("POST", "/tokenize", data={ + "content": " " + " ".join(exclude) + " ", + }) + assert res.status_code == 200 + tokens = res.body["tokens"] + logit_bias = [[tok, -100] for tok in tokens] + + else: + logit_bias = [[" " + tok + " ", -100] for tok in exclude] + + if openai_style: + logit_bias = {el[0]: -100 for el in logit_bias} + + res = server.make_request("POST", "/completion", data={ + "n_predict": 64, + "prompt": "What is the best book", + "logit_bias": logit_bias, + "temperature": 0.0 + }) + assert res.status_code == 200 + output_text = res.body["content"] + assert all(output_text.find(" " + tok + " ") == -1 for tok in exclude) + + +def test_cancel_request(): + global server + server.n_ctx = 4096 + server.n_predict = -1 + server.n_slots = 1 + server.server_slots = True + server.start() + # send a request that will take a long time, but cancel it before it finishes + try: + server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + }, timeout=0.1) + except requests.exceptions.ReadTimeout: + pass # expected + # make sure the slot is free + time.sleep(1) # wait for HTTP_POLLING_SECONDS + res = server.make_request("GET", "/slots") + assert res.body[0]["is_processing"] == False + + +# this test exercises the host-memory prompt cache +# ref: https://github.com/ggml-org/llama.cpp/pull/16391 +# ref: https://github.com/ggml-org/llama.cpp/pull/17078 +def test_completion_prompt_cache(): + global server + server.n_slots = 2 + server.kv_unified = True + server.start() + + for _ in range(16): + # generate alternating random prompts with variable lengths in order to get them in and out of the cache + r = random.randint(0, 4) + prompt = (" Hello " + str(r)) * (40 + r) + n_prompt = (40 + r)*5 + 2 + n_predict = random.randint(1, 8) + + res = server.make_request( + "POST", + "/completion", + data={ + "prompt": prompt, + "n_predict": n_predict, + }, + ) + + assert res.status_code == 200 + 
assert "content" in res.body + content = res.body["content"] + assert isinstance(content, str) + assert len(content) > 0 + + assert type(res.body["has_new_line"]) == bool + assert "timings" in res.body + timings = res.body["timings"] + + assert "prompt_n" in timings and timings["prompt_n"] + timings["cache_n"] == n_prompt + assert "predicted_n" in timings and timings["predicted_n"] == n_predict + assert "tokens" in res.body and isinstance(res.body["tokens"], list) diff --git a/llama.cpp/tools/server/tests/unit/test_ctx_shift.py b/llama.cpp/tools/server/tests/unit/test_ctx_shift.py new file mode 100644 index 0000000..7b047b7 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_ctx_shift.py @@ -0,0 +1,89 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + + +SHORT_TEXT = """ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +""".strip() + +LONG_TEXT = """ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. +""".strip() + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.n_ctx = 512 + server.n_slots = 2 + server.n_predict = 128 + + +def test_ctx_shift_enabled(): + # the prompt is 226 tokens + # the slot context is 512/2 = 256 tokens + # 96 tokens are generated thanks to shifting the context when it gets full + global server + server.enable_ctx_shift = True + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 96, + "prompt": SHORT_TEXT, + }) + assert res.status_code == 200 + assert res.body["timings"]["prompt_n"] == 226 + assert res.body["timings"]["predicted_n"] == 96 + assert res.body["truncated"] is True + + +@pytest.mark.parametrize("n_predict,n_token_output,truncated", [ + (64, 64, False), + (-1, 248, True), # 8 tokens prompt + 248 tokens generated = 256 tokens total +]) +def test_ctx_shift_disabled_short_prompt(n_predict: int, n_token_output: int, truncated: bool): + global server + server.n_predict = -1 + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": "Hi how are you", + }) + assert res.status_code == 200 + assert res.body["timings"]["predicted_n"] == n_token_output + assert res.body["truncated"] == truncated + + +def test_ctx_shift_disabled_long_prompt(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 64, + "prompt": LONG_TEXT, + }) + assert res.status_code != 200 + assert "error" in res.body + assert "exceeds the available context size" in res.body["error"]["message"] + +def test_ctx_shift_disabled_stream(): + global server + server.start() + res = server.make_stream_request("POST", "/v1/completions", data={ + "n_predict": 256, + "prompt": "Once", + "stream": True, + }) + content = "" + for data in res: + choice = data["choices"][0] + if 
choice["finish_reason"] == "length": + assert len(content) > 0 + else: + assert choice["finish_reason"] is None + content += choice["text"] diff --git a/llama.cpp/tools/server/tests/unit/test_embedding.py b/llama.cpp/tools/server/tests/unit/test_embedding.py new file mode 100644 index 0000000..50601b8 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_embedding.py @@ -0,0 +1,257 @@ +import base64 +import struct +import pytest +from openai import OpenAI +from utils import * + +server = ServerPreset.bert_bge_small() + +EPSILON = 1e-3 + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.bert_bge_small() + + +def test_embedding_single(): + global server + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": "I believe the meaning of life is", + }) + assert res.status_code == 200 + assert len(res.body['data']) == 1 + assert 'embedding' in res.body['data'][0] + assert len(res.body['data'][0]['embedding']) > 1 + + # make sure embedding vector is normalized + assert abs(sum([x ** 2 for x in res.body['data'][0]['embedding']]) - 1) < EPSILON + + +def test_embedding_multiple(): + global server + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "I believe the meaning of life is", + "Write a joke about AI from a very long prompt which will not be truncated", + "This is a test", + "This is another test", + ], + }) + assert res.status_code == 200 + assert len(res.body['data']) == 4 + for d in res.body['data']: + assert 'embedding' in d + assert len(d['embedding']) > 1 + + +def test_embedding_multiple_with_fa(): + server = ServerPreset.bert_bge_small_with_fa() + server.pooling = 'last' + server.start() + # one of these should trigger the FA branch (i.e. 
context size % 256 == 0) + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "a "*253, + "b "*254, + "c "*255, + "d "*256, + ], + }) + assert res.status_code == 200 + assert len(res.body['data']) == 4 + for d in res.body['data']: + assert 'embedding' in d + assert len(d['embedding']) > 1 + + +@pytest.mark.parametrize( + "input,is_multi_prompt", + [ + # do not crash on empty input + ("", False), + # single prompt + ("string", False), + ([12, 34, 56], False), + ([12, 34, "string", 56, 78], False), + # multiple prompts + (["string1", "string2"], True), + (["string1", [12, 34, 56]], True), + ([[12, 34, 56], [12, 34, 56]], True), + ([[12, 34, 56], [12, "string", 34, 56]], True), + ] +) +def test_embedding_mixed_input(input, is_multi_prompt: bool): + global server + server.start() + res = server.make_request("POST", "/v1/embeddings", data={"input": input}) + assert res.status_code == 200 + data = res.body['data'] + if is_multi_prompt: + assert len(data) == len(input) + for d in data: + assert 'embedding' in d + assert len(d['embedding']) > 1 + else: + assert 'embedding' in data[0] + assert len(data[0]['embedding']) > 1 + + +def test_embedding_pooling_none(): + global server + server.pooling = 'none' + server.start() + res = server.make_request("POST", "/embeddings", data={ + "input": "hello hello hello", + }) + assert res.status_code == 200 + assert 'embedding' in res.body[0] + assert len(res.body[0]['embedding']) == 5 # 3 text tokens + 2 special + + # make sure embedding vector is not normalized + for x in res.body[0]['embedding']: + assert abs(sum([x ** 2 for x in x]) - 1) > EPSILON + + +def test_embedding_pooling_none_oai(): + global server + server.pooling = 'none' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": "hello hello hello", + }) + + # /v1/embeddings does not support pooling type 'none' + assert res.status_code == 400 + assert "error" in res.body + + +def test_embedding_openai_library_single(): + global server + server.pooling = 'last' + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.embeddings.create(model="text-embedding-3-small", input="I believe the meaning of life is") + assert len(res.data) == 1 + assert len(res.data[0].embedding) > 1 + + +def test_embedding_openai_library_multiple(): + global server + server.pooling = 'last' + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.embeddings.create(model="text-embedding-3-small", input=[ + "I believe the meaning of life is", + "Write a joke about AI from a very long prompt which will not be truncated", + "This is a test", + "This is another test", + ]) + assert len(res.data) == 4 + for d in res.data: + assert len(d.embedding) > 1 + + +def test_embedding_error_prompt_too_long(): + global server + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": "This is a test " * 512, + }) + assert res.status_code != 200 + assert "too large" in res.body["error"]["message"] + + +def test_same_prompt_give_same_result(): + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "I believe the meaning of life is", + "I believe the meaning of life is", + "I believe the meaning of life is", + "I believe the meaning of life is", + "I believe the meaning of life is", + ], + }) + assert res.status_code == 200 + 
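+ # identical inputs should yield (near-)identical embedding vectors, compared element-wise within EPSILON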
assert len(res.body['data']) == 5 + for i in range(1, len(res.body['data'])): + v0 = res.body['data'][0]['embedding'] + vi = res.body['data'][i]['embedding'] + for x, y in zip(v0, vi): + assert abs(x - y) < EPSILON + + +@pytest.mark.parametrize( + "content,n_tokens", + [ + ("I believe the meaning of life is", 9), + ("This is a test", 6), + ] +) +def test_embedding_usage_single(content, n_tokens): + global server + server.start() + res = server.make_request("POST", "/v1/embeddings", data={"input": content}) + assert res.status_code == 200 + assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens'] + assert res.body['usage']['prompt_tokens'] == n_tokens + + +def test_embedding_usage_multiple(): + global server + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "I believe the meaning of life is", + "I believe the meaning of life is", + ], + }) + assert res.status_code == 200 + assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens'] + assert res.body['usage']['prompt_tokens'] == 2 * 9 + + +def test_embedding_openai_library_base64(): + server.start() + test_input = "Test base64 embedding output" + + # get embedding in default format + res = server.make_request("POST", "/v1/embeddings", data={ + "input": test_input + }) + assert res.status_code == 200 + vec0 = res.body["data"][0]["embedding"] + + # get embedding in base64 format + res = server.make_request("POST", "/v1/embeddings", data={ + "input": test_input, + "encoding_format": "base64" + }) + + assert res.status_code == 200 + assert "data" in res.body + assert len(res.body["data"]) == 1 + + embedding_data = res.body["data"][0] + assert "embedding" in embedding_data + assert isinstance(embedding_data["embedding"], str) + + # Verify embedding is valid base64 + decoded = base64.b64decode(embedding_data["embedding"]) + # Verify decoded data can be converted back to float array + float_count = len(decoded) // 4 # 4 bytes per float + floats = struct.unpack(f'{float_count}f', decoded) + assert len(floats) > 0 + assert all(isinstance(x, float) for x in floats) + assert len(floats) == len(vec0) + + # make sure the decoded data is the same as the original + for x, y in zip(floats, vec0): + assert abs(x - y) < EPSILON diff --git a/llama.cpp/tools/server/tests/unit/test_infill.py b/llama.cpp/tools/server/tests/unit/test_infill.py new file mode 100644 index 0000000..cd1a391 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_infill.py @@ -0,0 +1,77 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama_infill() + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama_infill() + + +def test_infill_without_input_extra(): + global server + server.start() + res = server.make_request("POST", "/infill", data={ + "input_prefix": "#include \n#include \"llama.h\"\n\nint main() {\n", + "prompt": " int n_threads = llama_", + "input_suffix": "}\n", + }) + assert res.status_code == 200 + assert match_regex("(Ann|small|shiny|Daddy|Jimmy)+", res.body["content"]) + + +def test_infill_with_input_extra(): + global server + server.start() + res = server.make_request("POST", "/infill", data={ + "input_extra": [{ + "filename": "llama.h", + "text": "LLAMA_API int32_t llama_n_threads();\n" + }], + "input_prefix": "#include \n#include \"llama.h\"\n\nint main() {\n", + "prompt": " int n_threads = llama_", + "input_suffix": "}\n", + }) + assert res.status_code == 200 + assert match_regex("(Dad|excited|park|Jimmy)+", 
res.body["content"]) + + +@pytest.mark.parametrize("input_extra", [ + {}, + {"filename": "ok"}, + {"filename": 123}, + {"filename": 123, "text": "abc"}, + {"filename": 123, "text": 456}, +]) +def test_invalid_input_extra_req(input_extra): + global server + server.start() + res = server.make_request("POST", "/infill", data={ + "input_extra": [input_extra], + "input_prefix": "#include \n#include \"llama.h\"\n\nint main() {\n", + "prompt": " int n_threads = llama_", + "input_suffix": "}\n", + }) + assert res.status_code == 400 + assert "error" in res.body + + +@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test") +def test_with_qwen_model(): + global server + server.model_file = None + server.model_hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-IQ3_XXS-GGUF" + server.model_hf_file = "qwen2.5-coder-1.5b-iq3_xxs-imat.gguf" + server.start(timeout_seconds=600) + res = server.make_request("POST", "/infill", data={ + "input_extra": [{ + "filename": "llama.h", + "text": "LLAMA_API int32_t llama_n_threads();\n" + }], + "input_prefix": "#include \n#include \"llama.h\"\n\nint main() {\n", + "prompt": " int n_threads = llama_", + "input_suffix": "}\n", + }) + assert res.status_code == 200 + assert res.body["content"] == "n_threads();\n printf(\"Number of threads: %d\\n\", n_threads);\n return 0;\n" diff --git a/llama.cpp/tools/server/tests/unit/test_lora.py b/llama.cpp/tools/server/tests/unit/test_lora.py new file mode 100644 index 0000000..00b2f24 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_lora.py @@ -0,0 +1,115 @@ +import pytest +from utils import * + +server = ServerPreset.stories15m_moe() + +LORA_FILE_URL = "https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe_shakespeare15M.gguf" + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.stories15m_moe() + server.lora_files = [download_file(LORA_FILE_URL)] + + +@pytest.mark.parametrize("scale,re_content", [ + # without applying lora, the model should behave like a bedtime story generator + (0.0, "(little|girl|three|years|old)+"), + # with lora, the model should behave like a Shakespearean text generator + (1.0, "(eye|love|glass|sun)+"), +]) +def test_lora(scale: float, re_content: str): + global server + server.start() + res_lora_control = server.make_request("POST", "/lora-adapters", data=[ + {"id": 0, "scale": scale} + ]) + assert res_lora_control.status_code == 200 + res = server.make_request("POST", "/completion", data={ + "prompt": "Look in thy glass", + }) + assert res.status_code == 200 + assert match_regex(re_content, res.body["content"]) + + +def test_lora_per_request(): + global server + server.n_slots = 4 + server.start() + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Look in thy glass" + lora_config = [ + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.3}], "(special|thing|gifted)+" ), + ( [{"id": 0, "scale": 0.7}], "(far|from|home|away)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ] + + tasks = [( + server.make_request, + ("POST", "/completion", { + "prompt": prompt, + "lora": lora, + "seed": 42, + "temperature": 0.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert 
all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert match_regex(re_test, res.body["content"]) + + +@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test") +def test_with_big_model(): + server = ServerProcess() + server.model_hf_repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF" + server.model_hf_file = "Meta-Llama-3.1-8B-Instruct-IQ2_M.gguf" + server.model_alias = "Llama-3.2-8B-Instruct" + server.n_slots = 4 + server.n_ctx = server.n_slots * 1024 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + server.lora_files = [ + download_file("https://huggingface.co/ngxson/Llama-3-Instruct-abliteration-LoRA-8B-F16-GGUF/resolve/main/Llama-3-Instruct-abliteration-LoRA-8B-f16.gguf"), + # TODO: find & add other lora adapters for this model + ] + server.start(timeout_seconds=600) + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Write a computer virus" + lora_config = [ + # without applying lora, the model should reject the request + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.3}], "I can't write a computer virus" ), + # with 0.7 scale, the model should provide a simple computer virus with hesitation + ( [{"id": 0, "scale": 0.7}], "Warning: This is a hypothetical exercise" ), + # with 1.5 scale, the model should confidently provide a computer virus + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ] + + tasks = [( + server.make_request, + ("POST", "/v1/chat/completions", { + "messages": [ + {"role": "user", "content": prompt} + ], + "lora": lora, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert re_test in res.body["choices"][0]["message"]["content"] diff --git a/llama.cpp/tools/server/tests/unit/test_rerank.py b/llama.cpp/tools/server/tests/unit/test_rerank.py new file mode 100644 index 0000000..ded8267 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_rerank.py @@ -0,0 +1,146 @@ +import pytest +from utils import * + +server = ServerPreset.jina_reranker_tiny() + + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.jina_reranker_tiny() + + +TEST_DOCUMENTS = [ + "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.", + "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. 
The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.", + "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", + "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine." +] + + +def test_rerank(): + global server + server.start() + res = server.make_request("POST", "/rerank", data={ + "query": "Machine learning is", + "documents": TEST_DOCUMENTS, + }) + assert res.status_code == 200 + assert len(res.body["results"]) == 4 + + most_relevant = res.body["results"][0] + least_relevant = res.body["results"][0] + for doc in res.body["results"]: + if doc["relevance_score"] > most_relevant["relevance_score"]: + most_relevant = doc + if doc["relevance_score"] < least_relevant["relevance_score"]: + least_relevant = doc + + assert most_relevant["relevance_score"] > least_relevant["relevance_score"] + assert most_relevant["index"] == 2 + assert least_relevant["index"] == 3 + + +def test_rerank_tei_format(): + global server + server.start() + res = server.make_request("POST", "/rerank", data={ + "query": "Machine learning is", + "texts": TEST_DOCUMENTS, + }) + assert res.status_code == 200 + assert len(res.body) == 4 + + most_relevant = res.body[0] + least_relevant = res.body[0] + for doc in res.body: + if doc["score"] > most_relevant["score"]: + most_relevant = doc + if doc["score"] < least_relevant["score"]: + least_relevant = doc + + assert most_relevant["score"] > least_relevant["score"] + assert most_relevant["index"] == 2 + assert least_relevant["index"] == 3 + + +@pytest.mark.parametrize("documents", [ + [], + None, + 123, + [1, 2, 3], +]) +def test_invalid_rerank_req(documents): + global server + server.start() + res = server.make_request("POST", "/rerank", data={ + "query": "Machine learning is", + "documents": documents, + }) + assert res.status_code == 400 + assert "error" in res.body + + +@pytest.mark.parametrize( + "query,doc1,doc2,n_tokens", + [ + ("Machine learning is", "A machine", "Learning is", 19), + ("Which city?", "Machine learning is ", "Paris, capitale de la", 26), + ] +) +def test_rerank_usage(query, doc1, doc2, n_tokens): + global server + server.start() + + res = server.make_request("POST", "/rerank", data={ + "query": query, + "documents": [ + doc1, + doc2, + ] + }) + assert res.status_code == 200 + assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens'] + assert res.body['usage']['prompt_tokens'] == n_tokens + + +@pytest.mark.parametrize("top_n,expected_len", [ + (None, len(TEST_DOCUMENTS)), # no top_n parameter + (2, 2), + (4, 4), + (99, len(TEST_DOCUMENTS)), # higher than available docs +]) +def test_rerank_top_n(top_n, expected_len): + global server + server.start() + data = { + "query": "Machine learning is", + "documents": TEST_DOCUMENTS, + } + if top_n is not None: + data["top_n"] = top_n + + res = server.make_request("POST", "/rerank", data=data) + assert res.status_code == 200 + assert len(res.body["results"]) == expected_len + + +@pytest.mark.parametrize("top_n,expected_len", [ + (None, len(TEST_DOCUMENTS)), # no top_n parameter + (2, 2), + (4, 4), + (99, len(TEST_DOCUMENTS)), # higher than 
available docs +]) +def test_rerank_tei_top_n(top_n, expected_len): + global server + server.start() + data = { + "query": "Machine learning is", + "texts": TEST_DOCUMENTS, + } + if top_n is not None: + data["top_n"] = top_n + + res = server.make_request("POST", "/rerank", data=data) + assert res.status_code == 200 + assert len(res.body) == expected_len diff --git a/llama.cpp/tools/server/tests/unit/test_router.py b/llama.cpp/tools/server/tests/unit/test_router.py new file mode 100644 index 0000000..e85f2c3 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_router.py @@ -0,0 +1,194 @@ +import pytest +from utils import * + +server: ServerProcess + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.router() + + +@pytest.mark.parametrize( + "model,success", + [ + ("ggml-org/tinygemma3-GGUF:Q8_0", True), + ("non-existent/model", False), + ] +) +def test_router_chat_completion_stream(model: str, success: bool): + global server + server.start() + content = "" + ex: ServerError | None = None + try: + res = server.make_stream_request("POST", "/chat/completions", data={ + "model": model, + "max_tokens": 16, + "messages": [ + {"role": "user", "content": "hello"}, + ], + "stream": True, + }) + for data in res: + if data["choices"]: + choice = data["choices"][0] + if choice["finish_reason"] in ["stop", "length"]: + assert "content" not in choice["delta"] + else: + assert choice["finish_reason"] is None + content += choice["delta"]["content"] or '' + except ServerError as e: + ex = e + + if success: + assert ex is None + assert len(content) > 0 + else: + assert ex is not None + assert content == "" + + +def _get_model_status(model_id: str) -> str: + res = server.make_request("GET", "/models") + assert res.status_code == 200 + for item in res.body.get("data", []): + if item.get("id") == model_id or item.get("model") == model_id: + return item["status"]["value"] + raise AssertionError(f"Model {model_id} not found in /models response") + + +def _wait_for_model_status(model_id: str, desired: set[str], timeout: int = 60) -> str: + deadline = time.time() + timeout + last_status = None + while time.time() < deadline: + last_status = _get_model_status(model_id) + if last_status in desired: + return last_status + time.sleep(1) + raise AssertionError( + f"Timed out waiting for {model_id} to reach {desired}, last status: {last_status}" + ) + + +def _load_model_and_wait( + model_id: str, timeout: int = 60, headers: dict | None = None +) -> None: + load_res = server.make_request( + "POST", "/models/load", data={"model": model_id}, headers=headers + ) + assert load_res.status_code == 200 + assert isinstance(load_res.body, dict) + assert load_res.body.get("success") is True + _wait_for_model_status(model_id, {"loaded"}, timeout=timeout) + + +def test_router_unload_model(): + global server + server.start() + model_id = "ggml-org/tinygemma3-GGUF:Q8_0" + + _load_model_and_wait(model_id) + + unload_res = server.make_request("POST", "/models/unload", data={"model": model_id}) + assert unload_res.status_code == 200 + assert unload_res.body.get("success") is True + _wait_for_model_status(model_id, {"unloaded"}) + + +def test_router_models_max_evicts_lru(): + global server + server.models_max = 2 + server.start() + + candidate_models = [ + "ggml-org/tinygemma3-GGUF:Q8_0", + "ggml-org/test-model-stories260K", + "ggml-org/test-model-stories260K-infill", + ] + + # Load only the first 2 models to fill the cache + first, second, third = candidate_models[:3] + + 
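+ # load the first two models sequentially; _load_model_and_wait blocks until each reports the "loaded" status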
_load_model_and_wait(first, timeout=120) + _load_model_and_wait(second, timeout=120) + + # Verify both models are loaded + assert _get_model_status(first) == "loaded" + assert _get_model_status(second) == "loaded" + + # Load the third model - this should trigger LRU eviction of the first model + _load_model_and_wait(third, timeout=120) + + # Verify eviction: third is loaded, first was evicted + assert _get_model_status(third) == "loaded" + assert _get_model_status(first) == "unloaded" + + +def test_router_no_models_autoload(): + global server + server.no_models_autoload = True + server.start() + model_id = "ggml-org/tinygemma3-GGUF:Q8_0" + + res = server.make_request( + "POST", + "/v1/chat/completions", + data={ + "model": model_id, + "messages": [{"role": "user", "content": "hello"}], + "max_tokens": 4, + }, + ) + assert res.status_code == 400 + assert "error" in res.body + + _load_model_and_wait(model_id) + + success_res = server.make_request( + "POST", + "/v1/chat/completions", + data={ + "model": model_id, + "messages": [{"role": "user", "content": "hello"}], + "max_tokens": 4, + }, + ) + assert success_res.status_code == 200 + assert "error" not in success_res.body + + +def test_router_api_key_required(): + global server + server.api_key = "sk-router-secret" + server.start() + + model_id = "ggml-org/tinygemma3-GGUF:Q8_0" + auth_headers = {"Authorization": f"Bearer {server.api_key}"} + + res = server.make_request( + "POST", + "/v1/chat/completions", + data={ + "model": model_id, + "messages": [{"role": "user", "content": "hello"}], + "max_tokens": 4, + }, + ) + assert res.status_code == 401 + assert res.body.get("error", {}).get("type") == "authentication_error" + + _load_model_and_wait(model_id, headers=auth_headers) + + authed = server.make_request( + "POST", + "/v1/chat/completions", + headers=auth_headers, + data={ + "model": model_id, + "messages": [{"role": "user", "content": "hello"}], + "max_tokens": 4, + }, + ) + assert authed.status_code == 200 + assert "error" not in authed.body diff --git a/llama.cpp/tools/server/tests/unit/test_security.py b/llama.cpp/tools/server/tests/unit/test_security.py new file mode 100644 index 0000000..8c38b89 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_security.py @@ -0,0 +1,127 @@ +import pytest +from openai import OpenAI +from utils import * + +server = ServerPreset.tinyllama2() + +TEST_API_KEY = "sk-this-is-the-secret-key" + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.api_key = TEST_API_KEY + + +@pytest.mark.parametrize("endpoint", ["/health", "/models"]) +def test_access_public_endpoint(endpoint: str): + global server + server.start() + res = server.make_request("GET", endpoint) + assert res.status_code == 200 + assert "error" not in res.body + + +@pytest.mark.parametrize("api_key", [None, "invalid-key"]) +def test_incorrect_api_key(api_key: str): + global server + server.start() + res = server.make_request("POST", "/completions", data={ + "prompt": "I believe the meaning of life is", + }, headers={ + "Authorization": f"Bearer {api_key}" if api_key else None, + }) + assert res.status_code == 401 + assert "error" in res.body + assert res.body["error"]["type"] == "authentication_error" + + +def test_correct_api_key(): + global server + server.start() + res = server.make_request("POST", "/completions", data={ + "prompt": "I believe the meaning of life is", + }, headers={ + "Authorization": f"Bearer {TEST_API_KEY}", + }) + assert res.status_code == 200 + assert 
"error" not in res.body + assert "content" in res.body + + +def test_correct_api_key_anthropic_header(): + global server + server.start() + res = server.make_request("POST", "/completions", data={ + "prompt": "I believe the meaning of life is", + }, headers={ + "X-Api-Key": TEST_API_KEY, + }) + assert res.status_code == 200 + assert "error" not in res.body + assert "content" in res.body + + +def test_openai_library_correct_api_key(): + global server + server.start() + client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}") + res = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": "You are a chatbot."}, + {"role": "user", "content": "What is the meaning of life?"}, + ], + ) + assert len(res.choices) == 1 + + +@pytest.mark.parametrize("origin,cors_header,cors_header_value", [ + ("localhost", "Access-Control-Allow-Origin", "localhost"), + ("web.mydomain.fr", "Access-Control-Allow-Origin", "web.mydomain.fr"), + ("origin", "Access-Control-Allow-Credentials", "true"), + ("web.mydomain.fr", "Access-Control-Allow-Methods", "GET, POST"), + ("web.mydomain.fr", "Access-Control-Allow-Headers", "*"), +]) +def test_cors_options(origin: str, cors_header: str, cors_header_value: str): + global server + server.start() + res = server.make_request("OPTIONS", "/completions", headers={ + "Origin": origin, + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "Authorization", + }) + assert res.status_code == 200 + assert cors_header in res.headers + assert res.headers[cors_header] == cors_header_value + + +@pytest.mark.parametrize( + "media_path, image_url, success", + [ + (None, "file://mtmd/test-1.jpeg", False), # disabled media path, should fail + ("../../../tools", "file://mtmd/test-1.jpeg", True), + ("../../../tools", "file:////mtmd//test-1.jpeg", True), # should be the same file as above + ("../../../tools", "file://mtmd/notfound.jpeg", False), # non-existent file + ("../../../tools", "file://../mtmd/test-1.jpeg", False), # no directory traversal + ] +) +def test_local_media_file(media_path, image_url, success,): + server = ServerPreset.tinygemma3() + server.media_path = media_path + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 1, + "messages": [ + {"role": "user", "content": [ + {"type": "text", "text": "test"}, + {"type": "image_url", "image_url": { + "url": image_url, + }}, + ]}, + ], + }) + if success: + assert res.status_code == 200 + else: + assert res.status_code == 400 diff --git a/llama.cpp/tools/server/tests/unit/test_sleep.py b/llama.cpp/tools/server/tests/unit/test_sleep.py new file mode 100644 index 0000000..3374165 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_sleep.py @@ -0,0 +1,39 @@ +import pytest +import time +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + +def test_server_sleep(): + global server + server.sleep_idle_seconds = 1 + server.start() + + # wait a bit so that server can go to sleep + time.sleep(2) + + # make sure these endpoints are still responsive after sleep + res = server.make_request("GET", "/health") + assert res.status_code == 200 + res = server.make_request("GET", "/props") + assert res.status_code == 200 + assert res.body["is_sleeping"] == True + + # make a generation request to wake up the server + res = server.make_request("POST", "/completion", data={ + 
"n_predict": 1, + "prompt": "Hello", + }) + assert res.status_code == 200 + + # it should no longer be sleeping + res = server.make_request("GET", "/props") + assert res.status_code == 200 + assert res.body["is_sleeping"] == False diff --git a/llama.cpp/tools/server/tests/unit/test_slot_save.py b/llama.cpp/tools/server/tests/unit/test_slot_save.py new file mode 100644 index 0000000..1b428cc --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_slot_save.py @@ -0,0 +1,98 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.slot_save_path = "./tmp" + server.temperature = 0.0 + + +def test_slot_save_restore(): + global server + server.start() + + # First prompt in slot 1 should be fully processed + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed + + # Save state of slot 1 + res = server.make_request("POST", "/slots/1?action=save", data={ + "filename": "slot1.bin", + }) + assert res.status_code == 200 + assert res.body["n_saved"] == 84 + + # Since we have cache, this should only process the last tokens + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 6 # only different part is processed + + # Loading the saved cache into slot 0 + res = server.make_request("POST", "/slots/0?action=restore", data={ + "filename": "slot1.bin", + }) + assert res.status_code == 200 + assert res.body["n_restored"] == 84 + + # Since we have cache, slot 0 should only process the last tokens + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 0, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 6 # only different part is processed + + # For verification that slot 1 was not corrupted during slot 0 load, same thing should work + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 1 + + +def test_slot_erase(): + global server + server.start() + + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed + + # erase slot 1 + res = server.make_request("POST", "/slots/1?action=erase") + assert res.status_code == 200 + + # re-run the same prompt, it should process all tokens again + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + assert 
res.body["timings"]["prompt_n"] == 21 # all tokens are processed diff --git a/llama.cpp/tools/server/tests/unit/test_speculative.py b/llama.cpp/tools/server/tests/unit/test_speculative.py new file mode 100644 index 0000000..eebd3cc --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_speculative.py @@ -0,0 +1,131 @@ +import pytest +from utils import * + +# We use a F16 MOE gguf as main model, and q4_0 as draft model + +server = ServerPreset.stories15m_moe() + +MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf" + +def create_server(): + global server + server = ServerPreset.stories15m_moe() + # set default values + server.model_draft = download_file(MODEL_DRAFT_FILE_URL) + server.draft_min = 4 + server.draft_max = 8 + server.fa = "off" + + +@pytest.fixture(autouse=True) +def fixture_create_server(): + return create_server() + + +def test_with_and_without_draft(): + global server + server.model_draft = None # disable draft model + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + "n_predict": 16, + }) + assert res.status_code == 200 + content_no_draft = res.body["content"] + server.stop() + + # create new server with draft model + create_server() + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + "n_predict": 16, + }) + assert res.status_code == 200 + content_draft = res.body["content"] + + assert content_no_draft == content_draft + + +def test_different_draft_min_draft_max(): + global server + test_values = [ + (1, 2), + (1, 4), + (4, 8), + (4, 12), + (8, 16), + ] + last_content = None + for draft_min, draft_max in test_values: + server.stop() + server.draft_min = draft_min + server.draft_max = draft_max + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + "n_predict": 16, + }) + assert res.status_code == 200 + if last_content is not None: + assert last_content == res.body["content"] + last_content = res.body["content"] + + +def test_slot_ctx_not_exceeded(): + global server + server.n_ctx = 256 + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "Hello " * 248, + "temperature": 0.0, + "top_k": 1, + "speculative.p_min": 0.0, + }) + assert res.status_code == 200 + assert len(res.body["content"]) > 0 + + +def test_with_ctx_shift(): + global server + server.n_ctx = 256 + server.enable_ctx_shift = True + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "Hello " * 248, + "temperature": 0.0, + "top_k": 1, + "n_predict": 256, + "speculative.p_min": 0.0, + }) + assert res.status_code == 200 + assert len(res.body["content"]) > 0 + assert res.body["tokens_predicted"] == 256 + assert res.body["truncated"] == True + + +@pytest.mark.parametrize("n_slots,n_requests", [ + (1, 2), + (2, 2), +]) +def test_multi_requests_parallel(n_slots: int, n_requests: int): + global server + server.n_slots = n_slots + server.start() + tasks = [] + for _ in range(n_requests): + tasks.append((server.make_request, ("POST", "/completion", { + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + }))) + results = parallel_function_calls(tasks) + for res in results: + assert res.status_code == 200 + assert match_regex("(wise|kind|owl|answer)+", 
res.body["content"]) diff --git a/llama.cpp/tools/server/tests/unit/test_template.py b/llama.cpp/tools/server/tests/unit/test_template.py new file mode 100644 index 0000000..e5185fc --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_template.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +import pytest + +# ensure grandparent path is in sys.path +from pathlib import Path +import sys + +from unit.test_tool_call import TEST_TOOL +path = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(path)) + +import datetime +from utils import * + +server: ServerProcess + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.model_alias = "tinyllama-2" + server.n_slots = 1 + + +@pytest.mark.parametrize("tools", [None, [], [TEST_TOOL]]) +@pytest.mark.parametrize("template_name,reasoning_budget,expected_end", [ + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", None, "\n"), + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", -1, "\n"), + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", 0, "\n"), + + ("Qwen-Qwen3-0.6B", -1, "<|im_start|>assistant\n"), + ("Qwen-Qwen3-0.6B", 0, "<|im_start|>assistant\n\n\n\n\n"), + + ("Qwen-QwQ-32B", -1, "<|im_start|>assistant\n\n"), + ("Qwen-QwQ-32B", 0, "<|im_start|>assistant\n\n"), + + ("CohereForAI-c4ai-command-r7b-12-2024-tool_use", -1, "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"), + ("CohereForAI-c4ai-command-r7b-12-2024-tool_use", 0, "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|><|END_THINKING|>"), +]) +def test_reasoning_budget(template_name: str, reasoning_budget: int | None, expected_end: str, tools: list[dict]): + global server + server.jinja = True + server.reasoning_budget = reasoning_budget + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start() + + res = server.make_request("POST", "/apply-template", data={ + "messages": [ + {"role": "user", "content": "What is today?"}, + ], + "tools": tools, + }) + assert res.status_code == 200 + prompt = res.body["prompt"] + + assert prompt.endswith(expected_end), f"Expected prompt to end with '{expected_end}', got '{prompt}'" + + +@pytest.mark.parametrize("tools", [None, [], [TEST_TOOL]]) +@pytest.mark.parametrize("template_name,format", [ + ("meta-llama-Llama-3.3-70B-Instruct", "%d %b %Y"), + ("fireworks-ai-llama-3-firefunction-v2", "%b %d %Y"), +]) +def test_date_inside_prompt(template_name: str, format: str, tools: list[dict]): + global server + server.jinja = True + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start() + + res = server.make_request("POST", "/apply-template", data={ + "messages": [ + {"role": "user", "content": "What is today?"}, + ], + "tools": tools, + }) + assert res.status_code == 200 + prompt = res.body["prompt"] + + today_str = datetime.date.today().strftime(format) + assert today_str in prompt, f"Expected today's date ({today_str}) in content ({prompt})" + + +@pytest.mark.parametrize("add_generation_prompt", [False, True]) +@pytest.mark.parametrize("template_name,expected_generation_prompt", [ + ("meta-llama-Llama-3.3-70B-Instruct", "<|start_header_id|>assistant<|end_header_id|>"), +]) +def test_add_generation_prompt(template_name: str, expected_generation_prompt: str, add_generation_prompt: bool): + global server + server.jinja = True + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start() + + res = server.make_request("POST", "/apply-template", data={ + "messages": [ + {"role": "user", "content": 
"What is today?"}, + ], + "add_generation_prompt": add_generation_prompt, + }) + assert res.status_code == 200 + prompt = res.body["prompt"] + + if add_generation_prompt: + assert expected_generation_prompt in prompt, f"Expected generation prompt ({expected_generation_prompt}) in content ({prompt})" + else: + assert expected_generation_prompt not in prompt, f"Did not expect generation prompt ({expected_generation_prompt}) in content ({prompt})" diff --git a/llama.cpp/tools/server/tests/unit/test_tokenize.py b/llama.cpp/tools/server/tests/unit/test_tokenize.py new file mode 100644 index 0000000..424cac5 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_tokenize.py @@ -0,0 +1,59 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + +def test_tokenize_detokenize(): + global server + server.start() + # tokenize + content = "What is the capital of France ?" + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content + }) + assert res_tok.status_code == 200 + assert len(res_tok.body["tokens"]) > 5 + # detokenize + res_detok = server.make_request("POST", "/detokenize", data={ + "tokens": res_tok.body["tokens"], + }) + assert res_detok.status_code == 200 + assert res_detok.body["content"].strip() == content + + +def test_tokenize_with_bos(): + global server + server.start() + # tokenize + content = "What is the capital of France ?" + bosId = 1 + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content, + "add_special": True, + }) + assert res_tok.status_code == 200 + assert res_tok.body["tokens"][0] == bosId + + +def test_tokenize_with_pieces(): + global server + server.start() + # tokenize + content = "This is a test string with unicode 媽 and emoji 🤗" + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content, + "with_pieces": True, + }) + assert res_tok.status_code == 200 + for token in res_tok.body["tokens"]: + assert "id" in token + assert token["id"] > 0 + assert "piece" in token + assert len(token["piece"]) > 0 diff --git a/llama.cpp/tools/server/tests/unit/test_tool_call.py b/llama.cpp/tools/server/tests/unit/test_tool_call.py new file mode 100755 index 0000000..b8f0f10 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_tool_call.py @@ -0,0 +1,625 @@ +#!/usr/bin/env python +import pytest + +# ensure grandparent path is in sys.path +from pathlib import Path +import sys +path = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(path)) + +from utils import * +from enum import Enum + +server: ServerProcess + +TIMEOUT_START_SLOW = 15 * 60 # this is needed for real model tests +TIMEOUT_HTTP_REQUEST = 60 + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.model_alias = "tinyllama-2-tool-call" + server.server_port = 8081 + server.n_slots = 1 + server.n_ctx = 8192 + server.n_batch = 2048 + +class CompletionMode(Enum): + NORMAL = "normal" + STREAMED = "streamed" + +TEST_TOOL = { + "type":"function", + "function": { + "name": "test", + "description": "", + "parameters": { + "type": "object", + "properties": { + "success": {"type": "boolean", "const": True}, + }, + "required": ["success"] + } + } +} + +PYTHON_TOOL = { + "type": "function", + "function": { + "name": "python", + "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", + "parameters": { + "type": 
"object", + "properties": { + "code": { + "type": "string", + "description": "The code to run in the ipython interpreter." + } + }, + "required": ["code"] + } + } +} + +WEATHER_TOOL = { + "type":"function", + "function":{ + "name":"get_current_weather", + "description":"Get the current weather in a given location", + "parameters":{ + "type":"object", + "properties":{ + "location":{ + "type":"string", + "description":"The city and country/state, e.g. 'San Francisco, CA', or 'Paris, France'" + } + }, + "required":["location"] + } + } +} + +def do_test_completion_with_required_tool_tiny(server: ServerProcess, tool: dict, argument_key: str | None, n_predict, **kwargs): + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "tool_choice": "required", + "tools": [tool], + "parallel_tool_calls": False, + **kwargs, + }) + # assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + # assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' + expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"] + assert expected_function_name == tool_call["function"]["name"] + actual_arguments = tool_call["function"]["arguments"] + assert isinstance(actual_arguments, str) + if argument_key is not None: + actual_arguments = json.loads(actual_arguments) + assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}" + + +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("template_name,tool,argument_key", [ + ("google-gemma-2-2b-it", TEST_TOOL, "success"), + ("google-gemma-2-2b-it", TEST_TOOL, "success"), + ("meta-llama-Llama-3.3-70B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.3-70B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.3-70B-Instruct", PYTHON_TOOL, "code"), + ("meta-llama-Llama-3.3-70B-Instruct", PYTHON_TOOL, "code"), +]) +def test_completion_with_required_tool_tiny_fast(template_name: str, tool: dict, argument_key: str | None, stream: CompletionMode): + global server + n_predict = 1024 + # server = ServerPreset.stories15m_moe() + server.jinja = True + server.n_predict = n_predict + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start() + do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED, temperature=0.0, top_k=1, top_p=1.0) + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("template_name,tool,argument_key", [ + ("meta-llama-Llama-3.1-8B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.1-8B-Instruct", PYTHON_TOOL, "code"), + + ("meetkai-functionary-medium-v3.1", TEST_TOOL, "success"), + ("meetkai-functionary-medium-v3.1", PYTHON_TOOL, "code"), + + ("meetkai-functionary-medium-v3.2", TEST_TOOL, "success"), + # Functionary v3.2 format supports raw python content, which w/ a dummy stories 
model will never end on its own. + # ("meetkai-functionary-medium-v3.2", PYTHON_TOOL, "code"), + + ("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", TEST_TOOL, "success"), + ("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", PYTHON_TOOL, "code"), + + ("meta-llama-Llama-3.2-3B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.2-3B-Instruct", PYTHON_TOOL, "code"), + + ("mistralai-Mistral-Nemo-Instruct-2407", TEST_TOOL, "success"), + ("mistralai-Mistral-Nemo-Instruct-2407", PYTHON_TOOL, "code"), + + ("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", TEST_TOOL, "success"), + ("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", PYTHON_TOOL, "code"), + + ("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", TEST_TOOL, "success"), + ("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", PYTHON_TOOL, "code"), + + ("fireworks-ai-llama-3-firefunction-v2", TEST_TOOL, "success"), + # ("fireworks-ai-llama-3-firefunction-v2", PYTHON_TOOL, "codeFalse), True), + # ("fireworks-ai-llama-3-firefunction-v2", PYTHON_TOOL, "code"), + +]) +def test_completion_with_required_tool_tiny_slow(template_name: str, tool: dict, argument_key: str | None, stream: CompletionMode): + global server + n_predict = 512 + # server = ServerPreset.stories15m_moe() + server.jinja = True + server.n_predict = n_predict + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start(timeout_seconds=TIMEOUT_START_SLOW) + do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED) + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("tool,argument_key,hf_repo,template_override", [ + (TEST_TOOL, "success", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", 
"bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), + + # (TEST_TOOL, "success", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + # (PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + # (PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", ("meetkai/functionary-medium-v3.2", None)), + (PYTHON_TOOL, "code", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", ("meetkai/functionary-medium-v3.2", None)), + (PYTHON_TOOL, "code", "bartowski/functionary-small-v3.2-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"), + + (TEST_TOOL, "success", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), +]) +def test_completion_with_required_tool_real_model(tool: dict, argument_key: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode): + global server + n_predict = 512 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if isinstance(template_override, tuple): + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." 
+ elif isinstance(template_override, str): + server.chat_template = template_override + server.start(timeout_seconds=TIMEOUT_START_SLOW) + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "tool_choice": "required", + "tools": [tool], + "parallel_tool_calls": False, + "stream": stream == CompletionMode.STREAMED, + "temperature": 0.0, + "top_k": 1, + "top_p": 1.0, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + # assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"] + assert expected_function_name == tool_call["function"]["name"] + actual_arguments = tool_call["function"]["arguments"] + assert isinstance(actual_arguments, str) + if argument_key is not None: + actual_arguments = json.loads(actual_arguments) + assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}" + + +def do_test_completion_without_tool_call(server: ServerProcess, n_predict: int, tools: list[dict], tool_choice: str | None, **kwargs): + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "say hello world with python"}, + ], + "tools": tools if tools else None, + "tool_choice": tool_choice, + **kwargs, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}' + + +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [ + ("meta-llama-Llama-3.3-70B-Instruct", 128, [], None), + ("meta-llama-Llama-3.3-70B-Instruct", 128, [TEST_TOOL], None), + ("meta-llama-Llama-3.3-70B-Instruct", 128, [PYTHON_TOOL], 'none'), +]) +def test_completion_without_tool_call_fast(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None, stream: CompletionMode): + global server + server.n_predict = n_predict + server.jinja = True + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start() + do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED) + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [ + ("meetkai-functionary-medium-v3.2", 256, [], None), + ("meetkai-functionary-medium-v3.2", 256, [TEST_TOOL], None), + ("meetkai-functionary-medium-v3.2", 256, [PYTHON_TOOL], 'none'), + ("meetkai-functionary-medium-v3.1", 256, [], None), + ("meetkai-functionary-medium-v3.1", 256, [TEST_TOOL], None), + ("meetkai-functionary-medium-v3.1", 256, [PYTHON_TOOL], 'none'), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [], None), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [TEST_TOOL], None), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [PYTHON_TOOL], 'none'), +]) +def 
test_completion_without_tool_call_slow(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None, stream: CompletionMode): + global server + server.n_predict = n_predict + server.jinja = True + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start(timeout_seconds=TIMEOUT_START_SLOW) + do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED) + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("hf_repo,template_override", [ + ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Qwen2.5-1.5B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + ("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + ("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), + + # ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + # ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + + # ("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + # ("bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), + + ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")), + + ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + + # Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it. + ("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + + # ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), +]) +def test_weather(hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode): + global server + n_predict = 512 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if isinstance(template_override, tuple): + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." 
+ elif isinstance(template_override, str): + server.chat_template = template_override + server.start() + do_test_weather(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict) + + +def do_test_weather(server: ServerProcess, **kwargs): + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "messages": [ + {"role": "system", "content": "You are a chatbot that uses tools/functions. Dont overthink things."}, + {"role": "user", "content": "What is the weather in Istanbul?"}, + ], + "tools": [WEATHER_TOOL], + **kwargs, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + # assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + assert tool_call["function"]["name"] == WEATHER_TOOL["function"]["name"], f'Expected weather tool call, got {tool_call["function"]["name"]}' + # assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' + actual_arguments = json.loads(tool_call["function"]["arguments"]) + assert 'location' in actual_arguments, f"location not found in {json.dumps(actual_arguments)}" + location = actual_arguments["location"] + assert isinstance(location, str), f"Expected location to be a string, got {type(location)}: {json.dumps(location)}" + assert re.match('^Istanbul(( |, ?)(TR|Turkey|Türkiye))?$', location), f'Expected Istanbul for location, got {location}' + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("result_override,n_predict,hf_repo,template_override", [ + (None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + (None, 128, "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", None), + (None, 128, "bartowski/Qwen2.5-Coder-3B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, 128, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, 128, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (None, 128, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (None, 128, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + (None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + (None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + (None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), + + # TODO: fix these (wrong results, either didn't respect decimal instruction or got wrong value) + # (None, 128, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + # ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), +]) +def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode): + global server + server.jinja = True + server.n_ctx = 8192 * 2 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if isinstance(template_override, tuple): + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') 
+ ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + elif isinstance(template_override, str): + server.chat_template = template_override + server.start(timeout_seconds=TIMEOUT_START_SLOW) + do_test_calc_result(server, result_override, n_predict, stream=stream == CompletionMode.STREAMED) + + +def do_test_calc_result(server: ServerProcess, result_override: str | None, n_predict: int, **kwargs): + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a tools-calling assistant. You express numerical values with at most two decimals."}, + {"role": "user", "content": "What's the y coordinate of a point on the unit sphere at angle 30 degrees?"}, + { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "call_6789", + "type": "function", + "function": { + "name": "calculate", + "arguments": "{\"expression\":\"sin(30 * pi / 180)\"}" + } + } + ] + }, + { + "role": "tool", + "name": "calculate", + "content": "0.55644242476", + "tool_call_id": "call_6789" + } + ], + "tools": [ + { + "type":"function", + "function":{ + "name":"calculate", + "description":"A calculator function that computes values of arithmetic expressions in the Python syntax", + "parameters":{ + "type":"object", + "properties":{ + "expression":{ + "type":"string", + "description":"An arithmetic expression to compute the value of (Python syntax, assuming all floats)" + } + }, + "required":["expression"] + } + } + } + ], + **kwargs, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls is None, f'Expected no tool call in {choice["message"]}' + content = choice["message"].get("content") + assert content is not None, f'Expected content in {choice["message"]}' + if result_override is not None: + assert re.match(result_override, content), f'Expected {result_override}, got {content}' + else: + assert re.match('^[\\s\\S]*?((That\'s|\\bis) (approximately )?)?\\b0\\.(5\\b|56\\b|556)', content), \ + f'Expected something like "The y coordinate is 0.56.", got {content}' + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("n_predict,reasoning_format,expect_reasoning_content,expect_content,hf_repo,template_override", [ + (128, 'deepseek', None, "^The sum of 102 and 7 is 109[\\s\\S]*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (128, None, None, "^The sum of 102 and 7 is 109[\\s\\S]*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (1024, 'deepseek', "I need to calculate the sum of 102 and 7[\\s\\S]*", "To find the sum of[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + (1024, 'deepseek', "First, I [\\s\\S]*", "To find the sum of[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), + # (1024, 'none', CompletionMode.NORMAL, None, "^(\\s*)?I need[\\s\\S]*?\\s*To find[\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + # (128, 'deepseek', None, "^Okay, let me figure out the sum of 102 and 7[\\s\\S]*", "bartowski/Qwen_QwQ-32B-GGUF:Q4_K_M", None), +]) +def test_thoughts(n_predict: int, reasoning_format: 
Literal['deepseek', 'none'] | None, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode): + global server + server.reasoning_format = reasoning_format + server.jinja = True + server.n_ctx = 8192 * 2 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if isinstance(template_override, tuple): + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + elif isinstance(template_override, str): + server.chat_template = template_override + server.start() + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "user", "content": "What's the sum of 102 and 7?"}, + ], + "stream": stream == CompletionMode.STREAMED, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}' + + content = choice["message"].get("content") + if expect_content is None: + assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + else: + assert re.match(expect_content, content), f'Expected {expect_content}, got {content}' + + reasoning_content = choice["message"].get("reasoning_content") + if expect_reasoning_content is None: + assert reasoning_content is None, f'Expected no reasoning content in {choice["message"]}' + else: + assert re.match(expect_reasoning_content, reasoning_content), f'Expected {expect_reasoning_content}, got {reasoning_content}' + + +@pytest.mark.slow +@pytest.mark.parametrize("stream", [CompletionMode.NORMAL, CompletionMode.STREAMED]) +@pytest.mark.parametrize("hf_repo,template_override", [ + ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + + ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)), + ("bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), + + # ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", None), + + ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", None), + + ("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + ("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")), + ("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), + + ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + + 
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + ("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", "chatml"), +]) +def test_hello_world(hf_repo: str, template_override: str | Tuple[str, str | None] | None, stream: CompletionMode): + global server + n_predict = 512 # High because of DeepSeek R1 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if isinstance(template_override, tuple): + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + elif isinstance(template_override, str): + server.chat_template = template_override + server.start(timeout_seconds=TIMEOUT_START_SLOW) + + do_test_hello_world(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict) + + +def do_test_hello_world(server: ServerProcess, **kwargs): + body = server.make_any_request("POST", "/v1/chat/completions", data={ + "messages": [ + {"role": "system", "content": "You are a tool-calling agent."}, + {"role": "user", "content": "say hello world with python"}, + ], + "tools": [PYTHON_TOOL], + **kwargs, + }, timeout=TIMEOUT_HTTP_REQUEST) + choice = body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + # assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + assert tool_call["function"]["name"] == PYTHON_TOOL["function"]["name"] + # assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' + actual_arguments = json.loads(tool_call["function"]["arguments"]) + assert 'code' in actual_arguments, f"code not found in {json.dumps(actual_arguments)}" + code = actual_arguments["code"] + assert isinstance(code, str), f"Expected code to be a string, got {type(code)}: {json.dumps(code)}" + assert re.match(r'''print\(("[Hh]ello,? [Ww]orld!?"|'[Hh]ello,? 
[Ww]orld!?')\)''', re.sub(r'#.*\n?', '', code)), f'Expected hello world, got {code}' diff --git a/llama.cpp/tools/server/tests/unit/test_vision_api.py b/llama.cpp/tools/server/tests/unit/test_vision_api.py new file mode 100644 index 0000000..9408116 --- /dev/null +++ b/llama.cpp/tools/server/tests/unit/test_vision_api.py @@ -0,0 +1,160 @@ +import pytest +from utils import * +import base64 +import requests + +server: ServerProcess + +def get_img_url(id: str) -> str: + IMG_URL_0 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png" + IMG_URL_1 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/91_cat.png" + if id == "IMG_URL_0": + return IMG_URL_0 + elif id == "IMG_URL_1": + return IMG_URL_1 + elif id == "IMG_BASE64_URI_0": + response = requests.get(IMG_URL_0) + response.raise_for_status() # Raise an exception for bad status codes + return "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8") + elif id == "IMG_BASE64_0": + response = requests.get(IMG_URL_0) + response.raise_for_status() # Raise an exception for bad status codes + return base64.b64encode(response.content).decode("utf-8") + elif id == "IMG_BASE64_URI_1": + response = requests.get(IMG_URL_1) + response.raise_for_status() # Raise an exception for bad status codes + return "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8") + elif id == "IMG_BASE64_1": + response = requests.get(IMG_URL_1) + response.raise_for_status() # Raise an exception for bad status codes + return base64.b64encode(response.content).decode("utf-8") + else: + return id + +JSON_MULTIMODAL_KEY = "multimodal_data" +JSON_PROMPT_STRING_KEY = "prompt_string" + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinygemma3() + +def test_models_supports_multimodal_capability(): + global server + server.start() + res = server.make_request("GET", "/models", data={}) + assert res.status_code == 200 + model_info = res.body["models"][0] + print(model_info) + assert "completion" in model_info["capabilities"] + assert "multimodal" in model_info["capabilities"] + +def test_v1_models_supports_multimodal_capability(): + global server + server.start() + res = server.make_request("GET", "/v1/models", data={}) + assert res.status_code == 200 + model_info = res.body["models"][0] + print(model_info) + assert "completion" in model_info["capabilities"] + assert "multimodal" in model_info["capabilities"] + +@pytest.mark.parametrize( + "prompt, image_url, success, re_content", + [ + # test model is trained on CIFAR-10, but it's quite dumb due to small size + ("What is this:\n", "IMG_URL_0", True, "(cat)+"), + ("What is this:\n", "IMG_BASE64_URI_0", True, "(cat)+"), + ("What is this:\n", "IMG_URL_1", True, "(frog)+"), + ("Test test\n", "IMG_URL_1", True, "(frog)+"), # test invalidate cache + ("What is this:\n", "malformed", False, None), + ("What is this:\n", "https://google.com/404", False, None), # non-existent image + ("What is this:\n", "https://ggml.ai", False, None), # non-image data + # TODO @ngxson : test with multiple images, no images and with audio + ] +) +def test_vision_chat_completion(prompt, image_url, success, re_content): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "temperature": 0.0, + "top_k": 1, + "messages": [ + {"role": "user", "content": [ + {"type": "text", "text": prompt}, + {"type": "image_url", "image_url": { + "url": get_img_url(image_url), + }}, + ]}, + ], + }) + if 
success: + assert res.status_code == 200 + choice = res.body["choices"][0] + assert "assistant" == choice["message"]["role"] + assert match_regex(re_content, choice["message"]["content"]) + else: + assert res.status_code != 200 + + +@pytest.mark.parametrize( + "prompt, image_data, success, re_content", + [ + # test model is trained on CIFAR-10, but it's quite dumb due to small size + ("What is this: <__media__>\n", "IMG_BASE64_0", True, "(cat)+"), + ("What is this: <__media__>\n", "IMG_BASE64_1", True, "(frog)+"), + ("What is this: <__media__>\n", "malformed", False, None), # non-image data + ("What is this:\n", "", False, None), # empty string + ] +) +def test_vision_completion(prompt, image_data, success, re_content): + global server + server.start() + res = server.make_request("POST", "/completions", data={ + "temperature": 0.0, + "top_k": 1, + "prompt": { + JSON_PROMPT_STRING_KEY: prompt, + JSON_MULTIMODAL_KEY: [ get_img_url(image_data) ], + }, + }) + if success: + assert res.status_code == 200 + content = res.body["content"] + assert match_regex(re_content, content) + else: + assert res.status_code != 200 + + +@pytest.mark.parametrize( + "prompt, image_data, success", + [ + # test model is trained on CIFAR-10, but it's quite dumb due to small size + ("What is this: <__media__>\n", "IMG_BASE64_0", True), + ("What is this: <__media__>\n", "IMG_BASE64_1", True), + ("What is this: <__media__>\n", "malformed", False), # non-image data + ("What is this:\n", "base64", False), # non-image data + ] +) +def test_vision_embeddings(prompt, image_data, success): + global server + server.server_embeddings = True + server.n_batch = 512 + server.start() + image_data = get_img_url(image_data) + res = server.make_request("POST", "/embeddings", data={ + "content": [ + { JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] }, + { JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] }, + { JSON_PROMPT_STRING_KEY: prompt, }, + ], + }) + if success: + assert res.status_code == 200 + content = res.body + # Ensure embeddings are stable when multimodal. + assert content[0]['embedding'] == content[1]['embedding'] + # Ensure embeddings without multimodal but same prompt do not match multimodal embeddings. 
+ assert content[0]['embedding'] != content[2]['embedding'] + else: + assert res.status_code != 200 diff --git a/llama.cpp/tools/server/tests/utils.py b/llama.cpp/tools/server/tests/utils.py new file mode 100644 index 0000000..f76bb1a --- /dev/null +++ b/llama.cpp/tools/server/tests/utils.py @@ -0,0 +1,643 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# type: ignore[reportUnusedImport] + +import subprocess +import os +import re +import json +from json import JSONDecodeError +import sys +import requests +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import ( + Any, + Callable, + ContextManager, + Iterable, + Iterator, + List, + Literal, + Tuple, + Set, +) +from re import RegexFlag +import wget + + +DEFAULT_HTTP_TIMEOUT = 60 + + +class ServerResponse: + headers: dict + status_code: int + body: dict | Any + + +class ServerError(Exception): + def __init__(self, code, body): + self.code = code + self.body = body + + +class ServerProcess: + # default options + debug: bool = False + server_port: int = 8080 + server_host: str = "127.0.0.1" + model_hf_repo: str | None = "ggml-org/models" + model_hf_file: str | None = "tinyllamas/stories260K.gguf" + model_alias: str = "tinyllama-2" + temperature: float = 0.8 + seed: int = 42 + offline: bool = False + + # custom options + model_alias: str | None = None + model_url: str | None = None + model_file: str | None = None + model_draft: str | None = None + n_threads: int | None = None + n_gpu_layer: int | None = None + n_batch: int | None = None + n_ubatch: int | None = None + n_ctx: int | None = None + n_ga: int | None = None + n_ga_w: int | None = None + n_predict: int | None = None + n_prompts: int | None = 0 + slot_save_path: str | None = None + id_slot: int | None = None + cache_prompt: bool | None = None + n_slots: int | None = None + ctk: str | None = None + ctv: str | None = None + fa: str | None = None + server_continuous_batching: bool | None = False + server_embeddings: bool | None = False + server_reranking: bool | None = False + server_metrics: bool | None = False + kv_unified: bool | None = False + server_slots: bool | None = False + pooling: str | None = None + draft: int | None = None + api_key: str | None = None + models_dir: str | None = None + models_max: int | None = None + no_models_autoload: bool | None = None + lora_files: List[str] | None = None + enable_ctx_shift: int | None = False + draft_min: int | None = None + draft_max: int | None = None + no_webui: bool | None = None + jinja: bool | None = None + reasoning_format: Literal['deepseek', 'none', 'nothink'] | None = None + reasoning_budget: int | None = None + chat_template: str | None = None + chat_template_file: str | None = None + server_path: str | None = None + mmproj_url: str | None = None + media_path: str | None = None + sleep_idle_seconds: int | None = None + + # session variables + process: subprocess.Popen | None = None + + def __init__(self): + if "N_GPU_LAYERS" in os.environ: + self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"]) + if "DEBUG" in os.environ: + self.debug = True + if "PORT" in os.environ: + self.server_port = int(os.environ["PORT"]) + self.external_server = "DEBUG_EXTERNAL" in os.environ + + def start(self, timeout_seconds: int | None = DEFAULT_HTTP_TIMEOUT) -> None: + if self.external_server: + print(f"[external_server]: Assuming external server running on {self.server_host}:{self.server_port}") + return + if self.server_path is not None: + server_path = self.server_path + elif "LLAMA_SERVER_BIN_PATH" 
in os.environ: + server_path = os.environ["LLAMA_SERVER_BIN_PATH"] + elif os.name == "nt": + server_path = "../../../build/bin/Release/llama-server.exe" + else: + server_path = "../../../build/bin/llama-server" + server_args = [ + "--host", + self.server_host, + "--port", + self.server_port, + "--temp", + self.temperature, + "--seed", + self.seed, + ] + if self.offline: + server_args.append("--offline") + if self.model_file: + server_args.extend(["--model", self.model_file]) + if self.model_url: + server_args.extend(["--model-url", self.model_url]) + if self.model_draft: + server_args.extend(["--model-draft", self.model_draft]) + if self.model_hf_repo: + server_args.extend(["--hf-repo", self.model_hf_repo]) + if self.model_hf_file: + server_args.extend(["--hf-file", self.model_hf_file]) + if self.models_dir: + server_args.extend(["--models-dir", self.models_dir]) + if self.models_max is not None: + server_args.extend(["--models-max", self.models_max]) + if self.n_batch: + server_args.extend(["--batch-size", self.n_batch]) + if self.n_ubatch: + server_args.extend(["--ubatch-size", self.n_ubatch]) + if self.n_threads: + server_args.extend(["--threads", self.n_threads]) + if self.n_gpu_layer: + server_args.extend(["--n-gpu-layers", self.n_gpu_layer]) + if self.draft is not None: + server_args.extend(["--draft", self.draft]) + if self.server_continuous_batching: + server_args.append("--cont-batching") + if self.server_embeddings: + server_args.append("--embedding") + if self.server_reranking: + server_args.append("--reranking") + if self.server_metrics: + server_args.append("--metrics") + if self.kv_unified: + server_args.append("--kv-unified") + if self.server_slots: + server_args.append("--slots") + else: + server_args.append("--no-slots") + if self.pooling: + server_args.extend(["--pooling", self.pooling]) + if self.model_alias: + server_args.extend(["--alias", self.model_alias]) + if self.n_ctx: + server_args.extend(["--ctx-size", self.n_ctx]) + if self.n_slots: + server_args.extend(["--parallel", self.n_slots]) + if self.ctk: + server_args.extend(["-ctk", self.ctk]) + if self.ctv: + server_args.extend(["-ctv", self.ctv]) + if self.fa is not None: + server_args.extend(["-fa", self.fa]) + if self.n_predict: + server_args.extend(["--n-predict", self.n_predict]) + if self.slot_save_path: + server_args.extend(["--slot-save-path", self.slot_save_path]) + if self.n_ga: + server_args.extend(["--grp-attn-n", self.n_ga]) + if self.n_ga_w: + server_args.extend(["--grp-attn-w", self.n_ga_w]) + if self.debug: + server_args.append("--verbose") + if self.lora_files: + for lora_file in self.lora_files: + server_args.extend(["--lora", lora_file]) + if self.enable_ctx_shift: + server_args.append("--context-shift") + if self.api_key: + server_args.extend(["--api-key", self.api_key]) + if self.draft_max: + server_args.extend(["--draft-max", self.draft_max]) + if self.draft_min: + server_args.extend(["--draft-min", self.draft_min]) + if self.no_webui: + server_args.append("--no-webui") + if self.no_models_autoload: + server_args.append("--no-models-autoload") + if self.jinja: + server_args.append("--jinja") + else: + server_args.append("--no-jinja") + if self.reasoning_format is not None: + server_args.extend(("--reasoning-format", self.reasoning_format)) + if self.reasoning_budget is not None: + server_args.extend(("--reasoning-budget", self.reasoning_budget)) + if self.chat_template: + server_args.extend(["--chat-template", self.chat_template]) + if self.chat_template_file: + 
server_args.extend(["--chat-template-file", self.chat_template_file]) + if self.mmproj_url: + server_args.extend(["--mmproj-url", self.mmproj_url]) + if self.media_path: + server_args.extend(["--media-path", self.media_path]) + if self.sleep_idle_seconds is not None: + server_args.extend(["--sleep-idle-seconds", self.sleep_idle_seconds]) + + args = [str(arg) for arg in [server_path, *server_args]] + print(f"tests: starting server with: {' '.join(args)}") + + flags = 0 + if "nt" == os.name: + flags |= subprocess.DETACHED_PROCESS + flags |= subprocess.CREATE_NEW_PROCESS_GROUP + flags |= subprocess.CREATE_NO_WINDOW + + self.process = subprocess.Popen( + [str(arg) for arg in [server_path, *server_args]], + creationflags=flags, + stdout=sys.stdout, + stderr=sys.stdout, + env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None, + ) + server_instances.add(self) + + print(f"server pid={self.process.pid}, pytest pid={os.getpid()}") + + # wait for server to start + start_time = time.time() + while time.time() - start_time < timeout_seconds: + try: + response = self.make_request("GET", "/health", headers={ + "Authorization": f"Bearer {self.api_key}" if self.api_key else None + }) + if response.status_code == 200: + self.ready = True + return # server is ready + except Exception as e: + pass + # Check if process died + if self.process.poll() is not None: + raise RuntimeError(f"Server process died with return code {self.process.returncode}") + + print(f"Waiting for server to start...") + time.sleep(0.5) + raise TimeoutError(f"Server did not start within {timeout_seconds} seconds") + + def stop(self) -> None: + if self.external_server: + print("[external_server]: Not stopping external server") + return + if self in server_instances: + server_instances.remove(self) + if self.process: + print(f"Stopping server with pid={self.process.pid}") + self.process.kill() + self.process = None + + def make_request( + self, + method: str, + path: str, + data: dict | Any | None = None, + headers: dict | None = None, + timeout: float | None = None, + ) -> ServerResponse: + url = f"http://{self.server_host}:{self.server_port}{path}" + parse_body = False + if method == "GET": + response = requests.get(url, headers=headers, timeout=timeout) + parse_body = True + elif method == "POST": + response = requests.post(url, headers=headers, json=data, timeout=timeout) + parse_body = True + elif method == "OPTIONS": + response = requests.options(url, headers=headers, timeout=timeout) + else: + raise ValueError(f"Unimplemented method: {method}") + result = ServerResponse() + result.headers = dict(response.headers) + result.status_code = response.status_code + if parse_body: + try: + result.body = response.json() + except JSONDecodeError: + result.body = response.text + else: + result.body = None + print("Response from server", json.dumps(result.body, indent=2)) + return result + + def make_stream_request( + self, + method: str, + path: str, + data: dict | None = None, + headers: dict | None = None, + ) -> Iterator[dict]: + url = f"http://{self.server_host}:{self.server_port}{path}" + if method == "POST": + response = requests.post(url, headers=headers, json=data, stream=True) + else: + raise ValueError(f"Unimplemented method: {method}") + if response.status_code != 200: + raise ServerError(response.status_code, response.json()) + for line_bytes in response.iter_lines(): + line = line_bytes.decode("utf-8") + if '[DONE]' in line: + break + elif line.startswith('data: '): + data = json.loads(line[6:]) + 
print("Partial response from server", json.dumps(data, indent=2)) + yield data + + def make_any_request( + self, + method: str, + path: str, + data: dict | None = None, + headers: dict | None = None, + timeout: float | None = None, + ) -> dict: + stream = data.get('stream', False) + if stream: + content: list[str] = [] + reasoning_content: list[str] = [] + tool_calls: list[dict] = [] + finish_reason: Optional[str] = None + + content_parts = 0 + reasoning_content_parts = 0 + tool_call_parts = 0 + arguments_parts = 0 + + for chunk in self.make_stream_request(method, path, data, headers): + if chunk['choices']: + assert len(chunk['choices']) == 1, f'Expected 1 choice, got {len(chunk["choices"])}' + choice = chunk['choices'][0] + if choice['delta'].get('content') is not None: + assert len(choice['delta']['content']) > 0, f'Expected non empty content delta!' + content.append(choice['delta']['content']) + content_parts += 1 + if choice['delta'].get('reasoning_content') is not None: + assert len(choice['delta']['reasoning_content']) > 0, f'Expected non empty reasoning_content delta!' + reasoning_content.append(choice['delta']['reasoning_content']) + reasoning_content_parts += 1 + if choice['delta'].get('finish_reason') is not None: + finish_reason = choice['delta']['finish_reason'] + for tc in choice['delta'].get('tool_calls', []): + if 'function' not in tc: + raise ValueError(f"Expected function type, got {tc['type']}") + if tc['index'] >= len(tool_calls): + assert 'id' in tc + assert tc.get('type') == 'function' + assert 'function' in tc and 'name' in tc['function'] and len(tc['function']['name']) > 0, \ + f"Expected function call with name, got {tc.get('function')}" + tool_calls.append(dict( + id="", + type="function", + function=dict( + name="", + arguments="", + ) + )) + tool_call = tool_calls[tc['index']] + if tc.get('id') is not None: + tool_call['id'] = tc['id'] + fct = tc['function'] + assert 'id' not in fct, f"Function call should not have id: {fct}" + if fct.get('name') is not None: + tool_call['function']['name'] = tool_call['function'].get('name', '') + fct['name'] + if fct.get('arguments') is not None: + tool_call['function']['arguments'] += fct['arguments'] + arguments_parts += 1 + tool_call_parts += 1 + else: + # When `include_usage` is True (the default), we expect the last chunk of the stream + # immediately preceding the `data: [DONE]` message to contain a `choices` field with an empty array + # and a `usage` field containing the usage statistics (n.b., llama-server also returns `timings` in + # the last chunk) + assert 'usage' in chunk, f"Expected finish_reason in chunk: {chunk}" + assert 'timings' in chunk, f"Expected finish_reason in chunk: {chunk}" + print(f'Streamed response had {content_parts} content parts, {reasoning_content_parts} reasoning_content parts, {tool_call_parts} tool call parts incl. 
{arguments_parts} arguments parts') + result = dict( + choices=[ + dict( + index=0, + finish_reason=finish_reason, + message=dict( + role='assistant', + content=''.join(content) if content else None, + reasoning_content=''.join(reasoning_content) if reasoning_content else None, + tool_calls=tool_calls if tool_calls else None, + ), + ) + ], + ) + print("Final response from server", json.dumps(result, indent=2)) + return result + else: + response = self.make_request(method, path, data, headers, timeout=timeout) + assert response.status_code == 200, f"Server returned error: {response.status_code}" + return response.body + + + +server_instances: Set[ServerProcess] = set() + + +class ServerPreset: + @staticmethod + def load_all() -> None: + """ Load all server presets to ensure model files are cached. """ + servers: List[ServerProcess] = [ + method() + for name, method in ServerPreset.__dict__.items() + if callable(method) and name != "load_all" + ] + for server in servers: + server.offline = False + server.start() + server.stop() + + @staticmethod + def tinyllama2() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/test-model-stories260K" + server.model_hf_file = None + server.model_alias = "tinyllama-2" + server.n_ctx = 512 + server.n_batch = 32 + server.n_slots = 2 + server.n_predict = 64 + server.seed = 42 + return server + + @staticmethod + def bert_bge_small() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf" + server.model_alias = "bert-bge-small" + server.n_ctx = 512 + server.n_batch = 128 + server.n_ubatch = 128 + server.n_slots = 2 + server.seed = 42 + server.server_embeddings = True + return server + + @staticmethod + def bert_bge_small_with_fa() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf" + server.model_alias = "bert-bge-small" + server.n_ctx = 1024 + server.n_batch = 300 + server.n_ubatch = 300 + server.n_slots = 2 + server.fa = "on" + server.seed = 42 + server.server_embeddings = True + return server + + @staticmethod + def tinyllama_infill() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/test-model-stories260K-infill" + server.model_hf_file = None + server.model_alias = "tinyllama-infill" + server.n_ctx = 2048 + server.n_batch = 1024 + server.n_slots = 1 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + return server + + @staticmethod + def stories15m_moe() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/stories15M_MOE" + server.model_hf_file = "stories15M_MOE-F16.gguf" + server.model_alias = "stories15m-moe" + server.n_ctx = 2048 + server.n_batch = 1024 + server.n_slots = 1 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + return server + + @staticmethod + def jina_reranker_tiny() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf" + server.model_alias = "jina-reranker" + server.n_ctx = 
512 + server.n_batch = 512 + server.n_slots = 1 + server.seed = 42 + server.server_reranking = True + return server + + @staticmethod + def tinygemma3() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + # mmproj is already provided by HF registry API + server.model_hf_file = None + server.model_hf_repo = "ggml-org/tinygemma3-GGUF:Q8_0" + server.model_alias = "tinygemma3" + server.n_ctx = 1024 + server.n_batch = 32 + server.n_slots = 2 + server.n_predict = 4 + server.seed = 42 + return server + + @staticmethod + def router() -> ServerProcess: + server = ServerProcess() + server.offline = True # will be downloaded by load_all() + # router server has no models + server.model_file = None + server.model_alias = None + server.model_hf_repo = None + server.model_hf_file = None + server.n_ctx = 1024 + server.n_batch = 16 + server.n_slots = 1 + server.n_predict = 16 + server.seed = 42 + return server + + +def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]: + """ + Run multiple functions in parallel and return results in the same order as calls. Equivalent to Promise.all in JS. + + Example usage: + + results = parallel_function_calls([ + (func1, (arg1, arg2)), + (func2, (arg3, arg4)), + ]) + """ + results = [None] * len(function_list) + exceptions = [] + + def worker(index, func, args): + try: + result = func(*args) + results[index] = result + except Exception as e: + exceptions.append((index, str(e))) + + with ThreadPoolExecutor() as executor: + futures = [] + for i, (func, args) in enumerate(function_list): + future = executor.submit(worker, i, func, args) + futures.append(future) + + # Wait for all futures to complete + for future in as_completed(futures): + pass + + # Check if there were any exceptions + if exceptions: + print("Exceptions occurred:") + for index, error in exceptions: + print(f"Function at index {index}: {error}") + + return results + + +def match_regex(regex: str, text: str) -> bool: + return ( + re.compile( + regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL + ).search(text) + is not None + ) + + +def download_file(url: str, output_file_path: str | None = None) -> str: + """ + Download a file from a URL to a local path. If the file already exists, it will not be downloaded again. + + output_file_path is the local path to save the downloaded file. If not provided, the file will be saved in the root directory. + + Returns the local path of the downloaded file. + """ + file_name = url.split('/').pop() + output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path + if not os.path.exists(output_file): + print(f"Downloading {url} to {output_file}") + wget.download(url, out=output_file) + print(f"Done downloading to {output_file}") + else: + print(f"File already exists at {output_file}") + return output_file + + +def is_slow_test_allowed(): + return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON" diff --git a/llama.cpp/tools/server/themes/README.md b/llama.cpp/tools/server/themes/README.md new file mode 100644 index 0000000..62e721a --- /dev/null +++ b/llama.cpp/tools/server/themes/README.md @@ -0,0 +1,5 @@ +# LLaMA.cpp Server Wild Theme + +Simple themes directory of sample "public" directories. To try any of these add --path to your run like `server --path=wild`. 
+ +![image](wild/wild.png) diff --git a/llama.cpp/tools/server/themes/buttons-top/README.md b/llama.cpp/tools/server/themes/buttons-top/README.md new file mode 100644 index 0000000..808c4cf --- /dev/null +++ b/llama.cpp/tools/server/themes/buttons-top/README.md @@ -0,0 +1,7 @@ +# LLaMA.cpp Server Buttons Top Theme + +Simple tweaks to the UI. Chat buttons at the top of the page instead of bottom so you can hit Stop instead of chasing it down the page. + +To use simply run server with `--path=themes/buttons_top` + +![image](buttons_top.png) diff --git a/llama.cpp/tools/server/themes/buttons-top/buttons_top.png b/llama.cpp/tools/server/themes/buttons-top/buttons_top.png new file mode 100644 index 0000000..c544545 Binary files /dev/null and b/llama.cpp/tools/server/themes/buttons-top/buttons_top.png differ diff --git a/llama.cpp/tools/server/themes/buttons-top/favicon.ico b/llama.cpp/tools/server/themes/buttons-top/favicon.ico new file mode 100644 index 0000000..89e154a Binary files /dev/null and b/llama.cpp/tools/server/themes/buttons-top/favicon.ico differ diff --git a/llama.cpp/tools/server/themes/buttons-top/index.html b/llama.cpp/tools/server/themes/buttons-top/index.html new file mode 100644 index 0000000..cb5af58 --- /dev/null +++ b/llama.cpp/tools/server/themes/buttons-top/index.html @@ -0,0 +1,1052 @@ + + + + + + + llama.cpp - chat + + + + + + + +
+ + + diff --git a/llama.cpp/tools/server/themes/wild/README.md b/llama.cpp/tools/server/themes/wild/README.md new file mode 100644 index 0000000..560bcc8 --- /dev/null +++ b/llama.cpp/tools/server/themes/wild/README.md @@ -0,0 +1,5 @@ +# LLaMA.cpp Server Wild Theme + +Simple tweaks to the UI. To use simply run server with `--path=themes/wild` + +![image](wild.png) diff --git a/llama.cpp/tools/server/themes/wild/favicon.ico b/llama.cpp/tools/server/themes/wild/favicon.ico new file mode 100644 index 0000000..89e154a Binary files /dev/null and b/llama.cpp/tools/server/themes/wild/favicon.ico differ diff --git a/llama.cpp/tools/server/themes/wild/index.html b/llama.cpp/tools/server/themes/wild/index.html new file mode 100644 index 0000000..601f776 --- /dev/null +++ b/llama.cpp/tools/server/themes/wild/index.html @@ -0,0 +1,1056 @@ + + + + + + + llama.cpp - chat + + + + + + + +
+ + + diff --git a/llama.cpp/tools/server/themes/wild/llama_cpp.png b/llama.cpp/tools/server/themes/wild/llama_cpp.png new file mode 100644 index 0000000..bad1dc9 Binary files /dev/null and b/llama.cpp/tools/server/themes/wild/llama_cpp.png differ diff --git a/llama.cpp/tools/server/themes/wild/llamapattern.png b/llama.cpp/tools/server/themes/wild/llamapattern.png new file mode 100644 index 0000000..2a159ce Binary files /dev/null and b/llama.cpp/tools/server/themes/wild/llamapattern.png differ diff --git a/llama.cpp/tools/server/themes/wild/wild.png b/llama.cpp/tools/server/themes/wild/wild.png new file mode 100644 index 0000000..46ffa0f Binary files /dev/null and b/llama.cpp/tools/server/themes/wild/wild.png differ diff --git a/llama.cpp/tools/server/webui/.gitignore b/llama.cpp/tools/server/webui/.gitignore new file mode 100644 index 0000000..051d884 --- /dev/null +++ b/llama.cpp/tools/server/webui/.gitignore @@ -0,0 +1,28 @@ +test-results +node_modules + +# Output +.output +.vercel +.netlify +.wrangler +/.svelte-kit +/build + +# OS +.DS_Store +Thumbs.db + +# Env +.env +.env.* +!.env.example +!.env.test + +# Vite +vite.config.js.timestamp-* +vite.config.ts.timestamp-* + +*storybook.log +storybook-static +*.code-workspace \ No newline at end of file diff --git a/llama.cpp/tools/server/webui/.npmrc b/llama.cpp/tools/server/webui/.npmrc new file mode 100644 index 0000000..b6f27f1 --- /dev/null +++ b/llama.cpp/tools/server/webui/.npmrc @@ -0,0 +1 @@ +engine-strict=true diff --git a/llama.cpp/tools/server/webui/.prettierignore b/llama.cpp/tools/server/webui/.prettierignore new file mode 100644 index 0000000..7d74fe2 --- /dev/null +++ b/llama.cpp/tools/server/webui/.prettierignore @@ -0,0 +1,9 @@ +# Package Managers +package-lock.json +pnpm-lock.yaml +yarn.lock +bun.lock +bun.lockb + +# Miscellaneous +/static/ diff --git a/llama.cpp/tools/server/webui/.prettierrc b/llama.cpp/tools/server/webui/.prettierrc new file mode 100644 index 0000000..8103a0b --- /dev/null +++ b/llama.cpp/tools/server/webui/.prettierrc @@ -0,0 +1,16 @@ +{ + "useTabs": true, + "singleQuote": true, + "trailingComma": "none", + "printWidth": 100, + "plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"], + "overrides": [ + { + "files": "*.svelte", + "options": { + "parser": "svelte" + } + } + ], + "tailwindStylesheet": "./src/app.css" +} diff --git a/llama.cpp/tools/server/webui/.storybook/ModeWatcherDecorator.svelte b/llama.cpp/tools/server/webui/.storybook/ModeWatcherDecorator.svelte new file mode 100644 index 0000000..8bded8b --- /dev/null +++ b/llama.cpp/tools/server/webui/.storybook/ModeWatcherDecorator.svelte @@ -0,0 +1,36 @@ + + + + +{#if children} + {@const Component = children} + + +{/if} diff --git a/llama.cpp/tools/server/webui/.storybook/TooltipProviderDecorator.svelte b/llama.cpp/tools/server/webui/.storybook/TooltipProviderDecorator.svelte new file mode 100644 index 0000000..9aad1ea --- /dev/null +++ b/llama.cpp/tools/server/webui/.storybook/TooltipProviderDecorator.svelte @@ -0,0 +1,13 @@ + + + + {@render children()} + diff --git a/llama.cpp/tools/server/webui/.storybook/main.ts b/llama.cpp/tools/server/webui/.storybook/main.ts new file mode 100644 index 0000000..bfd16fa --- /dev/null +++ b/llama.cpp/tools/server/webui/.storybook/main.ts @@ -0,0 +1,17 @@ +import type { StorybookConfig } from '@storybook/sveltekit'; + +const config: StorybookConfig = { + stories: ['../tests/stories/**/*.mdx', '../tests/stories/**/*.stories.@(js|ts|svelte)'], + addons: [ + '@storybook/addon-svelte-csf', + 
'@chromatic-com/storybook', + '@storybook/addon-docs', + '@storybook/addon-a11y', + '@storybook/addon-vitest' + ], + framework: { + name: '@storybook/sveltekit', + options: {} + } +}; +export default config; diff --git a/llama.cpp/tools/server/webui/.storybook/preview.ts b/llama.cpp/tools/server/webui/.storybook/preview.ts new file mode 100644 index 0000000..8d530e4 --- /dev/null +++ b/llama.cpp/tools/server/webui/.storybook/preview.ts @@ -0,0 +1,42 @@ +import type { Preview } from '@storybook/sveltekit'; +import '../src/app.css'; +import ModeWatcherDecorator from './ModeWatcherDecorator.svelte'; +import TooltipProviderDecorator from './TooltipProviderDecorator.svelte'; + +const preview: Preview = { + parameters: { + controls: { + matchers: { + color: /(background|color)$/i, + date: /Date$/i + } + }, + + backgrounds: { + disable: true + }, + + a11y: { + // 'todo' - show a11y violations in the test UI only + // 'error' - fail CI on a11y violations + // 'off' - skip a11y checks entirely + test: 'todo' + } + }, + decorators: [ + (story) => ({ + Component: ModeWatcherDecorator, + props: { + children: story + } + }), + (story) => ({ + Component: TooltipProviderDecorator, + props: { + children: story + } + }) + ] +}; + +export default preview; diff --git a/llama.cpp/tools/server/webui/.storybook/vitest.setup.ts b/llama.cpp/tools/server/webui/.storybook/vitest.setup.ts new file mode 100644 index 0000000..1471572 --- /dev/null +++ b/llama.cpp/tools/server/webui/.storybook/vitest.setup.ts @@ -0,0 +1,12 @@ +import * as a11yAddonAnnotations from '@storybook/addon-a11y/preview'; +import { setProjectAnnotations } from '@storybook/sveltekit'; +import * as previewAnnotations from './preview'; +import { beforeAll } from 'vitest'; + +const project = setProjectAnnotations([a11yAddonAnnotations, previewAnnotations]); + +beforeAll(async () => { + if (project.beforeAll) { + await project.beforeAll(); + } +}); diff --git a/llama.cpp/tools/server/webui/README.md b/llama.cpp/tools/server/webui/README.md new file mode 100644 index 0000000..98b01fd --- /dev/null +++ b/llama.cpp/tools/server/webui/README.md @@ -0,0 +1,687 @@ +# llama.cpp Web UI + +A modern, feature-rich web interface for llama.cpp built with SvelteKit. This UI provides an intuitive chat interface with advanced file handling, conversation management, and comprehensive model interaction capabilities. 
+ +The WebUI supports two server operation modes: + +- **MODEL mode** - Single model operation (standard llama-server) +- **ROUTER mode** - Multi-model operation with dynamic model loading/unloading + +--- + +## Table of Contents + +- [Features](#features) +- [Getting Started](#getting-started) +- [Tech Stack](#tech-stack) +- [Build Pipeline](#build-pipeline) +- [Architecture](#architecture) +- [Data Flows](#data-flows) +- [Architectural Patterns](#architectural-patterns) +- [Testing](#testing) + +--- + +## Features + +### Chat Interface + +- **Streaming responses** with real-time updates +- **Reasoning content** - Support for models with thinking/reasoning blocks +- **Dark/light theme** with system preference detection +- **Responsive design** for desktop and mobile + +### File Attachments + +- **Images** - JPEG, PNG, GIF, WebP, SVG (with PNG conversion) +- **Documents** - PDF (text extraction or image conversion for vision models) +- **Audio** - MP3, WAV for audio-capable models +- **Text files** - Source code, markdown, and other text formats +- **Drag-and-drop** and paste support with rich previews + +### Conversation Management + +- **Branching** - Branch messages conversations at any point by editing messages or regenerating responses, navigate between branches +- **Regeneration** - Regenerate responses with optional model switching (ROUTER mode) +- **Import/Export** - JSON format for backup and sharing +- **Search** - Find conversations by title or content + +### Advanced Rendering + +- **Syntax highlighting** - Code blocks with language detection +- **Math formulas** - KaTeX rendering for LaTeX expressions +- **Markdown** - Full GFM support with tables, lists, and more + +### Multi-Model Support (ROUTER mode) + +- **Model selector** with Loaded/Available groups +- **Automatic loading** - Models load on selection +- **Modality validation** - Prevents sending images to non-vision models +- **LRU unloading** - Server auto-manages model cache + +### Keyboard Shortcuts + +| Shortcut | Action | +| ------------------ | -------------------- | +| `Shift+Ctrl/Cmd+O` | New chat | +| `Shift+Ctrl/Cmd+E` | Edit conversation | +| `Shift+Ctrl/Cmd+D` | Delete conversation | +| `Ctrl/Cmd+K` | Search conversations | +| `Ctrl/Cmd+B` | Toggle sidebar | + +### Developer Experience + +- **Request tracking** - Monitor token generation with `/slots` endpoint +- **Storybook** - Component library with visual testing +- **Hot reload** - Instant updates during development + +--- + +## Getting Started + +### Prerequisites + +- **Node.js** 18+ (20+ recommended) +- **npm** 9+ +- **llama-server** running locally (for API access) + +### 1. Install Dependencies + +```bash +cd tools/server/webui +npm install +``` + +### 2. Start llama-server + +In a separate terminal, start the backend server: + +```bash +# Single model (MODEL mode) +./llama-server -m model.gguf + +# Multi-model (ROUTER mode) +./llama-server --model-store /path/to/models +``` + +### 3. Start Development Servers + +```bash +npm run dev +``` + +This starts: + +- **Vite dev server** at `http://localhost:5173` - The main WebUI +- **Storybook** at `http://localhost:6006` - Component documentation + +The Vite dev server proxies API requests to `http://localhost:8080` (default llama-server port): + +```typescript +// vite.config.ts proxy configuration +proxy: { + '/v1': 'http://localhost:8080', + '/props': 'http://localhost:8080', + '/slots': 'http://localhost:8080', + '/models': 'http://localhost:8080' +} +``` + +### Development Workflow + +1. 
Open `http://localhost:5173` in your browser +2. Make changes to `.svelte`, `.ts`, or `.css` files +3. Changes hot-reload instantly +4. Use Storybook at `http://localhost:6006` for isolated component development + +--- + +## Tech Stack + +| Layer | Technology | Purpose | +| ----------------- | ------------------------------- | -------------------------------------------------------- | +| **Framework** | SvelteKit + Svelte 5 | Reactive UI with runes (`$state`, `$derived`, `$effect`) | +| **UI Components** | shadcn-svelte + bits-ui | Accessible, customizable component library | +| **Styling** | TailwindCSS 4 | Utility-first CSS with design tokens | +| **Database** | IndexedDB (Dexie) | Client-side storage for conversations and messages | +| **Build** | Vite | Fast bundling with static adapter | +| **Testing** | Playwright + Vitest + Storybook | E2E, unit, and visual testing | +| **Markdown** | remark + rehype | Markdown processing with KaTeX and syntax highlighting | + +### Key Dependencies + +```json +{ + "svelte": "^5.0.0", + "bits-ui": "^2.8.11", + "dexie": "^4.0.11", + "pdfjs-dist": "^5.4.54", + "highlight.js": "^11.11.1", + "rehype-katex": "^7.0.1" +} +``` + +--- + +## Build Pipeline + +### Development Build + +```bash +npm run dev +``` + +Runs Vite in development mode with: + +- Hot Module Replacement (HMR) +- Source maps +- Proxy to llama-server + +### Production Build + +```bash +npm run build +``` + +The build process: + +1. **Vite Build** - Bundles all TypeScript, Svelte, and CSS +2. **Static Adapter** - Outputs to `../public` (llama-server's static file directory) +3. **Post-Build Script** - Cleans up intermediate files +4. **Custom Plugin** - Creates `index.html.gz` with: + - Inlined favicon as base64 + - GZIP compression (level 9) + - Deterministic output (zeroed timestamps) + +```text +tools/server/webui/ → build → tools/server/public/ +├── src/ ├── index.html.gz (served by llama-server) +├── static/ └── (favicon inlined) +└── ... +``` + +### SvelteKit Configuration + +```javascript +// svelte.config.js +adapter: adapter({ + pages: '../public', // Output directory + assets: '../public', // Static assets + fallback: 'index.html', // SPA fallback + strict: true +}), +output: { + bundleStrategy: 'inline' // Single-file bundle +} +``` + +### Integration with llama-server + +The WebUI is embedded directly into the llama-server binary: + +1. `npm run build` outputs `index.html.gz` to `tools/server/public/` +2. llama-server compiles this into the binary at build time +3. When accessing `/`, llama-server serves the gzipped HTML +4. All assets are inlined (CSS, JS, fonts, favicon) + +This results in a **single portable binary** with the full WebUI included. 
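+For reference, the gzip step described in the build pipeline above boils down to a few lines of Node; this is a simplified sketch (paths are illustrative, and the real Vite plugin also inlines the favicon as base64 before compressing):
+
+```typescript
+// Minimal sketch, not the actual Vite plugin used by the webui build.
+import { readFileSync, writeFileSync } from 'node:fs';
+import { gzipSync, constants } from 'node:zlib';
+
+const html = readFileSync('../public/index.html');
+
+// Maximum compression (level 9). Node's zlib writes a zeroed MTIME field in
+// the gzip header, which keeps the output deterministic across builds.
+const gz = gzipSync(html, { level: constants.Z_BEST_COMPRESSION });
+
+writeFileSync('../public/index.html.gz', gz);
+```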
+ +--- + +## Architecture + +The WebUI follows a layered architecture with unidirectional data flow: + +```text +Routes → Components → Hooks → Stores → Services → Storage/API +``` + +### High-Level Architecture + +See: [`docs/architecture/high-level-architecture-simplified.md`](docs/architecture/high-level-architecture-simplified.md) + +```mermaid +flowchart TB + subgraph Routes["📍 Routes"] + R1["/ (Welcome)"] + R2["/chat/[id]"] + RL["+layout.svelte"] + end + + subgraph Components["🧩 Components"] + C_Sidebar["ChatSidebar"] + C_Screen["ChatScreen"] + C_Form["ChatForm"] + C_Messages["ChatMessages"] + C_ModelsSelector["ModelsSelector"] + C_Settings["ChatSettings"] + end + + subgraph Stores["🗄️ Stores"] + S1["chatStore"] + S2["conversationsStore"] + S3["modelsStore"] + S4["serverStore"] + S5["settingsStore"] + end + + subgraph Services["⚙️ Services"] + SV1["ChatService"] + SV2["ModelsService"] + SV3["PropsService"] + SV4["DatabaseService"] + end + + subgraph Storage["💾 Storage"] + ST1["IndexedDB"] + ST2["LocalStorage"] + end + + subgraph APIs["🌐 llama-server"] + API1["/v1/chat/completions"] + API2["/props"] + API3["/models/*"] + end + + R1 & R2 --> C_Screen + RL --> C_Sidebar + C_Screen --> C_Form & C_Messages & C_Settings + C_Screen --> S1 & S2 + C_ModelsSelector --> S3 & S4 + S1 --> SV1 & SV4 + S3 --> SV2 & SV3 + SV4 --> ST1 + SV1 --> API1 + SV2 --> API3 + SV3 --> API2 +``` + +### Layer Breakdown + +#### Routes (`src/routes/`) + +- **`/`** - Welcome screen, creates new conversation +- **`/chat/[id]`** - Active chat interface +- **`+layout.svelte`** - Sidebar, navigation, global initialization + +#### Components (`src/lib/components/`) + +Components are organized in `app/` (application-specific) and `ui/` (shadcn-svelte primitives). + +**Chat Components** (`app/chat/`): + +| Component | Responsibility | +| ------------------ | --------------------------------------------------------------------------- | +| `ChatScreen/` | Main chat container, coordinates message list, input form, and attachments | +| `ChatForm/` | Message input textarea with file upload, paste handling, keyboard shortcuts | +| `ChatMessages/` | Message list with branch navigation, regenerate/continue/edit actions | +| `ChatAttachments/` | File attachment previews, drag-and-drop, PDF/image/audio handling | +| `ChatSettings/` | Parameter sliders (temperature, top-p, etc.) 
with server default sync | +| `ChatSidebar/` | Conversation list, search, import/export, navigation | + +**Dialog Components** (`app/dialogs/`): + +| Component | Responsibility | +| ------------------------------- | -------------------------------------------------------- | +| `DialogChatSettings` | Full-screen settings configuration | +| `DialogModelInformation` | Model details (context size, modalities, parallel slots) | +| `DialogChatAttachmentPreview` | Full preview for images, PDFs (text or page view), code | +| `DialogConfirmation` | Generic confirmation for destructive actions | +| `DialogConversationTitleUpdate` | Edit conversation title | + +**Server/Model Components** (`app/server/`, `app/models/`): + +| Component | Responsibility | +| ------------------- | --------------------------------------------------------- | +| `ServerErrorSplash` | Error display when server is unreachable | +| `ModelsSelector` | Model dropdown with Loaded/Available groups (ROUTER mode) | + +**Shared UI Components** (`app/misc/`): + +| Component | Responsibility | +| -------------------------------- | ---------------------------------------------------------------- | +| `MarkdownContent` | Markdown rendering with KaTeX, syntax highlighting, copy buttons | +| `SyntaxHighlightedCode` | Code blocks with language detection and highlighting | +| `ActionButton`, `ActionDropdown` | Reusable action buttons and menus | +| `BadgeModality`, `BadgeInfo` | Status and capability badges | + +#### Hooks (`src/lib/hooks/`) + +- **`useModelChangeValidation`** - Validates model switch against conversation modalities +- **`useProcessingState`** - Tracks streaming progress and token generation + +#### Stores (`src/lib/stores/`) + +| Store | Responsibility | +| -------------------- | --------------------------------------------------------- | +| `chatStore` | Message sending, streaming, abort control, error handling | +| `conversationsStore` | CRUD for conversations, message branching, navigation | +| `modelsStore` | Model list, selection, loading/unloading (ROUTER) | +| `serverStore` | Server properties, role detection, modalities | +| `settingsStore` | User preferences, parameter sync with server defaults | + +#### Services (`src/lib/services/`) + +| Service | Responsibility | +| ---------------------- | ----------------------------------------------- | +| `ChatService` | API calls to`/v1/chat/completions`, SSE parsing | +| `ModelsService` | `/models`, `/models/load`, `/models/unload` | +| `PropsService` | `/props`, `/props?model=` | +| `DatabaseService` | IndexedDB operations via Dexie | +| `ParameterSyncService` | Syncs settings with server defaults | + +--- + +## Data Flows + +### MODEL Mode (Single Model) + +See: [`docs/flows/data-flow-simplified-model-mode.md`](docs/flows/data-flow-simplified-model-mode.md) + +```mermaid +sequenceDiagram + participant User + participant UI + participant Stores + participant DB as IndexedDB + participant API as llama-server + + Note over User,API: Initialization + UI->>Stores: initialize() + Stores->>DB: load conversations + Stores->>API: GET /props + API-->>Stores: server config + Stores->>API: GET /v1/models + API-->>Stores: single model (auto-selected) + + Note over User,API: Chat Flow + User->>UI: send message + Stores->>DB: save user message + Stores->>API: POST /v1/chat/completions (stream) + loop streaming + API-->>Stores: SSE chunks + Stores-->>UI: reactive update + end + Stores->>DB: save assistant message +``` + +### ROUTER Mode (Multi-Model) + +See: 
[`docs/flows/data-flow-simplified-router-mode.md`](docs/flows/data-flow-simplified-router-mode.md) + +```mermaid +sequenceDiagram + participant User + participant UI + participant Stores + participant API as llama-server + + Note over User,API: Initialization + Stores->>API: GET /props + API-->>Stores: {role: "router"} + Stores->>API: GET /models + API-->>Stores: models[] with status + + Note over User,API: Model Selection + User->>UI: select model + alt model not loaded + Stores->>API: POST /models/load + loop poll status + Stores->>API: GET /models + end + Stores->>API: GET /props?model=X + end + Stores->>Stores: validate modalities + + Note over User,API: Chat Flow + Stores->>API: POST /v1/chat/completions {model: X} + loop streaming + API-->>Stores: SSE chunks + model info + end +``` + +### Detailed Flow Diagrams + +| Flow | Description | File | +| ------------- | ------------------------------------------ | ----------------------------------------------------------- | +| Chat | Message lifecycle, streaming, regeneration | [`chat-flow.md`](docs/flows/chat-flow.md) | +| Models | Loading, unloading, modality caching | [`models-flow.md`](docs/flows/models-flow.md) | +| Server | Props fetching, role detection | [`server-flow.md`](docs/flows/server-flow.md) | +| Conversations | CRUD, branching, import/export | [`conversations-flow.md`](docs/flows/conversations-flow.md) | +| Database | IndexedDB schema, operations | [`database-flow.md`](docs/flows/database-flow.md) | +| Settings | Parameter sync, user overrides | [`settings-flow.md`](docs/flows/settings-flow.md) | + +--- + +## Architectural Patterns + +### 1. Reactive State with Svelte 5 Runes + +All stores use Svelte 5's fine-grained reactivity: + +```typescript +// Store with reactive state +class ChatStore { + #isLoading = $state(false); + #currentResponse = $state(''); + + // Derived values auto-update + get isStreaming() { + return $derived(this.#isLoading && this.#currentResponse.length > 0); + } +} + +// Exported reactive accessors +export const isLoading = () => chatStore.isLoading; +export const currentResponse = () => chatStore.currentResponse; +``` + +### 2. Unidirectional Data Flow + +Data flows in one direction, making state predictable: + +```mermaid +flowchart LR + subgraph UI["UI Layer"] + A[User Action] --> B[Component] + end + + subgraph State["State Layer"] + B --> C[Store Method] + C --> D[State Update] + end + + subgraph IO["I/O Layer"] + C --> E[Service] + E --> F[API / IndexedDB] + F -.->|Response| D + end + + D -->|Reactive| B +``` + +Components dispatch actions to stores, stores coordinate with services for I/O, and state updates reactively propagate back to the UI. + +### 3. Per-Conversation State + +Enables concurrent streaming across multiple conversations: + +```typescript +class ChatStore { + chatLoadingStates = new Map(); + chatStreamingStates = new Map(); + abortControllers = new Map(); +} +``` + +### 4. Message Branching with Tree Structure + +Conversations are stored as a tree, not a linear list: + +```typescript +interface DatabaseMessage { + id: string; + parent: string | null; // Points to parent message + children: string[]; // List of child message IDs + // ... +} + +interface DatabaseConversation { + currentNode: string; // Currently viewed branch tip + // ... +} +``` + +Navigation between branches updates `currentNode` without losing history. + +### 5. 
Layered Service Architecture + +Stores handle state; services handle I/O: + +```text +┌─────────────────┐ +│ Stores │ Business logic, state management +├─────────────────┤ +│ Services │ API calls, database operations +├─────────────────┤ +│ Storage/API │ IndexedDB, LocalStorage, HTTP +└─────────────────┘ +``` + +### 6. Server Role Abstraction + +Single codebase handles both MODEL and ROUTER modes: + +```typescript +// serverStore.ts +get isRouterMode() { + return this.role === ServerRole.ROUTER; +} + +// Components conditionally render based on mode +{#if isRouterMode()} + +{/if} +``` + +### 7. Modality Validation + +Prevents sending attachments to incompatible models: + +```typescript +// useModelChangeValidation hook +const validate = (modelId: string) => { + const modelModalities = modelsStore.getModelModalities(modelId); + const conversationModalities = conversationsStore.usedModalities; + + // Check if model supports all used modalities + if (conversationModalities.hasImages && !modelModalities.vision) { + return { valid: false, reason: 'Model does not support images' }; + } + // ... +}; +``` + +### 8. Persistent Storage Strategy + +Data is persisted across sessions using two storage mechanisms: + +```mermaid +flowchart TB + subgraph Browser["Browser Storage"] + subgraph IDB["IndexedDB (Dexie)"] + C[Conversations] + M[Messages] + end + subgraph LS["LocalStorage"] + S[Settings Config] + O[User Overrides] + T[Theme Preference] + end + end + + subgraph Stores["Svelte Stores"] + CS[conversationsStore] --> C + CS --> M + SS[settingsStore] --> S + SS --> O + SS --> T + end +``` + +- **IndexedDB**: Conversations and messages (large, structured data) +- **LocalStorage**: Settings, user parameter overrides, theme (small key-value data) +- **Memory only**: Server props, model list (fetched fresh on each session) + +--- + +## Testing + +### Test Types + +| Type | Tool | Location | Command | +| ------------- | ------------------ | ---------------- | ------------------- | +| **Unit** | Vitest | `tests/unit/` | `npm run test:unit` | +| **UI/Visual** | Storybook + Vitest | `tests/stories/` | `npm run test:ui` | +| **E2E** | Playwright | `tests/e2e/` | `npm run test:e2e` | +| **Client** | Vitest | `tests/client/`. 
| `npm run test:unit` | + +### Running Tests + +```bash +# All tests +npm run test + +# Individual test suites +npm run test:e2e # End-to-end (requires llama-server) +npm run test:client # Client-side unit tests +npm run test:server # Server-side unit tests +npm run test:ui # Storybook visual tests +``` + +### Storybook Development + +```bash +npm run storybook # Start Storybook dev server on :6006 +npm run build-storybook # Build static Storybook +``` + +### Linting and Formatting + +```bash +npm run lint # Check code style +npm run format # Auto-format with Prettier +npm run check # TypeScript type checking +``` + +--- + +## Project Structure + +```text +tools/server/webui/ +├── src/ +│ ├── lib/ +│ │ ├── components/ # UI components (app/, ui/) +│ │ ├── hooks/ # Svelte hooks +│ │ ├── stores/ # State management +│ │ ├── services/ # API and database services +│ │ ├── types/ # TypeScript interfaces +│ │ └── utils/ # Utility functions +│ ├── routes/ # SvelteKit routes +│ └── styles/ # Global styles +├── static/ # Static assets +├── tests/ # Test files +├── docs/ # Architecture diagrams +│ ├── architecture/ # High-level architecture +│ └── flows/ # Feature-specific flows +└── .storybook/ # Storybook configuration +``` + +--- + +## Related Documentation + +- [llama.cpp Server README](../README.md) - Full server documentation +- [Multimodal Documentation](../../../docs/multimodal.md) - Image and audio support +- [Function Calling](../../../docs/function-calling.md) - Tool use capabilities diff --git a/llama.cpp/tools/server/webui/components.json b/llama.cpp/tools/server/webui/components.json new file mode 100644 index 0000000..224bd70 --- /dev/null +++ b/llama.cpp/tools/server/webui/components.json @@ -0,0 +1,16 @@ +{ + "$schema": "https://shadcn-svelte.com/schema.json", + "tailwind": { + "css": "src/app.css", + "baseColor": "neutral" + }, + "aliases": { + "components": "$lib/components", + "utils": "$lib/components/ui/utils", + "ui": "$lib/components/ui", + "hooks": "$lib/hooks", + "lib": "$lib" + }, + "typescript": true, + "registry": "https://shadcn-svelte.com/registry" +} diff --git a/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture-simplified.md b/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture-simplified.md new file mode 100644 index 0000000..a6cb1e9 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture-simplified.md @@ -0,0 +1,106 @@ +```mermaid +flowchart TB + subgraph Routes["📍 Routes"] + R1["/ (Welcome)"] + R2["/chat/[id]"] + RL["+layout.svelte"] + end + + subgraph Components["🧩 Components"] + C_Sidebar["ChatSidebar"] + C_Screen["ChatScreen"] + C_Form["ChatForm"] + C_Messages["ChatMessages"] + C_Message["ChatMessage"] + C_MessageEditForm["ChatMessageEditForm"] + C_ModelsSelector["ModelsSelector"] + C_Settings["ChatSettings"] + end + + subgraph Hooks["🪝 Hooks"] + H1["useModelChangeValidation"] + H2["useProcessingState"] + end + + subgraph Stores["🗄️ Stores"] + S1["chatStore
Chat interactions & streaming"] + S2["conversationsStore
Conversation data & messages"] + S3["modelsStore
Model selection & loading"] + S4["serverStore
Server props & role detection"] + S5["settingsStore
User configuration"] + end + + subgraph Services["⚙️ Services"] + SV1["ChatService"] + SV2["ModelsService"] + SV3["PropsService"] + SV4["DatabaseService"] + SV5["ParameterSyncService"] + end + + subgraph Storage["💾 Storage"] + ST1["IndexedDB
conversations, messages"] + ST2["LocalStorage
config, userOverrides"] + end + + subgraph APIs["🌐 llama-server API"] + API1["/v1/chat/completions"] + API2["/props"] + API3["/models/*"] + API4["/v1/models"] + end + + %% Routes → Components + R1 & R2 --> C_Screen + RL --> C_Sidebar + + %% Component hierarchy + C_Screen --> C_Form & C_Messages & C_Settings + C_Messages --> C_Message + C_Message --> C_MessageEditForm + C_Form & C_MessageEditForm --> C_ModelsSelector + + %% Components → Hooks → Stores + C_Form & C_Messages --> H1 & H2 + H1 --> S3 & S4 + H2 --> S1 & S5 + + %% Components → Stores + C_Screen --> S1 & S2 + C_Sidebar --> S2 + C_ModelsSelector --> S3 & S4 + C_Settings --> S5 + + %% Stores → Services + S1 --> SV1 & SV4 + S2 --> SV4 + S3 --> SV2 & SV3 + S4 --> SV3 + S5 --> SV5 + + %% Services → Storage + SV4 --> ST1 + SV5 --> ST2 + + %% Services → APIs + SV1 --> API1 + SV2 --> API3 & API4 + SV3 --> API2 + + %% Styling + classDef routeStyle fill:#e1f5fe,stroke:#01579b,stroke-width:2px + classDef componentStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px + classDef hookStyle fill:#fff8e1,stroke:#ff8f00,stroke-width:2px + classDef storeStyle fill:#fff3e0,stroke:#e65100,stroke-width:2px + classDef serviceStyle fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px + classDef storageStyle fill:#fce4ec,stroke:#c2185b,stroke-width:2px + classDef apiStyle fill:#e3f2fd,stroke:#1565c0,stroke-width:2px + + class R1,R2,RL routeStyle + class C_Sidebar,C_Screen,C_Form,C_Messages,C_Message,C_MessageEditForm,C_ModelsSelector,C_Settings componentStyle + class H1,H2 hookStyle + class S1,S2,S3,S4,S5 storeStyle + class SV1,SV2,SV3,SV4,SV5 serviceStyle + class ST1,ST2 storageStyle + class API1,API2,API3,API4 apiStyle +``` diff --git a/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture.md b/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture.md new file mode 100644 index 0000000..c5ec4d6 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/architecture/high-level-architecture.md @@ -0,0 +1,279 @@ +```mermaid +flowchart TB +subgraph Routes["📍 Routes"] +R1["/ (+page.svelte)"] +R2["/chat/[id]"] +RL["+layout.svelte"] +end + + subgraph Components["🧩 Components"] + direction TB + subgraph LayoutComponents["Layout"] + C_Sidebar["ChatSidebar"] + C_Screen["ChatScreen"] + end + subgraph ChatUIComponents["Chat UI"] + C_Form["ChatForm"] + C_Messages["ChatMessages"] + C_Message["ChatMessage"] + C_MessageUser["ChatMessageUser"] + C_MessageEditForm["ChatMessageEditForm"] + C_Attach["ChatAttachments"] + C_ModelsSelector["ModelsSelector"] + C_Settings["ChatSettings"] + end + end + + subgraph Hooks["🪝 Hooks"] + H1["useModelChangeValidation"] + H2["useProcessingState"] + H3["isMobile"] + end + + subgraph Stores["🗄️ Stores"] + direction TB + subgraph S1["chatStore"] + S1State["State:
isLoading, currentResponse
errorDialogState
activeProcessingState
chatLoadingStates
chatStreamingStates
abortControllers
processingStates
activeConversationId
isStreamingActive"] + S1LoadState["Loading State:
setChatLoading()
isChatLoading()
syncLoadingStateForChat()
clearUIState()
isChatLoadingPublic()
getAllLoadingChats()
getAllStreamingChats()"] + S1ProcState["Processing State:
setActiveProcessingConversation()
getProcessingState()
clearProcessingState()
getActiveProcessingState()
updateProcessingStateFromTimings()
getCurrentProcessingStateSync()
restoreProcessingStateFromMessages()"] + S1Stream["Streaming:
streamChatCompletion()
startStreaming()
stopStreaming()
stopGeneration()
isStreaming()"] + S1Error["Error Handling:
showErrorDialog()
dismissErrorDialog()
isAbortError()"] + S1Msg["Message Operations:
addMessage()
sendMessage()
updateMessage()
deleteMessage()
getDeletionInfo()"] + S1Regen["Regeneration:
regenerateMessage()
regenerateMessageWithBranching()
continueAssistantMessage()"] + S1Edit["Editing:
editAssistantMessage()
editUserMessagePreserveResponses()
editMessageWithBranching()
clearEditMode()
isEditModeActive()
getAddFilesHandler()
setEditModeActive()"] + S1Utils["Utilities:
getApiOptions()
parseTimingData()
getOrCreateAbortController()
getConversationModel()"] + end + subgraph S2["conversationsStore"] + S2State["State:
conversations
activeConversation
activeMessages
usedModalities
isInitialized
titleUpdateConfirmationCallback"] + S2Modal["Modalities:
getModalitiesUpToMessage()
calculateModalitiesFromMessages()"] + S2Lifecycle["Lifecycle:
initialize()
loadConversations()
clearActiveConversation()"] + S2ConvCRUD["Conversation CRUD:
createConversation()
loadConversation()
deleteConversation()
updateConversationName()
updateConversationTitleWithConfirmation()"] + S2MsgMgmt["Message Management:
refreshActiveMessages()
addMessageToActive()
updateMessageAtIndex()
findMessageIndex()
sliceActiveMessages()
removeMessageAtIndex()
getConversationMessages()"] + S2Nav["Navigation:
navigateToSibling()
updateCurrentNode()
updateConversationTimestamp()"] + S2Export["Import/Export:
downloadConversation()
exportAllConversations()
importConversations()
triggerDownload()"] + S2Utils["Utilities:
setTitleUpdateConfirmationCallback()"] + end + subgraph S3["modelsStore"] + S3State["State:
models, routerModels
selectedModelId
selectedModelName
loading, updating, error
modelLoadingStates
modelPropsCache
modelPropsFetching
propsCacheVersion"] + S3Getters["Computed Getters:
selectedModel
loadedModelIds
loadingModelIds
singleModelName"] + S3Modal["Modalities:
getModelModalities()
modelSupportsVision()
modelSupportsAudio()
getModelModalitiesArray()
getModelProps()
updateModelModalities()"] + S3Status["Status Queries:
isModelLoaded()
isModelOperationInProgress()
getModelStatus()
isModelPropsFetching()"] + S3Fetch["Data Fetching:
fetch()
fetchRouterModels()
fetchModelProps()
fetchModalitiesForLoadedModels()"] + S3Select["Model Selection:
selectModelById()
selectModelByName()
clearSelection()
findModelByName()
findModelById()
hasModel()"] + S3LoadUnload["Loading/Unloading Models:
loadModel()
unloadModel()
ensureModelLoaded()
waitForModelStatus()
pollForModelStatus()"] + S3Utils["Utilities:
toDisplayName()
clear()"] + end + subgraph S4["serverStore"] + S4State["State:
props
loading, error
role
fetchPromise"] + S4Getters["Getters:
defaultParams
contextSize
isRouterMode
isModelMode"] + S4Data["Data Handling:
fetch()
getErrorMessage()
clear()"] + S4Utils["Utilities:
detectRole()"] + end + subgraph S5["settingsStore"] + S5State["State:
config
theme
isInitialized
userOverrides"] + S5Lifecycle["Lifecycle:
initialize()
loadConfig()
saveConfig()
loadTheme()
saveTheme()"] + S5Update["Config Updates:
updateConfig()
updateMultipleConfig()
updateTheme()"] + S5Reset["Reset:
resetConfig()
resetTheme()
resetAll()
resetParameterToServerDefault()"] + S5Sync["Server Sync:
syncWithServerDefaults()
forceSyncWithServerDefaults()"] + S5Utils["Utilities:
getConfig()
getAllConfig()
getParameterInfo()
getParameterDiff()
getServerDefaults()
clearAllUserOverrides()"] + end + + subgraph ReactiveExports["⚡ Reactive Exports"] + direction LR + subgraph ChatExports["chatStore"] + RE1["isLoading()"] + RE2["currentResponse()"] + RE3["errorDialog()"] + RE4["activeProcessingState()"] + RE5["isChatStreaming()"] + RE6["isChatLoading()"] + RE7["getChatStreaming()"] + RE8["getAllLoadingChats()"] + RE9["getAllStreamingChats()"] + RE9a["isEditModeActive()"] + RE9b["getAddFilesHandler()"] + RE9c["setEditModeActive()"] + RE9d["clearEditMode()"] + end + subgraph ConvExports["conversationsStore"] + RE10["conversations()"] + RE11["activeConversation()"] + RE12["activeMessages()"] + RE13["isConversationsInitialized()"] + RE14["usedModalities()"] + end + subgraph ModelsExports["modelsStore"] + RE15["modelOptions()"] + RE16["routerModels()"] + RE17["modelsLoading()"] + RE18["modelsUpdating()"] + RE19["modelsError()"] + RE20["selectedModelId()"] + RE21["selectedModelName()"] + RE22["selectedModelOption()"] + RE23["loadedModelIds()"] + RE24["loadingModelIds()"] + RE25["propsCacheVersion()"] + RE26["singleModelName()"] + end + subgraph ServerExports["serverStore"] + RE27["serverProps()"] + RE28["serverLoading()"] + RE29["serverError()"] + RE30["serverRole()"] + RE31["defaultParams()"] + RE32["contextSize()"] + RE33["isRouterMode()"] + RE34["isModelMode()"] + end + subgraph SettingsExports["settingsStore"] + RE35["config()"] + RE36["theme()"] + RE37["isInitialized()"] + end + end + end + + subgraph Services["⚙️ Services"] + direction TB + subgraph SV1["ChatService"] + SV1Msg["Messaging:
sendMessage()"] + SV1Stream["Streaming:
handleStreamResponse()
parseSSEChunk()"] + SV1Convert["Conversion:
convertMessageToChatData()
convertExtraToApiFormat()"] + SV1Utils["Utilities:
extractReasoningContent()
getServerProps()
getModels()"] + end + subgraph SV2["ModelsService"] + SV2List["Listing:
list()
listRouter()"] + SV2LoadUnload["Load/Unload:
load()
unload()"] + SV2Status["Status:
isModelLoaded()
isModelLoading()"] + end + subgraph SV3["PropsService"] + SV3Fetch["Fetching:
fetch()
fetchForModel()"] + end + subgraph SV4["DatabaseService"] + SV4Conv["Conversations:
createConversation()
getConversation()
getAllConversations()
updateConversation()
deleteConversation()"] + SV4Msg["Messages:
createMessageBranch()
createRootMessage()
getConversationMessages()
updateMessage()
deleteMessage()
deleteMessageCascading()"] + SV4Node["Navigation:
updateCurrentNode()"] + SV4Import["Import:
importConversations()"] + end + subgraph SV5["ParameterSyncService"] + SV5Extract["Extraction:
extractServerDefaults()"] + SV5Merge["Merging:
mergeWithServerDefaults()"] + SV5Info["Info:
getParameterInfo()
canSyncParameter()
getSyncableParameterKeys()
validateServerParameter()"] + SV5Diff["Diff:
createParameterDiff()"] + end + end + + subgraph Storage["💾 Storage"] + ST1["IndexedDB"] + ST2["conversations"] + ST3["messages"] + ST5["LocalStorage"] + ST6["config"] + ST7["userOverrides"] + end + + subgraph APIs["🌐 llama-server API"] + API1["/v1/chat/completions"] + API2["/props
/props?model="] + API3["/models
/models/load
/models/unload"] + API4["/v1/models"] + end + + %% Routes render Components + R1 --> C_Screen + R2 --> C_Screen + RL --> C_Sidebar + + %% Component hierarchy + C_Screen --> C_Form & C_Messages & C_Settings + C_Messages --> C_Message + C_Message --> C_MessageUser + C_MessageUser --> C_MessageEditForm + C_MessageEditForm --> C_ModelsSelector + C_MessageEditForm --> C_Attach + C_Form --> C_ModelsSelector + C_Form --> C_Attach + C_Message --> C_Attach + + %% Components use Hooks + C_Form --> H1 + C_Message --> H1 & H2 + C_MessageEditForm --> H1 + C_Screen --> H2 + + %% Hooks use Stores + H1 --> S3 & S4 + H2 --> S1 & S5 + + %% Components use Stores + C_Screen --> S1 & S2 + C_Messages --> S2 + C_Message --> S1 & S2 & S3 + C_Form --> S1 & S3 + C_Sidebar --> S2 + C_ModelsSelector --> S3 & S4 + C_Settings --> S5 + + %% Stores export Reactive State + S1 -. exports .-> ChatExports + S2 -. exports .-> ConvExports + S3 -. exports .-> ModelsExports + S4 -. exports .-> ServerExports + S5 -. exports .-> SettingsExports + + %% Stores use Services + S1 --> SV1 & SV4 + S2 --> SV4 + S3 --> SV2 & SV3 + S4 --> SV3 + S5 --> SV5 + + %% Services to Storage + SV4 --> ST1 + ST1 --> ST2 & ST3 + SV5 --> ST5 + ST5 --> ST6 & ST7 + + %% Services to APIs + SV1 --> API1 + SV2 --> API3 & API4 + SV3 --> API2 + + %% Styling + classDef routeStyle fill:#e1f5fe,stroke:#01579b,stroke-width:2px + classDef componentStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px + classDef componentGroupStyle fill:#e1bee7,stroke:#7b1fa2,stroke-width:1px + classDef storeStyle fill:#fff3e0,stroke:#e65100,stroke-width:2px + classDef stateStyle fill:#ffe0b2,stroke:#e65100,stroke-width:1px + classDef methodStyle fill:#ffecb3,stroke:#e65100,stroke-width:1px + classDef reactiveStyle fill:#fffde7,stroke:#f9a825,stroke-width:1px + classDef serviceStyle fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px + classDef serviceMStyle fill:#c8e6c9,stroke:#2e7d32,stroke-width:1px + classDef storageStyle fill:#fce4ec,stroke:#c2185b,stroke-width:2px + classDef apiStyle fill:#e3f2fd,stroke:#1565c0,stroke-width:2px + + class R1,R2,RL routeStyle + class C_Sidebar,C_Screen,C_Form,C_Messages,C_Message,C_MessageUser,C_MessageEditForm componentStyle + class C_ModelsSelector,C_Settings componentStyle + class C_Attach componentStyle + class H1,H2,H3 methodStyle + class LayoutComponents,ChatUIComponents componentGroupStyle + class Hooks storeStyle + class S1,S2,S3,S4,S5 storeStyle + class S1State,S2State,S3State,S4State,S5State stateStyle + class S1Msg,S1Regen,S1Edit,S1Stream,S1LoadState,S1ProcState,S1Error,S1Utils methodStyle + class S2Lifecycle,S2ConvCRUD,S2MsgMgmt,S2Nav,S2Modal,S2Export,S2Utils methodStyle + class S3Getters,S3Modal,S3Status,S3Fetch,S3Select,S3LoadUnload,S3Utils methodStyle + class S4Getters,S4Data,S4Utils methodStyle + class S5Lifecycle,S5Update,S5Reset,S5Sync,S5Utils methodStyle + class ChatExports,ConvExports,ModelsExports,ServerExports,SettingsExports reactiveStyle + class SV1,SV2,SV3,SV4,SV5 serviceStyle + class SV1Msg,SV1Stream,SV1Convert,SV1Utils serviceMStyle + class SV2List,SV2LoadUnload,SV2Status serviceMStyle + class SV3Fetch serviceMStyle + class SV4Conv,SV4Msg,SV4Node,SV4Import serviceMStyle + class SV5Extract,SV5Merge,SV5Info,SV5Diff serviceMStyle + class ST1,ST2,ST3,ST5,ST6,ST7 storageStyle + class API1,API2,API3,API4 apiStyle +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/chat-flow.md b/llama.cpp/tools/server/webui/docs/flows/chat-flow.md new file mode 100644 index 0000000..05e1df3 --- /dev/null +++ 
b/llama.cpp/tools/server/webui/docs/flows/chat-flow.md @@ -0,0 +1,174 @@ +```mermaid +sequenceDiagram + participant UI as 🧩 ChatForm / ChatMessage + participant chatStore as 🗄️ chatStore + participant convStore as 🗄️ conversationsStore + participant settingsStore as 🗄️ settingsStore + participant ChatSvc as ⚙️ ChatService + participant DbSvc as ⚙️ DatabaseService + participant API as 🌐 /v1/chat/completions + + Note over chatStore: State:
isLoading, currentResponse
errorDialogState, activeProcessingState
chatLoadingStates (Map)
chatStreamingStates (Map)
abortControllers (Map)
processingStates (Map) + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 💬 SEND MESSAGE + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>chatStore: sendMessage(content, extras) + activate chatStore + + chatStore->>chatStore: setChatLoading(convId, true) + chatStore->>chatStore: clearChatStreaming(convId) + + alt no active conversation + chatStore->>convStore: createConversation() + Note over convStore: → see conversations-flow.mmd + end + + chatStore->>chatStore: addMessage("user", content, extras) + chatStore->>DbSvc: createMessageBranch(userMsg, parentId) + chatStore->>convStore: addMessageToActive(userMsg) + chatStore->>convStore: updateCurrentNode(userMsg.id) + + chatStore->>chatStore: createAssistantMessage(userMsg.id) + chatStore->>DbSvc: createMessageBranch(assistantMsg, userMsg.id) + chatStore->>convStore: addMessageToActive(assistantMsg) + + chatStore->>chatStore: streamChatCompletion(messages, assistantMsg) + deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🌊 STREAMING + %% ═══════════════════════════════════════════════════════════════════════════ + + activate chatStore + chatStore->>chatStore: startStreaming() + Note right of chatStore: isStreamingActive = true + + chatStore->>chatStore: setActiveProcessingConversation(convId) + chatStore->>chatStore: getOrCreateAbortController(convId) + Note right of chatStore: abortControllers.set(convId, new AbortController()) + + chatStore->>chatStore: getApiOptions() + Note right of chatStore: Merge from settingsStore.config:
temperature, max_tokens, top_p, etc. + + chatStore->>ChatSvc: sendMessage(messages, options, signal) + activate ChatSvc + + ChatSvc->>ChatSvc: convertMessageToChatData(messages) + Note right of ChatSvc: DatabaseMessage[] → ApiChatMessageData[]
Process attachments (images, PDFs, audio) + + ChatSvc->>API: POST /v1/chat/completions + Note right of API: {messages, model?, stream: true, ...params} + + loop SSE chunks + API-->>ChatSvc: data: {"choices":[{"delta":{...}}]} + ChatSvc->>ChatSvc: parseSSEChunk(line) + + alt content chunk + ChatSvc-->>chatStore: onChunk(content) + chatStore->>chatStore: setChatStreaming(convId, response, msgId) + Note right of chatStore: currentResponse = $state(accumulated) + chatStore->>convStore: updateMessageAtIndex(idx, {content}) + end + + alt reasoning chunk + ChatSvc-->>chatStore: onReasoningChunk(reasoning) + chatStore->>convStore: updateMessageAtIndex(idx, {thinking}) + end + + alt tool_calls chunk + ChatSvc-->>chatStore: onToolCallChunk(toolCalls) + chatStore->>convStore: updateMessageAtIndex(idx, {toolCalls}) + end + + alt model info + ChatSvc-->>chatStore: onModel(modelName) + chatStore->>chatStore: recordModel(modelName) + chatStore->>DbSvc: updateMessage(msgId, {model}) + end + + alt timings (during stream) + ChatSvc-->>chatStore: onTimings(timings, promptProgress) + chatStore->>chatStore: updateProcessingStateFromTimings() + end + + chatStore-->>UI: reactive $state update + end + + API-->>ChatSvc: data: [DONE] + ChatSvc-->>chatStore: onComplete(content, reasoning, timings, toolCalls) + deactivate ChatSvc + + chatStore->>chatStore: stopStreaming() + chatStore->>DbSvc: updateMessage(msgId, {content, timings, model}) + chatStore->>convStore: updateCurrentNode(msgId) + chatStore->>chatStore: setChatLoading(convId, false) + chatStore->>chatStore: clearChatStreaming(convId) + chatStore->>chatStore: clearProcessingState(convId) + deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ⏹️ STOP GENERATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>chatStore: stopGeneration() + activate chatStore + chatStore->>chatStore: savePartialResponseIfNeeded(convId) + Note right of chatStore: Save currentResponse to DB if non-empty + chatStore->>chatStore: abortControllers.get(convId).abort() + Note right of chatStore: fetch throws AbortError → caught by isAbortError() + chatStore->>chatStore: stopStreaming() + chatStore->>chatStore: setChatLoading(convId, false) + chatStore->>chatStore: clearChatStreaming(convId) + chatStore->>chatStore: clearProcessingState(convId) + deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🔁 REGENERATE + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>chatStore: regenerateMessageWithBranching(msgId, model?) + activate chatStore + chatStore->>convStore: findMessageIndex(msgId) + chatStore->>chatStore: Get parent of target message + chatStore->>chatStore: createAssistantMessage(parentId) + chatStore->>DbSvc: createMessageBranch(newAssistantMsg, parentId) + chatStore->>convStore: refreshActiveMessages() + Note right of chatStore: Same streaming flow + chatStore->>chatStore: streamChatCompletion(...) 
+ deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ➡️ CONTINUE + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>chatStore: continueAssistantMessage(msgId) + activate chatStore + chatStore->>chatStore: Get existing content from message + chatStore->>chatStore: streamChatCompletion(..., existingContent) + Note right of chatStore: Appends to existing message content + deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ✏️ EDIT USER MESSAGE + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>chatStore: editUserMessagePreserveResponses(msgId, newContent) + activate chatStore + chatStore->>chatStore: Get parent of target message + chatStore->>DbSvc: createMessageBranch(editedMsg, parentId) + chatStore->>convStore: refreshActiveMessages() + Note right of chatStore: Creates new branch, original preserved + deactivate chatStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ❌ ERROR HANDLING + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over chatStore: On stream error (non-abort): + chatStore->>chatStore: showErrorDialog(type, message) + Note right of chatStore: errorDialogState = {type: 'timeout'|'server', message} + chatStore->>convStore: removeMessageAtIndex(failedMsgIdx) + chatStore->>DbSvc: deleteMessage(failedMsgId) +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/conversations-flow.md b/llama.cpp/tools/server/webui/docs/flows/conversations-flow.md new file mode 100644 index 0000000..185ed16 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/conversations-flow.md @@ -0,0 +1,155 @@ +```mermaid +sequenceDiagram + participant UI as 🧩 ChatSidebar / ChatScreen + participant convStore as 🗄️ conversationsStore + participant chatStore as 🗄️ chatStore + participant DbSvc as ⚙️ DatabaseService + participant IDB as 💾 IndexedDB + + Note over convStore: State:
conversations: DatabaseConversation[]
activeConversation: DatabaseConversation | null
activeMessages: DatabaseMessage[]
isInitialized: boolean
usedModalities: $derived({vision, audio}) + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 🚀 INITIALIZATION + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over convStore: Auto-initialized in constructor (browser only) + convStore->>convStore: initialize() + activate convStore + convStore->>convStore: loadConversations() + convStore->>DbSvc: getAllConversations() + DbSvc->>IDB: SELECT * FROM conversations ORDER BY lastModified DESC + IDB-->>DbSvc: Conversation[] + DbSvc-->>convStore: conversations + convStore->>convStore: conversations = $state(data) + convStore->>convStore: isInitialized = true + deactivate convStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: ➕ CREATE CONVERSATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: createConversation(name?) + activate convStore + convStore->>DbSvc: createConversation(name || "New Chat") + DbSvc->>IDB: INSERT INTO conversations + IDB-->>DbSvc: conversation {id, name, lastModified, currNode: ""} + DbSvc-->>convStore: conversation + convStore->>convStore: conversations.unshift(conversation) + convStore->>convStore: activeConversation = $state(conversation) + convStore->>convStore: activeMessages = $state([]) + deactivate convStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 📂 LOAD CONVERSATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: loadConversation(convId) + activate convStore + convStore->>DbSvc: getConversation(convId) + DbSvc->>IDB: SELECT * FROM conversations WHERE id = ? + IDB-->>DbSvc: conversation + convStore->>convStore: activeConversation = $state(conversation) + + convStore->>convStore: refreshActiveMessages() + convStore->>DbSvc: getConversationMessages(convId) + DbSvc->>IDB: SELECT * FROM messages WHERE convId = ? + IDB-->>DbSvc: allMessages[] + convStore->>convStore: filterByLeafNodeId(allMessages, currNode) + Note right of convStore: Filter to show only current branch path + convStore->>convStore: activeMessages = $state(filtered) + + convStore->>chatStore: syncLoadingStateForChat(convId) + Note right of chatStore: Sync isLoading/currentResponse if streaming + deactivate convStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 🌳 MESSAGE BRANCHING MODEL + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over IDB: Message Tree Structure:
- Each message has parent (null for root)
- Each message has children[] array
- Conversation.currNode points to active leaf
- filterByLeafNodeId() traverses from root to currNode + + rect rgb(240, 240, 255) + Note over convStore: Example Branch Structure: + Note over convStore: root → user1 → assistant1 → user2 → assistant2a (currNode)
↘ assistant2b (alt branch) + end + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: ↔️ BRANCH NAVIGATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: navigateToSibling(msgId, direction) + activate convStore + convStore->>convStore: Find message in activeMessages + convStore->>convStore: Get parent message + convStore->>convStore: Find sibling in parent.children[] + convStore->>convStore: findLeafNode(siblingId, allMessages) + Note right of convStore: Navigate to leaf of sibling branch + convStore->>convStore: updateCurrentNode(leafId) + convStore->>DbSvc: updateCurrentNode(convId, leafId) + DbSvc->>IDB: UPDATE conversations SET currNode = ? + convStore->>convStore: refreshActiveMessages() + deactivate convStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 📝 UPDATE CONVERSATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: updateConversationName(convId, newName) + activate convStore + convStore->>DbSvc: updateConversation(convId, {name: newName}) + DbSvc->>IDB: UPDATE conversations SET name = ? + convStore->>convStore: Update in conversations array + deactivate convStore + + Note over convStore: Auto-title update (after first response): + convStore->>convStore: updateConversationTitleWithConfirmation() + convStore->>convStore: titleUpdateConfirmationCallback?() + Note right of convStore: Shows dialog if title would change + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 🗑️ DELETE CONVERSATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: deleteConversation(convId) + activate convStore + convStore->>DbSvc: deleteConversation(convId) + DbSvc->>IDB: DELETE FROM conversations WHERE id = ? + DbSvc->>IDB: DELETE FROM messages WHERE convId = ? + convStore->>convStore: conversations.filter(c => c.id !== convId) + alt deleted active conversation + convStore->>convStore: clearActiveConversation() + end + deactivate convStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 📊 MODALITY TRACKING + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over convStore: usedModalities = $derived.by(() => {
calculateModalitiesFromMessages(activeMessages)
}) + + Note over convStore: Scans activeMessages for attachments:
- IMAGE → vision: true
- PDF (processedAsImages) → vision: true
- AUDIO → audio: true + + UI->>convStore: getModalitiesUpToMessage(msgId) + Note right of convStore: Used for regeneration validation
Only checks messages BEFORE target + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,IDB: 📤 EXPORT / 📥 IMPORT + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>convStore: exportAllConversations() + activate convStore + convStore->>DbSvc: getAllConversations() + loop each conversation + convStore->>DbSvc: getConversationMessages(convId) + end + convStore->>convStore: triggerDownload(JSON blob) + deactivate convStore + + UI->>convStore: importConversations(file) + activate convStore + convStore->>convStore: Parse JSON file + convStore->>DbSvc: importConversations(parsed) + DbSvc->>IDB: Bulk INSERT conversations + messages + convStore->>convStore: loadConversations() + deactivate convStore +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-model-mode.md b/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-model-mode.md new file mode 100644 index 0000000..07b3621 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-model-mode.md @@ -0,0 +1,45 @@ +```mermaid +%% MODEL Mode Data Flow (single model) +%% Detailed flows: ./flows/server-flow.mmd, ./flows/models-flow.mmd, ./flows/chat-flow.mmd + +sequenceDiagram + participant User as 👤 User + participant UI as 🧩 UI + participant Stores as 🗄️ Stores + participant DB as 💾 IndexedDB + participant API as 🌐 llama-server + + Note over User,API: 🚀 Initialization (see: server-flow.mmd, models-flow.mmd) + + UI->>Stores: initialize() + Stores->>DB: load conversations + Stores->>API: GET /props + API-->>Stores: server config + modalities + Stores->>API: GET /v1/models + API-->>Stores: single model (auto-selected) + + Note over User,API: 💬 Chat Flow (see: chat-flow.mmd) + + User->>UI: send message + UI->>Stores: sendMessage() + Stores->>DB: save user message + Stores->>API: POST /v1/chat/completions (stream) + loop streaming + API-->>Stores: SSE chunks + Stores-->>UI: reactive update + end + API-->>Stores: done + timings + Stores->>DB: save assistant message + + Note over User,API: 🔁 Regenerate + + User->>UI: regenerate + Stores->>DB: create message branch + Note right of Stores: same streaming flow + + Note over User,API: ⏹️ Stop + + User->>UI: stop + Stores->>Stores: abort stream + Stores->>DB: save partial response +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-router-mode.md b/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-router-mode.md new file mode 100644 index 0000000..bccacf5 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/data-flow-simplified-router-mode.md @@ -0,0 +1,77 @@ +```mermaid +%% ROUTER Mode Data Flow (multi-model) +%% Detailed flows: ./flows/server-flow.mmd, ./flows/models-flow.mmd, ./flows/chat-flow.mmd + +sequenceDiagram + participant User as 👤 User + participant UI as 🧩 UI + participant Stores as 🗄️ Stores + participant DB as 💾 IndexedDB + participant API as 🌐 llama-server + + Note over User,API: 🚀 Initialization (see: server-flow.mmd, models-flow.mmd) + + UI->>Stores: initialize() + Stores->>DB: load conversations + Stores->>API: GET /props + API-->>Stores: {role: "router"} + Stores->>API: GET /v1/models + API-->>Stores: models[] with status (loaded/available) + loop each loaded model + Stores->>API: GET /props?model=X + API-->>Stores: modalities (vision/audio) + end + + Note over User,API: 🔄 Model Selection (see: models-flow.mmd) + + User->>UI: select model + alt model not loaded + Stores->>API: POST /models/load + loop poll 
status + Stores->>API: GET /v1/models + API-->>Stores: check if loaded + end + Stores->>API: GET /props?model=X + API-->>Stores: cache modalities + end + Stores->>Stores: validate modalities vs conversation + alt valid + Stores->>Stores: select model + else invalid + Stores->>API: POST /models/unload + UI->>User: show error toast + end + + Note over User,API: 💬 Chat Flow (see: chat-flow.mmd) + + User->>UI: send message + UI->>Stores: sendMessage() + Stores->>DB: save user message + Stores->>API: POST /v1/chat/completions {model: X} + Note right of API: router forwards to model + loop streaming + API-->>Stores: SSE chunks + model info + Stores-->>UI: reactive update + end + API-->>Stores: done + timings + Stores->>DB: save assistant message + model used + + Note over User,API: 🔁 Regenerate (optional: different model) + + User->>UI: regenerate + Stores->>Stores: validate modalities up to this message + Stores->>DB: create message branch + Note right of Stores: same streaming flow + + Note over User,API: ⏹️ Stop + + User->>UI: stop + Stores->>Stores: abort stream + Stores->>DB: save partial response + + Note over User,API: 🗑️ LRU Unloading + + Note right of API: Server auto-unloads LRU models
when cache full + User->>UI: select unloaded model + Note right of Stores: triggers load flow again +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/database-flow.md b/llama.cpp/tools/server/webui/docs/flows/database-flow.md new file mode 100644 index 0000000..50f8284 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/database-flow.md @@ -0,0 +1,155 @@ +```mermaid +sequenceDiagram + participant Store as 🗄️ Stores + participant DbSvc as ⚙️ DatabaseService + participant Dexie as 📦 Dexie ORM + participant IDB as 💾 IndexedDB + + Note over DbSvc: Stateless service - all methods static
Database: "LlamacppWebui" + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 📊 SCHEMA + %% ═══════════════════════════════════════════════════════════════════════════ + + rect rgb(240, 248, 255) + Note over IDB: conversations table:
id (PK), lastModified, currNode, name + end + + rect rgb(255, 248, 240) + Note over IDB: messages table:
id (PK), convId (FK), type, role, timestamp,
parent, children[], content, thinking,
toolCalls, extra[], model, timings + end + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 💬 CONVERSATIONS CRUD + %% ═══════════════════════════════════════════════════════════════════════════ + + Store->>DbSvc: createConversation(name) + activate DbSvc + DbSvc->>DbSvc: Generate UUID + DbSvc->>Dexie: db.conversations.add({id, name, lastModified, currNode: ""}) + Dexie->>IDB: INSERT + IDB-->>Dexie: success + DbSvc-->>Store: DatabaseConversation + deactivate DbSvc + + Store->>DbSvc: getConversation(convId) + DbSvc->>Dexie: db.conversations.get(convId) + Dexie->>IDB: SELECT WHERE id = ? + IDB-->>DbSvc: DatabaseConversation + + Store->>DbSvc: getAllConversations() + DbSvc->>Dexie: db.conversations.orderBy('lastModified').reverse().toArray() + Dexie->>IDB: SELECT ORDER BY lastModified DESC + IDB-->>DbSvc: DatabaseConversation[] + + Store->>DbSvc: updateConversation(convId, updates) + DbSvc->>Dexie: db.conversations.update(convId, {...updates, lastModified}) + Dexie->>IDB: UPDATE + + Store->>DbSvc: deleteConversation(convId) + activate DbSvc + DbSvc->>Dexie: db.conversations.delete(convId) + Dexie->>IDB: DELETE FROM conversations + DbSvc->>Dexie: db.messages.where('convId').equals(convId).delete() + Dexie->>IDB: DELETE FROM messages WHERE convId = ? + deactivate DbSvc + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 📝 MESSAGES CRUD + %% ═══════════════════════════════════════════════════════════════════════════ + + Store->>DbSvc: createRootMessage(convId) + activate DbSvc + DbSvc->>DbSvc: Create root message {type: "root", parent: null} + DbSvc->>Dexie: db.messages.add(rootMsg) + Dexie->>IDB: INSERT + DbSvc-->>Store: rootMessageId + deactivate DbSvc + + Store->>DbSvc: createMessageBranch(message, parentId) + activate DbSvc + DbSvc->>DbSvc: Generate UUID for new message + DbSvc->>Dexie: db.messages.add({...message, id, parent: parentId}) + Dexie->>IDB: INSERT message + + alt parentId exists + DbSvc->>Dexie: db.messages.get(parentId) + Dexie->>IDB: SELECT parent + DbSvc->>DbSvc: parent.children.push(newId) + DbSvc->>Dexie: db.messages.update(parentId, {children}) + Dexie->>IDB: UPDATE parent.children + end + + DbSvc->>Dexie: db.conversations.update(convId, {currNode: newId}) + Dexie->>IDB: UPDATE conversation.currNode + DbSvc-->>Store: DatabaseMessage + deactivate DbSvc + + Store->>DbSvc: getConversationMessages(convId) + DbSvc->>Dexie: db.messages.where('convId').equals(convId).toArray() + Dexie->>IDB: SELECT WHERE convId = ? 
+ IDB-->>DbSvc: DatabaseMessage[] + + Store->>DbSvc: updateMessage(msgId, updates) + DbSvc->>Dexie: db.messages.update(msgId, updates) + Dexie->>IDB: UPDATE + + Store->>DbSvc: deleteMessage(msgId) + DbSvc->>Dexie: db.messages.delete(msgId) + Dexie->>IDB: DELETE + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 🌳 BRANCHING OPERATIONS + %% ═══════════════════════════════════════════════════════════════════════════ + + Store->>DbSvc: updateCurrentNode(convId, nodeId) + DbSvc->>Dexie: db.conversations.update(convId, {currNode: nodeId, lastModified}) + Dexie->>IDB: UPDATE + + Store->>DbSvc: deleteMessageCascading(msgId) + activate DbSvc + DbSvc->>DbSvc: findDescendantMessages(msgId, allMessages) + Note right of DbSvc: Recursively find all children + loop each descendant + DbSvc->>Dexie: db.messages.delete(descendantId) + Dexie->>IDB: DELETE + end + DbSvc->>Dexie: db.messages.delete(msgId) + Dexie->>IDB: DELETE target message + deactivate DbSvc + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 📥 IMPORT + %% ═══════════════════════════════════════════════════════════════════════════ + + Store->>DbSvc: importConversations(data) + activate DbSvc + loop each conversation in data + DbSvc->>DbSvc: Generate new UUIDs (avoid conflicts) + DbSvc->>Dexie: db.conversations.add(conversation) + Dexie->>IDB: INSERT conversation + loop each message + DbSvc->>Dexie: db.messages.add(message) + Dexie->>IDB: INSERT message + end + end + deactivate DbSvc + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over Store,IDB: 🔗 MESSAGE TREE UTILITIES + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over DbSvc: Used by stores (imported from utils): + + rect rgb(240, 255, 240) + Note over DbSvc: filterByLeafNodeId(messages, leafId)
→ Returns path from root to leaf
→ Used to display current branch + end + + rect rgb(240, 255, 240) + Note over DbSvc: findLeafNode(startId, messages)
→ Traverse to deepest child
→ Used for branch navigation + end + + rect rgb(240, 255, 240) + Note over DbSvc: findDescendantMessages(msgId, messages)
→ Find all children recursively
→ Used for cascading deletes + end +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/models-flow.md b/llama.cpp/tools/server/webui/docs/flows/models-flow.md new file mode 100644 index 0000000..c3031b7 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/models-flow.md @@ -0,0 +1,181 @@ +```mermaid +sequenceDiagram + participant UI as 🧩 ModelsSelector + participant Hooks as 🪝 useModelChangeValidation + participant modelsStore as 🗄️ modelsStore + participant serverStore as 🗄️ serverStore + participant convStore as 🗄️ conversationsStore + participant ModelsSvc as ⚙️ ModelsService + participant PropsSvc as ⚙️ PropsService + participant API as 🌐 llama-server + + Note over modelsStore: State:
models: ModelOption[]
routerModels: ApiModelDataEntry[]
selectedModelId, selectedModelName
loading, updating, error
modelLoadingStates (Map)
modelPropsCache (Map)
propsCacheVersion + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🚀 INITIALIZATION (MODEL mode) + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>modelsStore: fetch() + activate modelsStore + modelsStore->>modelsStore: loading = true + + alt serverStore.props not loaded + modelsStore->>serverStore: fetch() + Note over serverStore: → see server-flow.mmd + end + + modelsStore->>ModelsSvc: list() + ModelsSvc->>API: GET /v1/models + API-->>ModelsSvc: ApiModelListResponse {data: [model]} + + modelsStore->>modelsStore: models = $state(mapped) + Note right of modelsStore: Map to ModelOption[]:
{id, name, model, description, capabilities} + + Note over modelsStore: MODEL mode: Get modalities from serverStore.props + modelsStore->>modelsStore: modelPropsCache.set(model.id, serverStore.props) + modelsStore->>modelsStore: models[0].modalities = props.modalities + + modelsStore->>modelsStore: Auto-select single model + Note right of modelsStore: selectedModelId = models[0].id + modelsStore->>modelsStore: loading = false + deactivate modelsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🚀 INITIALIZATION (ROUTER mode) + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>modelsStore: fetch() + activate modelsStore + modelsStore->>ModelsSvc: list() + ModelsSvc->>API: GET /v1/models + API-->>ModelsSvc: ApiModelListResponse + modelsStore->>modelsStore: models = $state(mapped) + deactivate modelsStore + + Note over UI: After models loaded, layout triggers: + UI->>modelsStore: fetchRouterModels() + activate modelsStore + modelsStore->>ModelsSvc: listRouter() + ModelsSvc->>API: GET /v1/models + API-->>ModelsSvc: ApiRouterModelsListResponse + Note right of API: {data: [{id, status, path, in_cache}]} + modelsStore->>modelsStore: routerModels = $state(data) + + modelsStore->>modelsStore: fetchModalitiesForLoadedModels() + loop each model where status === "loaded" + modelsStore->>PropsSvc: fetchForModel(modelId) + PropsSvc->>API: GET /props?model={modelId} + API-->>PropsSvc: ApiLlamaCppServerProps + modelsStore->>modelsStore: modelPropsCache.set(modelId, props) + end + modelsStore->>modelsStore: propsCacheVersion++ + deactivate modelsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🔄 MODEL SELECTION (ROUTER mode) + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>Hooks: useModelChangeValidation({getRequiredModalities, onSuccess?, onValidationFailure?}) + Note over Hooks: Hook configured per-component:
ChatForm: getRequiredModalities = usedModalities
ChatMessage: getRequiredModalities = getModalitiesUpToMessage(msgId) + + UI->>Hooks: handleModelChange(modelId, modelName) + activate Hooks + Hooks->>Hooks: previousSelectedModelId = modelsStore.selectedModelId + Hooks->>modelsStore: isModelLoaded(modelName)? + + alt model NOT loaded + Hooks->>modelsStore: loadModel(modelName) + Note over modelsStore: → see LOAD MODEL section below + end + + Note over Hooks: Always fetch props (from cache or API) + Hooks->>modelsStore: fetchModelProps(modelName) + modelsStore-->>Hooks: props + + Hooks->>convStore: getRequiredModalities() + convStore-->>Hooks: {vision, audio} + + Hooks->>Hooks: Validate: model.modalities ⊇ required? + + alt validation PASSED + Hooks->>modelsStore: selectModelById(modelId) + Hooks-->>UI: return true + else validation FAILED + Hooks->>UI: toast.error("Model doesn't support required modalities") + alt model was just loaded + Hooks->>modelsStore: unloadModel(modelName) + end + alt onValidationFailure provided + Hooks->>modelsStore: selectModelById(previousSelectedModelId) + end + Hooks-->>UI: return false + end + deactivate Hooks + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ⬆️ LOAD MODEL (ROUTER mode) + %% ═══════════════════════════════════════════════════════════════════════════ + + modelsStore->>modelsStore: loadModel(modelId) + activate modelsStore + + alt already loaded + modelsStore-->>modelsStore: return (no-op) + end + + modelsStore->>modelsStore: modelLoadingStates.set(modelId, true) + modelsStore->>ModelsSvc: load(modelId) + ModelsSvc->>API: POST /models/load {model: modelId} + API-->>ModelsSvc: {status: "loading"} + + modelsStore->>modelsStore: pollForModelStatus(modelId, LOADED) + loop poll every 500ms (max 60 attempts) + modelsStore->>modelsStore: fetchRouterModels() + modelsStore->>ModelsSvc: listRouter() + ModelsSvc->>API: GET /v1/models + API-->>ModelsSvc: models[] + modelsStore->>modelsStore: getModelStatus(modelId) + alt status === LOADED + Note right of modelsStore: break loop + else status === LOADING + Note right of modelsStore: wait 500ms, continue + end + end + + modelsStore->>modelsStore: updateModelModalities(modelId) + modelsStore->>PropsSvc: fetchForModel(modelId) + PropsSvc->>API: GET /props?model={modelId} + API-->>PropsSvc: props with modalities + modelsStore->>modelsStore: modelPropsCache.set(modelId, props) + modelsStore->>modelsStore: propsCacheVersion++ + + modelsStore->>modelsStore: modelLoadingStates.set(modelId, false) + deactivate modelsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ⬇️ UNLOAD MODEL (ROUTER mode) + %% ═══════════════════════════════════════════════════════════════════════════ + + modelsStore->>modelsStore: unloadModel(modelId) + activate modelsStore + modelsStore->>modelsStore: modelLoadingStates.set(modelId, true) + modelsStore->>ModelsSvc: unload(modelId) + ModelsSvc->>API: POST /models/unload {model: modelId} + + modelsStore->>modelsStore: pollForModelStatus(modelId, UNLOADED) + loop poll until unloaded + modelsStore->>ModelsSvc: listRouter() + ModelsSvc->>API: GET /v1/models + end + + modelsStore->>modelsStore: modelLoadingStates.set(modelId, false) + deactivate modelsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 📊 COMPUTED GETTERS + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over modelsStore: Getters:
- selectedModel: ModelOption | null
- loadedModelIds: string[] (from routerModels)
- loadingModelIds: string[] (from modelLoadingStates)
- singleModelName: string | null (MODEL mode only) + + Note over modelsStore: Modality helpers:
- getModelModalities(modelId): {vision, audio}
- modelSupportsVision(modelId): boolean
- modelSupportsAudio(modelId): boolean +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/server-flow.md b/llama.cpp/tools/server/webui/docs/flows/server-flow.md new file mode 100644 index 0000000..d6a1611 --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/server-flow.md @@ -0,0 +1,76 @@ +```mermaid +sequenceDiagram + participant UI as 🧩 +layout.svelte + participant serverStore as 🗄️ serverStore + participant PropsSvc as ⚙️ PropsService + participant API as 🌐 llama-server + + Note over serverStore: State:
props: ApiLlamaCppServerProps | null
loading, error
role: ServerRole | null (MODEL | ROUTER)
fetchPromise (deduplication) + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🚀 INITIALIZATION + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>serverStore: fetch() + activate serverStore + + alt fetchPromise exists (already fetching) + serverStore-->>UI: return fetchPromise + Note right of serverStore: Deduplicate concurrent calls + end + + serverStore->>serverStore: loading = true + serverStore->>serverStore: fetchPromise = new Promise() + + serverStore->>PropsSvc: fetch() + PropsSvc->>API: GET /props + API-->>PropsSvc: ApiLlamaCppServerProps + Note right of API: {role, model_path, model_alias,
modalities, default_generation_settings, ...} + + PropsSvc-->>serverStore: props + serverStore->>serverStore: props = $state(data) + + serverStore->>serverStore: detectRole(props) + Note right of serverStore: role = props.role === "router"
? ServerRole.ROUTER
: ServerRole.MODEL + + serverStore->>serverStore: loading = false + serverStore->>serverStore: fetchPromise = null + deactivate serverStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 📊 COMPUTED GETTERS + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over serverStore: Getters from props: + + rect rgb(240, 255, 240) + Note over serverStore: defaultParams
→ props.default_generation_settings.params
(temperature, top_p, top_k, etc.) + end + + rect rgb(240, 255, 240) + Note over serverStore: contextSize
→ props.default_generation_settings.n_ctx + end + + rect rgb(255, 240, 240) + Note over serverStore: isRouterMode
→ role === ServerRole.ROUTER + end + + rect rgb(255, 240, 240) + Note over serverStore: isModelMode
→ role === ServerRole.MODEL + end + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: 🔗 RELATIONSHIPS + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over serverStore: Used by: + Note right of serverStore: - modelsStore: role detection, MODEL mode modalities
- settingsStore: syncWithServerDefaults (defaultParams)
- chatStore: contextSize for processing state
- UI components: isRouterMode for conditional rendering + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,API: ❌ ERROR HANDLING + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over serverStore: getErrorMessage(): string | null
Returns formatted error for UI display + + Note over serverStore: clear(): void
Resets all state (props, error, loading, role) +``` diff --git a/llama.cpp/tools/server/webui/docs/flows/settings-flow.md b/llama.cpp/tools/server/webui/docs/flows/settings-flow.md new file mode 100644 index 0000000..578e01e --- /dev/null +++ b/llama.cpp/tools/server/webui/docs/flows/settings-flow.md @@ -0,0 +1,144 @@ +```mermaid +sequenceDiagram + participant UI as 🧩 ChatSettings + participant settingsStore as 🗄️ settingsStore + participant serverStore as 🗄️ serverStore + participant ParamSvc as ⚙️ ParameterSyncService + participant LS as 💾 LocalStorage + + Note over settingsStore: State:
config: SettingsConfigType
theme: string ("auto" | "light" | "dark")
isInitialized: boolean
userOverrides: Set<string> + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 🚀 INITIALIZATION + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over settingsStore: Auto-initialized in constructor (browser only) + settingsStore->>settingsStore: initialize() + activate settingsStore + + settingsStore->>settingsStore: loadConfig() + settingsStore->>LS: get("llama-config") + LS-->>settingsStore: StoredConfig | null + + alt config exists + settingsStore->>settingsStore: Merge with SETTING_CONFIG_DEFAULT + Note right of settingsStore: Fill missing keys with defaults + else no config + settingsStore->>settingsStore: config = SETTING_CONFIG_DEFAULT + end + + settingsStore->>LS: get("llama-userOverrides") + LS-->>settingsStore: string[] | null + settingsStore->>settingsStore: userOverrides = new Set(data) + + settingsStore->>settingsStore: loadTheme() + settingsStore->>LS: get("llama-theme") + LS-->>settingsStore: theme | "auto" + + settingsStore->>settingsStore: isInitialized = true + deactivate settingsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 🔄 SYNC WITH SERVER DEFAULTS + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over UI: Triggered from +layout.svelte when serverStore.props loaded + UI->>settingsStore: syncWithServerDefaults() + activate settingsStore + + settingsStore->>serverStore: defaultParams + serverStore-->>settingsStore: {temperature, top_p, top_k, ...} + + settingsStore->>ParamSvc: extractServerDefaults(defaultParams) + ParamSvc-->>settingsStore: Record + + settingsStore->>ParamSvc: mergeWithServerDefaults(config, serverDefaults) + Note right of ParamSvc: For each syncable parameter:
- If NOT in userOverrides → use server default
- If in userOverrides → keep user value + ParamSvc-->>settingsStore: mergedConfig + + settingsStore->>settingsStore: config = mergedConfig + settingsStore->>settingsStore: saveConfig() + deactivate settingsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: ⚙️ UPDATE CONFIG + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>settingsStore: updateConfig(key, value) + activate settingsStore + settingsStore->>settingsStore: config[key] = value + settingsStore->>settingsStore: userOverrides.add(key) + Note right of settingsStore: Mark as user-modified (won't be overwritten by server) + settingsStore->>settingsStore: saveConfig() + settingsStore->>LS: set("llama-config", config) + settingsStore->>LS: set("llama-userOverrides", [...userOverrides]) + deactivate settingsStore + + UI->>settingsStore: updateMultipleConfig({key1: val1, key2: val2}) + activate settingsStore + Note right of settingsStore: Batch update, single save + settingsStore->>settingsStore: For each key: config[key] = value + settingsStore->>settingsStore: For each key: userOverrides.add(key) + settingsStore->>settingsStore: saveConfig() + deactivate settingsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 🔄 RESET + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>settingsStore: resetConfig() + activate settingsStore + settingsStore->>settingsStore: config = SETTING_CONFIG_DEFAULT + settingsStore->>settingsStore: userOverrides.clear() + settingsStore->>settingsStore: syncWithServerDefaults() + Note right of settingsStore: Apply server defaults for syncable params + settingsStore->>settingsStore: saveConfig() + deactivate settingsStore + + UI->>settingsStore: resetParameterToServerDefault(key) + activate settingsStore + settingsStore->>settingsStore: userOverrides.delete(key) + settingsStore->>serverStore: defaultParams[key] + settingsStore->>settingsStore: config[key] = serverDefault + settingsStore->>settingsStore: saveConfig() + deactivate settingsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 🎨 THEME + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>settingsStore: updateTheme(newTheme) + activate settingsStore + settingsStore->>settingsStore: theme = newTheme + settingsStore->>settingsStore: saveTheme() + settingsStore->>LS: set("llama-theme", theme) + deactivate settingsStore + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 📊 PARAMETER INFO + %% ═══════════════════════════════════════════════════════════════════════════ + + UI->>settingsStore: getParameterInfo(key) + settingsStore->>ParamSvc: getParameterInfo(key, config, serverDefaults, userOverrides) + ParamSvc-->>settingsStore: ParameterInfo + Note right of ParamSvc: {
currentValue,
serverDefault,
isUserOverride: boolean,
canSync: boolean,
isDifferentFromServer: boolean
} + + UI->>settingsStore: getParameterDiff() + settingsStore->>ParamSvc: createParameterDiff(config, serverDefaults, userOverrides) + ParamSvc-->>settingsStore: ParameterDiff[] + Note right of ParamSvc: Array of parameters where user != server + + %% ═══════════════════════════════════════════════════════════════════════════ + Note over UI,LS: 📋 CONFIG CATEGORIES + %% ═══════════════════════════════════════════════════════════════════════════ + + Note over settingsStore: Syncable with server (from /props): + rect rgb(240, 255, 240) + Note over settingsStore: temperature, top_p, top_k, min_p
repeat_penalty, presence_penalty, frequency_penalty
dynatemp_range, dynatemp_exponent
typ_p, xtc_probability, xtc_threshold
dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n + end + + Note over settingsStore: UI-only (not synced): + rect rgb(255, 240, 240) + Note over settingsStore: systemMessage, custom (JSON)
showStatistics, enableContinueGeneration
autoMicOnEmpty, disableAutoScroll
apiKey, pdfAsImage, disableReasoningFormat + end +``` diff --git a/llama.cpp/tools/server/webui/eslint.config.js b/llama.cpp/tools/server/webui/eslint.config.js new file mode 100644 index 0000000..5baea57 --- /dev/null +++ b/llama.cpp/tools/server/webui/eslint.config.js @@ -0,0 +1,49 @@ +// For more info, see https://github.com/storybookjs/eslint-plugin-storybook#configuration-flat-config-format +import storybook from 'eslint-plugin-storybook'; + +import prettier from 'eslint-config-prettier'; +import { includeIgnoreFile } from '@eslint/compat'; +import js from '@eslint/js'; +import svelte from 'eslint-plugin-svelte'; +import globals from 'globals'; +import { fileURLToPath } from 'node:url'; +import ts from 'typescript-eslint'; +import svelteConfig from './svelte.config.js'; + +const gitignorePath = fileURLToPath(new URL('./.gitignore', import.meta.url)); + +export default ts.config( + includeIgnoreFile(gitignorePath), + js.configs.recommended, + ...ts.configs.recommended, + ...svelte.configs.recommended, + prettier, + ...svelte.configs.prettier, + { + languageOptions: { + globals: { ...globals.browser, ...globals.node } + }, + rules: { + // typescript-eslint strongly recommend that you do not use the no-undef lint rule on TypeScript projects. + // see: https://typescript-eslint.io/troubleshooting/faqs/eslint/#i-get-errors-from-the-no-undef-rule-about-global-variables-not-being-defined-even-though-there-are-no-typescript-errors + 'no-undef': 'off', + 'svelte/no-at-html-tags': 'off' + } + }, + { + files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'], + languageOptions: { + parserOptions: { + projectService: true, + extraFileExtensions: ['.svelte'], + parser: ts.parser, + svelteConfig + } + } + }, + { + // Exclude Storybook files from main ESLint rules + ignores: ['.storybook/**/*'] + }, + storybook.configs['flat/recommended'] +); diff --git a/llama.cpp/tools/server/webui/package-lock.json b/llama.cpp/tools/server/webui/package-lock.json new file mode 100644 index 0000000..6834416 --- /dev/null +++ b/llama.cpp/tools/server/webui/package-lock.json @@ -0,0 +1,9343 @@ +{ + "name": "webui", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "webui", + "version": "1.0.0", + "dependencies": { + "highlight.js": "^11.11.1", + "mode-watcher": "^1.1.0", + "pdfjs-dist": "^5.4.54", + "rehype-highlight": "^7.0.2", + "rehype-stringify": "^10.0.1", + "remark": "^15.0.1", + "remark-breaks": "^4.0.0", + "remark-gfm": "^4.0.1", + "remark-html": "^16.0.1", + "remark-rehype": "^11.1.2", + "svelte-sonner": "^1.0.5", + "unist-util-visit": "^5.0.0" + }, + "devDependencies": { + "@chromatic-com/storybook": "^4.1.2", + "@eslint/compat": "^1.2.5", + "@eslint/js": "^9.18.0", + "@internationalized/date": "^3.10.1", + "@lucide/svelte": "^0.515.0", + "@playwright/test": "^1.49.1", + "@storybook/addon-a11y": "^10.0.7", + "@storybook/addon-docs": "^10.0.7", + "@storybook/addon-svelte-csf": "^5.0.10", + "@storybook/addon-vitest": "^10.0.7", + "@storybook/sveltekit": "^10.0.7", + "@sveltejs/adapter-static": "^3.0.10", + "@sveltejs/kit": "^2.48.4", + "@sveltejs/vite-plugin-svelte": "^6.2.1", + "@tailwindcss/forms": "^0.5.9", + "@tailwindcss/typography": "^0.5.15", + "@tailwindcss/vite": "^4.0.0", + "@types/node": "^22", + "@vitest/browser": "^3.2.3", + "bits-ui": "^2.14.4", + "clsx": "^2.1.1", + "dexie": "^4.0.11", + "eslint": "^9.18.0", + "eslint-config-prettier": "^10.0.1", + "eslint-plugin-storybook": "^10.0.7", + "eslint-plugin-svelte": "^3.0.0", + "fflate": "^0.8.2", 
+ "globals": "^16.0.0", + "http-server": "^14.1.1", + "mdast": "^3.0.0", + "mdsvex": "^0.12.3", + "playwright": "^1.56.1", + "prettier": "^3.4.2", + "prettier-plugin-svelte": "^3.3.3", + "prettier-plugin-tailwindcss": "^0.6.11", + "rehype-katex": "^7.0.1", + "remark-math": "^6.0.0", + "sass": "^1.93.3", + "storybook": "^10.0.7", + "svelte": "^5.38.2", + "svelte-check": "^4.0.0", + "tailwind-merge": "^3.3.1", + "tailwind-variants": "^3.2.2", + "tailwindcss": "^4.0.0", + "tw-animate-css": "^1.3.5", + "typescript": "^5.0.0", + "typescript-eslint": "^8.20.0", + "unified": "^11.0.5", + "uuid": "^13.0.0", + "vite": "^7.2.2", + "vite-plugin-devtools-json": "^0.2.0", + "vitest": "^3.2.3", + "vitest-browser-svelte": "^0.1.0" + } + }, + "node_modules/@adobe/css-tools": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.3.tgz", + "integrity": "sha512-VQKMkwriZbaOgVCby1UDY/LDk5fIjhQicCvVPFqfe+69fWaPWydbWJ3wRt59/YzIwda1I81loas3oCoHxnqvdA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", + "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@chromatic-com/storybook": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@chromatic-com/storybook/-/storybook-4.1.2.tgz", + "integrity": "sha512-QAWGtHwib0qsP5CcO64aJCF75zpFgpKK3jNpxILzQiPK3sVo4EmnVGJVdwcZWpWrGdH8E4YkncGoitw4EXzKMg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@neoconfetti/react": "^1.0.0", + "chromatic": "^12.0.0", + "filesize": "^10.0.12", + "jsonfile": "^6.1.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=20.0.0", + "yarn": ">=1.22.18" + }, + "peerDependencies": { + "storybook": "^0.0.0-0 || ^9.0.0 || ^9.1.0-0 || ^9.2.0-0 || ^10.0.0-0 || ^10.1.0-0 || ^10.2.0-0 || ^10.3.0-0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.8.tgz", + "integrity": 
"sha512-urAvrUedIqEiFR3FYSLTWQgLu5tb+m0qZw0NBEasUeo6wuqatkMDaRT+1uABiGXEu5vqgPd7FGE1BhsAIy9QVA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.8.tgz", + "integrity": "sha512-RONsAvGCz5oWyePVnLdZY/HHwA++nxYWIX1atInlaW6SEkwq6XkP3+cb825EUcRs5Vss/lGh/2YxAb5xqc07Uw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.8.tgz", + "integrity": "sha512-OD3p7LYzWpLhZEyATcTSJ67qB5D+20vbtr6vHlHWSQYhKtzUYrETuWThmzFpZtFsBIxRvhO07+UgVA9m0i/O1w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.8.tgz", + "integrity": "sha512-yJAVPklM5+4+9dTeKwHOaA+LQkmrKFX96BM0A/2zQrbS6ENCmxc4OVoBs5dPkCCak2roAD+jKCdnmOqKszPkjA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.8.tgz", + "integrity": "sha512-Jw0mxgIaYX6R8ODrdkLLPwBqHTtYHJSmzzd+QeytSugzQ0Vg4c5rDky5VgkoowbZQahCbsv1rT1KW72MPIkevw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.8.tgz", + "integrity": "sha512-Vh2gLxxHnuoQ+GjPNvDSDRpoBCUzY4Pu0kBqMBDlK4fuWbKgGtmDIeEC081xi26PPjn+1tct+Bh8FjyLlw1Zlg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.8.tgz", + "integrity": "sha512-YPJ7hDQ9DnNe5vxOm6jaie9QsTwcKedPvizTVlqWG9GBSq+BuyWEDazlGaDTC5NGU4QJd666V0yqCBL2oWKPfA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.8.tgz", + "integrity": "sha512-MmaEXxQRdXNFsRN/KcIimLnSJrk2r5H8v+WVafRWz5xdSVmWLoITZQXcgehI2ZE6gioE6HirAEToM/RvFBeuhw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.8.tgz", + "integrity": "sha512-FuzEP9BixzZohl1kLf76KEVOsxtIBFwCaLupVuk4eFVnOZfU+Wsn+x5Ryam7nILV2pkq2TqQM9EZPsOBuMC+kg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.8.tgz", + "integrity": "sha512-WIgg00ARWv/uYLU7lsuDK00d/hHSfES5BzdWAdAig1ioV5kaFNrtK8EqGcUBJhYqotlUByUKz5Qo6u8tt7iD/w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.8.tgz", + "integrity": "sha512-A1D9YzRX1i+1AJZuFFUMP1E9fMaYY+GnSQil9Tlw05utlE86EKTUA7RjwHDkEitmLYiFsRd9HwKBPEftNdBfjg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.8.tgz", + "integrity": "sha512-O7k1J/dwHkY1RMVvglFHl1HzutGEFFZ3kNiDMSOyUrB7WcoHGf96Sh+64nTRT26l3GMbCW01Ekh/ThKM5iI7hQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.8.tgz", + "integrity": "sha512-uv+dqfRazte3BzfMp8PAQXmdGHQt2oC/y2ovwpTteqrMx2lwaksiFZ/bdkXJC19ttTvNXBuWH53zy/aTj1FgGw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.8.tgz", + "integrity": "sha512-GyG0KcMi1GBavP5JgAkkstMGyMholMDybAf8wF5A70CALlDM2p/f7YFE7H92eDeH/VBtFJA5MT4nRPDGg4JuzQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.8.tgz", + "integrity": "sha512-rAqDYFv3yzMrq7GIcen3XP7TUEG/4LK86LUPMIz6RT8A6pRIDn0sDcvjudVZBiiTcZCY9y2SgYX2lgK3AF+1eg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.8.tgz", + "integrity": "sha512-Xutvh6VjlbcHpsIIbwY8GVRbwoviWT19tFhgdA7DlenLGC/mbc3lBoVb7jxj9Z+eyGqvcnSyIltYUrkKzWqSvg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.8.tgz", + "integrity": "sha512-ASFQhgY4ElXh3nDcOMTkQero4b1lgubskNlhIfJrsH5OKZXDpUAKBlNS0Kx81jwOBp+HCeZqmoJuihTv57/jvQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.8.tgz", + "integrity": 
"sha512-d1KfruIeohqAi6SA+gENMuObDbEjn22olAR7egqnkCD9DGBG0wsEARotkLgXDu6c4ncgWTZJtN5vcgxzWRMzcw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.8.tgz", + "integrity": "sha512-nVDCkrvx2ua+XQNyfrujIG38+YGyuy2Ru9kKVNyh5jAys6n+l44tTtToqHjino2My8VAY6Lw9H7RI73XFi66Cg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.8.tgz", + "integrity": "sha512-j8HgrDuSJFAujkivSMSfPQSAa5Fxbvk4rgNAS5i3K+r8s1X0p1uOO2Hl2xNsGFppOeHOLAVgYwDVlmxhq5h+SQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.8.tgz", + "integrity": "sha512-1h8MUAwa0VhNCDp6Af0HToI2TJFAn1uqT9Al6DJVzdIBAd21m/G0Yfc77KDM3uF3T/YaOgQq3qTJHPbTOInaIQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.8.tgz", + "integrity": "sha512-r2nVa5SIK9tSWd0kJd9HCffnDHKchTGikb//9c7HX+r+wHYCpQrSgxhlY6KWV1nFo1l4KFbsMlHk+L6fekLsUg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.8.tgz", + "integrity": "sha512-zUlaP2S12YhQ2UzUfcCuMDHQFJyKABkAjvO5YSndMiIkMimPmxA+BYSBikWgsRpvyxuRnow4nS5NPnf9fpv41w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.8.tgz", + "integrity": "sha512-YEGFFWESlPva8hGL+zvj2z/SaK+pH0SwOM0Nc/d+rVnW7GSTFlLBGzZkuSU9kFIGIo8q9X3ucpZhu8PDN5A2sQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.8.tgz", + "integrity": "sha512-hiGgGC6KZ5LZz58OL/+qVVoZiuZlUYlYHNAmczOm7bs2oE1XriPFi5ZHHrS8ACpV5EjySrnoCKmcbQMN+ojnHg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.8.tgz", + "integrity": "sha512-cn3Yr7+OaaZq1c+2pe+8yxC8E144SReCQjN6/2ynubzYjvyqZjTXfQJpAcQpsdJq3My7XADANiYGHoFC69pLQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz", + "integrity": "sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/compat": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@eslint/compat/-/compat-1.3.1.tgz", + "integrity": "sha512-k8MHony59I5EPic6EQTCNOuPoVBnoYXkP+20xvwFjN7t0qI3ImyvyBgg+hIVPwC8JaxVjjUZld+cLfBLFDLucg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "peerDependencies": { + "eslint": "^8.40 || 9" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz", + "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.6", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.0.tgz", + "integrity": "sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.2.tgz", + "integrity": "sha512-78Md3/Rrxh83gCxoUc0EiciuOHsIITzLy53m3d9UyiW8y9Dj2D29FeETqyKA+BRK76tnTp6RXWb3pCay8Oyomg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.31.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.31.0.tgz", + "integrity": "sha512-LOm5OVt7D4qiKCqoiPbA7LWmI+tbw1VbTUowBcUMgQSuM6poJufkFkYDcQpo5KfgD39TnNySV26QjOh7VFpSyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.5.tgz", + "integrity": "sha512-Z5kJ+wU3oA7MMIqVR9tyZRtjYPr4OC004Q4Rw7pgOKUOKkJfZ3O24nz3WYfGRpMDNmcOi3TwQOmgm7B7Tpii0w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.15.2", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.2.tgz", + "integrity": "sha512-wNB5ooIKHQc+Kui96jE/n69rHFWAVoxn5CAzL1Xdd8FG03cgY3MLO+GF9U3W737fYDSgPWA6MReKhBQBop6Pcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.2.tgz", + "integrity": "sha512-7cfaOQuCS27HD7DX+6ib2OrnW+b4ZBwDNnCcT0uTyidcmyWb03FnQqJybDBoCnpdxwBSfA94UAYlRCt7mV+TbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.2", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.6", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", + "integrity": 
"sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.3.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", + "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@internationalized/date": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@internationalized/date/-/date-3.10.1.tgz", + "integrity": "sha512-oJrXtQiAXLvT9clCf1K4kxp3eKsQhIaZqxEyowkBcsvZDdZkbWrVmnGknxs5flTD0VGsxrxKgBCZty1EzoiMzA==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@swc/helpers": "^0.5.0" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.12", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.12.tgz", + "integrity": "sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.4.tgz", + "integrity": "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.29", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.29.tgz", + "integrity": "sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@lucide/svelte": { + "version": "0.515.0", + "resolved": "https://registry.npmjs.org/@lucide/svelte/-/svelte-0.515.0.tgz", + "integrity": "sha512-CEAyqcZmNBfYzVgaRmK2RFJP5tnbXxekRyDk0XX/eZQRfsJmkDvmQwXNX8C869BgNeryzmrRyjHhUL6g9ZOHNA==", + "dev": true, + "license": "ISC", + "peerDependencies": { + "svelte": "^5" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz", + "integrity": "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@napi-rs/canvas": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas/-/canvas-0.1.76.tgz", + "integrity": "sha512-YIk5okeNN53GzjvWmAyCQFE9xrLeQXzYpudX4TiLvqaz9SqXgIgxIuKPe4DKyB5nccsQMIev7JGKTzZaN5rFdw==", + "license": "MIT", + "optional": true, + "workspaces": [ + "e2e/*" + ], + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@napi-rs/canvas-android-arm64": "0.1.76", + "@napi-rs/canvas-darwin-arm64": "0.1.76", + "@napi-rs/canvas-darwin-x64": "0.1.76", + "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.76", + "@napi-rs/canvas-linux-arm64-gnu": "0.1.76", + "@napi-rs/canvas-linux-arm64-musl": "0.1.76", + "@napi-rs/canvas-linux-riscv64-gnu": "0.1.76", + "@napi-rs/canvas-linux-x64-gnu": "0.1.76", + "@napi-rs/canvas-linux-x64-musl": "0.1.76", + "@napi-rs/canvas-win32-x64-msvc": "0.1.76" + } + }, + "node_modules/@napi-rs/canvas-android-arm64": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-android-arm64/-/canvas-android-arm64-0.1.76.tgz", + "integrity": "sha512-7EAfkLBQo2QoEzpHdInFbfEUYTXsiO2hvtFo1D9zfTzcQM8n5piZdOpJ3EIkmpe8yLoSV8HLyUQtq4bv11x6Tg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-darwin-arm64": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-darwin-arm64/-/canvas-darwin-arm64-0.1.76.tgz", + "integrity": "sha512-Cs8WRMzaWSJWeWY8tvnCe+TuduHUbB0xFhZ0FmOrNy2prPxT4A6aU3FQu8hR9XJw8kKZ7v902wzaDmy9SdhG8A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-darwin-x64": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-darwin-x64/-/canvas-darwin-x64-0.1.76.tgz", + "integrity": 
"sha512-ya+T6gV9XAq7YAnMa2fKhWXAuRR5cpRny2IoHacoMxgtOARnUkJO/k3hIb52FtMoq7UxLi5+IFGVHU6ZiMu4Ag==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm-gnueabihf": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-arm-gnueabihf/-/canvas-linux-arm-gnueabihf-0.1.76.tgz", + "integrity": "sha512-fgnPb+FKVuixACvkHGldJqYXExORBwvqGgL0K80uE6SGH2t0UKD2auHw2CtBy14DUzfg82PkupO2ix2w7kB+Xw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm64-gnu": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-arm64-gnu/-/canvas-linux-arm64-gnu-0.1.76.tgz", + "integrity": "sha512-r8OxIenvBPOa4I014k1ZWTCz2dB0ZTsxMP7+ovMOKO7jkl1Z+YZo2OTAqxArpMhN0wdEeI3Lw9zUcn2HgwEgDA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm64-musl": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-arm64-musl/-/canvas-linux-arm64-musl-0.1.76.tgz", + "integrity": "sha512-smxwzKfHYaOYG7QXUuDPrFEC7WqjL3Lx4AM6mk8/FxDAS+8o0eoZJwSu+zXsaBLimEQUozEYgEGtJ2JJ0RdL4A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-riscv64-gnu": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-riscv64-gnu/-/canvas-linux-riscv64-gnu-0.1.76.tgz", + "integrity": "sha512-G2PsFwsP+r4syEoNLStV3n1wtNAClwf8s/qB57bexG08R4f4WaiBd+x+d4iYS0Y5o90YIEm8/ewZn4bLIa0wNQ==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-x64-gnu": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-x64-gnu/-/canvas-linux-x64-gnu-0.1.76.tgz", + "integrity": "sha512-SNK+vgge4DnuONYdYE3Y09LivGgUiUPQDU+PdGNZJIzIi0hRDLcA59eag8LGeQfPmJW84c1aZD04voihybKFog==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-x64-musl": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-x64-musl/-/canvas-linux-x64-musl-0.1.76.tgz", + "integrity": "sha512-tWHLBI9iVoR1NsfpHz1MGERTkqcca8akbH/CzX6JQUNC+lJOeYYXeRuK8hKqMIg1LI+4QOMAtHNVeZu8NvjEug==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-win32-x64-msvc": { + "version": "0.1.76", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-win32-x64-msvc/-/canvas-win32-x64-msvc-0.1.76.tgz", + "integrity": "sha512-ifM5HOGw2hP5QLQzCB41Riw3Pq5yKAAjZpn+lJC0sYBmyS2s/Kq6KpTOKxf0CuptkI1wMcRcYQfhLRdeWiYvIg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@neoconfetti/react": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@neoconfetti/react/-/react-1.0.0.tgz", + "integrity": "sha512-klcSooChXXOzIm+SE5IISIAn3bYzYfPjbX7D7HoqZL84oAfgREeSg5vSIaSFH+DaGzzvImTyWe1OyrJ67vik4A==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@parcel/watcher": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz", + "integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "dependencies": { + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" + }, + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.1", + "@parcel/watcher-darwin-arm64": "2.5.1", + "@parcel/watcher-darwin-x64": "2.5.1", + "@parcel/watcher-freebsd-x64": "2.5.1", + "@parcel/watcher-linux-arm-glibc": "2.5.1", + "@parcel/watcher-linux-arm-musl": "2.5.1", + "@parcel/watcher-linux-arm64-glibc": "2.5.1", + "@parcel/watcher-linux-arm64-musl": "2.5.1", + "@parcel/watcher-linux-x64-glibc": "2.5.1", + "@parcel/watcher-linux-x64-musl": "2.5.1", + "@parcel/watcher-win32-arm64": "2.5.1", + "@parcel/watcher-win32-ia32": "2.5.1", + "@parcel/watcher-win32-x64": "2.5.1" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz", + "integrity": "sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz", + "integrity": "sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.1", + "resolved": 
"https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz", + "integrity": "sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz", + "integrity": "sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz", + "integrity": "sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz", + "integrity": "sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz", + "integrity": "sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz", + "integrity": "sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz", + "integrity": "sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz", + "integrity": "sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz", + "integrity": "sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz", + "integrity": "sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz", + "integrity": "sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher/node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/@playwright/test": { + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.56.1.tgz", + "integrity": "sha512-vSMYtL/zOcFpvJCW71Q/OEGQb7KYBPAdKh35WNSkaZA75JlAO8ED8UN6GUNTm3drWomcbcqRPFqQbLae8yBTdg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright": "1.56.1" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.29", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz", + "integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.45.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.45.1.tgz", + "integrity": "sha512-NEySIFvMY0ZQO+utJkgoMiCAjMrGvnbDLHvcmlA33UXJpYBCvlBEbMMtV837uCkS+plG2umfhn0T5mMAxGrlRA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.45.1.tgz", + "integrity": "sha512-ujQ+sMXJkg4LRJaYreaVx7Z/VMgBBd89wGS4qMrdtfUFZ+TSY5Rs9asgjitLwzeIbhwdEhyj29zhst3L1lKsRQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.45.1.tgz", + "integrity": "sha512-FSncqHvqTm3lC6Y13xncsdOYfxGSLnP+73k815EfNmpewPs+EyM49haPS105Rh4aF5mJKywk9X0ogzLXZzN9lA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.45.1.tgz", + "integrity": "sha512-2/vVn/husP5XI7Fsf/RlhDaQJ7x9zjvC81anIVbr4b/f0xtSmXQTFcGIQ/B1cXIYM6h2nAhJkdMHTnD7OtQ9Og==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.45.1.tgz", + "integrity": "sha512-4g1kaDxQItZsrkVTdYQ0bxu4ZIQ32cotoQbmsAnW1jAE4XCMbcBPDirX5fyUzdhVCKgPcrwWuucI8yrVRBw2+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.45.1.tgz", + "integrity": "sha512-L/6JsfiL74i3uK1Ti2ZFSNsp5NMiM4/kbbGEcOCps99aZx3g8SJMO1/9Y0n/qKlWZfn6sScf98lEOUe2mBvW9A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.45.1.tgz", + "integrity": "sha512-RkdOTu2jK7brlu+ZwjMIZfdV2sSYHK2qR08FUWcIoqJC2eywHbXr0L8T/pONFwkGukQqERDheaGTeedG+rra6Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.45.1.tgz", + "integrity": "sha512-3kJ8pgfBt6CIIr1o+HQA7OZ9mp/zDk3ctekGl9qn/pRBgrRgfwiffaUmqioUGN9hv0OHv2gxmvdKOkARCtRb8Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.45.1.tgz", + "integrity": "sha512-k3dOKCfIVixWjG7OXTCOmDfJj3vbdhN0QYEqB+OuGArOChek22hn7Uy5A/gTDNAcCy5v2YcXRJ/Qcnm4/ma1xw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ 
+ "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.45.1.tgz", + "integrity": "sha512-PmI1vxQetnM58ZmDFl9/Uk2lpBBby6B6rF4muJc65uZbxCs0EA7hhKCk2PKlmZKuyVSHAyIw3+/SiuMLxKxWog==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loongarch64-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.45.1.tgz", + "integrity": "sha512-9UmI0VzGmNJ28ibHW2GpE2nF0PBQqsyiS4kcJ5vK+wuwGnV5RlqdczVocDSUfGX/Na7/XINRVoUgJyFIgipoRg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.45.1.tgz", + "integrity": "sha512-7nR2KY8oEOUTD3pBAxIBBbZr0U7U+R9HDTPNy+5nVVHDXI4ikYniH1oxQz9VoB5PbBU1CZuDGHkLJkd3zLMWsg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.45.1.tgz", + "integrity": "sha512-nlcl3jgUultKROfZijKjRQLUu9Ma0PeNv/VFHkZiKbXTBQXhpytS8CIj5/NfBeECZtY2FJQubm6ltIxm/ftxpw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.45.1.tgz", + "integrity": "sha512-HJV65KLS51rW0VY6rvZkiieiBnurSzpzore1bMKAhunQiECPuxsROvyeaot/tcK3A3aGnI+qTHqisrpSgQrpgA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.45.1.tgz", + "integrity": "sha512-NITBOCv3Qqc6hhwFt7jLV78VEO/il4YcBzoMGGNxznLgRQf43VQDae0aAzKiBeEPIxnDrACiMgbqjuihx08OOw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.45.1.tgz", + "integrity": "sha512-+E/lYl6qu1zqgPEnTrs4WysQtvc/Sh4fC2nByfFExqgYrqkKWp1tWIbe+ELhixnenSpBbLXNi6vbEEJ8M7fiHw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.45.1.tgz", + "integrity": "sha512-a6WIAp89p3kpNoYStITT9RbTbTnqarU7D8N8F2CV+4Cl9fwCOZraLVuVFvlpsW0SbIiYtEnhCZBPLoNdRkjQFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.45.1.tgz", + "integrity": 
"sha512-T5Bi/NS3fQiJeYdGvRpTAP5P02kqSOpqiopwhj0uaXB6nzs5JVi2XMJb18JUSKhCOX8+UE1UKQufyD6Or48dJg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.45.1.tgz", + "integrity": "sha512-lxV2Pako3ujjuUe9jiU3/s7KSrDfH6IgTSQOnDWr9aJ92YsFd7EurmClK0ly/t8dzMkDtd04g60WX6yl0sGfdw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.45.1.tgz", + "integrity": "sha512-M/fKi4sasCdM8i0aWJjCSFm2qEnYRR8AMLG2kxp6wD13+tMGA4Z1tVAuHkNRjud5SW2EM3naLuK35w9twvf6aA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@storybook/addon-a11y": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/addon-a11y/-/addon-a11y-10.0.7.tgz", + "integrity": "sha512-JsYPpZ/n67/2bI1XJeyrAWHHQkHemPkPHjCA0tAUnMz1Shlo/LV2q1Ahgpxoihx4strbHwZz71bcS4MqkHBduA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/global": "^5.0.0", + "axe-core": "^4.2.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "storybook": "^10.0.7" + } + }, + "node_modules/@storybook/addon-docs": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/addon-docs/-/addon-docs-10.0.7.tgz", + "integrity": "sha512-qQQMoeYZC4W+/8ubfOZiTrE8nYC/f4wWP1uq4peRyDy1N2nIN9SwhyxwMn0m3VpeGmRBga5dLvJY9ko6SnJekg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@mdx-js/react": "^3.0.0", + "@storybook/csf-plugin": "10.0.7", + "@storybook/icons": "^1.6.0", + "@storybook/react-dom-shim": "10.0.7", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "ts-dedent": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "storybook": "^10.0.7" + } + }, + "node_modules/@storybook/addon-svelte-csf": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/@storybook/addon-svelte-csf/-/addon-svelte-csf-5.0.10.tgz", + "integrity": "sha512-poSvTS7VdaQ42ZoqW5e4+2Hv1iLO0mekH9fwn/QuBNse48R4WlTyR8XFbHRTfatl9gdc9ZYC4uWzazrmV6zGIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/csf": "^0.1.13", + "dedent": "^1.5.3", + "es-toolkit": "^1.26.1", + "esrap": "^1.2.2", + "magic-string": "^0.30.12", + "svelte-ast-print": "^0.4.0", + "zimmerframe": "^1.1.2" + }, + "peerDependencies": { + "@storybook/svelte": "^0.0.0-0 || ^8.2.0 || ^9.0.0 || ^9.1.0-0 || ^10.0.0-0", + "@sveltejs/vite-plugin-svelte": "^4.0.0 || ^5.0.0 || ^6.0.0", + "storybook": "^0.0.0-0 || ^8.2.0 || ^9.0.0 || ^9.1.0-0 || ^10.0.0-0", + "svelte": "^5.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@storybook/addon-vitest": { + "version": "10.0.7", + "resolved": 
"https://registry.npmjs.org/@storybook/addon-vitest/-/addon-vitest-10.0.7.tgz", + "integrity": "sha512-i6v/mAl+elrUxb+1f4NdnM17t/fg+KGJWL1U9quflXTd3KiLY0xJB4LwNP6yYo7Imc5NIO2fRkJbGvNqLBRe2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/global": "^5.0.0", + "@storybook/icons": "^1.6.0", + "prompts": "^2.4.0", + "ts-dedent": "^2.2.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "@vitest/browser": "^3.0.0 || ^4.0.0", + "@vitest/browser-playwright": "^4.0.0", + "@vitest/runner": "^3.0.0 || ^4.0.0", + "storybook": "^10.0.7", + "vitest": "^3.0.0 || ^4.0.0" + }, + "peerDependenciesMeta": { + "@vitest/browser": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/runner": { + "optional": true + }, + "vitest": { + "optional": true + } + } + }, + "node_modules/@storybook/builder-vite": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/builder-vite/-/builder-vite-10.0.7.tgz", + "integrity": "sha512-wk2TAoUY5+9t78GWVBndu9rEo9lo6Ec3SRrLT4VpIlcS2GPK+5f26UC2uvIBwOF/N7JrUUKq/zWDZ3m+do9QDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/csf-plugin": "10.0.7", + "ts-dedent": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "storybook": "^10.0.7", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@storybook/csf": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/@storybook/csf/-/csf-0.1.13.tgz", + "integrity": "sha512-7xOOwCLGB3ebM87eemep89MYRFTko+D8qE7EdAAq74lgdqRR5cOUtYWJLjO2dLtP94nqoOdHJo6MdLLKzg412Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^2.19.0" + } + }, + "node_modules/@storybook/csf-plugin": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/csf-plugin/-/csf-plugin-10.0.7.tgz", + "integrity": "sha512-YaYYlCyJBwxaMk7yREOdz+9MDSgxIYGdeJ9EIq/bUndmkoj9SRo1P9/0lC5dseWQoiGy4T3PbZiWruD8uM5m3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "unplugin": "^2.3.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "esbuild": "*", + "rollup": "*", + "storybook": "^10.0.7", + "vite": "*", + "webpack": "*" + }, + "peerDependenciesMeta": { + "esbuild": { + "optional": true + }, + "rollup": { + "optional": true + }, + "vite": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/@storybook/global": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@storybook/global/-/global-5.0.0.tgz", + "integrity": "sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@storybook/icons": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@storybook/icons/-/icons-1.6.0.tgz", + "integrity": "sha512-hcFZIjW8yQz8O8//2WTIXylm5Xsgc+lW9ISLgUk1xGmptIJQRdlhVIXCpSyLrQaaRiyhQRaVg7l3BD9S216BHw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta" + } + }, + "node_modules/@storybook/react-dom-shim": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/react-dom-shim/-/react-dom-shim-10.0.7.tgz", + "integrity": 
"sha512-bp4OnMtZGwPJQDqNRi4K5iibLbZ2TZZMkWW7oSw5jjPFpGSreSjCe8LH9yj/lDnK8Ox9bGMCBFE5RV5XuML29w==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "storybook": "^10.0.7" + } + }, + "node_modules/@storybook/svelte": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/svelte/-/svelte-10.0.7.tgz", + "integrity": "sha512-rO+YQhHucy47Vh67z318pALmd6x+K1Kj30Fb4a6oOEw4xn4zCo9KTmkMWs24c4oduEXD/eJu3badlRmsVXzyfA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ts-dedent": "^2.0.0", + "type-fest": "~2.19" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "storybook": "^10.0.7", + "svelte": "^5.0.0" + } + }, + "node_modules/@storybook/svelte-vite": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/svelte-vite/-/svelte-vite-10.0.7.tgz", + "integrity": "sha512-q9/RtrhX1CnznO6AO9MDEy1bsccbGeRxW28FLpgUrztV4IGZ/dFUrFIFurKRyuA3/nFsbtzp1F5jFt3RExmmTw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/builder-vite": "10.0.7", + "@storybook/svelte": "10.0.7", + "magic-string": "^0.30.0", + "svelte2tsx": "^0.7.44", + "typescript": "^4.9.4 || ^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "@sveltejs/vite-plugin-svelte": "^2.0.0 || ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0", + "storybook": "^10.0.7", + "svelte": "^5.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@storybook/sveltekit": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@storybook/sveltekit/-/sveltekit-10.0.7.tgz", + "integrity": "sha512-ujTW7PfWvgBrzd7jzaZe9JgjUeM5YvBKm+xru6t7Dr4bdfmkKqlZHPRdXn/sy+fQNyfg6JL2WKy2KIIeA+RvSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@storybook/builder-vite": "10.0.7", + "@storybook/svelte": "10.0.7", + "@storybook/svelte-vite": "10.0.7" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "storybook": "^10.0.7", + "svelte": "^5.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@sveltejs/acorn-typescript": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@sveltejs/acorn-typescript/-/acorn-typescript-1.0.5.tgz", + "integrity": "sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^8.9.0" + } + }, + "node_modules/@sveltejs/adapter-static": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@sveltejs/adapter-static/-/adapter-static-3.0.10.tgz", + "integrity": "sha512-7D9lYFWJmB7zxZyTE/qxjksvMqzMuYrrsyh1f4AlZqeZeACPRySjbC3aFiY55wb1tWUaKOQG9PVbm74JcN2Iew==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@sveltejs/kit": "^2.0.0" + } + }, + "node_modules/@sveltejs/kit": { + "version": "2.49.2", + "resolved": "https://registry.npmjs.org/@sveltejs/kit/-/kit-2.49.2.tgz", + "integrity": "sha512-Vp3zX/qlwerQmHMP6x0Ry1oY7eKKRcOWGc2P59srOp4zcqyn+etJyQpELgOi4+ZSUgteX8Y387NuwruLgGXLUQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@sveltejs/acorn-typescript": "^1.0.5", + "@types/cookie": 
"^0.6.0", + "acorn": "^8.14.1", + "cookie": "^0.6.0", + "devalue": "^5.3.2", + "esm-env": "^1.2.2", + "kleur": "^4.1.5", + "magic-string": "^0.30.5", + "mrmime": "^2.0.0", + "sade": "^1.8.1", + "set-cookie-parser": "^2.6.0", + "sirv": "^3.0.0" + }, + "bin": { + "svelte-kit": "svelte-kit.js" + }, + "engines": { + "node": ">=18.13" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0", + "@sveltejs/vite-plugin-svelte": "^3.0.0 || ^4.0.0-next.1 || ^5.0.0 || ^6.0.0-next.0", + "svelte": "^4.0.0 || ^5.0.0-next.0", + "vite": "^5.0.3 || ^6.0.0 || ^7.0.0-beta.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + } + } + }, + "node_modules/@sveltejs/vite-plugin-svelte": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@sveltejs/vite-plugin-svelte/-/vite-plugin-svelte-6.2.1.tgz", + "integrity": "sha512-YZs/OSKOQAQCnJvM/P+F1URotNnYNeU3P2s4oIpzm1uFaqUEqRxUB0g5ejMjEb5Gjb9/PiBI5Ktrq4rUUF8UVQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@sveltejs/vite-plugin-svelte-inspector": "^5.0.0", + "debug": "^4.4.1", + "deepmerge": "^4.3.1", + "magic-string": "^0.30.17", + "vitefu": "^1.1.1" + }, + "engines": { + "node": "^20.19 || ^22.12 || >=24" + }, + "peerDependencies": { + "svelte": "^5.0.0", + "vite": "^6.3.0 || ^7.0.0" + } + }, + "node_modules/@sveltejs/vite-plugin-svelte-inspector": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@sveltejs/vite-plugin-svelte-inspector/-/vite-plugin-svelte-inspector-5.0.0.tgz", + "integrity": "sha512-iwQ8Z4ET6ZFSt/gC+tVfcsSBHwsqc6RumSaiLUkAurW3BCpJam65cmHw0oOlDMTO0u+PZi9hilBRYN+LZNHTUQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.4.1" + }, + "engines": { + "node": "^20.19 || ^22.12 || >=24" + }, + "peerDependencies": { + "@sveltejs/vite-plugin-svelte": "^6.0.0-next.0", + "svelte": "^5.0.0", + "vite": "^6.3.0 || ^7.0.0" + } + }, + "node_modules/@swc/helpers": { + "version": "0.5.17", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.17.tgz", + "integrity": "sha512-5IKx/Y13RsYd+sauPb2x+U/xZikHjolzfuDgTAl/Tdf3Q8rslRvC19NKDLgAJQ6wsqADk10ntlv08nPFw/gO/A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@tailwindcss/forms": { + "version": "0.5.10", + "resolved": "https://registry.npmjs.org/@tailwindcss/forms/-/forms-0.5.10.tgz", + "integrity": "sha512-utI1ONF6uf/pPNO68kmN1b8rEwNXv3czukalo8VtJH8ksIkZXr3Q3VYudZLkCsDd4Wku120uF02hYK25XGPorw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mini-svg-data-uri": "^1.2.3" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || >= 3.0.0-alpha.1 || >= 4.0.0-alpha.20 || >= 4.0.0-beta.1" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.11.tgz", + "integrity": "sha512-yzhzuGRmv5QyU9qLNg4GTlYI6STedBWRE7NjxP45CsFYYq9taI0zJXZBMqIC/c8fViNLhmrbpSFS57EoxUmD6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "enhanced-resolve": "^5.18.1", + "jiti": "^2.4.2", + "lightningcss": "1.30.1", + "magic-string": "^0.30.17", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.11" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.11.tgz", + "integrity": "sha512-Q69XzrtAhuyfHo+5/HMgr1lAiPP/G40OMFAnws7xcFEYqcypZmdW8eGXaOUIeOl1dzPJBPENXgbjsOyhg2nkrg==", + "dev": true, + "hasInstallScript": true, + "license": 
"MIT", + "dependencies": { + "detect-libc": "^2.0.4", + "tar": "^7.4.3" + }, + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.11", + "@tailwindcss/oxide-darwin-arm64": "4.1.11", + "@tailwindcss/oxide-darwin-x64": "4.1.11", + "@tailwindcss/oxide-freebsd-x64": "4.1.11", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.11", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.11", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.11", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.11", + "@tailwindcss/oxide-linux-x64-musl": "4.1.11", + "@tailwindcss/oxide-wasm32-wasi": "4.1.11", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.11", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.11" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.11.tgz", + "integrity": "sha512-3IfFuATVRUMZZprEIx9OGDjG3Ou3jG4xQzNTvjDoKmU9JdmoCohQJ83MYd0GPnQIu89YoJqvMM0G3uqLRFtetg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.11.tgz", + "integrity": "sha512-ESgStEOEsyg8J5YcMb1xl8WFOXfeBmrhAwGsFxxB2CxY9evy63+AtpbDLAyRkJnxLy2WsD1qF13E97uQyP1lfQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.11.tgz", + "integrity": "sha512-EgnK8kRchgmgzG6jE10UQNaH9Mwi2n+yw1jWmof9Vyg2lpKNX2ioe7CJdf9M5f8V9uaQxInenZkOxnTVL3fhAw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.11.tgz", + "integrity": "sha512-xdqKtbpHs7pQhIKmqVpxStnY1skuNh4CtbcyOHeX1YBE0hArj2romsFGb6yUmzkq/6M24nkxDqU8GYrKrz+UcA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.11.tgz", + "integrity": "sha512-ryHQK2eyDYYMwB5wZL46uoxz2zzDZsFBwfjssgB7pzytAeCCa6glsiJGjhTEddq/4OsIjsLNMAiMlHNYnkEEeg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.11.tgz", + "integrity": "sha512-mYwqheq4BXF83j/w75ewkPJmPZIqqP1nhoghS9D57CLjsh3Nfq0m4ftTotRYtGnZd3eCztgbSPJ9QhfC91gDZQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.11", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.11.tgz", + "integrity": "sha512-m/NVRFNGlEHJrNVk3O6I9ggVuNjXHIPoD6bqay/pubtYC9QIdAMpS+cswZQPBLvVvEF6GtSNONbDkZrjWZXYNQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.11.tgz", + "integrity": "sha512-YW6sblI7xukSD2TdbbaeQVDysIm/UPJtObHJHKxDEcW2exAtY47j52f8jZXkqE1krdnkhCMGqP3dbniu1Te2Fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.11.tgz", + "integrity": "sha512-e3C/RRhGunWYNC3aSF7exsQkdXzQ/M+aYuZHKnw4U7KQwTJotnWsGOIVih0s2qQzmEzOFIJ3+xt7iq67K/p56Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.11.tgz", + "integrity": "sha512-Xo1+/GU0JEN/C/dvcammKHzeM6NqKovG+6921MR6oadee5XPBaKOumrJCXvopJ/Qb5TH7LX/UAywbqrP4lax0g==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@emnapi/wasi-threads": "^1.0.2", + "@napi-rs/wasm-runtime": "^0.2.11", + "@tybys/wasm-util": "^0.9.0", + "tslib": "^2.8.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": { + "version": "1.4.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.0.2", + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": { + "version": "1.4.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": { + "version": "0.2.11", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@tybys/wasm-util": "^0.9.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": { + "version": "0.9.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": { + "version": "2.8.0", + "dev": true, + "inBundle": true, + "license": "0BSD", + "optional": true + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.11", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.11.tgz", + "integrity": "sha512-UgKYx5PwEKrac3GPNPf6HVMNhUIGuUh4wlDFR2jYYdkX6pL/rn73zTq/4pzUm8fOjAn5L8zDeHp9iXmUGOXZ+w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.11.tgz", + "integrity": "sha512-YfHoggn1j0LK7wR82TOucWc5LDCguHnoS879idHekmmiR7g9HUtMw9MI0NHatS28u/Xlkfi9w5RJWgz2Dl+5Qg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/typography": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.16.tgz", + "integrity": "sha512-0wDLwCVF5V3x3b1SGXPCDcdsbDHMBe+lkFzBRaHeLvNi+nrrnZ1lA18u+OTWO8iSWU2GxUOCvlXtDuqftc1oiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash.castarray": "^4.4.0", + "lodash.isplainobject": "^4.0.6", + "lodash.merge": "^4.6.2", + "postcss-selector-parser": "6.0.10" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1" + } + }, + "node_modules/@tailwindcss/vite": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.11.tgz", + "integrity": "sha512-RHYhrR3hku0MJFRV+fN2gNbDNEh3dwKvY8XJvTxCSXeMOsCRSr+uKvDWQcbizrHgjML6ZmTE5OwMrl5wKcujCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tailwindcss/node": "4.1.11", + "@tailwindcss/oxide": "4.1.11", + "tailwindcss": "4.1.11" + }, + "peerDependencies": { + "vite": "^5.2.0 || ^6 || ^7" + } + }, + "node_modules/@testing-library/dom": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.0.tgz", + "integrity": "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "chalk": "^4.1.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/jest-dom": { + "version": "6.6.3", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.6.3.tgz", + "integrity": "sha512-IteBhl4XqYNkM54f4ejhLRJiZNqcSCoXUOG2CPK7qbD322KjQozM4kHQOfkG2oln9b9HTYqs+Sae8vBATubxxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "chalk": "^3.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "lodash": "^4.17.21", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": "sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/katex": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.16.7.tgz", + "integrity": "sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": 
"https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.16.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.16.5.tgz", + "integrity": "sha512-bJFoMATwIGaxxx8VJPeM8TonI8t579oRvgAuT8zFugJsJZgzqv0Fu8Mhp68iecjzG7cnN3mO2dJQ5uUM2EFrgQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/react": { + "version": "19.1.8", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz", + "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.0.2" + } + }, + "node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.37.0.tgz", + "integrity": "sha512-jsuVWeIkb6ggzB+wPCsR4e6loj+rM72ohW6IBn2C+5NCvfUVY8s33iFPySSVXqtm5Hu29Ne/9bnA0JmyLmgenA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.37.0", + "@typescript-eslint/type-utils": "8.37.0", + "@typescript-eslint/utils": "8.37.0", + "@typescript-eslint/visitor-keys": "8.37.0", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.37.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.37.0.tgz", + "integrity": "sha512-kVIaQE9vrN9RLCQMQ3iyRlVJpTiDUY6woHGb30JDkfJErqrQEmtdWH3gV0PBAfGZgQXoqzXOO0T3K6ioApbbAA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "8.37.0", + "@typescript-eslint/types": "8.37.0", + 
"@typescript-eslint/typescript-estree": "8.37.0", + "@typescript-eslint/visitor-keys": "8.37.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.37.0.tgz", + "integrity": "sha512-BIUXYsbkl5A1aJDdYJCBAo8rCEbAvdquQ8AnLb6z5Lp1u3x5PNgSSx9A/zqYc++Xnr/0DVpls8iQ2cJs/izTXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.37.0", + "@typescript-eslint/types": "^8.37.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.37.0.tgz", + "integrity": "sha512-0vGq0yiU1gbjKob2q691ybTg9JX6ShiVXAAfm2jGf3q0hdP6/BruaFjL/ManAR/lj05AvYCH+5bbVo0VtzmjOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.37.0", + "@typescript-eslint/visitor-keys": "8.37.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.37.0.tgz", + "integrity": "sha512-1/YHvAVTimMM9mmlPvTec9NP4bobA1RkDbMydxG8omqwJJLEW/Iy2C4adsAESIXU3WGLXFHSZUU+C9EoFWl4Zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.37.0.tgz", + "integrity": "sha512-SPkXWIkVZxhgwSwVq9rqj/4VFo7MnWwVaRNznfQDc/xPYHjXnPfLWn+4L6FF1cAz6e7dsqBeMawgl7QjUMj4Ow==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.37.0", + "@typescript-eslint/typescript-estree": "8.37.0", + "@typescript-eslint/utils": "8.37.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.37.0.tgz", + "integrity": "sha512-ax0nv7PUF9NOVPs+lmQ7yIE7IQmAf8LGcXbMvHX5Gm+YJUYNAl340XkGnrimxZ0elXyoQJuN5sbg6C4evKA4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + 
"node_modules/@typescript-eslint/typescript-estree": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.37.0.tgz", + "integrity": "sha512-zuWDMDuzMRbQOM+bHyU4/slw27bAUEcKSKKs3hcv2aNnc/tvE/h7w60dwVw8vnal2Pub6RT1T7BI8tFZ1fE+yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.37.0", + "@typescript-eslint/tsconfig-utils": "8.37.0", + "@typescript-eslint/types": "8.37.0", + "@typescript-eslint/visitor-keys": "8.37.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.37.0.tgz", + "integrity": "sha512-TSFvkIW6gGjN2p6zbXo20FzCABbyUAuq6tBvNRGsKdsSQ6a7rnV6ADfZ7f4iI3lIiXc4F4WWvtUfDw9CJ9pO5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.37.0", + "@typescript-eslint/types": "8.37.0", + "@typescript-eslint/typescript-estree": "8.37.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.37.0.tgz", + "integrity": "sha512-YzfhzcTnZVPiLfP/oeKtDp2evwvHLMe0LOy7oe+hb9KKIumLNohYS9Hgp1ifwpu42YWxhZE8yieggz6JpqO/1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.37.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@vitest/browser": { + 
"version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/browser/-/browser-3.2.4.tgz", + "integrity": "sha512-tJxiPrWmzH8a+w9nLKlQMzAKX/7VjFs50MWgcAj7p9XQ7AQ9/35fByFYptgPELyLw+0aixTnC4pUWV+APcZ/kw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@testing-library/dom": "^10.4.0", + "@testing-library/user-event": "^14.6.1", + "@vitest/mocker": "3.2.4", + "@vitest/utils": "3.2.4", + "magic-string": "^0.30.17", + "sirv": "^3.0.1", + "tinyrainbow": "^2.0.0", + "ws": "^8.18.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "playwright": "*", + "vitest": "3.2.4", + "webdriverio": "^7.0.0 || ^8.0.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "playwright": { + "optional": true + }, + "safaridriver": { + "optional": true + }, + "webdriverio": { + "optional": true + } + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": 
"sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/ast-types": { + "version": "0.16.1", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.16.1.tgz", + "integrity": "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, + "node_modules/axe-core": { + "version": "4.10.3", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.3.tgz", + "integrity": "sha512-Xm7bpRXnDSX2YE2YFfBk2FnF0ep6tmG7xPh8iHee8MIcrgq762Nkce856dYtJYLkuIoYZvGfTs/PbZhideTcEg==", + "dev": true, + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/basic-auth": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", + "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/bits-ui": { + "version": "2.14.4", + "resolved": "https://registry.npmjs.org/bits-ui/-/bits-ui-2.14.4.tgz", + "integrity": "sha512-W6kenhnbd/YVvur+DKkaVJ6GldE53eLewur5AhUCqslYQ0vjZr8eWlOfwZnMiPB+PF5HMVqf61vXBvmyrAmPWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.1", + "@floating-ui/dom": "^1.7.1", + "esm-env": "^1.1.2", + "runed": "^0.35.1", + "svelte-toolbelt": "^0.10.6", + "tabbable": "^6.2.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/huntabyte" + }, + "peerDependencies": { + "@internationalized/date": "^3.8.1", + "svelte": "^5.33.0" + } + }, + "node_modules/bits-ui/node_modules/runed": { + "version": "0.35.1", + 
"resolved": "https://registry.npmjs.org/runed/-/runed-0.35.1.tgz", + "integrity": "sha512-2F4Q/FZzbeJTFdIS/PuOoPRSm92sA2LhzTnv6FXhCoENb3huf5+fDuNOg1LNvGOouy3u/225qxmuJvcV3IZK5Q==", + "dev": true, + "funding": [ + "https://github.com/sponsors/huntabyte", + "https://github.com/sponsors/tglide" + ], + "license": "MIT", + "dependencies": { + "dequal": "^2.0.3", + "esm-env": "^1.0.0", + "lz-string": "^1.5.0" + }, + "peerDependencies": { + "@sveltejs/kit": "^2.21.0", + "svelte": "^5.7.0" + }, + "peerDependenciesMeta": { + "@sveltejs/kit": { + "optional": true + } + } + }, + "node_modules/bits-ui/node_modules/svelte-toolbelt": { + "version": "0.10.6", + "resolved": "https://registry.npmjs.org/svelte-toolbelt/-/svelte-toolbelt-0.10.6.tgz", + "integrity": "sha512-YWuX+RE+CnWYx09yseAe4ZVMM7e7GRFZM6OYWpBKOb++s+SQ8RBIMMe+Bs/CznBMc0QPLjr+vDBxTAkozXsFXQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/huntabyte" + ], + "dependencies": { + "clsx": "^2.1.1", + "runed": "^0.35.1", + "style-to-object": "^1.0.8" + }, + "engines": { + "node": ">=18", + "pnpm": ">=8.7.0" + }, + "peerDependencies": { + "svelte": "^5.30.2" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": 
"sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chai": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.1.tgz", + "integrity": "sha512-5nFxhUrX0PqtyogoYOA8IPswy5sZFTOsBFl/9bNsmDLgsxYTzSZQJDPppDnZPTQbzSEm0hqGjWPzRemQCYbD6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/chromatic": { + "version": "12.2.0", + "resolved": "https://registry.npmjs.org/chromatic/-/chromatic-12.2.0.tgz", + "integrity": 
"sha512-GswmBW9ZptAoTns1BMyjbm55Z7EsIJnUvYKdQqXIBZIKbGErmpA+p4c0BYA+nzw5B0M+rb3Iqp1IaH8TFwIQew==", + "dev": true, + "license": "MIT", + "bin": { + "chroma": "dist/bin.js", + "chromatic": "dist/bin.js", + "chromatic-cli": "dist/bin.js" + }, + "peerDependencies": { + "@chromatic-com/cypress": "^0.*.* || ^1.0.0", + "@chromatic-com/playwright": "^0.*.* || ^1.0.0" + }, + "peerDependenciesMeta": { + "@chromatic-com/cypress": { + "optional": true + }, + "@chromatic-com/playwright": { + "optional": true + } + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/corser": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz", + "integrity": "sha512-utCYNzRSQIZNPIcGZdQc92UVJYAhtGAteCFg0yRaFm8f0P+CPtyGyHXJcGXnffjCybUCEx3FQ2G7U3/o9eIkVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css.escape": { + "version": 
"1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dedent": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.6.0.tgz", + "integrity": "sha512-F1Z+5UCFpmQUzJa11agbyPVMbpgT/qA3/SKyJ1jyBgm7dUcUEa8v9JwDkerSQXfakBwFljIxhOJqGkjUwZ9FSA==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/dedent-js": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dedent-js/-/dedent-js-1.0.1.tgz", + "integrity": "sha512-OUepMozQULMLUmhxS95Vudo0jb0UchLimi3+pQ2plj61Fcy8axbP9hbiD4Sz6DPqn6XG3kfmziVfQ1rSys5AJQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": 
"sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-libc": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/devalue": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/devalue/-/devalue-5.6.2.tgz", + "integrity": "sha512-nPRkjWzzDQlsejL1WVifk5rvcFi/y1onBRxjaFMjZeR9mFpqu2gmAZ9xUB9/IEanEP/vBtGeGganC/GO1fmufg==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dexie": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/dexie/-/dexie-4.0.11.tgz", + "integrity": "sha512-SOKO002EqlvBYYKQSew3iymBoN2EQ4BDw/3yprjh7kAfFzjBYkaMNa/pZvcA7HSWlcKSQb9XhPe3wKyQ0x4A8A==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT" + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.2", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.2.tgz", + "integrity": "sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": 
"sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-toolkit": { + "version": "1.39.7", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.7.tgz", + "integrity": "sha512-ek/wWryKouBrZIjkwW2BFf91CWOIMvoy2AE5YYgUrfWsJQM2Su1LoLtrw8uusEpN9RfqLlV/0FVNjT0WMv8Bxw==", + "dev": true, + "license": "MIT", + "workspaces": [ + "docs", + "benchmarks" + ] + }, + "node_modules/esbuild": { + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.8.tgz", + "integrity": "sha512-vVC0USHGtMi8+R4Kz8rt6JhEWLxsv9Rnu/lGYbPR8u47B+DCBksq9JarW0zOO7bs37hyOK1l2/oqtbciutL5+Q==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "peer": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.8", + "@esbuild/android-arm": "0.25.8", + "@esbuild/android-arm64": "0.25.8", + "@esbuild/android-x64": "0.25.8", + "@esbuild/darwin-arm64": "0.25.8", + "@esbuild/darwin-x64": "0.25.8", + "@esbuild/freebsd-arm64": "0.25.8", + "@esbuild/freebsd-x64": "0.25.8", + "@esbuild/linux-arm": "0.25.8", + "@esbuild/linux-arm64": "0.25.8", + "@esbuild/linux-ia32": "0.25.8", + "@esbuild/linux-loong64": "0.25.8", + "@esbuild/linux-mips64el": "0.25.8", + "@esbuild/linux-ppc64": "0.25.8", + "@esbuild/linux-riscv64": "0.25.8", + "@esbuild/linux-s390x": "0.25.8", + "@esbuild/linux-x64": "0.25.8", + "@esbuild/netbsd-arm64": "0.25.8", + "@esbuild/netbsd-x64": "0.25.8", + "@esbuild/openbsd-arm64": "0.25.8", + "@esbuild/openbsd-x64": "0.25.8", + "@esbuild/openharmony-arm64": "0.25.8", + "@esbuild/sunos-x64": "0.25.8", + "@esbuild/win32-arm64": "0.25.8", + "@esbuild/win32-ia32": "0.25.8", + "@esbuild/win32-x64": "0.25.8" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.31.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.31.0.tgz", + "integrity": "sha512-QldCVh/ztyKJJZLr4jXNUByx3gR+TDYZCRXEktiZoUR3PGy4qCmSbkxcIle8GEwGpb5JBZazlaJ/CxLidXdEbQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.0", + "@eslint/config-helpers": "^0.3.0", + "@eslint/core": "^0.15.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.31.0", + 
"@eslint/plugin-kit": "^0.3.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-config-prettier": { + "version": "10.1.8", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.8.tgz", + "integrity": "sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==", + "dev": true, + "license": "MIT", + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "funding": { + "url": "https://opencollective.com/eslint-config-prettier" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, + "node_modules/eslint-plugin-storybook": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/eslint-plugin-storybook/-/eslint-plugin-storybook-10.0.7.tgz", + "integrity": "sha512-qOQq9KdT1jsBgT3qsxUH2n67aj1WR8D1XCoER8Q6yuVlS5TimNwk1mZeWkXVf/o4RQQT6flT2y5cG2gPLZPvJA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/utils": "^8.8.1" + }, + "peerDependencies": { + "eslint": ">=8", + "storybook": "^10.0.7" + } + }, + "node_modules/eslint-plugin-svelte": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-svelte/-/eslint-plugin-svelte-3.11.0.tgz", + "integrity": "sha512-KliWlkieHyEa65aQIkRwUFfHzT5Cn4u3BQQsu3KlkJOs7c1u7ryn84EWaOjEzilbKgttT4OfBURA8Uc4JBSQIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.6.1", + "@jridgewell/sourcemap-codec": "^1.5.0", + "esutils": "^2.0.3", + "globals": "^16.0.0", + "known-css-properties": "^0.37.0", + "postcss": "^8.4.49", + "postcss-load-config": "^3.1.4", + "postcss-safe-parser": "^7.0.0", + "semver": "^7.6.3", + "svelte-eslint-parser": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://github.com/sponsors/ota-meshi" + }, + "peerDependencies": { + "eslint": "^8.57.1 || ^9.0.0", + "svelte": "^3.37.0 || ^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "svelte": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + 
"node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esm-env": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz", + "integrity": "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==", + "license": "MIT" + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrap": { + "version": "1.4.9", + "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.9.tgz", + "integrity": "sha512-3OMlcd0a03UGuZpPeUC1HxR3nA23l+HEyCiZw3b3FumJIN9KphoGzDJKMXI1S72jVS1dsenDyQC0kJlO1U9E1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": 
"sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true, + "license": "MIT" + }, + "node_modules/expect-type": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==", + "dev": true, + "license": 
"MIT" + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/filesize": { + "version": "10.1.6", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-10.1.6.tgz", + "integrity": "sha512-sJslQKU2uM33qH5nqewAwVB2QgR6w1aMNsYUp3aN5rMRyXEwJGmZvaWzeJFNTOXWlHQyBFCWrdj3fV/fsTOX8w==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 10.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "16.3.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.3.0.tgz", + "integrity": "sha512-bqWEnJ1Nt3neqx2q5SFfGS8r/ahumIakg3HcwtNlrVlwXIeNumWn/c7Pn/wKzGhf6SaW6H6uWXLqC30STCMchQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": 
"2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-dom": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-dom/-/hast-util-from-dom-5.0.1.tgz", + "integrity": "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "@types/hast": "^3.0.0", + "hastscript": "^9.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz", + "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.1.0", + "hast-util-from-parse5": "^8.0.0", + "parse5": "^7.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html-isomorphic": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hast-util-from-html-isomorphic/-/hast-util-from-html-isomorphic-2.0.0.tgz", + "integrity": "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-from-dom": "^5.0.0", + "hast-util-from-html": "^2.0.0", + "unist-util-remove-position": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/hast-util-from-html/node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html/node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": 
"sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/hast-util-is-element": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", + "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-sanitize": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/hast-util-sanitize/-/hast-util-sanitize-5.0.2.tgz", + "integrity": "sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "unist-util-position": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/hast-util-to-text": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", + "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==", + 
"license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unist-util-find-after": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/highlight.js": { + "version": "11.11.1", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", + "integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-server": { + "version": "14.1.1", + "resolved": 
"https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz", + "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-auth": "^2.0.1", + "chalk": "^4.1.2", + "corser": "^2.0.1", + "he": "^1.2.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy": "^1.18.1", + "mime": "^1.6.0", + "minimist": "^1.2.6", + "opener": "^1.5.1", + "portfinder": "^1.0.28", + "secure-compare": "3.0.1", + "union": "~0.5.0", + "url-join": "^4.0.1" + }, + "bin": { + "http-server": "bin/http-server" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/immutable": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.1.4.tgz", + "integrity": "sha512-p6u1bG3YSnINT5RQmx/yRZBpenIl30kVxkTLDyHLIMk0gict704Q9n+thfDI7lTRm9vXdDYutVzXhzcThxTnXA==", + "dev": true, + "license": "MIT" + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==", + "license": "MIT" + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jiti": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.4.2.tgz", + "integrity": "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/katex": { + 
"version": "0.16.22", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz", + "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==", + "dev": true, + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/known-css-properties": { + "version": "0.37.0", + "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.37.0.tgz", + "integrity": "sha512-JCDrsP4Z1Sb9JwG0aJ8Eo2r7k4Ou5MwmThS/6lcIe1ICyb7UBJKGRIUUdqc2ASdE/42lgz6zFUnzAIhtXnBVrQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.1.tgz", + "integrity": "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.30.1", + "lightningcss-darwin-x64": "1.30.1", + "lightningcss-freebsd-x64": "1.30.1", + "lightningcss-linux-arm-gnueabihf": "1.30.1", + "lightningcss-linux-arm64-gnu": "1.30.1", + "lightningcss-linux-arm64-musl": "1.30.1", + "lightningcss-linux-x64-gnu": "1.30.1", + "lightningcss-linux-x64-musl": "1.30.1", + "lightningcss-win32-arm64-msvc": "1.30.1", + "lightningcss-win32-x64-msvc": "1.30.1" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz", + "integrity": "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.1.tgz", + "integrity": "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==", + "cpu": [ + "x64" + ], + "dev": 
true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.1.tgz", + "integrity": "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.1.tgz", + "integrity": "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.1.tgz", + "integrity": "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.1.tgz", + "integrity": "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.1.tgz", + "integrity": "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.1.tgz", + "integrity": "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + 
"version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.1.tgz", + "integrity": "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.1.tgz", + "integrity": "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/locate-character": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", + "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.castarray": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.castarray/-/lodash.castarray-4.4.0.tgz", + "integrity": "sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", 
+ "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loupe": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.4.tgz", + "integrity": "sha512-wJzkKwJrheKtknCOKNEtDK4iqg/MxmZheEMtSTYvnzRdEYaZzmgH976nenp8WdJRdx5Vc1X/9MO0Oszl6ezeXg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lowlight": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz", + "integrity": "sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.0.0", + "highlight.js": "~11.11.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast/-/mdast-3.0.0.tgz", + "integrity": "sha512-xySmf8g4fPKMeC07jXGz971EkLbWAJ83s4US2Tj9lEdnZ142UP5grN73H1Xd3HzrdbU5o9GYYP/y8F9ZSwLE9g==", + "dev": true, + "license": "MIT" + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + 
"resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/mdast-util-from-markdown/node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-math": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-math/-/mdast-util-math-3.0.0.tgz", + "integrity": "sha512-Tl9GBNeG/AhJnQM221bJR2HPvLOSnLE/T9cJI9tlc6zwQk2nPk/4f0cHkOdEixQPC/j8UtKDdITswvLAy1OZ1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "longest-streak": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.1.0", + "unist-util-remove-position": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-newline-to-break": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-newline-to-break/-/mdast-util-newline-to-break-2.0.0.tgz", + "integrity": "sha512-MbgeFca0hLYIEx/2zGsszCSEJJ1JSCdiY5xQxRcLDDGa8EPvlLPupJ4DSajbMPAnC0je8jfb9TiUATnxxrHUog==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-find-and-replace": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": 
"sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdsvex": { + "version": "0.12.6", + "resolved": "https://registry.npmjs.org/mdsvex/-/mdsvex-0.12.6.tgz", + "integrity": "sha512-pupx2gzWh3hDtm/iDW4WuCpljmyHbHi34r7ktOqpPGvyiM4MyfNgdJ3qMizXdgCErmvYC9Nn/qyjePy+4ss9Wg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.4", + "@types/unist": "^2.0.3", + "prism-svelte": "^0.4.7", + "prismjs": "^1.17.1", + "unist-util-visit": "^2.0.1", + "vfile-message": "^2.0.4" + }, + "peerDependencies": { + "svelte": "^3.56.0 || ^4.0.0 || ^5.0.0-next.120" + } + }, + "node_modules/mdsvex/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdsvex/node_modules/unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdsvex/node_modules/unist-util-visit-parents": { + 
"version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + 
"micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-math": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-math/-/micromark-extension-math-3.1.0.tgz", + "integrity": "sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/katex": "^0.16.0", + "devlop": "^1.0.0", + "katex": "^0.16.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + 
"funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + 
"node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + 
"node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/mini-svg-data-uri": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/mini-svg-data-uri/-/mini-svg-data-uri-1.4.4.tgz", + "integrity": "sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==", + "dev": true, + "license": "MIT", + "bin": { + "mini-svg-data-uri": "cli.js" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mode-watcher": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/mode-watcher/-/mode-watcher-1.1.0.tgz", + "integrity": "sha512-mUT9RRGPDYenk59qJauN1rhsIMKBmWA3xMF+uRwE8MW/tjhaDSCCARqkSuDTq8vr4/2KcAxIGVjACxTjdk5C3g==", + "license": "MIT", + "dependencies": { + "runed": "^0.25.0", + "svelte-toolbelt": "^0.7.1" + }, + "peerDependencies": { + "svelte": "^5.27.0" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-addon-api": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "dev": true, + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": 
"sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/pdfjs-dist": { + "version": "5.4.54", + "resolved": "https://registry.npmjs.org/pdfjs-dist/-/pdfjs-dist-5.4.54.tgz", + "integrity": "sha512-TBAiTfQw89gU/Z4LW98Vahzd2/LoCFprVGvGbTgFt+QCB1F+woyOPmNNVgLa6djX9Z9GGTnj7qE1UzpOVJiINw==", + "license": "Apache-2.0", + "engines": { + "node": ">=20.16.0 || >=22.3.0" + }, + "optionalDependencies": { + "@napi-rs/canvas": "^0.1.74" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/playwright": { + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.56.1.tgz", + "integrity": "sha512-aFi5B0WovBHTEvpM3DzXTUaeN6eN0qWnTkKx4NQaH4Wvcmc153PdaY2UBdSYKaGYw+UyWXSVyxDUg5DoPEttjw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright-core": "1.56.1" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "fsevents": "2.3.2" + } + }, + "node_modules/playwright-core": { + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.56.1.tgz", + 
"integrity": "sha512-hutraynyn31F+Bifme+Ps9Vq59hKuUCz7H1kDOcBs+2oGguKkWTU50bBWrtz34OUWmIwpBTWDxaRPXrIXkgvmQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "playwright-core": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/portfinder": { + "version": "1.0.38", + "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.38.tgz", + "integrity": "sha512-rEwq/ZHlJIKw++XtLAO8PPuOQA/zaPJOZJ37BVuN97nLpMJeuDVLVGRwbFoBgLudgdTMP2hdRJP++H+8QOA3vg==", + "dev": true, + "license": "MIT", + "dependencies": { + "async": "^3.2.6", + "debug": "^4.3.6" + }, + "engines": { + "node": ">= 10.12" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-load-config": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.4.tgz", + "integrity": "sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "lilconfig": "^2.0.5", + "yaml": "^1.10.2" + }, + "engines": { + "node": ">= 10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-load-config/node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss-safe-parser": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-7.0.1.tgz", + "integrity": "sha512-0AioNCJZ2DPYz5ABT6bddIqlhgwhpHZ/l65YAYo0BCIn0xiDpsnTHz0gnoTGk0OXZW0JRs+cDwL8u/teRdz+8A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss-safe-parser" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-scss": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.9.tgz", + "integrity": "sha512-AjKOeiwAitL/MXxQW2DliT28EKukvvbEWx3LBmJIRN8KfBGZbRTxNYW0kSqi1COiTZ57nZ9NW06S6ux//N1c9A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss-scss" + }, + { + "type": "github", + "url": 
"https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.4.29" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-plugin-svelte": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/prettier-plugin-svelte/-/prettier-plugin-svelte-3.4.0.tgz", + "integrity": "sha512-pn1ra/0mPObzqoIQn/vUTR3ZZI6UuZ0sHqMK5x2jMLGrs53h0sXhkVuDcrlssHwIMk7FYrMjHBPoUSyyEEDlBQ==", + "dev": true, + "license": "MIT", + "peer": true, + "peerDependencies": { + "prettier": "^3.0.0", + "svelte": "^3.2.0 || ^4.0.0-next.0 || ^5.0.0-next.0" + } + }, + "node_modules/prettier-plugin-tailwindcss": { + "version": "0.6.14", + "resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.6.14.tgz", + "integrity": "sha512-pi2e/+ZygeIqntN+vC573BcW5Cve8zUB0SSAGxqpB4f96boZF4M3phPVoOFCeypwkpRYdi7+jQ5YJJUwrkGUAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "@ianvs/prettier-plugin-sort-imports": "*", + "@prettier/plugin-hermes": "*", + "@prettier/plugin-oxc": "*", + "@prettier/plugin-pug": "*", + "@shopify/prettier-plugin-liquid": "*", + "@trivago/prettier-plugin-sort-imports": "*", + "@zackad/prettier-plugin-twig": "*", + "prettier": "^3.0", + "prettier-plugin-astro": "*", + "prettier-plugin-css-order": "*", + "prettier-plugin-import-sort": "*", + "prettier-plugin-jsdoc": "*", + "prettier-plugin-marko": "*", + "prettier-plugin-multiline-arrays": "*", + "prettier-plugin-organize-attributes": "*", + "prettier-plugin-organize-imports": "*", + "prettier-plugin-sort-imports": "*", + "prettier-plugin-style-order": "*", + "prettier-plugin-svelte": "*" + }, + "peerDependenciesMeta": { + "@ianvs/prettier-plugin-sort-imports": { + "optional": true + }, + "@prettier/plugin-hermes": { + "optional": true + }, + "@prettier/plugin-oxc": { + "optional": true + }, + "@prettier/plugin-pug": { + "optional": true + }, + "@shopify/prettier-plugin-liquid": { + "optional": true + }, + "@trivago/prettier-plugin-sort-imports": { + "optional": true + }, + "@zackad/prettier-plugin-twig": { + "optional": true + }, + "prettier-plugin-astro": { + "optional": true + }, + "prettier-plugin-css-order": { + "optional": true + }, + "prettier-plugin-import-sort": { + "optional": true + }, + 
"prettier-plugin-jsdoc": { + "optional": true + }, + "prettier-plugin-marko": { + "optional": true + }, + "prettier-plugin-multiline-arrays": { + "optional": true + }, + "prettier-plugin-organize-attributes": { + "optional": true + }, + "prettier-plugin-organize-imports": { + "optional": true + }, + "prettier-plugin-sort-imports": { + "optional": true + }, + "prettier-plugin-style-order": { + "optional": true + }, + "prettier-plugin-svelte": { + "optional": true + } + } + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prism-svelte": { + "version": "0.4.7", + "resolved": "https://registry.npmjs.org/prism-svelte/-/prism-svelte-0.4.7.tgz", + "integrity": "sha512-yABh19CYbM24V7aS7TuPYRNMqthxwbvx6FF/Rw920YbyBWO3tnyPIqRMgHuSVsLmuHkkBS1Akyof463FVdkeDQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prompts/node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", + "integrity": 
"sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", + "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz", + "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.26.0" + }, + "peerDependencies": { + "react": "^19.1.0" + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT" + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/recast": { + "version": "0.23.11", + "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.11.tgz", + "integrity": "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ast-types": "^0.16.1", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", + "tslib": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/rehype-highlight": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/rehype-highlight/-/rehype-highlight-7.0.2.tgz", + "integrity": "sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-to-text": "^4.0.0", + "lowlight": "^3.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-katex": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", + "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/katex": "^0.16.0", + "hast-util-from-html-isomorphic": "^2.0.0", + "hast-util-to-text": "^4.0.0", + "katex": "^0.16.0", + "unist-util-visit-parents": "^6.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-stringify": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/rehype-stringify/-/rehype-stringify-10.0.1.tgz", + "integrity": "sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-to-html": "^9.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark": { + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/remark/-/remark-15.0.1.tgz", + "integrity": "sha512-Eht5w30ruCXgFmxVUSlNWQ9iiimq07URKeFS3hNc8cUWy1llX4KDWfyEDZRycMc+znsN9Ux5/tJ/BFdgdOwA3A==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-breaks": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-breaks/-/remark-breaks-4.0.0.tgz", + "integrity": "sha512-IjEjJOkH4FuJvHZVIW0QCDWxcG96kCq7An/KVH2NfJe6rKZU2AsHeB3OEjPNRxi4QC34Xdx7I2KGYn6IpT7gxQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-newline-to-break": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-html": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/remark-html/-/remark-html-16.0.1.tgz", + "integrity": "sha512-B9JqA5i0qZe0Nsf49q3OXyGvyXuZFDzAP2iOFLEumymuYJITVpiH1IgsTEwTpdptDmZlMDMWeDmSawdaJIGCXQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "hast-util-sanitize": "^5.0.0", + "hast-util-to-html": "^9.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-math": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz", + "integrity": 
"sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-math": "^3.0.0", + "micromark-extension-math": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.45.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.45.1.tgz", + "integrity": "sha512-4iya7Jb76fVpQyLoiVpzUrsjQ12r3dM7fIVz+4NwoYvZOShknRmiv+iu9CClZml5ZLGb0XMcYLutK6w9tgxHDw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.45.1", + "@rollup/rollup-android-arm64": "4.45.1", + "@rollup/rollup-darwin-arm64": "4.45.1", + "@rollup/rollup-darwin-x64": "4.45.1", + 
"@rollup/rollup-freebsd-arm64": "4.45.1", + "@rollup/rollup-freebsd-x64": "4.45.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.45.1", + "@rollup/rollup-linux-arm-musleabihf": "4.45.1", + "@rollup/rollup-linux-arm64-gnu": "4.45.1", + "@rollup/rollup-linux-arm64-musl": "4.45.1", + "@rollup/rollup-linux-loongarch64-gnu": "4.45.1", + "@rollup/rollup-linux-powerpc64le-gnu": "4.45.1", + "@rollup/rollup-linux-riscv64-gnu": "4.45.1", + "@rollup/rollup-linux-riscv64-musl": "4.45.1", + "@rollup/rollup-linux-s390x-gnu": "4.45.1", + "@rollup/rollup-linux-x64-gnu": "4.45.1", + "@rollup/rollup-linux-x64-musl": "4.45.1", + "@rollup/rollup-win32-arm64-msvc": "4.45.1", + "@rollup/rollup-win32-ia32-msvc": "4.45.1", + "@rollup/rollup-win32-x64-msvc": "4.45.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/runed": { + "version": "0.25.0", + "resolved": "https://registry.npmjs.org/runed/-/runed-0.25.0.tgz", + "integrity": "sha512-7+ma4AG9FT2sWQEA0Egf6mb7PBT2vHyuHail1ie8ropfSjvZGtEAx8YTmUjv/APCsdRRxEVvArNjALk9zFSOrg==", + "funding": [ + "https://github.com/sponsors/huntabyte", + "https://github.com/sponsors/tglide" + ], + "dependencies": { + "esm-env": "^1.0.0" + }, + "peerDependencies": { + "svelte": "^5.7.0" + } + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dev": true, + "license": "MIT", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/sass": { + "version": "1.93.3", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.93.3.tgz", + "integrity": "sha512-elOcIZRTM76dvxNAjqYrucTSI0teAF/L2Lv0s6f6b7FOwcwIuA357bIE871580AjHJuSvLIRUosgV+lIWx6Rgg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "chokidar": "^4.0.0", + "immutable": "^5.0.2", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + }, + "optionalDependencies": { + "@parcel/watcher": "^2.4.1" + } + }, + "node_modules/scheduler": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/scule": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/scule/-/scule-1.3.0.tgz", + "integrity": "sha512-6FtHJEvt+pVMIB9IBY+IcCJ6Z5f1iQnytgyfKMhDKgmzYG+TeH/wx1y3l27rshSbLiSanrR9ffZDrEsmjlQF2g==", + "dev": true, + "license": "MIT" + }, + "node_modules/secure-compare": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz", + "integrity": "sha512-AckIIV90rPDcBcglUwXPF3kg0P0qmPsPXAj6BBEENQE1p5yA1xfmDJzfi1Tappj37Pv2mVbKpL3Z1T+Nn7k1Qw==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-cookie-parser": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/sirv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.1.tgz", + "integrity": "sha512-FoqMu0NCGBLCcAkS1qA+XJIQTR6/JHfQXl+uGteNCQ76T91DMUjPa9xfmeqMY3z80nLSg9yQmNjK0Px6RWsH/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", + "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", + "dev": true, + "license": "MIT" + }, + "node_modules/storybook": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/storybook/-/storybook-10.0.7.tgz", + "integrity": "sha512-7smAu0o+kdm378Q2uIddk32pn0UdIbrtTVU+rXRVtTVTCrK/P2cCui2y4JH+Bl3NgEq1bbBQpCAF/HKrDjk2Qw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@storybook/global": "^5.0.0", + "@storybook/icons": "^1.6.0", 
+ "@testing-library/jest-dom": "^6.6.3", + "@testing-library/user-event": "^14.6.1", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/spy": "3.2.4", + "esbuild": "^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0 || ^0.25.0", + "recast": "^0.23.5", + "semver": "^7.6.2", + "ws": "^8.18.0" + }, + "bin": { + "storybook": "dist/bin/dispatcher.js" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/storybook" + }, + "peerDependencies": { + "prettier": "^2 || ^3" + }, + "peerDependenciesMeta": { + "prettier": { + "optional": true + } + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", + "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/style-to-object": { + "version": "1.0.9", + 
"resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz", + "integrity": "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/svelte": { + "version": "5.48.3", + "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.48.3.tgz", + "integrity": "sha512-w7QZ398cdNherTdiQ/v3SYLLGOO4948Jgjh04PYqtTYVohmBvbmFwLmo7pp8gp4/1tceRWfSTjHgjtfpCVNJmQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "@jridgewell/sourcemap-codec": "^1.5.0", + "@sveltejs/acorn-typescript": "^1.0.5", + "@types/estree": "^1.0.5", + "acorn": "^8.12.1", + "aria-query": "^5.3.1", + "axobject-query": "^4.1.0", + "clsx": "^2.1.1", + "devalue": "^5.6.2", + "esm-env": "^1.2.1", + "esrap": "^2.2.1", + "is-reference": "^3.0.3", + "locate-character": "^3.0.0", + "magic-string": "^0.30.11", + "zimmerframe": "^1.1.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/svelte-ast-print": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/svelte-ast-print/-/svelte-ast-print-0.4.2.tgz", + "integrity": "sha512-hRHHufbJoArFmDYQKCpCvc0xUuIEfwYksvyLYEQyH+1xb5LD5sM/IthfooCdXZQtOIqXz6xm7NmaqdfwG4kh6w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/xeho91" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/xeho91" + } + ], + "license": "MIT", + "dependencies": { + "esrap": "1.2.2", + "zimmerframe": "1.1.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "svelte": "^5.0.0" + } + }, + "node_modules/svelte-ast-print/node_modules/esrap": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.2.2.tgz", + "integrity": "sha512-F2pSJklxx1BlQIQgooczXCPHmcWpn6EsP5oo73LQfonG9fIlIENQ8vMmfGXeojP9MrkzUNAfyU5vdFlR9shHAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15", + "@types/estree": "^1.0.1" + } + }, + "node_modules/svelte-check": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/svelte-check/-/svelte-check-4.3.0.tgz", + "integrity": "sha512-Iz8dFXzBNAM7XlEIsUjUGQhbEE+Pvv9odb9+0+ITTgFWZBGeJRRYqHUUglwe2EkLD5LIsQaAc4IUJyvtKuOO5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "chokidar": "^4.0.1", + "fdir": "^6.2.0", + "picocolors": "^1.0.0", + "sade": "^1.7.4" + }, + "bin": { + "svelte-check": "bin/svelte-check" + }, + "engines": { + "node": ">= 18.0.0" + }, + "peerDependencies": { + "svelte": "^4.0.0 || ^5.0.0-next.0", + "typescript": ">=5.0.0" + } + }, + "node_modules/svelte-eslint-parser": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/svelte-eslint-parser/-/svelte-eslint-parser-1.3.0.tgz", + "integrity": "sha512-VCgMHKV7UtOGcGLGNFSbmdm6kEKjtzo5nnpGU/mnx4OsFY6bZ7QwRF5DUx+Hokw5Lvdyo8dpk8B1m8mliomrNg==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-scope": "^8.2.0", + "eslint-visitor-keys": "^4.0.0", + "espree": "^10.0.0", + "postcss": "^8.4.49", + "postcss-scss": 
"^4.0.9", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://github.com/sponsors/ota-meshi" + }, + "peerDependencies": { + "svelte": "^3.37.0 || ^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "svelte": { + "optional": true + } + } + }, + "node_modules/svelte-eslint-parser/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/svelte-sonner": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/svelte-sonner/-/svelte-sonner-1.0.5.tgz", + "integrity": "sha512-9dpGPFqKb/QWudYqGnEz93vuY+NgCEvyNvxoCLMVGw6sDN/3oVeKV1xiEirW2E1N3vJEyj5imSBNOGltQHA7mg==", + "license": "MIT", + "dependencies": { + "runed": "^0.28.0" + }, + "peerDependencies": { + "svelte": "^5.0.0" + } + }, + "node_modules/svelte-sonner/node_modules/runed": { + "version": "0.28.0", + "resolved": "https://registry.npmjs.org/runed/-/runed-0.28.0.tgz", + "integrity": "sha512-k2xx7RuO9hWcdd9f+8JoBeqWtYrm5CALfgpkg2YDB80ds/QE4w0qqu34A7fqiAwiBBSBQOid7TLxwxVC27ymWQ==", + "funding": [ + "https://github.com/sponsors/huntabyte", + "https://github.com/sponsors/tglide" + ], + "license": "MIT", + "dependencies": { + "esm-env": "^1.0.0" + }, + "peerDependencies": { + "svelte": "^5.7.0" + } + }, + "node_modules/svelte-toolbelt": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/svelte-toolbelt/-/svelte-toolbelt-0.7.1.tgz", + "integrity": "sha512-HcBOcR17Vx9bjaOceUvxkY3nGmbBmCBBbuWLLEWO6jtmWH8f/QoWmbyUfQZrpDINH39en1b8mptfPQT9VKQ1xQ==", + "funding": [ + "https://github.com/sponsors/huntabyte" + ], + "dependencies": { + "clsx": "^2.1.1", + "runed": "^0.23.2", + "style-to-object": "^1.0.8" + }, + "engines": { + "node": ">=18", + "pnpm": ">=8.7.0" + }, + "peerDependencies": { + "svelte": "^5.0.0" + } + }, + "node_modules/svelte-toolbelt/node_modules/runed": { + "version": "0.23.4", + "resolved": "https://registry.npmjs.org/runed/-/runed-0.23.4.tgz", + "integrity": "sha512-9q8oUiBYeXIDLWNK5DfCWlkL0EW3oGbk845VdKlPeia28l751VpfesaB/+7pI6rnbx1I6rqoZ2fZxptOJLxILA==", + "funding": [ + "https://github.com/sponsors/huntabyte", + "https://github.com/sponsors/tglide" + ], + "dependencies": { + "esm-env": "^1.0.0" + }, + "peerDependencies": { + "svelte": "^5.7.0" + } + }, + "node_modules/svelte/node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/svelte/node_modules/esrap": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.2.tgz", + "integrity": "sha512-zA6497ha+qKvoWIK+WM9NAh5ni17sKZKhbS5B3PoYbBvaYHZWoS33zmFybmyqpn07RLUxSmn+RCls2/XF+d0oQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/svelte/node_modules/is-reference": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", + "integrity": 
"sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.6" + } + }, + "node_modules/svelte2tsx": { + "version": "0.7.45", + "resolved": "https://registry.npmjs.org/svelte2tsx/-/svelte2tsx-0.7.45.tgz", + "integrity": "sha512-cSci+mYGygYBHIZLHlm/jYlEc1acjAHqaQaDFHdEBpUueM9kSTnPpvPtSl5VkJOU1qSJ7h1K+6F/LIUYiqC8VA==", + "dev": true, + "license": "MIT", + "dependencies": { + "dedent-js": "^1.0.1", + "scule": "^1.3.0" + }, + "peerDependencies": { + "svelte": "^3.55 || ^4.0.0-next.0 || ^4.0 || ^5.0.0-next.0", + "typescript": "^4.9.4 || ^5.0.0" + } + }, + "node_modules/tabbable": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", + "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==", + "dev": true, + "license": "MIT" + }, + "node_modules/tailwind-merge": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.3.1.tgz", + "integrity": "sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==", + "dev": true, + "license": "MIT", + "peer": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwind-variants": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/tailwind-variants/-/tailwind-variants-3.2.2.tgz", + "integrity": "sha512-Mi4kHeMTLvKlM98XPnK+7HoBPmf4gygdFmqQPaDivc3DpYS6aIY6KiG/PgThrGvii5YZJqRsPz0aPyhoFzmZgg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16.x", + "pnpm": ">=7.x" + }, + "peerDependencies": { + "tailwind-merge": ">=3.0.0", + "tailwindcss": "*" + }, + "peerDependenciesMeta": { + "tailwind-merge": { + "optional": true + } + } + }, + "node_modules/tailwindcss": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.11.tgz", + "integrity": "sha512-2E9TBm6MDD/xKYe+dvJZAmg3yxIEDNRc0jwlNyDg/4Fil2QcSLjFKGVff0lAf1jjeaArlG/M75Ey/EYr/OJtBA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/tapable": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.2.tgz", + "integrity": "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { 
+ "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.3.tgz", + "integrity": "sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/tw-animate-css": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.3.5.tgz", + "integrity": "sha512-t3u+0YNoloIhj1mMXs779P6MO9q3p3mvGn4k1n3nJPqJw/glZcuijG2qTSN4z4mgNRfW5ZC3aXJFLwDtiipZXA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Wombosvideo" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.37.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.37.0.tgz", + "integrity": "sha512-TnbEjzkE9EmcO0Q2zM+GE8NQLItNAJpMmED1BdgoBMYNdqMhzlbqfdSwiRlAzEK2pA9UzVW0gzaaIzXWg2BjfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.37.0", + "@typescript-eslint/parser": "8.37.0", + "@typescript-eslint/typescript-estree": "8.37.0", + "@typescript-eslint/utils": "8.37.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + 
"trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/union": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz", + "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==", + "dev": true, + "dependencies": { + "qs": "^6.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/unist-util-find-after": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", + "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-find-after/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/unist-util-remove-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", + "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/unist-util-remove-position/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/unist-util-visit/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unplugin": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-2.3.10.tgz", + "integrity": "sha512-6NCPkv1ClwH+/BGE9QeoTIl09nuiAt0gS28nn1PvYXsGKRwM2TCbFA2QiilmehPDTXIe684k4rZI1yl3A1PCUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "acorn": "^8.15.0", + "picomatch": "^4.0.3", + "webpack-virtual-modules": "^0.6.2" + }, + "engines": { + "node": ">=18.12.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + 
"node_modules/url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true, + "license": "MIT" + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-13.0.0.tgz", + "integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist-node/bin/uuid" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/vfile/node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile/node_modules/vfile-message": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.2.2.tgz", + "integrity": "sha512-BxAKBWmIbrDgrokdGZH1IgkIk/5mMHDreLDmCJ0qpyJaAteP8NvMhkwr/ZCQNqNH97bw/dANTE9PDzqwJghfMQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vite-plugin-devtools-json": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/vite-plugin-devtools-json/-/vite-plugin-devtools-json-0.2.1.tgz", + "integrity": "sha512-5aiNvf/iLTuLR1dUqoI5CLLGgeK2hd6u+tA+RIp7GUZDyAcM6ECaUEWOOtGpidbcxbkKq++KtmSqA3jhMbPwMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "uuid": "^11.1.0" + }, + "peerDependencies": { + "vite": "^2.7.0 || ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/vite-plugin-devtools-json/node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/vite/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/vitefu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/vitefu/-/vitefu-1.1.1.tgz", + "integrity": "sha512-B/Fegf3i8zh0yFbpzZ21amWzHmuNlLlmJT6n7bu5e+pCHUKQIfXSYokrqOBGEMMe9UG2sostKQF9mml/vYaWJQ==", + "dev": true, + "license": "MIT", + "workspaces": [ + "tests/deps/*", + "tests/projects/*", + "tests/projects/workspace/packages/*" + ], + "peerDependencies": { + "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0" + }, + "peerDependenciesMeta": { + "vite": { + "optional": true + } + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest-browser-svelte": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/vitest-browser-svelte/-/vitest-browser-svelte-0.1.0.tgz", + "integrity": "sha512-YB6ZUZZQNqU1T9NzvTEDpwpPv35Ng1NZMPBh81zDrLEdOgROGE6nJb79NWb1Eu/a8VkHifqArpOZfJfALge6xQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/browser": "^2.1.0 || ^3.0.0-0", + "svelte": ">3.0.0", + "vitest": "^2.1.0 || ^3.0.0-0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack-virtual-modules": { + "version": "0.6.2", + "resolved": 
"https://registry.npmjs.org/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz", + "integrity": "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zimmerframe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.2.tgz", + "integrity": "sha512-rAbqEGa8ovJy4pyBxZM70hg4pE6gDgaQ0Sl9M3enG3I0d6H4XSAM3GeNGLKnsBpuijUow064sf7ww1nutC5/3w==", + "license": "MIT" + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + 
"license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/llama.cpp/tools/server/webui/package.json b/llama.cpp/tools/server/webui/package.json new file mode 100644 index 0000000..a361ce7 --- /dev/null +++ b/llama.cpp/tools/server/webui/package.json @@ -0,0 +1,94 @@ +{ + "name": "webui", + "private": true, + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "bash scripts/dev.sh", + "build": "vite build && ./scripts/post-build.sh", + "preview": "vite preview", + "prepare": "svelte-kit sync || echo ''", + "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", + "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch", + "reset": "rm -rf .svelte-kit node_modules", + "format": "prettier --write .", + "lint": "prettier --check . && eslint .", + "test": "npm run test:ui -- --run && npm run test:client -- --run && npm run test:unit -- --run && npm run test:e2e", + "test:e2e": "playwright test", + "test:client": "vitest --project=client", + "test:unit": "vitest --project=unit", + "test:ui": "vitest --project=ui", + "storybook": "storybook dev -p 6006", + "build-storybook": "storybook build", + "cleanup": "rm -rf .svelte-kit build node_modules test-results" + }, + "devDependencies": { + "@chromatic-com/storybook": "^4.1.2", + "@eslint/compat": "^1.2.5", + "@eslint/js": "^9.18.0", + "@internationalized/date": "^3.10.1", + "@lucide/svelte": "^0.515.0", + "@playwright/test": "^1.49.1", + "@storybook/addon-a11y": "^10.0.7", + "@storybook/addon-docs": "^10.0.7", + "@storybook/addon-svelte-csf": "^5.0.10", + "@storybook/addon-vitest": "^10.0.7", + "@storybook/sveltekit": "^10.0.7", + "@sveltejs/adapter-static": "^3.0.10", + "@sveltejs/kit": "^2.48.4", + "@sveltejs/vite-plugin-svelte": "^6.2.1", + "@tailwindcss/forms": "^0.5.9", + "@tailwindcss/typography": "^0.5.15", + "@tailwindcss/vite": "^4.0.0", + "@types/node": "^22", + "@vitest/browser": "^3.2.3", + "bits-ui": "^2.14.4", + "clsx": "^2.1.1", + "dexie": "^4.0.11", + "eslint": "^9.18.0", + "eslint-config-prettier": "^10.0.1", + "eslint-plugin-storybook": "^10.0.7", + "eslint-plugin-svelte": "^3.0.0", + "fflate": "^0.8.2", + "globals": "^16.0.0", + "http-server": "^14.1.1", + "mdast": "^3.0.0", + "mdsvex": "^0.12.3", + "playwright": "^1.56.1", + "prettier": "^3.4.2", + "prettier-plugin-svelte": "^3.3.3", + "prettier-plugin-tailwindcss": "^0.6.11", + "rehype-katex": "^7.0.1", + "remark-math": "^6.0.0", + "sass": "^1.93.3", + "storybook": "^10.0.7", + "svelte": "^5.38.2", + "svelte-check": "^4.0.0", + "tailwind-merge": "^3.3.1", + "tailwind-variants": "^3.2.2", + "tailwindcss": "^4.0.0", + "tw-animate-css": "^1.3.5", + "typescript": "^5.0.0", + "typescript-eslint": "^8.20.0", + "unified": "^11.0.5", + "uuid": "^13.0.0", + "vite": "^7.2.2", + "vite-plugin-devtools-json": "^0.2.0", + "vitest": "^3.2.3", + "vitest-browser-svelte": "^0.1.0" + }, + "dependencies": { + "highlight.js": "^11.11.1", + "mode-watcher": "^1.1.0", + "pdfjs-dist": "^5.4.54", + "rehype-highlight": "^7.0.2", + "rehype-stringify": "^10.0.1", + "remark": "^15.0.1", + "remark-breaks": "^4.0.0", + "remark-gfm": "^4.0.1", + "remark-html": "^16.0.1", + "remark-rehype": "^11.1.2", + "svelte-sonner": "^1.0.5", + "unist-util-visit": "^5.0.0" + } +} diff --git a/llama.cpp/tools/server/webui/playwright.config.ts b/llama.cpp/tools/server/webui/playwright.config.ts new file mode 100644 index 0000000..26d3be5 --- /dev/null +++ 
b/llama.cpp/tools/server/webui/playwright.config.ts @@ -0,0 +1,11 @@ +import { defineConfig } from '@playwright/test'; + +export default defineConfig({ + webServer: { + command: 'npm run build && http-server ../public -p 8181', + port: 8181, + timeout: 120000, + reuseExistingServer: false + }, + testDir: 'tests/e2e' +}); diff --git a/llama.cpp/tools/server/webui/scripts/dev.sh b/llama.cpp/tools/server/webui/scripts/dev.sh new file mode 100644 index 0000000..b7539c2 --- /dev/null +++ b/llama.cpp/tools/server/webui/scripts/dev.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Development script for llama.cpp webui +# +# This script starts the webui development servers (Storybook and Vite). +# Note: You need to start llama-server separately. +# +# Usage: +# bash scripts/dev.sh +# npm run dev + +cd ../../../ + +# Check and install git hooks if missing +check_and_install_hooks() { + local hooks_missing=false + + # Check for required hooks + if [ ! -f ".git/hooks/pre-commit" ] || [ ! -f ".git/hooks/pre-push" ] || [ ! -f ".git/hooks/post-push" ]; then + hooks_missing=true + fi + + if [ "$hooks_missing" = true ]; then + echo "🔧 Git hooks missing, installing them..." + cd tools/server/webui + if bash scripts/install-git-hooks.sh; then + echo "✅ Git hooks installed successfully" + else + echo "⚠️ Failed to install git hooks, continuing anyway..." + fi + cd ../../../ + else + echo "✅ Git hooks already installed" + fi +} + +# Install git hooks if needed +check_and_install_hooks + +# Cleanup function +cleanup() { + echo "🧹 Cleaning up..." + exit +} + +# Set up signal handlers +trap cleanup SIGINT SIGTERM + +echo "🚀 Starting development servers..." +echo "📝 Note: Make sure to start llama-server separately if needed" +cd tools/server/webui +# Use --insecure-http-parser to handle malformed HTTP responses from llama-server +# (some responses have both Content-Length and Transfer-Encoding headers) +storybook dev -p 6006 --ci & NODE_OPTIONS="--insecure-http-parser" vite dev --host 0.0.0.0 & + +# Wait for all background processes +wait diff --git a/llama.cpp/tools/server/webui/scripts/install-git-hooks.sh b/llama.cpp/tools/server/webui/scripts/install-git-hooks.sh new file mode 100755 index 0000000..d14e281 --- /dev/null +++ b/llama.cpp/tools/server/webui/scripts/install-git-hooks.sh @@ -0,0 +1,202 @@ +#!/bin/bash + +# Script to install pre-commit and pre-push hooks for webui +# Pre-commit: formats code and runs checks +# Pre-push: builds the project, stashes unstaged changes + +REPO_ROOT=$(git rev-parse --show-toplevel) +PRE_COMMIT_HOOK="$REPO_ROOT/.git/hooks/pre-commit" +PRE_PUSH_HOOK="$REPO_ROOT/.git/hooks/pre-push" + +echo "Installing pre-commit and pre-push hooks for webui..." + +# Create the pre-commit hook +cat > "$PRE_COMMIT_HOOK" << 'EOF' +#!/bin/bash + +# Check if there are any changes in the webui directory +if git diff --cached --name-only | grep -q "^tools/server/webui/"; then + echo "Formatting and checking webui code..." + + # Change to webui directory and run format + cd tools/server/webui + + # Check if npm is available and package.json exists + if [ ! -f "package.json" ]; then + echo "Error: package.json not found in tools/server/webui" + exit 1 + fi + + # Run the format command + npm run format + + # Check if format command succeeded + if [ $? -ne 0 ]; then + echo "Error: npm run format failed" + exit 1 + fi + + # Run the lint command + npm run lint + + # Check if lint command succeeded + if [ $? 
-ne 0 ]; then + echo "Error: npm run lint failed" + exit 1 + fi + + # Run the check command + npm run check + + # Check if check command succeeded + if [ $? -ne 0 ]; then + echo "Error: npm run check failed" + exit 1 + fi + + # Go back to repo root + cd ../../.. + + echo "✅ Webui code formatted and checked successfully" +fi + +exit 0 +EOF + +# Create the pre-push hook +cat > "$PRE_PUSH_HOOK" << 'EOF' +#!/bin/bash + +# Check if there are any webui changes that need building +WEBUI_CHANGES=$(git diff --name-only @{push}..HEAD | grep "^tools/server/webui/" || true) + +if [ -n "$WEBUI_CHANGES" ]; then + echo "Webui changes detected, checking if build is up-to-date..." + + # Change to webui directory + cd tools/server/webui + + # Check if npm is available and package.json exists + if [ ! -f "package.json" ]; then + echo "Error: package.json not found in tools/server/webui" + exit 1 + fi + + # Check if build output exists and is newer than source files + BUILD_FILE="../public/index.html.gz" + NEEDS_BUILD=false + + if [ ! -f "$BUILD_FILE" ]; then + echo "Build output not found, building..." + NEEDS_BUILD=true + else + # Check if any source files are newer than the build output + if find src -newer "$BUILD_FILE" -type f | head -1 | grep -q .; then + echo "Source files are newer than build output, rebuilding..." + NEEDS_BUILD=true + fi + fi + + if [ "$NEEDS_BUILD" = true ]; then + echo "Building webui..." + + # Stash any unstaged changes to avoid conflicts during build + echo "Checking for unstaged changes..." + if ! git diff --quiet || ! git diff --cached --quiet --diff-filter=A; then + echo "Stashing unstaged changes..." + git stash push --include-untracked -m "Pre-push hook: stashed unstaged changes" + STASH_CREATED=$? + else + echo "No unstaged changes to stash" + STASH_CREATED=1 + fi + + # Run the build command + npm run build + + # Check if build command succeeded + if [ $? -ne 0 ]; then + echo "Error: npm run build failed" + if [ $STASH_CREATED -eq 0 ]; then + echo "You can restore your unstaged changes with: git stash pop" + fi + exit 1 + fi + + # Go back to repo root + cd ../../.. + + # Check if build output was created/updated + if [ -f "tools/server/public/index.html.gz" ]; then + # Add the build output and commit it + git add tools/server/public/index.html.gz + if ! git diff --cached --quiet; then + echo "Committing updated build output..." + git commit -m "chore: update webui build output" + echo "✅ Build output committed successfully" + else + echo "Build output unchanged" + fi + else + echo "Error: Build output not found after build" + if [ $STASH_CREATED -eq 0 ]; then + echo "You can restore your unstaged changes with: git stash pop" + fi + exit 1 + fi + + if [ $STASH_CREATED -eq 0 ]; then + echo "✅ Build completed. Your unstaged changes have been stashed." + echo "They will be automatically restored after the push." + # Create a marker file to indicate stash was created by pre-push hook + touch .git/WEBUI_PUSH_STASH_MARKER + fi + else + echo "✅ Build output is up-to-date" + fi + + echo "✅ Webui ready for push" +fi + +exit 0 +EOF + +# Create the post-push hook (for restoring stashed changes after push) +cat > "$REPO_ROOT/.git/hooks/post-push" << 'EOF' +#!/bin/bash + +# Check if we have a stash marker from the pre-push hook +if [ -f .git/WEBUI_PUSH_STASH_MARKER ]; then + echo "Restoring your unstaged changes after push..." + git stash pop + rm -f .git/WEBUI_PUSH_STASH_MARKER + echo "✅ Your unstaged changes have been restored." 
+fi + +exit 0 +EOF + +# Make all hooks executable +chmod +x "$PRE_COMMIT_HOOK" +chmod +x "$PRE_PUSH_HOOK" +chmod +x "$REPO_ROOT/.git/hooks/post-push" + +if [ $? -eq 0 ]; then + echo "✅ Git hooks installed successfully!" + echo " Pre-commit: $PRE_COMMIT_HOOK" + echo " Pre-push: $PRE_PUSH_HOOK" + echo " Post-push: $REPO_ROOT/.git/hooks/post-push" + echo "" + echo "The hooks will automatically:" + echo " • Format and check webui code before commits (pre-commit)" + echo " • Build webui code before pushes (pre-push)" + echo " • Stash unstaged changes during build process" + echo " • Restore your unstaged changes after the push" + echo "" + echo "To test the hooks:" + echo " • Make a change to a file in the webui directory and commit it (triggers format/check)" + echo " • Push your commits to trigger the build process" +else + echo "❌ Failed to make hooks executable" + exit 1 +fi diff --git a/llama.cpp/tools/server/webui/scripts/post-build.sh b/llama.cpp/tools/server/webui/scripts/post-build.sh new file mode 100755 index 0000000..a49d6cc --- /dev/null +++ b/llama.cpp/tools/server/webui/scripts/post-build.sh @@ -0,0 +1,3 @@ +rm -rf ../public/_app; +rm ../public/favicon.svg; +rm ../public/index.html; diff --git a/llama.cpp/tools/server/webui/src/app.css b/llama.cpp/tools/server/webui/src/app.css new file mode 100644 index 0000000..9705040 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/app.css @@ -0,0 +1,138 @@ +@import 'tailwindcss'; + +@import 'tw-animate-css'; + +@custom-variant dark (&:is(.dark *)); + +:root { + --radius: 0.625rem; + --background: oklch(1 0 0); + --foreground: oklch(0.145 0 0); + --card: oklch(1 0 0); + --card-foreground: oklch(0.145 0 0); + --popover: oklch(1 0 0); + --popover-foreground: oklch(0.145 0 0); + --primary: oklch(0.205 0 0); + --primary-foreground: oklch(0.985 0 0); + --secondary: oklch(0.97 0 0); + --secondary-foreground: oklch(0.205 0 0); + --muted: oklch(0.97 0 0); + --muted-foreground: oklch(0.556 0 0); + --accent: oklch(0.97 0 0); + --accent-foreground: oklch(0.205 0 0); + --destructive: oklch(0.577 0.245 27.325); + --border: oklch(0.875 0 0); + --input: oklch(0.92 0 0); + --ring: oklch(0.708 0 0); + --chart-1: oklch(0.646 0.222 41.116); + --chart-2: oklch(0.6 0.118 184.704); + --chart-3: oklch(0.398 0.07 227.392); + --chart-4: oklch(0.828 0.189 84.429); + --chart-5: oklch(0.769 0.188 70.08); + --sidebar: oklch(0.987 0 0); + --sidebar-foreground: oklch(0.145 0 0); + --sidebar-primary: oklch(0.205 0 0); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.97 0 0); + --sidebar-accent-foreground: oklch(0.205 0 0); + --sidebar-border: oklch(0.922 0 0); + --sidebar-ring: oklch(0.708 0 0); + --code-background: oklch(0.975 0 0); + --code-foreground: oklch(0.145 0 0); + --layer-popover: 1000000; +} + +.dark { + --background: oklch(0.16 0 0); + --foreground: oklch(0.985 0 0); + --card: oklch(0.205 0 0); + --card-foreground: oklch(0.985 0 0); + --popover: oklch(0.205 0 0); + --popover-foreground: oklch(0.985 0 0); + --primary: oklch(0.922 0 0); + --primary-foreground: oklch(0.205 0 0); + --secondary: oklch(0.269 0 0); + --secondary-foreground: oklch(0.985 0 0); + --muted: oklch(0.269 0 0); + --muted-foreground: oklch(0.708 0 0); + --accent: oklch(0.269 0 0); + --accent-foreground: oklch(0.985 0 0); + --destructive: oklch(0.704 0.191 22.216); + --border: oklch(1 0 0 / 30%); + --input: oklch(1 0 0 / 30%); + --ring: oklch(0.556 0 0); + --chart-1: oklch(0.488 0.243 264.376); + --chart-2: oklch(0.696 0.17 162.48); + --chart-3: oklch(0.769 0.188 
70.08); + --chart-4: oklch(0.627 0.265 303.9); + --chart-5: oklch(0.645 0.246 16.439); + --sidebar: oklch(0.19 0 0); + --sidebar-foreground: oklch(0.985 0 0); + --sidebar-primary: oklch(0.488 0.243 264.376); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.269 0 0); + --sidebar-accent-foreground: oklch(0.985 0 0); + --sidebar-border: oklch(1 0 0 / 10%); + --sidebar-ring: oklch(0.556 0 0); + --code-background: oklch(0.225 0 0); + --code-foreground: oklch(0.875 0 0); +} + +@theme inline { + --radius-sm: calc(var(--radius) - 4px); + --radius-md: calc(var(--radius) - 2px); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) + 4px); + --color-background: var(--background); + --color-foreground: var(--foreground); + --color-card: var(--card); + --color-card-foreground: var(--card-foreground); + --color-popover: var(--popover); + --color-popover-foreground: var(--popover-foreground); + --color-primary: var(--primary); + --color-primary-foreground: var(--primary-foreground); + --color-secondary: var(--secondary); + --color-secondary-foreground: var(--secondary-foreground); + --color-muted: var(--muted); + --color-muted-foreground: var(--muted-foreground); + --color-accent: var(--accent); + --color-accent-foreground: var(--accent-foreground); + --color-destructive: var(--destructive); + --color-border: var(--border); + --color-input: var(--input); + --color-ring: var(--ring); + --color-chart-1: var(--chart-1); + --color-chart-2: var(--chart-2); + --color-chart-3: var(--chart-3); + --color-chart-4: var(--chart-4); + --color-chart-5: var(--chart-5); + --color-sidebar: var(--sidebar); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-ring: var(--sidebar-ring); +} + +@layer base { + * { + @apply border-border outline-ring/50; + } + body { + @apply bg-background text-foreground; + } +} + +@layer utilities { + .scrollbar-hide { + /* Hide scrollbar for Chrome, Safari and Opera */ + &::-webkit-scrollbar { + display: none; + } + /* Hide scrollbar for IE, Edge and Firefox */ + -ms-overflow-style: none; + scrollbar-width: none; + } +} diff --git a/llama.cpp/tools/server/webui/src/app.d.ts b/llama.cpp/tools/server/webui/src/app.d.ts new file mode 100644 index 0000000..73287d9 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/app.d.ts @@ -0,0 +1,133 @@ +// See https://svelte.dev/docs/kit/types#app.d.ts +// for information about these interfaces + +// Import chat types from dedicated module + +import type { + // API types + ApiChatCompletionRequest, + ApiChatCompletionResponse, + ApiChatCompletionStreamChunk, + ApiChatCompletionToolCall, + ApiChatCompletionToolCallDelta, + ApiChatMessageData, + ApiChatMessageContentPart, + ApiContextSizeError, + ApiErrorResponse, + ApiLlamaCppServerProps, + ApiModelDataEntry, + ApiModelListResponse, + ApiProcessingState, + ApiRouterModelMeta, + ApiRouterModelsLoadRequest, + ApiRouterModelsLoadResponse, + ApiRouterModelsStatusRequest, + ApiRouterModelsStatusResponse, + ApiRouterModelsListResponse, + ApiRouterModelsUnloadRequest, + ApiRouterModelsUnloadResponse, + // Chat types + ChatAttachmentDisplayItem, + ChatAttachmentPreviewItem, + ChatMessageType, + ChatRole, + ChatUploadedFile, + ChatMessageSiblingInfo, + 
ChatMessagePromptProgress, + ChatMessageTimings, + // Database types + DatabaseConversation, + DatabaseMessage, + DatabaseMessageExtra, + DatabaseMessageExtraAudioFile, + DatabaseMessageExtraImageFile, + DatabaseMessageExtraTextFile, + DatabaseMessageExtraPdfFile, + DatabaseMessageExtraLegacyContext, + ExportedConversation, + ExportedConversations, + // Model types + ModelModalities, + ModelOption, + // Settings types + SettingsChatServiceOptions, + SettingsConfigValue, + SettingsFieldConfig, + SettingsConfigType +} from '$lib/types'; + +import { ServerRole, ServerModelStatus, ModelModality } from '$lib/enums'; + +declare global { + // namespace App { + // interface Error {} + // interface Locals {} + // interface PageData {} + // interface PageState {} + // interface Platform {} + // } + + export { + // API types + ApiChatCompletionRequest, + ApiChatCompletionResponse, + ApiChatCompletionStreamChunk, + ApiChatCompletionToolCall, + ApiChatCompletionToolCallDelta, + ApiChatMessageData, + ApiChatMessageContentPart, + ApiContextSizeError, + ApiErrorResponse, + ApiLlamaCppServerProps, + ApiModelDataEntry, + ApiModelListResponse, + ApiProcessingState, + ApiRouterModelMeta, + ApiRouterModelsLoadRequest, + ApiRouterModelsLoadResponse, + ApiRouterModelsStatusRequest, + ApiRouterModelsStatusResponse, + ApiRouterModelsListResponse, + ApiRouterModelsUnloadRequest, + ApiRouterModelsUnloadResponse, + // Chat types + ChatAttachmentDisplayItem, + ChatAttachmentPreviewItem, + ChatMessagePromptProgress, + ChatMessageSiblingInfo, + ChatMessageTimings, + ChatMessageType, + ChatRole, + ChatUploadedFile, + // Database types + DatabaseConversation, + DatabaseMessage, + DatabaseMessageExtra, + DatabaseMessageExtraAudioFile, + DatabaseMessageExtraImageFile, + DatabaseMessageExtraTextFile, + DatabaseMessageExtraPdfFile, + DatabaseMessageExtraLegacyContext, + ExportedConversation, + ExportedConversations, + // Enum types + ModelModality, + ServerRole, + ServerModelStatus, + // Model types + ModelModalities, + ModelOption, + // Settings types + SettingsChatServiceOptions, + SettingsConfigValue, + SettingsFieldConfig, + SettingsConfigType + }; +} + +declare global { + interface Window { + idxThemeStyle?: number; + idxCodeBlock?: number; + } +} diff --git a/llama.cpp/tools/server/webui/src/app.html b/llama.cpp/tools/server/webui/src/app.html new file mode 100644 index 0000000..1391f88 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/app.html @@ -0,0 +1,12 @@ + + + + + + + %sveltekit.head% + + +
%sveltekit.body%
+ + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentPreview.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentPreview.svelte new file mode 100644 index 0000000..0b0bf52 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentPreview.svelte @@ -0,0 +1,283 @@ + + +
+
+ {#if isPdf} +
+ + + +
+ {/if} +
+ +
+ {#if isImage && displayPreview} +
+ {displayName} +
+ {:else if isPdf && pdfViewMode === 'pages'} + {#if !hasVisionModality && activeModelId} + + + Preview only + + + The selected model does not support vision. Only the extracted + + + (pdfViewMode = 'text')}> + text + + will be sent to the model. + + + + {/if} + + {#if pdfImagesLoading} +
+
+
+ +

Converting PDF to images...

+
+
+ {:else if pdfImagesError} +
+
+ + +

Failed to load PDF images

+ +

{pdfImagesError}

+ + +
+
+ {:else if pdfImages.length > 0} +
+ {#each pdfImages as image, index (image)} +
+

Page {index + 1}

+ + PDF Page {index + 1} +
+ {/each} +
+ {:else} +
+
+ + +

No PDF pages available

+
+
+ {/if} + {:else if (isText || (isPdf && pdfViewMode === 'text')) && displayTextContent} + + {:else if isAudio} +
+
+ + + {#if uploadedFile?.preview} + + {:else if isAudio && attachment && 'mimeType' in attachment && 'base64Data' in attachment} + + {:else} +

Audio preview not available

+ {/if} + +

+ {displayName} +

+
+
+ {:else} +
+
+ {#if IconComponent} + + {/if} + +

Preview not available for this file type

+
+
+ {/if} +
+
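The attachment preview component above renders image and audio attachments from base64 data URLs captured when the files are uploaded. As a rough, hypothetical sketch of how such a preview string can be produced in the browser (standard FileReader API only; the helper name and shape are illustrative, not the webui's actual code):

// Illustrative only: turn an uploaded File into a data URL such as
// "data:image/png;base64,..." that an <img> or <audio> element can consume.
function fileToDataUrl(file: File): Promise<string> {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = () => resolve(reader.result as string);
        reader.onerror = () => reject(reader.error);
        reader.readAsDataURL(file); // base64-encodes the file contents
    });
}

// Usage sketch: const preview = await fileToDataUrl(selectedFile);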
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailFile.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailFile.svelte new file mode 100644 index 0000000..908db58 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailFile.svelte @@ -0,0 +1,165 @@ + + +{#if isText} + {#if readonly} + + + {:else} + + + {/if} +{:else} + +{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailImage.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailImage.svelte new file mode 100644 index 0000000..ba711a9 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentThumbnailImage.svelte @@ -0,0 +1,64 @@ + + +
+ {#if onClick} + + {:else} + {name} + {/if} + + {#if !readonly} +
+ +
+ {/if} +
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte new file mode 100644 index 0000000..a1f5af5 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte @@ -0,0 +1,243 @@ + + +{#if displayItems.length > 0} +
+ {#if limitToSingleRow} +
+ + +
+ {#each displayItems as item (item.id)} + {#if item.isImage && item.preview} + openPreview(item, event)} + /> + {:else} + openPreview(item, event)} + /> + {/if} + {/each} +
+ + +
+ + {#if showViewAll} +
+ +
+ {/if} + {:else} +
+ {#each displayItems as item (item.id)} + {#if item.isImage && item.preview} + openPreview(item, event)} + /> + {:else} + openPreview(item, event)} + /> + {/if} + {/each} +
+ {/if} +
+{/if} + +{#if previewItem} + +{/if} + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsViewAll.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsViewAll.svelte new file mode 100644 index 0000000..279b2e2 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsViewAll.svelte @@ -0,0 +1,117 @@ + + +
+
+ {#if fileItems.length > 0} +
+

Files ({fileItems.length})

+
+ {#each fileItems as item (item.id)} + openPreview(item, event)} + /> + {/each} +
+
+ {/if} + + {#if imageItems.length > 0} +
+

Images ({imageItems.length})

+
+ {#each imageItems as item (item.id)} + {#if item.preview} + openPreview(item, event)} + /> + {/if} + {/each} +
+
+ {/if} +
+
+ +{#if previewItem} + +{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte new file mode 100644 index 0000000..27ab975 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte @@ -0,0 +1,315 @@ + + + + +
+ + +
+ + + 0 || uploadedFiles.length > 0} + hasText={message.trim().length > 0} + {disabled} + {isLoading} + {isRecording} + {uploadedFiles} + onFileUpload={handleFileUpload} + onMicClick={handleMicClick} + onStop={handleStop} + /> +
+ + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionFileAttachments.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionFileAttachments.svelte new file mode 100644 index 0000000..dd37268 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionFileAttachments.svelte @@ -0,0 +1,123 @@ + + +
+ + + + + + + + +

{fileUploadTooltipText}

+
+
+
+ + + + + onFileUpload?.()} + > + + + Images + + + + {#if !hasVisionModality} + +

Images require vision models to be processed

+
+ {/if} +
+ + + + onFileUpload?.()} + > + + + Audio Files + + + + {#if !hasAudioModality} + +

Audio files require audio models to be processed

+
+ {/if} +
+ + onFileUpload?.()} + > + + + Text Files + + + + + onFileUpload?.()} + > + + + PDF Files + + + + {#if !hasVisionModality} + +

PDFs will be converted to text. Image-based PDFs may not work properly.

+
+ {/if} +
+
+
+
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionRecord.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionRecord.svelte new file mode 100644 index 0000000..f1b0849 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionRecord.svelte @@ -0,0 +1,52 @@ + + +
+ + + + + + {#if !hasAudioModality} + +

Current model does not support audio

+
+ {/if} +
+
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionSubmit.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionSubmit.svelte new file mode 100644 index 0000000..861cd18 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActionSubmit.svelte @@ -0,0 +1,55 @@ + + +{#snippet submitButton(props = {})} + +{/snippet} + +{#if tooltipLabel} + + + {@render submitButton()} + + + +

{tooltipLabel}

+
+
+{:else} + {@render submitButton()} +{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte new file mode 100644 index 0000000..dde9bda --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte @@ -0,0 +1,204 @@ + + +
+ + + + + {#if isLoading} + + {:else if shouldShowRecordButton} + + {:else} + + {/if} +
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormFileInputInvisible.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormFileInputInvisible.svelte new file mode 100644 index 0000000..d758822 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormFileInputInvisible.svelte @@ -0,0 +1,30 @@ + + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormHelperText.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormHelperText.svelte new file mode 100644 index 0000000..f8246f2 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormHelperText.svelte @@ -0,0 +1,17 @@ + + +{#if show} +
+

+ Press Enter to send, + Shift + Enter for new line +

+
+{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormTextarea.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormTextarea.svelte new file mode 100644 index 0000000..19b763f --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormTextarea.svelte @@ -0,0 +1,59 @@ + + +
+ +
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte new file mode 100644 index 0000000..220276f --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte @@ -0,0 +1,286 @@ + + +{#if message.role === 'system'} + +{:else if message.role === 'user'} + +{:else} + (shouldBranchAfterEdit = value)} + {showDeleteDialog} + {siblingInfo} + {thinkingContent} + {toolCallContent} + /> +{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageActions.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageActions.svelte new file mode 100644 index 0000000..3cb4815 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageActions.svelte @@ -0,0 +1,100 @@ + + +
+
+ {#if siblingInfo && siblingInfo.totalSiblings > 1} + + {/if} + +
+ + + {#if onEdit} + + {/if} + + {#if role === 'assistant' && onRegenerate} + onRegenerate()} /> + {/if} + + {#if role === 'assistant' && onContinue} + + {/if} + + +
+
+
+ + 1 + ? `This will delete ${deletionInfo.totalCount} messages including: ${deletionInfo.userMessages} user message${deletionInfo.userMessages > 1 ? 's' : ''} and ${deletionInfo.assistantMessages} assistant response${deletionInfo.assistantMessages > 1 ? 's' : ''}. All messages in this branch and their responses will be permanently removed. This action cannot be undone.` + : 'Are you sure you want to delete this message? This action cannot be undone.'} + confirmText={deletionInfo && deletionInfo.totalCount > 1 + ? `Delete ${deletionInfo.totalCount} Messages` + : 'Delete'} + cancelText="Cancel" + variant="destructive" + icon={Trash2} + onConfirm={handleConfirmDelete} + onCancel={() => onShowDeleteDialogChange(false)} +/> diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageAssistant.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageAssistant.svelte new file mode 100644 index 0000000..2b34b1c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageAssistant.svelte @@ -0,0 +1,418 @@ + + +
+ {#if thinkingContent} + + {/if} + + {#if message?.role === 'assistant' && isLoading() && !message?.content?.trim()} +
+
+ + {processingState.getPromptProgressText() ?? processingState.getProcessingMessage()} + +
+
+ {/if} + + {#if isEditing} +
+ + +
+
+ onShouldBranchAfterEditChange?.(checked === true)} + /> + +
+
+ + + +
+
+
+ {:else if message.role === 'assistant'} + {#if config().disableReasoningFormat} +
{messageContent || ''}
+ {:else} + + {/if} + {:else} +
+ {messageContent} +
+ {/if} + +
+ {#if displayedModel()} +
+ {#if isRouter} + + {:else} + + {/if} + + {#if currentConfig.showMessageStats && message.timings && message.timings.predicted_n && message.timings.predicted_ms} + + {:else if isLoading() && currentConfig.showMessageStats} + {@const liveStats = processingState.getLiveProcessingStats()} + {@const genStats = processingState.getLiveGenerationStats()} + {@const promptProgress = processingState.processingState?.promptProgress} + {@const isStillProcessingPrompt = + promptProgress && promptProgress.processed < promptProgress.total} + + {#if liveStats || genStats} + + {/if} + {/if} +
+ {/if} + + {#if config().showToolCalls} + {#if (toolCalls && toolCalls.length > 0) || fallbackToolCalls} + + + + + Tool calls: + + + {#if toolCalls && toolCalls.length > 0} + {#each toolCalls as toolCall, index (toolCall.id ?? `${index}`)} + {@const badge = formatToolCallBadge(toolCall, index)} + + {/each} + {:else if fallbackToolCalls} + + {/if} + + {/if} + {/if} +
+ + {#if message.timestamp && !isEditing} + + {/if} +
+ + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageBranchingControls.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageBranchingControls.svelte new file mode 100644 index 0000000..7420bb1 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageBranchingControls.svelte @@ -0,0 +1,84 @@ + + +{#if siblingInfo && siblingInfo.totalSiblings > 1} + +{/if} diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageEditForm.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageEditForm.svelte new file mode 100644 index 0000000..f812ea2 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageEditForm.svelte @@ -0,0 +1,391 @@ + + + + + + +
+ { + if (fileId.startsWith('attachment-')) { + const index = parseInt(fileId.replace('attachment-', ''), 10); + if (!isNaN(index) && index >= 0 && index < editedExtras.length) { + handleRemoveExistingAttachment(index); + } + } else { + handleRemoveUploadedFile(fileId); + } + }} + limitToSingleRow + class="py-5" + style="scroll-padding: 1rem;" + /> + +
+ + +
+ + +
+ + {#if isRouter} + + {/if} + + +
+
+
+ +
+ {#if showSaveOnlyOption && onSaveEditOnly} +
+ + + +
+ {:else} +
+ {/if} + + +
+ + (showDiscardDialog = false)} +/> diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageStatistics.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageStatistics.svelte new file mode 100644 index 0000000..24fe592 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageStatistics.svelte @@ -0,0 +1,175 @@ + + +
+
+ {#if hasPromptStats || isLive} + + + + + +

Reading (prompt processing)

+
+
+ {/if} + + + + + +

+ {isGenerationDisabled + ? 'Generation (waiting for tokens...)' + : 'Generation (token output)'} +

+
+
+
+ +
+ {#if activeView === ChatMessageStatsView.GENERATION && hasGenerationStats} + + + + {:else if hasPromptStats} + + + + {/if} +
+
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageSystem.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageSystem.svelte new file mode 100644 index 0000000..c203822 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageSystem.svelte @@ -0,0 +1,216 @@ + + +
+ {#if isEditing} +
+ + +
+ + + +
+
+ {:else} + {#if message.content.trim()} +
+ +
+ {/if} +
+ + {#if isExpanded && showExpandButton} +
+ +
+ {/if} + + + + {/if} + + {#if message.timestamp} +
+ +
+ {/if} + {/if} + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageThinkingBlock.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageThinkingBlock.svelte new file mode 100644 index 0000000..9245ad5 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageThinkingBlock.svelte @@ -0,0 +1,68 @@ + + + + + +
+ + + + {isStreaming ? 'Reasoning...' : 'Reasoning'} + +
+ +
+ + + Toggle reasoning content +
+
+ + +
+
+
+ {reasoningContent ?? ''} +
+
+
+
+
+
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageUser.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageUser.svelte new file mode 100644 index 0000000..041c6bd --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessageUser.svelte @@ -0,0 +1,163 @@ + + +
+ {#if isEditing} + + {:else} + {#if message.extra && message.extra.length > 0} +
+ +
+ {/if} + + {#if message.content.trim()} + + {#if currentConfig.renderUserContentAsMarkdown} +
+ +
+ {:else} + + {message.content} + + {/if} +
+ {/if} + + {#if message.timestamp} +
+ +
+ {/if} + {/if} +
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessages.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessages.svelte new file mode 100644 index 0000000..c203f10 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessages.svelte @@ -0,0 +1,143 @@ + + +
+ {#each displayMessages as { message, siblingInfo } (message.id)} + + {/each} +
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte new file mode 100644 index 0000000..2743955 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte @@ -0,0 +1,617 @@ + + +{#if isDragOver} + +{/if} + + + + + +{#if !isEmpty} +
+ { + if (!disableAutoScroll) { + userScrolledUp = false; + autoScrollEnabled = true; + scrollChatToBottom(); + } + }} + /> + +
+ + + {#if hasPropsError} +
+ + + + Server unavailable + + + {serverError()} + +
+ {/if} + +
+ chatStore.stopGeneration()} + showHelperText={false} + bind:uploadedFiles + /> +
+
+
+{:else if isServerLoading} + + +{:else} +
+
+
+

llama.cpp

+ +

+ {serverStore.props?.modalities?.audio + ? 'Record audio, type a message ' + : 'Type a message'} or upload files to get started +

+
+ + {#if hasPropsError} +
+ + + + Server unavailable + + + {serverError()} + +
+ {/if} + +
+ chatStore.stopGeneration()} + showHelperText={true} + bind:uploadedFiles + /> +
+
+
+{/if} + + + + + + + + + File Upload Error + + + Some files cannot be uploaded with the current model. + + + +
+ {#if fileErrorData.generallyUnsupported.length > 0} +
+

Unsupported File Types

+ +
+ {#each fileErrorData.generallyUnsupported as file (file.name)} +
+

+ {file.name} +

+ +

File type not supported

+
+ {/each} +
+
+ {/if} + + {#if fileErrorData.modalityUnsupported.length > 0} +
+
+ {#each fileErrorData.modalityUnsupported as file (file.name)} +
+

+ {file.name} +

+ +

+ {fileErrorData.modalityReasons[file.name] || 'Not supported by current model'} +

+
+ {/each} +
+
+ {/if} +
+ +
+

This model supports:

+ +

+ {fileErrorData.supportedTypes.join(', ')} +

+
+ + + (showFileErrorDialog = false)}> + Got it + + +
+
+
+ + (showDeleteDialog = false)} +/> + + { + if (!open) { + emptyFileNames = []; + } + }} +/> + + + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenDragOverlay.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenDragOverlay.svelte new file mode 100644 index 0000000..ab4adb2 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenDragOverlay.svelte @@ -0,0 +1,17 @@ + + +
+
+ + +

Attach a file

+ +

Drop your files here to upload

+
+
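The drag overlay above is rendered while files are dragged over the chat screen. A hedged sketch of the DOM wiring that typically drives such an overlay (handler names are hypothetical; only the standard DragEvent/DataTransfer APIs are assumed, not the actual ChatScreen implementation):

// Illustrative only: show the overlay during drag-over and collect dropped files.
let isDragOver = false;

function handleDragOver(event: DragEvent): void {
    event.preventDefault(); // required so the drop target accepts files
    isDragOver = true;
}

function handleDragLeave(): void {
    isDragOver = false;
}

function handleDrop(event: DragEvent): void {
    event.preventDefault();
    isDragOver = false;
    const files = Array.from(event.dataTransfer?.files ?? []);
    // hand the File objects to whatever upload path the app provides
    console.log('dropped:', files.map((file) => file.name));
}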
diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenHeader.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenHeader.svelte new file mode 100644 index 0000000..874140f --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenHeader.svelte @@ -0,0 +1,28 @@ + + +
+
+ +
+
+ + (settingsOpen = open)} /> diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenProcessingInfo.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenProcessingInfo.svelte new file mode 100644 index 0000000..a60ae9e --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreenProcessingInfo.svelte @@ -0,0 +1,120 @@ + + +
+
+ {#each processingDetails as detail (detail)} + {detail} + {/each} +
+
+ + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettings.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettings.svelte new file mode 100644 index 0000000..5a668aa --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettings.svelte @@ -0,0 +1,508 @@ + + +
+ + + + +
+
+ +
+ + +
+
+ {#each settingSections as section (section.title)} + + {/each} +
+
+ + +
+
+
+ + +
+
+ + + {#if currentSection.title === 'Import/Export'} + + {:else} +
+ +
+ {/if} +
+ +
+

Settings are saved in the browser's localStorage

+
+
+
+
+ + diff --git a/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsFields.svelte b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsFields.svelte new file mode 100644 index 0000000..a6f51f4 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/app/chat/ChatSettings/ChatSettingsFields.svelte @@ -0,0 +1,255 @@ + + +{#each fields as field (field.key)} +
+ {#if field.type === 'input'} + {@const paramInfo = getParameterSourceInfo(field.key)} + {@const currentValue = String(localConfig[field.key] ?? '')} + {@const propsDefault = paramInfo?.serverDefault} + {@const isCustomRealTime = (() => { + if (!paramInfo || propsDefault === undefined) return false; + + // Apply same rounding logic for real-time comparison + const inputValue = currentValue; + const numericInput = parseFloat(inputValue); + const normalizedInput = !isNaN(numericInput) + ? Math.round(numericInput * 1000000) / 1000000 + : inputValue; + const normalizedDefault = + typeof propsDefault === 'number' + ? Math.round(propsDefault * 1000000) / 1000000 + : propsDefault; + + return normalizedInput !== normalizedDefault; + })()} + +
+ + {#if isCustomRealTime} + + {/if} +
+ +
+ { + // Update local config immediately for real-time badge feedback + onConfigChange(field.key, e.currentTarget.value); + }} + placeholder={`Default: ${SETTING_CONFIG_DEFAULT[field.key] ?? 'none'}`} + class="w-full {isCustomRealTime ? 'pr-8' : ''}" + /> + {#if isCustomRealTime} + + {/if} +
+ {#if field.help || SETTING_CONFIG_INFO[field.key]} +

+ {@html field.help || SETTING_CONFIG_INFO[field.key]} +

+ {/if} + {:else if field.type === 'textarea'} + + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/index.ts b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/index.ts new file mode 100644 index 0000000..273d831 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/index.ts @@ -0,0 +1,21 @@ +import { Tooltip as TooltipPrimitive } from 'bits-ui'; +import Trigger from './tooltip-trigger.svelte'; +import Content from './tooltip-content.svelte'; + +const Root = TooltipPrimitive.Root; +const Provider = TooltipPrimitive.Provider; +const Portal = TooltipPrimitive.Portal; + +export { + Root, + Trigger, + Content, + Provider, + Portal, + // + Root as Tooltip, + Content as TooltipContent, + Trigger as TooltipTrigger, + Provider as TooltipProvider, + Portal as TooltipPortal +}; diff --git a/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-content.svelte b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-content.svelte new file mode 100644 index 0000000..72ea93a --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-content.svelte @@ -0,0 +1,47 @@ + + + + + {@render children?.()} + + {#snippet child({ props })} +
+ {/snippet} +
+
+
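The tooltip index module above re-exports the bits-ui primitives under both short and prefixed aliases, so either import style resolves to the same components. A small, assumed-typical usage sketch of the import side (TypeScript only; the composition itself happens in Svelte markup):

// Both forms are backed by the aliased re-exports in tooltip/index.ts.
import * as Tooltip from '$lib/components/ui/tooltip';
import { TooltipProvider, TooltipTrigger, TooltipContent } from '$lib/components/ui/tooltip';
// Conceptual nesting: Provider wraps the app, Root wraps each Trigger + Content pair.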
diff --git a/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-trigger.svelte b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-trigger.svelte new file mode 100644 index 0000000..5631d1b --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/ui/tooltip/tooltip-trigger.svelte @@ -0,0 +1,7 @@ + + + diff --git a/llama.cpp/tools/server/webui/src/lib/components/ui/utils.ts b/llama.cpp/tools/server/webui/src/lib/components/ui/utils.ts new file mode 100644 index 0000000..f92bfcb --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/components/ui/utils.ts @@ -0,0 +1,13 @@ +import { clsx, type ClassValue } from 'clsx'; +import { twMerge } from 'tailwind-merge'; + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type WithoutChild = T extends { child?: any } ? Omit : T; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type WithoutChildren = T extends { children?: any } ? Omit : T; +export type WithoutChildrenOrChild = WithoutChildren>; +export type WithElementRef = T & { ref?: U | null }; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/auto-scroll.ts b/llama.cpp/tools/server/webui/src/lib/constants/auto-scroll.ts new file mode 100644 index 0000000..098f435 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/auto-scroll.ts @@ -0,0 +1,3 @@ +export const AUTO_SCROLL_INTERVAL = 100; +export const INITIAL_SCROLL_DELAY = 50; +export const AUTO_SCROLL_AT_BOTTOM_THRESHOLD = 10; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/binary-detection.ts b/llama.cpp/tools/server/webui/src/lib/constants/binary-detection.ts new file mode 100644 index 0000000..a4440fd --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/binary-detection.ts @@ -0,0 +1,14 @@ +export interface BinaryDetectionOptions { + /** Number of characters to check from the beginning of the file */ + prefixLength: number; + /** Maximum ratio of suspicious characters allowed (0.0 to 1.0) */ + suspiciousCharThresholdRatio: number; + /** Maximum absolute number of null bytes allowed */ + maxAbsoluteNullBytes: number; +} + +export const DEFAULT_BINARY_DETECTION_OPTIONS: BinaryDetectionOptions = { + prefixLength: 1024 * 10, // Check the first 10KB of the string + suspiciousCharThresholdRatio: 0.15, // Allow up to 15% suspicious chars + maxAbsoluteNullBytes: 2 +}; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/default-context.ts b/llama.cpp/tools/server/webui/src/lib/constants/default-context.ts new file mode 100644 index 0000000..78f3111 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/default-context.ts @@ -0,0 +1 @@ +export const DEFAULT_CONTEXT = 4096; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/floating-ui-constraints.ts b/llama.cpp/tools/server/webui/src/lib/constants/floating-ui-constraints.ts new file mode 100644 index 0000000..003fc77 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/floating-ui-constraints.ts @@ -0,0 +1,2 @@ +export const VIEWPORT_GUTTER = 8; +export const MENU_OFFSET = 6; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/icons.ts b/llama.cpp/tools/server/webui/src/lib/constants/icons.ts new file mode 100644 index 0000000..1e88ab5 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/icons.ts @@ -0,0 +1,32 @@ +/** + * Icon mappings for file types and model modalities + * Centralized configuration to ensure 
consistent icon usage across the app + */ + +import { + File as FileIcon, + FileText as FileTextIcon, + Image as ImageIcon, + Eye as VisionIcon, + Mic as AudioIcon +} from '@lucide/svelte'; +import { FileTypeCategory, ModelModality } from '$lib/enums'; + +export const FILE_TYPE_ICONS = { + [FileTypeCategory.IMAGE]: ImageIcon, + [FileTypeCategory.AUDIO]: AudioIcon, + [FileTypeCategory.TEXT]: FileTextIcon, + [FileTypeCategory.PDF]: FileIcon +} as const; + +export const DEFAULT_FILE_ICON = FileIcon; + +export const MODALITY_ICONS = { + [ModelModality.VISION]: VisionIcon, + [ModelModality.AUDIO]: AudioIcon +} as const; + +export const MODALITY_LABELS = { + [ModelModality.VISION]: 'Vision', + [ModelModality.AUDIO]: 'Audio' +} as const; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/input-classes.ts b/llama.cpp/tools/server/webui/src/lib/constants/input-classes.ts new file mode 100644 index 0000000..a541cfc --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/input-classes.ts @@ -0,0 +1,6 @@ +export const INPUT_CLASSES = ` + bg-muted/70 dark:bg-muted/85 + border border-border/30 focus-within:border-border dark:border-border/20 dark:focus-within:border-border + outline-none + text-foreground +`; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/latex-protection.ts b/llama.cpp/tools/server/webui/src/lib/constants/latex-protection.ts new file mode 100644 index 0000000..27c88e7 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/latex-protection.ts @@ -0,0 +1,35 @@ +/** + * Matches common Markdown code blocks to exclude them from further processing (e.g. LaTeX). + * - Fenced: ```...``` + * - Inline: `...` (does NOT support nested backticks or multi-backtick syntax) + * + * Note: This pattern does not handle advanced cases like: + * `` `code with `backticks` `` or \\``...\\`` + */ +export const CODE_BLOCK_REGEXP = /(```[\s\S]*?```|`[^`\n]+`)/g; + +/** + * Matches LaTeX math delimiters \(...\) and \[...\] only when not preceded by a backslash (i.e., not escaped), + * while also capturing code blocks (```, `...`) so they can be skipped during processing. + * + * Uses negative lookbehind `(? = { + // Note: in order not to introduce breaking changes, please keep the same data type (number, string, etc) if you want to change the default value. Do not use null or undefined for default value. + // Do not use nested objects, keep it single level. Prefix the key if you need to group them. 
+ apiKey: '', + systemMessage: '', + showSystemMessage: true, + theme: 'system', + showThoughtInProgress: false, + showToolCalls: false, + disableReasoningFormat: false, + keepStatsVisible: false, + showMessageStats: true, + askForTitleConfirmation: false, + pasteLongTextToFileLen: 2500, + copyTextAttachmentsAsPlainText: false, + pdfAsImage: false, + disableAutoScroll: false, + renderUserContentAsMarkdown: false, + alwaysShowSidebarOnDesktop: false, + autoShowSidebarOnNewChat: true, + autoMicOnEmpty: false, + // make sure these default values are in sync with `common.h` + samplers: 'top_k;typ_p;top_p;min_p;temperature', + backend_sampling: false, + temperature: 0.8, + dynatemp_range: 0.0, + dynatemp_exponent: 1.0, + top_k: 40, + top_p: 0.95, + min_p: 0.05, + xtc_probability: 0.0, + xtc_threshold: 0.1, + typ_p: 1.0, + repeat_last_n: 64, + repeat_penalty: 1.0, + presence_penalty: 0.0, + frequency_penalty: 0.0, + dry_multiplier: 0.0, + dry_base: 1.75, + dry_allowed_length: 2, + dry_penalty_last_n: -1, + max_tokens: -1, + custom: '', // custom json-stringified object + // experimental features + pyInterpreterEnabled: false, + enableContinueGeneration: false +}; + +export const SETTING_CONFIG_INFO: Record = { + apiKey: 'Set the API Key if you are using --api-key option for the server.', + systemMessage: 'The starting message that defines how model should behave.', + showSystemMessage: 'Display the system message at the top of each conversation.', + theme: + 'Choose the color theme for the interface. You can choose between System (follows your device settings), Light, or Dark.', + pasteLongTextToFileLen: + 'On pasting long text, it will be converted to a file. You can control the file length by setting the value of this parameter. Value 0 means disable.', + copyTextAttachmentsAsPlainText: + 'When copying a message with text attachments, combine them into a single plain text string instead of a special format that can be pasted back as attachments.', + samplers: + 'The order at which samplers are applied, in simplified way. Default is "top_k;typ_p;top_p;min_p;temperature": top_k->typ_p->top_p->min_p->temperature', + backend_sampling: + 'Enable backend-based samplers. When enabled, supported samplers run on the accelerator backend for faster sampling.', + temperature: + 'Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.', + dynatemp_range: + 'Addon for the temperature sampler. The added value to the range of dynamic temperature, which adjusts probabilities by entropy of tokens.', + dynatemp_exponent: + 'Addon for the temperature sampler. Smoothes out the probability redistribution based on the most probable token.', + top_k: 'Keeps only k top tokens.', + top_p: 'Limits tokens to those that together have a cumulative probability of at least p', + min_p: + 'Limits tokens based on the minimum probability for a token to be considered, relative to the probability of the most likely token.', + xtc_probability: + 'XTC sampler cuts out top tokens; this parameter controls the chance of cutting tokens at all. 
0 disables XTC.', + xtc_threshold: + 'XTC sampler cuts out top tokens; this parameter controls the token probability that is required to cut that token.', + typ_p: 'Sorts and limits tokens based on the difference between log-probability and entropy.', + repeat_last_n: 'Last n tokens to consider for penalizing repetition', + repeat_penalty: 'Controls the repetition of token sequences in the generated text', + presence_penalty: 'Limits tokens based on whether they appear in the output or not.', + frequency_penalty: 'Limits tokens based on how often they appear in the output.', + dry_multiplier: + 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling multiplier.', + dry_base: + 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling base value.', + dry_allowed_length: + 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the allowed length for DRY sampling.', + dry_penalty_last_n: + 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets DRY penalty for the last n tokens.', + max_tokens: 'The maximum number of token per output. Use -1 for infinite (no limit).', + custom: 'Custom JSON parameters to send to the API. Must be valid JSON format.', + showThoughtInProgress: 'Expand thought process by default when generating messages.', + showToolCalls: + 'Display tool call labels and payloads from Harmony-compatible delta.tool_calls data below assistant messages.', + disableReasoningFormat: + 'Show raw LLM output without backend parsing and frontend Markdown rendering to inspect streaming across different models.', + keepStatsVisible: 'Keep processing statistics visible after generation finishes.', + showMessageStats: + 'Display generation statistics (tokens/second, token count, duration) below each assistant message.', + askForTitleConfirmation: + 'Ask for confirmation before automatically changing conversation title when editing the first message.', + pdfAsImage: + 'Parse PDF as image instead of text. Automatically falls back to text processing for non-vision models.', + disableAutoScroll: + 'Disable automatic scrolling while messages stream so you can control the viewport position manually.', + renderUserContentAsMarkdown: 'Render user messages using markdown formatting in the chat.', + alwaysShowSidebarOnDesktop: + 'Always keep the sidebar visible on desktop instead of auto-hiding it.', + autoShowSidebarOnNewChat: + 'Automatically show sidebar when starting a new chat. Disable to keep the sidebar hidden until you click on it.', + autoMicOnEmpty: + 'Automatically show microphone button instead of send button when textarea is empty for models with audio modality support.', + pyInterpreterEnabled: + 'Enable Python interpreter using Pyodide. Allows running Python code in markdown code blocks.', + enableContinueGeneration: + 'Enable "Continue" button for assistant messages. Currently works only with non-reasoning models.' 
+}; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/supported-file-types.ts b/llama.cpp/tools/server/webui/src/lib/constants/supported-file-types.ts new file mode 100644 index 0000000..0d955ad --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/supported-file-types.ts @@ -0,0 +1,217 @@ +/** + * Comprehensive dictionary of all supported file types in webui + * Organized by category with TypeScript enums for better type safety + */ + +import { + FileExtensionAudio, + FileExtensionImage, + FileExtensionPdf, + FileExtensionText, + FileTypeAudio, + FileTypeImage, + FileTypePdf, + FileTypeText, + MimeTypeAudio, + MimeTypeImage, + MimeTypeApplication, + MimeTypeText +} from '$lib/enums'; + +// File type configuration using enums +export const AUDIO_FILE_TYPES = { + [FileTypeAudio.MP3]: { + extensions: [FileExtensionAudio.MP3], + mimeTypes: [MimeTypeAudio.MP3_MPEG, MimeTypeAudio.MP3] + }, + [FileTypeAudio.WAV]: { + extensions: [FileExtensionAudio.WAV], + mimeTypes: [MimeTypeAudio.WAV] + } +} as const; + +export const IMAGE_FILE_TYPES = { + [FileTypeImage.JPEG]: { + extensions: [FileExtensionImage.JPG, FileExtensionImage.JPEG], + mimeTypes: [MimeTypeImage.JPEG] + }, + [FileTypeImage.PNG]: { + extensions: [FileExtensionImage.PNG], + mimeTypes: [MimeTypeImage.PNG] + }, + [FileTypeImage.GIF]: { + extensions: [FileExtensionImage.GIF], + mimeTypes: [MimeTypeImage.GIF] + }, + [FileTypeImage.WEBP]: { + extensions: [FileExtensionImage.WEBP], + mimeTypes: [MimeTypeImage.WEBP] + }, + [FileTypeImage.SVG]: { + extensions: [FileExtensionImage.SVG], + mimeTypes: [MimeTypeImage.SVG] + } +} as const; + +export const PDF_FILE_TYPES = { + [FileTypePdf.PDF]: { + extensions: [FileExtensionPdf.PDF], + mimeTypes: [MimeTypeApplication.PDF] + } +} as const; + +export const TEXT_FILE_TYPES = { + [FileTypeText.PLAIN_TEXT]: { + extensions: [FileExtensionText.TXT], + mimeTypes: [MimeTypeText.PLAIN] + }, + [FileTypeText.MARKDOWN]: { + extensions: [FileExtensionText.MD], + mimeTypes: [MimeTypeText.MARKDOWN] + }, + [FileTypeText.ASCIIDOC]: { + extensions: [FileExtensionText.ADOC], + mimeTypes: [MimeTypeText.ASCIIDOC] + }, + [FileTypeText.JAVASCRIPT]: { + extensions: [FileExtensionText.JS], + mimeTypes: [MimeTypeText.JAVASCRIPT, MimeTypeText.JAVASCRIPT_APP] + }, + [FileTypeText.TYPESCRIPT]: { + extensions: [FileExtensionText.TS], + mimeTypes: [MimeTypeText.TYPESCRIPT] + }, + [FileTypeText.JSX]: { + extensions: [FileExtensionText.JSX], + mimeTypes: [MimeTypeText.JSX] + }, + [FileTypeText.TSX]: { + extensions: [FileExtensionText.TSX], + mimeTypes: [MimeTypeText.TSX] + }, + [FileTypeText.CSS]: { + extensions: [FileExtensionText.CSS], + mimeTypes: [MimeTypeText.CSS] + }, + [FileTypeText.HTML]: { + extensions: [FileExtensionText.HTML, FileExtensionText.HTM], + mimeTypes: [MimeTypeText.HTML] + }, + [FileTypeText.JSON]: { + extensions: [FileExtensionText.JSON], + mimeTypes: [MimeTypeText.JSON] + }, + [FileTypeText.XML]: { + extensions: [FileExtensionText.XML], + mimeTypes: [MimeTypeText.XML_TEXT, MimeTypeText.XML_APP] + }, + [FileTypeText.YAML]: { + extensions: [FileExtensionText.YAML, FileExtensionText.YML], + mimeTypes: [MimeTypeText.YAML_TEXT, MimeTypeText.YAML_APP] + }, + [FileTypeText.CSV]: { + extensions: [FileExtensionText.CSV], + mimeTypes: [MimeTypeText.CSV] + }, + [FileTypeText.LOG]: { + extensions: [FileExtensionText.LOG], + mimeTypes: [MimeTypeText.PLAIN] + }, + [FileTypeText.PYTHON]: { + extensions: [FileExtensionText.PY], + mimeTypes: [MimeTypeText.PYTHON] + }, + [FileTypeText.JAVA]: { + 
extensions: [FileExtensionText.JAVA], + mimeTypes: [MimeTypeText.JAVA] + }, + [FileTypeText.CPP]: { + extensions: [ + FileExtensionText.CPP, + FileExtensionText.C, + FileExtensionText.H, + FileExtensionText.HPP + ], + mimeTypes: [MimeTypeText.CPP_SRC, MimeTypeText.CPP_HDR, MimeTypeText.C_SRC, MimeTypeText.C_HDR] + }, + [FileTypeText.PHP]: { + extensions: [FileExtensionText.PHP], + mimeTypes: [MimeTypeText.PHP] + }, + [FileTypeText.RUBY]: { + extensions: [FileExtensionText.RB], + mimeTypes: [MimeTypeText.RUBY] + }, + [FileTypeText.GO]: { + extensions: [FileExtensionText.GO], + mimeTypes: [MimeTypeText.GO] + }, + [FileTypeText.RUST]: { + extensions: [FileExtensionText.RS], + mimeTypes: [MimeTypeText.RUST] + }, + [FileTypeText.SHELL]: { + extensions: [FileExtensionText.SH, FileExtensionText.BAT], + mimeTypes: [MimeTypeText.SHELL, MimeTypeText.BAT] + }, + [FileTypeText.SQL]: { + extensions: [FileExtensionText.SQL], + mimeTypes: [MimeTypeText.SQL] + }, + [FileTypeText.R]: { + extensions: [FileExtensionText.R], + mimeTypes: [MimeTypeText.R] + }, + [FileTypeText.SCALA]: { + extensions: [FileExtensionText.SCALA], + mimeTypes: [MimeTypeText.SCALA] + }, + [FileTypeText.KOTLIN]: { + extensions: [FileExtensionText.KT], + mimeTypes: [MimeTypeText.KOTLIN] + }, + [FileTypeText.SWIFT]: { + extensions: [FileExtensionText.SWIFT], + mimeTypes: [MimeTypeText.SWIFT] + }, + [FileTypeText.DART]: { + extensions: [FileExtensionText.DART], + mimeTypes: [MimeTypeText.DART] + }, + [FileTypeText.VUE]: { + extensions: [FileExtensionText.VUE], + mimeTypes: [MimeTypeText.VUE] + }, + [FileTypeText.SVELTE]: { + extensions: [FileExtensionText.SVELTE], + mimeTypes: [MimeTypeText.SVELTE] + }, + [FileTypeText.LATEX]: { + extensions: [FileExtensionText.TEX], + mimeTypes: [MimeTypeText.LATEX, MimeTypeText.TEX, MimeTypeText.TEX_APP] + }, + [FileTypeText.BIBTEX]: { + extensions: [FileExtensionText.BIB], + mimeTypes: [MimeTypeText.BIBTEX] + }, + [FileTypeText.CUDA]: { + extensions: [FileExtensionText.CU, FileExtensionText.CUH], + mimeTypes: [MimeTypeText.CUDA] + }, + [FileTypeText.VULKAN]: { + extensions: [FileExtensionText.COMP], + mimeTypes: [MimeTypeText.PLAIN] + }, + [FileTypeText.HASKELL]: { + extensions: [FileExtensionText.HS], + mimeTypes: [MimeTypeText.HASKELL] + }, + [FileTypeText.CSHARP]: { + extensions: [FileExtensionText.CS], + mimeTypes: [MimeTypeText.CSHARP] + }, + [FileTypeText.PROPERTIES]: { + extensions: [FileExtensionText.PROPERTIES], + mimeTypes: [MimeTypeText.PROPERTIES] + } +} as const; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/table-html-restorer.ts b/llama.cpp/tools/server/webui/src/lib/constants/table-html-restorer.ts new file mode 100644 index 0000000..e5d5b12 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/table-html-restorer.ts @@ -0,0 +1,20 @@ +/** + * Matches
<br>, <br/>, <br /> tags (case-insensitive).
+ * Used to detect line breaks in table cell text content.
+ */
+export const BR_PATTERN = /<br\s*\/?>/gi;
+
+/**
+ * Matches a complete <ul>...</ul> block.
+ * Captures the inner content (group 1) for further <li> extraction.
+ * Case-insensitive, allows multiline content.
+ */
+export const LIST_PATTERN = /^<ul>([\s\S]*)<\/ul>$/i;
+
+/**
+ * Matches individual <li>...</li> elements within a list.
+ * Captures the inner content (group 1) of each list item.
+ * Non-greedy to handle multiple consecutive items.
+ * Case-insensitive, allows multiline content.
+ */
+export const LI_PATTERN = /
    • ([\s\S]*?)<\/li>/gi; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/tooltip-config.ts b/llama.cpp/tools/server/webui/src/lib/constants/tooltip-config.ts new file mode 100644 index 0000000..3c30c8c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/tooltip-config.ts @@ -0,0 +1 @@ +export const TOOLTIP_DELAY_DURATION = 100; diff --git a/llama.cpp/tools/server/webui/src/lib/constants/viewport.ts b/llama.cpp/tools/server/webui/src/lib/constants/viewport.ts new file mode 100644 index 0000000..26e202c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/constants/viewport.ts @@ -0,0 +1 @@ +export const DEFAULT_MOBILE_BREAKPOINT = 768; diff --git a/llama.cpp/tools/server/webui/src/lib/enums/attachment.ts b/llama.cpp/tools/server/webui/src/lib/enums/attachment.ts new file mode 100644 index 0000000..7c7d0da --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/attachment.ts @@ -0,0 +1,10 @@ +/** + * Attachment type enum for database message extras + */ +export enum AttachmentType { + AUDIO = 'AUDIO', + IMAGE = 'IMAGE', + PDF = 'PDF', + TEXT = 'TEXT', + LEGACY_CONTEXT = 'context' // Legacy attachment type for backward compatibility +} diff --git a/llama.cpp/tools/server/webui/src/lib/enums/chat.ts b/llama.cpp/tools/server/webui/src/lib/enums/chat.ts new file mode 100644 index 0000000..2b9eb7b --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/chat.ts @@ -0,0 +1,4 @@ +export enum ChatMessageStatsView { + GENERATION = 'generation', + READING = 'reading' +} diff --git a/llama.cpp/tools/server/webui/src/lib/enums/files.ts b/llama.cpp/tools/server/webui/src/lib/enums/files.ts new file mode 100644 index 0000000..a4f079d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/files.ts @@ -0,0 +1,206 @@ +/** + * Comprehensive dictionary of all supported file types in webui + * Organized by category with TypeScript enums for better type safety + */ + +// File type category enum +export enum FileTypeCategory { + IMAGE = 'image', + AUDIO = 'audio', + PDF = 'pdf', + TEXT = 'text' +} + +// Specific file type enums for each category +export enum FileTypeImage { + JPEG = 'jpeg', + PNG = 'png', + GIF = 'gif', + WEBP = 'webp', + SVG = 'svg' +} + +export enum FileTypeAudio { + MP3 = 'mp3', + WAV = 'wav', + WEBM = 'webm' +} + +export enum FileTypePdf { + PDF = 'pdf' +} + +export enum FileTypeText { + PLAIN_TEXT = 'plainText', + MARKDOWN = 'md', + ASCIIDOC = 'asciidoc', + JAVASCRIPT = 'js', + TYPESCRIPT = 'ts', + JSX = 'jsx', + TSX = 'tsx', + CSS = 'css', + HTML = 'html', + JSON = 'json', + XML = 'xml', + YAML = 'yaml', + CSV = 'csv', + LOG = 'log', + PYTHON = 'python', + JAVA = 'java', + CPP = 'cpp', + PHP = 'php', + RUBY = 'ruby', + GO = 'go', + RUST = 'rust', + SHELL = 'shell', + SQL = 'sql', + R = 'r', + SCALA = 'scala', + KOTLIN = 'kotlin', + SWIFT = 'swift', + DART = 'dart', + VUE = 'vue', + SVELTE = 'svelte', + LATEX = 'latex', + BIBTEX = 'bibtex', + CUDA = 'cuda', + VULKAN = 'vulkan', + HASKELL = 'haskell', + CSHARP = 'csharp', + PROPERTIES = 'properties' +} + +// File extension enums +export enum FileExtensionImage { + JPG = '.jpg', + JPEG = '.jpeg', + PNG = '.png', + GIF = '.gif', + WEBP = '.webp', + SVG = '.svg' +} + +export enum FileExtensionAudio { + MP3 = '.mp3', + WAV = '.wav' +} + +export enum FileExtensionPdf { + PDF = '.pdf' +} + +export enum FileExtensionText { + TXT = '.txt', + MD = '.md', + ADOC = '.adoc', + JS = '.js', + TS = '.ts', + JSX = '.jsx', + TSX = '.tsx', + CSS = '.css', + HTML = '.html', + HTM = '.htm', + JSON = '.json', 
+ XML = '.xml', + YAML = '.yaml', + YML = '.yml', + CSV = '.csv', + LOG = '.log', + PY = '.py', + JAVA = '.java', + CPP = '.cpp', + C = '.c', + H = '.h', + PHP = '.php', + RB = '.rb', + GO = '.go', + RS = '.rs', + SH = '.sh', + BAT = '.bat', + SQL = '.sql', + R = '.r', + SCALA = '.scala', + KT = '.kt', + SWIFT = '.swift', + DART = '.dart', + VUE = '.vue', + SVELTE = '.svelte', + TEX = '.tex', + BIB = '.bib', + CU = '.cu', + CUH = '.cuh', + COMP = '.comp', + HPP = '.hpp', + HS = '.hs', + PROPERTIES = '.properties', + CS = '.cs' +} + +// MIME type enums +export enum MimeTypeApplication { + PDF = 'application/pdf' +} + +export enum MimeTypeAudio { + MP3_MPEG = 'audio/mpeg', + MP3 = 'audio/mp3', + MP4 = 'audio/mp4', + WAV = 'audio/wav', + WEBM = 'audio/webm', + WEBM_OPUS = 'audio/webm;codecs=opus' +} + +export enum MimeTypeImage { + JPEG = 'image/jpeg', + PNG = 'image/png', + GIF = 'image/gif', + WEBP = 'image/webp', + SVG = 'image/svg+xml' +} + +export enum MimeTypeText { + PLAIN = 'text/plain', + MARKDOWN = 'text/markdown', + ASCIIDOC = 'text/asciidoc', + JAVASCRIPT = 'text/javascript', + JAVASCRIPT_APP = 'application/javascript', + TYPESCRIPT = 'text/typescript', + JSX = 'text/jsx', + TSX = 'text/tsx', + CSS = 'text/css', + HTML = 'text/html', + JSON = 'application/json', + XML_TEXT = 'text/xml', + XML_APP = 'application/xml', + YAML_TEXT = 'text/yaml', + YAML_APP = 'application/yaml', + CSV = 'text/csv', + PYTHON = 'text/x-python', + JAVA = 'text/x-java-source', + CPP_HDR = 'text/x-c++hdr', + CPP_SRC = 'text/x-c++src', + CSHARP = 'text/x-csharp', + HASKELL = 'text/x-haskell', + C_SRC = 'text/x-csrc', + C_HDR = 'text/x-chdr', + PHP = 'text/x-php', + RUBY = 'text/x-ruby', + GO = 'text/x-go', + RUST = 'text/x-rust', + SHELL = 'text/x-shellscript', + BAT = 'application/x-bat', + SQL = 'text/x-sql', + R = 'text/x-r', + SCALA = 'text/x-scala', + KOTLIN = 'text/x-kotlin', + SWIFT = 'text/x-swift', + DART = 'text/x-dart', + VUE = 'text/x-vue', + SVELTE = 'text/x-svelte', + TEX = 'text/x-tex', + TEX_APP = 'application/x-tex', + LATEX = 'application/x-latex', + BIBTEX = 'text/x-bibtex', + CUDA = 'text/x-cuda', + PROPERTIES = 'text/properties' +} diff --git a/llama.cpp/tools/server/webui/src/lib/enums/index.ts b/llama.cpp/tools/server/webui/src/lib/enums/index.ts new file mode 100644 index 0000000..83c86ca --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/index.ts @@ -0,0 +1,23 @@ +export { AttachmentType } from './attachment'; + +export { ChatMessageStatsView } from './chat'; + +export { + FileTypeCategory, + FileTypeImage, + FileTypeAudio, + FileTypePdf, + FileTypeText, + FileExtensionImage, + FileExtensionAudio, + FileExtensionPdf, + FileExtensionText, + MimeTypeApplication, + MimeTypeAudio, + MimeTypeImage, + MimeTypeText +} from './files'; + +export { ModelModality } from './model'; + +export { ServerRole, ServerModelStatus } from './server'; diff --git a/llama.cpp/tools/server/webui/src/lib/enums/model.ts b/llama.cpp/tools/server/webui/src/lib/enums/model.ts new file mode 100644 index 0000000..7729ecf --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/model.ts @@ -0,0 +1,5 @@ +export enum ModelModality { + TEXT = 'TEXT', + AUDIO = 'AUDIO', + VISION = 'VISION' +} diff --git a/llama.cpp/tools/server/webui/src/lib/enums/server.ts b/llama.cpp/tools/server/webui/src/lib/enums/server.ts new file mode 100644 index 0000000..7f30eab --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/enums/server.ts @@ -0,0 +1,20 @@ +/** + * Server role enum - used for single/multi-model 
mode + */ +export enum ServerRole { + /** Single model mode - server running with a specific model loaded */ + MODEL = 'model', + /** Router mode - server managing multiple model instances */ + ROUTER = 'router' +} + +/** + * Model status enum - matches tools/server/server-models.h from C++ server + * Used as the `value` field in the status object from /models endpoint + */ +export enum ServerModelStatus { + UNLOADED = 'unloaded', + LOADING = 'loading', + LOADED = 'loaded', + FAILED = 'failed' +} diff --git a/llama.cpp/tools/server/webui/src/lib/hooks/is-mobile.svelte.ts b/llama.cpp/tools/server/webui/src/lib/hooks/is-mobile.svelte.ts new file mode 100644 index 0000000..22c74f4 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/hooks/is-mobile.svelte.ts @@ -0,0 +1,8 @@ +import { DEFAULT_MOBILE_BREAKPOINT } from '$lib/constants/viewport'; +import { MediaQuery } from 'svelte/reactivity'; + +export class IsMobile extends MediaQuery { + constructor(breakpoint: number = DEFAULT_MOBILE_BREAKPOINT) { + super(`max-width: ${breakpoint - 1}px`); + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/hooks/use-model-change-validation.svelte.ts b/llama.cpp/tools/server/webui/src/lib/hooks/use-model-change-validation.svelte.ts new file mode 100644 index 0000000..bb66615 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/hooks/use-model-change-validation.svelte.ts @@ -0,0 +1,118 @@ +import { modelsStore } from '$lib/stores/models.svelte'; +import { isRouterMode } from '$lib/stores/server.svelte'; +import { toast } from 'svelte-sonner'; + +interface UseModelChangeValidationOptions { + /** + * Function to get required modalities for validation. + * For ChatForm: () => usedModalities() - all messages + * For ChatMessageAssistant: () => getModalitiesUpToMessage(messageId) - messages before + */ + getRequiredModalities: () => ModelModalities; + + /** + * Optional callback to execute after successful validation. + * For ChatForm: undefined - just select model + * For ChatMessageAssistant: (modelName) => onRegenerate(modelName) + */ + onSuccess?: (modelName: string) => void; + + /** + * Optional callback for rollback on validation failure. 
+ * For ChatForm: (previousId) => selectModelById(previousId) + * For ChatMessageAssistant: undefined - no rollback needed + */ + onValidationFailure?: (previousModelId: string | null) => Promise; +} + +export function useModelChangeValidation(options: UseModelChangeValidationOptions) { + const { getRequiredModalities, onSuccess, onValidationFailure } = options; + + let previousSelectedModelId: string | null = null; + const isRouter = $derived(isRouterMode()); + + async function handleModelChange(modelId: string, modelName: string): Promise { + try { + // Store previous selection for potential rollback + if (onValidationFailure) { + previousSelectedModelId = modelsStore.selectedModelId; + } + + // Load model if not already loaded (router mode only) + let hasLoadedModel = false; + const isModelLoadedBefore = modelsStore.isModelLoaded(modelName); + + if (isRouter && !isModelLoadedBefore) { + try { + await modelsStore.loadModel(modelName); + hasLoadedModel = true; + } catch { + toast.error(`Failed to load model "${modelName}"`); + return false; + } + } + + // Fetch model props to validate modalities + const props = await modelsStore.fetchModelProps(modelName); + + if (props?.modalities) { + const requiredModalities = getRequiredModalities(); + + // Check if model supports required modalities + const missingModalities: string[] = []; + if (requiredModalities.vision && !props.modalities.vision) { + missingModalities.push('vision'); + } + if (requiredModalities.audio && !props.modalities.audio) { + missingModalities.push('audio'); + } + + if (missingModalities.length > 0) { + toast.error( + `Model "${modelName}" doesn't support required modalities: ${missingModalities.join(', ')}. Please select a different model.` + ); + + // Unload the model if we just loaded it + if (isRouter && hasLoadedModel) { + try { + await modelsStore.unloadModel(modelName); + } catch (error) { + console.error('Failed to unload incompatible model:', error); + } + } + + // Execute rollback callback if provided + if (onValidationFailure && previousSelectedModelId) { + await onValidationFailure(previousSelectedModelId); + } + + return false; + } + } + + // Select the model (validation passed) + await modelsStore.selectModelById(modelId); + + // Execute success callback if provided + if (onSuccess) { + onSuccess(modelName); + } + + return true; + } catch (error) { + console.error('Failed to change model:', error); + toast.error('Failed to validate model capabilities'); + + // Execute rollback callback on error if provided + if (onValidationFailure && previousSelectedModelId) { + await onValidationFailure(previousSelectedModelId); + } + + return false; + } + } + + return { + handleModelChange + }; +} diff --git a/llama.cpp/tools/server/webui/src/lib/hooks/use-processing-state.svelte.ts b/llama.cpp/tools/server/webui/src/lib/hooks/use-processing-state.svelte.ts new file mode 100644 index 0000000..c06cf28 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/hooks/use-processing-state.svelte.ts @@ -0,0 +1,262 @@ +import { activeProcessingState } from '$lib/stores/chat.svelte'; +import { config } from '$lib/stores/settings.svelte'; + +export interface LiveProcessingStats { + tokensProcessed: number; + totalTokens: number; + timeMs: number; + tokensPerSecond: number; + etaSecs?: number; +} + +export interface LiveGenerationStats { + tokensGenerated: number; + timeMs: number; + tokensPerSecond: number; +} + +export interface UseProcessingStateReturn { + readonly processingState: ApiProcessingState | null; + 
getProcessingDetails(): string[]; + getProcessingMessage(): string; + getPromptProgressText(): string | null; + getLiveProcessingStats(): LiveProcessingStats | null; + getLiveGenerationStats(): LiveGenerationStats | null; + shouldShowDetails(): boolean; + startMonitoring(): void; + stopMonitoring(): void; +} + +/** + * useProcessingState - Reactive processing state hook + * + * This hook provides reactive access to the processing state of the server. + * It directly reads from chatStore's reactive state and provides + * formatted processing details for UI display. + * + * **Features:** + * - Real-time processing state via direct reactive state binding + * - Context and output token tracking + * - Tokens per second calculation + * - Automatic updates when streaming data arrives + * - Supports multiple concurrent conversations + * + * @returns Hook interface with processing state and control methods + */ +export function useProcessingState(): UseProcessingStateReturn { + let isMonitoring = $state(false); + let lastKnownState = $state(null); + let lastKnownProcessingStats = $state(null); + + // Derive processing state reactively from chatStore's direct state + const processingState = $derived.by(() => { + if (!isMonitoring) { + return lastKnownState; + } + // Read directly from the reactive state export + return activeProcessingState(); + }); + + // Track last known state for keepStatsVisible functionality + $effect(() => { + if (processingState && isMonitoring) { + lastKnownState = processingState; + } + }); + + // Track last known processing stats for when promptProgress disappears + $effect(() => { + if (processingState?.promptProgress) { + const { processed, total, time_ms, cache } = processingState.promptProgress; + const actualProcessed = processed - cache; + const actualTotal = total - cache; + + if (actualProcessed > 0 && time_ms > 0) { + const tokensPerSecond = actualProcessed / (time_ms / 1000); + lastKnownProcessingStats = { + tokensProcessed: actualProcessed, + totalTokens: actualTotal, + timeMs: time_ms, + tokensPerSecond + }; + } + } + }); + + function getETASecs(done: number, total: number, elapsedMs: number): number | undefined { + const elapsedSecs = elapsedMs / 1000; + const progressETASecs = + done === 0 || elapsedSecs < 0.5 + ? 
undefined // can be the case for the 0% progress report + : elapsedSecs * (total / done - 1); + return progressETASecs; + } + + function startMonitoring(): void { + if (isMonitoring) return; + isMonitoring = true; + } + + function stopMonitoring(): void { + if (!isMonitoring) return; + isMonitoring = false; + + // Only clear last known state if keepStatsVisible is disabled + const currentConfig = config(); + if (!currentConfig.keepStatsVisible) { + lastKnownState = null; + lastKnownProcessingStats = null; + } + } + + function getProcessingMessage(): string { + if (!processingState) { + return 'Processing...'; + } + + switch (processingState.status) { + case 'initializing': + return 'Initializing...'; + case 'preparing': + if (processingState.progressPercent !== undefined) { + return `Processing (${processingState.progressPercent}%)`; + } + return 'Preparing response...'; + case 'generating': + return ''; + default: + return 'Processing...'; + } + } + + function getProcessingDetails(): string[] { + // Use current processing state or fall back to last known state + const stateToUse = processingState || lastKnownState; + if (!stateToUse) { + return []; + } + + const details: string[] = []; + + // Always show context info when we have valid data + if (stateToUse.contextUsed >= 0 && stateToUse.contextTotal > 0) { + const contextPercent = Math.round((stateToUse.contextUsed / stateToUse.contextTotal) * 100); + + details.push( + `Context: ${stateToUse.contextUsed}/${stateToUse.contextTotal} (${contextPercent}%)` + ); + } + + if (stateToUse.outputTokensUsed > 0) { + // Handle infinite max_tokens (-1) case + if (stateToUse.outputTokensMax <= 0) { + details.push(`Output: ${stateToUse.outputTokensUsed}/∞`); + } else { + const outputPercent = Math.round( + (stateToUse.outputTokensUsed / stateToUse.outputTokensMax) * 100 + ); + + details.push( + `Output: ${stateToUse.outputTokensUsed}/${stateToUse.outputTokensMax} (${outputPercent}%)` + ); + } + } + + if (stateToUse.tokensPerSecond && stateToUse.tokensPerSecond > 0) { + details.push(`${stateToUse.tokensPerSecond.toFixed(1)} tokens/sec`); + } + + if (stateToUse.speculative) { + details.push('Speculative decoding enabled'); + } + + return details; + } + + function shouldShowDetails(): boolean { + return processingState !== null && processingState.status !== 'idle'; + } + + /** + * Returns a short progress message with percent + */ + function getPromptProgressText(): string | null { + if (!processingState?.promptProgress) return null; + + const { processed, total, cache } = processingState.promptProgress; + + const actualProcessed = processed - cache; + const actualTotal = total - cache; + const percent = Math.round((actualProcessed / actualTotal) * 100); + const eta = getETASecs(actualProcessed, actualTotal, processingState.promptProgress.time_ms); + + if (eta !== undefined) { + const etaSecs = Math.ceil(eta); + return `Processing ${percent}% (ETA: ${etaSecs}s)`; + } + + return `Processing ${percent}%`; + } + + /** + * Returns live processing statistics for display (prompt processing phase) + * Returns last known stats when promptProgress becomes unavailable + */ + function getLiveProcessingStats(): LiveProcessingStats | null { + if (processingState?.promptProgress) { + const { processed, total, time_ms, cache } = processingState.promptProgress; + + const actualProcessed = processed - cache; + const actualTotal = total - cache; + + if (actualProcessed > 0 && time_ms > 0) { + const tokensPerSecond = actualProcessed / (time_ms / 1000); + + return { + 
tokensProcessed: actualProcessed, + totalTokens: actualTotal, + timeMs: time_ms, + tokensPerSecond + }; + } + } + + // Return last known stats if promptProgress is no longer available + return lastKnownProcessingStats; + } + + /** + * Returns live generation statistics for display (token generation phase) + */ + function getLiveGenerationStats(): LiveGenerationStats | null { + if (!processingState) return null; + + const { tokensDecoded, tokensPerSecond } = processingState; + + if (tokensDecoded <= 0) return null; + + // Calculate time from tokens and speed + const timeMs = + tokensPerSecond && tokensPerSecond > 0 ? (tokensDecoded / tokensPerSecond) * 1000 : 0; + + return { + tokensGenerated: tokensDecoded, + timeMs, + tokensPerSecond: tokensPerSecond || 0 + }; + } + + return { + get processingState() { + return processingState; + }, + getProcessingDetails, + getProcessingMessage, + getPromptProgressText, + getLiveProcessingStats, + getLiveGenerationStats, + shouldShowDetails, + startMonitoring, + stopMonitoring + }; +} diff --git a/llama.cpp/tools/server/webui/src/lib/markdown/enhance-code-blocks.ts b/llama.cpp/tools/server/webui/src/lib/markdown/enhance-code-blocks.ts new file mode 100644 index 0000000..6f0e03e --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/markdown/enhance-code-blocks.ts @@ -0,0 +1,162 @@ +/** + * Rehype plugin to enhance code blocks with wrapper, header, and action buttons. + * + * Wraps
<pre> elements with a container that includes:
      + * - Language label
      + * - Copy button
      + * - Preview button (for HTML code blocks)
      + *
      + * This operates directly on the HAST tree for better performance,
      + * avoiding the need to stringify and re-parse HTML.
      + */
      +
      +import type { Plugin } from 'unified';
      +import type { Root, Element, ElementContent } from 'hast';
      +import { visit } from 'unist-util-visit';
      +
      +declare global {
      +	interface Window {
      +		idxCodeBlock?: number;
      +	}
      +}
      +
      +const COPY_ICON_SVG = ``;
      +
      +const PREVIEW_ICON_SVG = ``;
      +
      +/**
      + * Creates an SVG element node from raw SVG string.
      + * Since we can't parse HTML in HAST directly, we use the raw property.
      + */
      +function createRawHtmlElement(html: string): Element {
      +	return {
      +		type: 'element',
      +		tagName: 'span',
      +		properties: {},
      +		children: [{ type: 'raw', value: html } as unknown as ElementContent]
      +	};
      +}
      +
      +function createCopyButton(codeId: string): Element {
      +	return {
      +		type: 'element',
      +		tagName: 'button',
      +		properties: {
      +			className: ['copy-code-btn'],
      +			'data-code-id': codeId,
      +			title: 'Copy code',
      +			type: 'button'
      +		},
      +		children: [createRawHtmlElement(COPY_ICON_SVG)]
      +	};
      +}
      +
      +function createPreviewButton(codeId: string): Element {
      +	return {
      +		type: 'element',
      +		tagName: 'button',
      +		properties: {
      +			className: ['preview-code-btn'],
      +			'data-code-id': codeId,
      +			title: 'Preview code',
      +			type: 'button'
      +		},
      +		children: [createRawHtmlElement(PREVIEW_ICON_SVG)]
      +	};
      +}
      +
      +function createHeader(language: string, codeId: string): Element {
      +	const actions: Element[] = [createCopyButton(codeId)];
      +
      +	if (language.toLowerCase() === 'html') {
      +		actions.push(createPreviewButton(codeId));
      +	}
      +
      +	return {
      +		type: 'element',
      +		tagName: 'div',
      +		properties: { className: ['code-block-header'] },
      +		children: [
      +			{
      +				type: 'element',
      +				tagName: 'span',
      +				properties: { className: ['code-language'] },
      +				children: [{ type: 'text', value: language }]
      +			},
      +			{
      +				type: 'element',
      +				tagName: 'div',
      +				properties: { className: ['code-block-actions'] },
      +				children: actions
      +			}
      +		]
      +	};
      +}
      +
      +function createWrapper(header: Element, preElement: Element): Element {
      +	return {
      +		type: 'element',
      +		tagName: 'div',
      +		properties: { className: ['code-block-wrapper'] },
      +		children: [header, preElement]
      +	};
      +}
      +
      +function extractLanguage(codeElement: Element): string {
      +	const className = codeElement.properties?.className;
      +	if (!Array.isArray(className)) return 'text';
      +
      +	for (const cls of className) {
      +		if (typeof cls === 'string' && cls.startsWith('language-')) {
      +			return cls.replace('language-', '');
      +		}
      +	}
      +
      +	return 'text';
      +}
      +
      +/**
      + * Generates a unique code block ID using a global counter.
      + */
      +function generateCodeId(): string {
      +	if (typeof window !== 'undefined') {
      +		return `code-${(window.idxCodeBlock = (window.idxCodeBlock ?? 0) + 1)}`;
      +	}
      +	// Fallback for SSR - use timestamp + random
      +	return `code-${Date.now()}-${Math.random().toString(36).slice(2, 7)}`;
      +}
      +
      +/**
      + * Rehype plugin to enhance code blocks with wrapper, header, and action buttons.
+ * This plugin wraps <pre> elements with a container that includes:
      + * - Language label
      + * - Copy button
      + * - Preview button (for HTML code blocks)
      + */
      +export const rehypeEnhanceCodeBlocks: Plugin<[], Root> = () => {
      +	return (tree: Root) => {
      +		visit(tree, 'element', (node: Element, index, parent) => {
      +			if (node.tagName !== 'pre' || !parent || index === undefined) return;
      +
      +			const codeElement = node.children.find(
      +				(child): child is Element => child.type === 'element' && child.tagName === 'code'
      +			);
      +
      +			if (!codeElement) return;
      +
      +			const language = extractLanguage(codeElement);
      +			const codeId = generateCodeId();
      +
      +			codeElement.properties = {
      +				...codeElement.properties,
      +				'data-code-id': codeId
      +			};
      +
      +			const header = createHeader(language, codeId);
      +			const wrapper = createWrapper(header, node);
      +
      +			// Replace pre with wrapper in parent
      +			(parent.children as ElementContent[])[index] = wrapper;
      +		});
      +	};
      +};
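
Since rehypeEnhanceCodeBlocks operates purely on the HAST tree, it can be dropped into a unified pipeline like any other rehype plugin. Below is a minimal sketch of such a pipeline, assuming only the standard unified/remark/rehype packages; the plugin order and options are illustrative, and the webui's actual renderer wires additional plugins and may handle the raw icon nodes differently:

  import { unified } from 'unified';
  import remarkParse from 'remark-parse';
  import remarkRehype from 'remark-rehype';
  import rehypeStringify from 'rehype-stringify';
  import { rehypeEnhanceCodeBlocks } from '$lib/markdown/enhance-code-blocks';

  // Sketch: parse Markdown, convert to HAST, wrap each fenced block with the
  // header/copy/preview chrome, then serialize back to HTML. The pipeline shown
  // here is an assumption, not the webui's exact setup.
  // allowDangerousHtml lets the injected raw SVG icon nodes through to the output.
  const processor = unified()
    .use(remarkParse)
    .use(remarkRehype)
    .use(rehypeEnhanceCodeBlocks)
    .use(rehypeStringify, { allowDangerousHtml: true });

  const markdownSource = ['```js', 'console.log(42);', '```'].join('\n');
  const html = String(await processor.process(markdownSource));

Each rendered block carries a data-code-id attribute on both the code element and its buttons, so copy/preview handlers can be bound afterwards, for example with a delegated click listener outside the markdown pipeline.
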
      diff --git a/llama.cpp/tools/server/webui/src/lib/markdown/enhance-links.ts b/llama.cpp/tools/server/webui/src/lib/markdown/enhance-links.ts
      new file mode 100644
      index 0000000..b5fbcbd
      --- /dev/null
      +++ b/llama.cpp/tools/server/webui/src/lib/markdown/enhance-links.ts
      @@ -0,0 +1,33 @@
      +/**
      + * Rehype plugin to enhance links with security attributes.
      + *
      + * Adds target="_blank" and rel="noopener noreferrer" to all anchor elements,
      + * ensuring external links open in new tabs safely.
      + */
      +
      +import type { Plugin } from 'unified';
      +import type { Root, Element } from 'hast';
      +import { visit } from 'unist-util-visit';
      +
      +/**
      + * Rehype plugin that adds security attributes to all links.
      + * This plugin ensures external links open in new tabs safely by adding:
      + * - target="_blank"
      + * - rel="noopener noreferrer"
      + */
      +export const rehypeEnhanceLinks: Plugin<[], Root> = () => {
      +	return (tree: Root) => {
      +		visit(tree, 'element', (node: Element) => {
      +			if (node.tagName !== 'a') return;
      +
      +			const props = node.properties ?? {};
      +
      +			// Only modify if href exists
      +			if (!props.href) return;
      +
      +			props.target = '_blank';
      +			props.rel = 'noopener noreferrer';
      +			node.properties = props;
      +		});
      +	};
      +};
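
The effect is easiest to see on a concrete link; the markup below is illustrative rather than captured from the webui:

  // markdown input:  [llama.cpp](https://github.com/ggml-org/llama.cpp)
  // anchor before:   <a href="https://github.com/ggml-org/llama.cpp">llama.cpp</a>
  // anchor after:    <a href="https://github.com/ggml-org/llama.cpp" target="_blank" rel="noopener noreferrer">llama.cpp</a>

Anchors without an href are left untouched, so the plugin never decorates elements that do not navigate anywhere.
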
      diff --git a/llama.cpp/tools/server/webui/src/lib/markdown/literal-html.ts b/llama.cpp/tools/server/webui/src/lib/markdown/literal-html.ts
      new file mode 100644
      index 0000000..d4ace01
      --- /dev/null
      +++ b/llama.cpp/tools/server/webui/src/lib/markdown/literal-html.ts
      @@ -0,0 +1,121 @@
      +import type { Plugin } from 'unified';
      +import { visit } from 'unist-util-visit';
      +import type { Break, Content, Paragraph, PhrasingContent, Root, Text } from 'mdast';
      +import { LINE_BREAK, NBSP, PHRASE_PARENTS, TAB_AS_SPACES } from '$lib/constants/literal-html';
      +
      +/**
      + * remark plugin that rewrites raw HTML nodes into plain-text equivalents.
      + *
      + * remark parses inline HTML into `html` nodes even when we do not want to render
+ * them. We turn each of those nodes into regular text (plus `<br>
      ` break markers) + * so the downstream rehype pipeline escapes the characters instead of executing + * them. Leading spaces and tab characters are converted to non‑breaking spaces to + * keep indentation identical to the original author input. + */ + +function preserveIndent(line: string): string { + let index = 0; + let output = ''; + + while (index < line.length) { + const char = line[index]; + + if (char === ' ') { + output += NBSP; + index += 1; + continue; + } + + if (char === '\t') { + output += TAB_AS_SPACES; + index += 1; + continue; + } + + break; + } + + return output + line.slice(index); +} + +function createLiteralChildren(value: string): PhrasingContent[] { + const lines = value.split(LINE_BREAK); + const nodes: PhrasingContent[] = []; + + for (const [lineIndex, rawLine] of lines.entries()) { + if (lineIndex > 0) { + nodes.push({ type: 'break' } as Break as unknown as PhrasingContent); + } + + nodes.push({ + type: 'text', + value: preserveIndent(rawLine) + } as Text as unknown as PhrasingContent); + } + + if (!nodes.length) { + nodes.push({ type: 'text', value: '' } as Text as unknown as PhrasingContent); + } + + return nodes; +} + +export const remarkLiteralHtml: Plugin<[], Root> = () => { + return (tree) => { + visit(tree, 'html', (node, index, parent) => { + if (!parent || typeof index !== 'number') { + return; + } + + const replacement = createLiteralChildren(node.value); + + if (!PHRASE_PARENTS.has(parent.type as string)) { + const paragraph: Paragraph = { + type: 'paragraph', + children: replacement as Paragraph['children'], + data: { literalHtml: true } + }; + + const siblings = parent.children as unknown as Content[]; + siblings.splice(index, 1, paragraph as unknown as Content); + + if (index > 0) { + const previous = siblings[index - 1] as Paragraph | undefined; + + if ( + previous?.type === 'paragraph' && + (previous.data as { literalHtml?: boolean } | undefined)?.literalHtml + ) { + const prevChildren = previous.children as unknown as PhrasingContent[]; + + if (prevChildren.length) { + const lastChild = prevChildren[prevChildren.length - 1]; + + if (lastChild.type !== 'break') { + prevChildren.push({ + type: 'break' + } as Break as unknown as PhrasingContent); + } + } + + prevChildren.push(...(paragraph.children as unknown as PhrasingContent[])); + + siblings.splice(index, 1); + + return index; + } + } + + return index + 1; + } + + (parent.children as unknown as PhrasingContent[]).splice( + index, + 1, + ...(replacement as unknown as PhrasingContent[]) + ); + + return index + replacement.length; + }); + }; +}; diff --git a/llama.cpp/tools/server/webui/src/lib/markdown/table-html-restorer.ts b/llama.cpp/tools/server/webui/src/lib/markdown/table-html-restorer.ts new file mode 100644 index 0000000..918aa46 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/markdown/table-html-restorer.ts @@ -0,0 +1,181 @@ +/** + * Rehype plugin to restore limited HTML elements inside Markdown table cells. + * + * ## Problem + * The remark/rehype pipeline neutralizes inline HTML as literal text + * (remarkLiteralHtml) so that XML/HTML snippets in LLM responses display + * as-is instead of being rendered. This causes
<br> and <ul>
        markup in + * table cells to show as plain text. + * + * ## Solution + * This plugin traverses the HAST post-conversion, parses whitelisted HTML + * patterns from text nodes, and replaces them with actual HAST element nodes + * that will be rendered as real HTML. + * + * ## Supported HTML + * - `
<br>` / `<br/>` / `<br />` - Line breaks (inline)
+ * - `<ul><li>...</li></ul>
        ` - Unordered lists (block) + * + * ## Key Implementation Details + * + * ### 1. Sibling Combination (Critical) + * The Markdown pipeline may fragment content across multiple text nodes and `
<br>`
+ * elements. For example, `<ul><li>a</li></ul>` might arrive as:
+ * - Text: `"<ul>"`
+ * - Element: `<br>`
+ * - Text: `"<li>a</li></ul>"`
+ *
+ * We must combine consecutive text nodes and `<br>` elements into a single string
+ * before attempting to parse list markup. Without this, list detection fails.
+ *
+ * ### 2. visitParents for Deep Traversal
+ * Table cell content may be wrapped in intermediate elements (e.g., `<p>
        ` tags). + * Using `visitParents` instead of direct child iteration ensures we find text + * nodes at any depth within the cell. + * + * ### 3. Reference Comparison for No-Op Detection + * When checking if `
<br>` expansion changed anything, we compare:
+ * `expanded.length !== 1 || expanded[0] !== textNode`
+ *
+ * This catches both cases:
+ * - Multiple nodes created (text was split)
+ * - Single NEW node created (original had only `<br>`, now it's an element)
+ *
+ * A simple `length > 1` check would miss the single `<br>` case.
+ *
+ * ### 4. Strict List Validation
+ * `parseList()` rejects malformed markup by checking for garbage text between
+ * `<li>` elements. This prevents creating broken DOM from partial matches like
+ * `<ul>garbage<li>a</li></ul>`.
+ *
+ * ### 5. Newline Substitution for `<br>` in Combined String
+ * When combining siblings, existing `<br>
        ` elements become `\n` in the combined + * string. This allows list content to span visual lines while still being parsed + * as a single unit. + * + * @example + * // Input Markdown: + * // | Feature | Notes | + * // |---------|-------| + * // | Multi-line | First
<br>Second |
+ * // | List | <ul><li>A</li><li>B</li></ul> |
+ * //
+ * // Without this plugin: <br> and <ul> render as literal text
+ * // With this plugin: <br> becomes line break, <ul>
            becomes actual list + */ + +import type { Plugin } from 'unified'; +import type { Element, ElementContent, Root, Text } from 'hast'; +import { visit } from 'unist-util-visit'; +import { visitParents } from 'unist-util-visit-parents'; +import { BR_PATTERN, LIST_PATTERN, LI_PATTERN } from '$lib/constants/table-html-restorer'; + +/** + * Expands text containing `
            ` tags into an array of text nodes and br elements. + */ +function expandBrTags(value: string): ElementContent[] { + const matches = [...value.matchAll(BR_PATTERN)]; + if (!matches.length) return [{ type: 'text', value } as Text]; + + const result: ElementContent[] = []; + let cursor = 0; + + for (const m of matches) { + if (m.index! > cursor) { + result.push({ type: 'text', value: value.slice(cursor, m.index) } as Text); + } + result.push({ type: 'element', tagName: 'br', properties: {}, children: [] } as Element); + cursor = m.index! + m[0].length; + } + + if (cursor < value.length) { + result.push({ type: 'text', value: value.slice(cursor) } as Text); + } + + return result; +} + +/** + * Parses a `
<ul><li>...</li></ul>
            ` string into a HAST element. + * Returns null if the markup is malformed or contains unexpected content. + */ +function parseList(value: string): Element | null { + const match = value.trim().match(LIST_PATTERN); + if (!match) return null; + + const body = match[1]; + const items: ElementContent[] = []; + let cursor = 0; + + for (const liMatch of body.matchAll(LI_PATTERN)) { + // Reject if there's non-whitespace between list items + if (body.slice(cursor, liMatch.index!).trim()) return null; + + items.push({ + type: 'element', + tagName: 'li', + properties: {}, + children: expandBrTags(liMatch[1] ?? '') + } as Element); + + cursor = liMatch.index! + liMatch[0].length; + } + + // Reject if no items found or trailing garbage exists + if (!items.length || body.slice(cursor).trim()) return null; + + return { type: 'element', tagName: 'ul', properties: {}, children: items } as Element; +} + +/** + * Processes a single table cell, restoring HTML elements from text content. + */ +function processCell(cell: Element) { + visitParents(cell, 'text', (textNode: Text, ancestors) => { + const parent = ancestors[ancestors.length - 1]; + if (!parent || parent.type !== 'element') return; + + const parentEl = parent as Element; + const siblings = parentEl.children as ElementContent[]; + const startIndex = siblings.indexOf(textNode as ElementContent); + if (startIndex === -1) return; + + // Combine consecutive text nodes and
            elements into one string + let combined = ''; + let endIndex = startIndex; + + for (let i = startIndex; i < siblings.length; i++) { + const sib = siblings[i]; + if (sib.type === 'text') { + combined += (sib as Text).value; + endIndex = i; + } else if (sib.type === 'element' && (sib as Element).tagName === 'br') { + combined += '\n'; + endIndex = i; + } else { + break; + } + } + + // Try parsing as list first (replaces entire combined range) + const list = parseList(combined); + if (list) { + siblings.splice(startIndex, endIndex - startIndex + 1, list); + return; + } + + // Otherwise, just expand
            tags in this text node + const expanded = expandBrTags(textNode.value); + if (expanded.length !== 1 || expanded[0] !== textNode) { + siblings.splice(startIndex, 1, ...expanded); + } + }); +} + +export const rehypeRestoreTableHtml: Plugin<[], Root> = () => (tree) => { + visit(tree, 'element', (node: Element) => { + if (node.tagName === 'td' || node.tagName === 'th') { + processCell(node); + } + }); +}; diff --git a/llama.cpp/tools/server/webui/src/lib/services/chat.ts b/llama.cpp/tools/server/webui/src/lib/services/chat.ts new file mode 100644 index 0000000..02fc638 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/chat.ts @@ -0,0 +1,784 @@ +import { getJsonHeaders } from '$lib/utils'; +import { AttachmentType } from '$lib/enums'; + +/** + * ChatService - Low-level API communication layer for Chat Completions + * + * **Terminology - Chat vs Conversation:** + * - **Chat**: The active interaction space with the Chat Completions API. This service + * handles the real-time communication with the AI backend - sending messages, receiving + * streaming responses, and managing request lifecycles. "Chat" is ephemeral and runtime-focused. + * - **Conversation**: The persistent database entity storing all messages and metadata. + * Managed by ConversationsService/Store, conversations persist across sessions. + * + * This service handles direct communication with the llama-server's Chat Completions API. + * It provides the network layer abstraction for AI model interactions while remaining + * stateless and focused purely on API communication. + * + * **Architecture & Relationships:** + * - **ChatService** (this class): Stateless API communication layer + * - Handles HTTP requests/responses with the llama-server + * - Manages streaming and non-streaming response parsing + * - Provides per-conversation request abortion capabilities + * - Converts database messages to API format + * - Handles error translation for server responses + * + * - **chatStore**: Uses ChatService for all AI model communication + * - **conversationsStore**: Provides message context for API requests + * + * **Key Responsibilities:** + * - Message format conversion (DatabaseMessage → API format) + * - Streaming response handling with real-time callbacks + * - Reasoning content extraction and processing + * - File attachment processing (images, PDFs, audio, text) + * - Request lifecycle management (abort via AbortSignal) + */ +export class ChatService { + // ───────────────────────────────────────────────────────────────────────────── + // Messaging + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Sends a chat completion request to the llama.cpp server. + * Supports both streaming and non-streaming responses with comprehensive parameter configuration. + * Automatically converts database messages with attachments to the appropriate API format. + * + * @param messages - Array of chat messages to send to the API (supports both ApiChatMessageData and DatabaseMessage with attachments) + * @param options - Configuration options for the chat completion request. See `SettingsChatServiceOptions` type for details. 
+ * @returns {Promise} that resolves to the complete response string (non-streaming) or void (streaming) + * @throws {Error} if the request fails or is aborted + */ + static async sendMessage( + messages: ApiChatMessageData[] | (DatabaseMessage & { extra?: DatabaseMessageExtra[] })[], + options: SettingsChatServiceOptions = {}, + conversationId?: string, + signal?: AbortSignal + ): Promise { + const { + stream, + onChunk, + onComplete, + onError, + onReasoningChunk, + onToolCallChunk, + onModel, + onTimings, + // Generation parameters + temperature, + max_tokens, + // Sampling parameters + dynatemp_range, + dynatemp_exponent, + top_k, + top_p, + min_p, + xtc_probability, + xtc_threshold, + typ_p, + // Penalty parameters + repeat_last_n, + repeat_penalty, + presence_penalty, + frequency_penalty, + dry_multiplier, + dry_base, + dry_allowed_length, + dry_penalty_last_n, + // Other parameters + samplers, + backend_sampling, + custom, + timings_per_token, + // Config options + disableReasoningFormat + } = options; + + const normalizedMessages: ApiChatMessageData[] = messages + .map((msg) => { + if ('id' in msg && 'convId' in msg && 'timestamp' in msg) { + const dbMsg = msg as DatabaseMessage & { extra?: DatabaseMessageExtra[] }; + return ChatService.convertDbMessageToApiChatMessageData(dbMsg); + } else { + return msg as ApiChatMessageData; + } + }) + .filter((msg) => { + // Filter out empty system messages + if (msg.role === 'system') { + const content = typeof msg.content === 'string' ? msg.content : ''; + + return content.trim().length > 0; + } + + return true; + }); + + const requestBody: ApiChatCompletionRequest = { + messages: normalizedMessages.map((msg: ApiChatMessageData) => ({ + role: msg.role, + content: msg.content + })), + stream, + return_progress: stream ? true : undefined + }; + + // Include model in request if provided (required in ROUTER mode) + if (options.model) { + requestBody.model = options.model; + } + + requestBody.reasoning_format = disableReasoningFormat ? 'none' : 'auto'; + + if (temperature !== undefined) requestBody.temperature = temperature; + if (max_tokens !== undefined) { + // Set max_tokens to -1 (infinite) when explicitly configured as 0 or null + requestBody.max_tokens = max_tokens !== null && max_tokens !== 0 ? 
max_tokens : -1; + } + + if (dynatemp_range !== undefined) requestBody.dynatemp_range = dynatemp_range; + if (dynatemp_exponent !== undefined) requestBody.dynatemp_exponent = dynatemp_exponent; + if (top_k !== undefined) requestBody.top_k = top_k; + if (top_p !== undefined) requestBody.top_p = top_p; + if (min_p !== undefined) requestBody.min_p = min_p; + if (xtc_probability !== undefined) requestBody.xtc_probability = xtc_probability; + if (xtc_threshold !== undefined) requestBody.xtc_threshold = xtc_threshold; + if (typ_p !== undefined) requestBody.typ_p = typ_p; + + if (repeat_last_n !== undefined) requestBody.repeat_last_n = repeat_last_n; + if (repeat_penalty !== undefined) requestBody.repeat_penalty = repeat_penalty; + if (presence_penalty !== undefined) requestBody.presence_penalty = presence_penalty; + if (frequency_penalty !== undefined) requestBody.frequency_penalty = frequency_penalty; + if (dry_multiplier !== undefined) requestBody.dry_multiplier = dry_multiplier; + if (dry_base !== undefined) requestBody.dry_base = dry_base; + if (dry_allowed_length !== undefined) requestBody.dry_allowed_length = dry_allowed_length; + if (dry_penalty_last_n !== undefined) requestBody.dry_penalty_last_n = dry_penalty_last_n; + + if (samplers !== undefined) { + requestBody.samplers = + typeof samplers === 'string' + ? samplers.split(';').filter((s: string) => s.trim()) + : samplers; + } + + if (backend_sampling !== undefined) requestBody.backend_sampling = backend_sampling; + + if (timings_per_token !== undefined) requestBody.timings_per_token = timings_per_token; + + if (custom) { + try { + const customParams = typeof custom === 'string' ? JSON.parse(custom) : custom; + Object.assign(requestBody, customParams); + } catch (error) { + console.warn('Failed to parse custom parameters:', error); + } + } + + try { + const response = await fetch(`./v1/chat/completions`, { + method: 'POST', + headers: getJsonHeaders(), + body: JSON.stringify(requestBody), + signal + }); + + if (!response.ok) { + const error = await ChatService.parseErrorResponse(response); + if (onError) { + onError(error); + } + throw error; + } + + if (stream) { + await ChatService.handleStreamResponse( + response, + onChunk, + onComplete, + onError, + onReasoningChunk, + onToolCallChunk, + onModel, + onTimings, + conversationId, + signal + ); + return; + } else { + return ChatService.handleNonStreamResponse( + response, + onComplete, + onError, + onToolCallChunk, + onModel + ); + } + } catch (error) { + if (error instanceof Error && error.name === 'AbortError') { + console.log('Chat completion request was aborted'); + return; + } + + let userFriendlyError: Error; + + if (error instanceof Error) { + if (error.name === 'TypeError' && error.message.includes('fetch')) { + userFriendlyError = new Error( + 'Unable to connect to server - please check if the server is running' + ); + userFriendlyError.name = 'NetworkError'; + } else if (error.message.includes('ECONNREFUSED')) { + userFriendlyError = new Error('Connection refused - server may be offline'); + userFriendlyError.name = 'NetworkError'; + } else if (error.message.includes('ETIMEDOUT')) { + userFriendlyError = new Error('Request timed out - the server took too long to respond'); + userFriendlyError.name = 'TimeoutError'; + } else { + userFriendlyError = error; + } + } else { + userFriendlyError = new Error('Unknown error occurred while sending message'); + } + + console.error('Error in sendMessage:', error); + if (onError) { + onError(userFriendlyError); + } + throw 
userFriendlyError; + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Streaming + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Handles streaming response from the chat completion API + * @param response - The Response object from the fetch request + * @param onChunk - Optional callback invoked for each content chunk received + * @param onComplete - Optional callback invoked when the stream is complete with full response + * @param onError - Optional callback invoked if an error occurs during streaming + * @param onReasoningChunk - Optional callback invoked for each reasoning content chunk + * @param conversationId - Optional conversation ID for per-conversation state tracking + * @returns {Promise} Promise that resolves when streaming is complete + * @throws {Error} if the stream cannot be read or parsed + */ + private static async handleStreamResponse( + response: Response, + onChunk?: (chunk: string) => void, + onComplete?: ( + response: string, + reasoningContent?: string, + timings?: ChatMessageTimings, + toolCalls?: string + ) => void, + onError?: (error: Error) => void, + onReasoningChunk?: (chunk: string) => void, + onToolCallChunk?: (chunk: string) => void, + onModel?: (model: string) => void, + onTimings?: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => void, + conversationId?: string, + abortSignal?: AbortSignal + ): Promise { + const reader = response.body?.getReader(); + + if (!reader) { + throw new Error('No response body'); + } + + const decoder = new TextDecoder(); + let aggregatedContent = ''; + let fullReasoningContent = ''; + let aggregatedToolCalls: ApiChatCompletionToolCall[] = []; + let lastTimings: ChatMessageTimings | undefined; + let streamFinished = false; + let modelEmitted = false; + let toolCallIndexOffset = 0; + let hasOpenToolCallBatch = false; + + const finalizeOpenToolCallBatch = () => { + if (!hasOpenToolCallBatch) { + return; + } + + toolCallIndexOffset = aggregatedToolCalls.length; + hasOpenToolCallBatch = false; + }; + + const processToolCallDelta = (toolCalls?: ApiChatCompletionToolCallDelta[]) => { + if (!toolCalls || toolCalls.length === 0) { + return; + } + + aggregatedToolCalls = ChatService.mergeToolCallDeltas( + aggregatedToolCalls, + toolCalls, + toolCallIndexOffset + ); + + if (aggregatedToolCalls.length === 0) { + return; + } + + hasOpenToolCallBatch = true; + + const serializedToolCalls = JSON.stringify(aggregatedToolCalls); + + if (!serializedToolCalls) { + return; + } + + if (!abortSignal?.aborted) { + onToolCallChunk?.(serializedToolCalls); + } + }; + + try { + let chunk = ''; + while (true) { + if (abortSignal?.aborted) break; + + const { done, value } = await reader.read(); + if (done) break; + + if (abortSignal?.aborted) break; + + chunk += decoder.decode(value, { stream: true }); + const lines = chunk.split('\n'); + chunk = lines.pop() || ''; + + for (const line of lines) { + if (abortSignal?.aborted) break; + + if (line.startsWith('data: ')) { + const data = line.slice(6); + if (data === '[DONE]') { + streamFinished = true; + continue; + } + + try { + const parsed: ApiChatCompletionStreamChunk = JSON.parse(data); + const content = parsed.choices[0]?.delta?.content; + const reasoningContent = parsed.choices[0]?.delta?.reasoning_content; + const toolCalls = parsed.choices[0]?.delta?.tool_calls; + const timings = parsed.timings; + const promptProgress = parsed.prompt_progress; + + const chunkModel = 
ChatService.extractModelName(parsed); + if (chunkModel && !modelEmitted) { + modelEmitted = true; + onModel?.(chunkModel); + } + + if (promptProgress) { + ChatService.notifyTimings(undefined, promptProgress, onTimings); + } + + if (timings) { + ChatService.notifyTimings(timings, promptProgress, onTimings); + lastTimings = timings; + } + + if (content) { + finalizeOpenToolCallBatch(); + aggregatedContent += content; + if (!abortSignal?.aborted) { + onChunk?.(content); + } + } + + if (reasoningContent) { + finalizeOpenToolCallBatch(); + fullReasoningContent += reasoningContent; + if (!abortSignal?.aborted) { + onReasoningChunk?.(reasoningContent); + } + } + + processToolCallDelta(toolCalls); + } catch (e) { + console.error('Error parsing JSON chunk:', e); + } + } + } + + if (abortSignal?.aborted) break; + } + + if (abortSignal?.aborted) return; + + if (streamFinished) { + finalizeOpenToolCallBatch(); + + const finalToolCalls = + aggregatedToolCalls.length > 0 ? JSON.stringify(aggregatedToolCalls) : undefined; + + onComplete?.( + aggregatedContent, + fullReasoningContent || undefined, + lastTimings, + finalToolCalls + ); + } + } catch (error) { + const err = error instanceof Error ? error : new Error('Stream error'); + + onError?.(err); + + throw err; + } finally { + reader.releaseLock(); + } + } + + /** + * Handles non-streaming response from the chat completion API. + * Parses the JSON response and extracts the generated content. + * + * @param response - The fetch Response object containing the JSON data + * @param onComplete - Optional callback invoked when response is successfully parsed + * @param onError - Optional callback invoked if an error occurs during parsing + * @returns {Promise} Promise that resolves to the generated content string + * @throws {Error} if the response cannot be parsed or is malformed + */ + private static async handleNonStreamResponse( + response: Response, + onComplete?: ( + response: string, + reasoningContent?: string, + timings?: ChatMessageTimings, + toolCalls?: string + ) => void, + onError?: (error: Error) => void, + onToolCallChunk?: (chunk: string) => void, + onModel?: (model: string) => void + ): Promise { + try { + const responseText = await response.text(); + + if (!responseText.trim()) { + const noResponseError = new Error('No response received from server. Please try again.'); + throw noResponseError; + } + + const data: ApiChatCompletionResponse = JSON.parse(responseText); + + const responseModel = ChatService.extractModelName(data); + if (responseModel) { + onModel?.(responseModel); + } + + const content = data.choices[0]?.message?.content || ''; + const reasoningContent = data.choices[0]?.message?.reasoning_content; + const toolCalls = data.choices[0]?.message?.tool_calls; + + if (reasoningContent) { + console.log('Full reasoning content:', reasoningContent); + } + + let serializedToolCalls: string | undefined; + + if (toolCalls && toolCalls.length > 0) { + const mergedToolCalls = ChatService.mergeToolCallDeltas([], toolCalls); + + if (mergedToolCalls.length > 0) { + serializedToolCalls = JSON.stringify(mergedToolCalls); + if (serializedToolCalls) { + onToolCallChunk?.(serializedToolCalls); + } + } + } + + if (!content.trim() && !serializedToolCalls) { + const noResponseError = new Error('No response received from server. Please try again.'); + throw noResponseError; + } + + onComplete?.(content, reasoningContent, undefined, serializedToolCalls); + + return content; + } catch (error) { + const err = error instanceof Error ? 
error : new Error('Parse error'); + + onError?.(err); + + throw err; + } + } + + /** + * Merges tool call deltas into an existing array of tool calls. + * Handles both existing and new tool calls, updating existing ones and adding new ones. + * + * @param existing - The existing array of tool calls to merge into + * @param deltas - The array of tool call deltas to merge + * @param indexOffset - Optional offset to apply to the index of new tool calls + * @returns {ApiChatCompletionToolCall[]} The merged array of tool calls + */ + private static mergeToolCallDeltas( + existing: ApiChatCompletionToolCall[], + deltas: ApiChatCompletionToolCallDelta[], + indexOffset = 0 + ): ApiChatCompletionToolCall[] { + const result = existing.map((call) => ({ + ...call, + function: call.function ? { ...call.function } : undefined + })); + + for (const delta of deltas) { + const index = + typeof delta.index === 'number' && delta.index >= 0 + ? delta.index + indexOffset + : result.length; + + while (result.length <= index) { + result.push({ function: undefined }); + } + + const target = result[index]!; + + if (delta.id) { + target.id = delta.id; + } + + if (delta.type) { + target.type = delta.type; + } + + if (delta.function) { + const fn = target.function ? { ...target.function } : {}; + + if (delta.function.name) { + fn.name = delta.function.name; + } + + if (delta.function.arguments) { + fn.arguments = (fn.arguments ?? '') + delta.function.arguments; + } + + target.function = fn; + } + } + + return result; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Conversion + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Converts a database message with attachments to API chat message format. + * Processes various attachment types (images, text files, PDFs) and formats them + * as content parts suitable for the chat completion API. + * + * @param message - Database message object with optional extra attachments + * @param message.content - The text content of the message + * @param message.role - The role of the message sender (user, assistant, system) + * @param message.extra - Optional array of message attachments (images, files, etc.) 
+ * @returns {ApiChatMessageData} object formatted for the chat completion API + * @static + */ + static convertDbMessageToApiChatMessageData( + message: DatabaseMessage & { extra?: DatabaseMessageExtra[] } + ): ApiChatMessageData { + if (!message.extra || message.extra.length === 0) { + return { + role: message.role as 'user' | 'assistant' | 'system', + content: message.content + }; + } + + const contentParts: ApiChatMessageContentPart[] = []; + + if (message.content) { + contentParts.push({ + type: 'text', + text: message.content + }); + } + + const imageFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraImageFile => + extra.type === AttachmentType.IMAGE + ); + + for (const image of imageFiles) { + contentParts.push({ + type: 'image_url', + image_url: { url: image.base64Url } + }); + } + + const textFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraTextFile => + extra.type === AttachmentType.TEXT + ); + + for (const textFile of textFiles) { + contentParts.push({ + type: 'text', + text: `\n\n--- File: ${textFile.name} ---\n${textFile.content}` + }); + } + + // Handle legacy 'context' type from old webui (pasted content) + const legacyContextFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraLegacyContext => + extra.type === AttachmentType.LEGACY_CONTEXT + ); + + for (const legacyContextFile of legacyContextFiles) { + contentParts.push({ + type: 'text', + text: `\n\n--- File: ${legacyContextFile.name} ---\n${legacyContextFile.content}` + }); + } + + const audioFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraAudioFile => + extra.type === AttachmentType.AUDIO + ); + + for (const audio of audioFiles) { + contentParts.push({ + type: 'input_audio', + input_audio: { + data: audio.base64Data, + format: audio.mimeType.includes('wav') ? 'wav' : 'mp3' + } + }); + } + + const pdfFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraPdfFile => + extra.type === AttachmentType.PDF + ); + + for (const pdfFile of pdfFiles) { + if (pdfFile.processedAsImages && pdfFile.images) { + for (let i = 0; i < pdfFile.images.length; i++) { + contentParts.push({ + type: 'image_url', + image_url: { url: pdfFile.images[i] } + }); + } + } else { + contentParts.push({ + type: 'text', + text: `\n\n--- PDF File: ${pdfFile.name} ---\n${pdfFile.content}` + }); + } + } + + return { + role: message.role as 'user' | 'assistant' | 'system', + content: contentParts + }; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Utilities + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Parses error response and creates appropriate error with context information + * @param response - HTTP response object + * @returns Promise - Parsed error with context info if available + */ + private static async parseErrorResponse( + response: Response + ): Promise { + try { + const errorText = await response.text(); + const errorData: ApiErrorResponse = JSON.parse(errorText); + + const message = errorData.error?.message || 'Unknown server error'; + const error = new Error(message) as Error & { + contextInfo?: { n_prompt_tokens: number; n_ctx: number }; + }; + error.name = response.status === 400 ? 
'ServerError' : 'HttpError'; + + if (errorData.error && 'n_prompt_tokens' in errorData.error && 'n_ctx' in errorData.error) { + error.contextInfo = { + n_prompt_tokens: errorData.error.n_prompt_tokens, + n_ctx: errorData.error.n_ctx + }; + } + + return error; + } catch { + const fallback = new Error( + `Server error (${response.status}): ${response.statusText}` + ) as Error & { + contextInfo?: { n_prompt_tokens: number; n_ctx: number }; + }; + fallback.name = 'HttpError'; + return fallback; + } + } + + /** + * Extracts model name from Chat Completions API response data. + * Handles various response formats including streaming chunks and final responses. + * + * WORKAROUND: In single model mode, llama-server returns a default/incorrect model name + * in the response. We override it with the actual model name from serverStore. + * + * @param data - Raw response data from the Chat Completions API + * @returns Model name string if found, undefined otherwise + * @private + */ + private static extractModelName(data: unknown): string | undefined { + const asRecord = (value: unknown): Record | undefined => { + return typeof value === 'object' && value !== null + ? (value as Record) + : undefined; + }; + + const getTrimmedString = (value: unknown): string | undefined => { + return typeof value === 'string' && value.trim() ? value.trim() : undefined; + }; + + const root = asRecord(data); + if (!root) return undefined; + + // 1) root (some implementations provide `model` at the top level) + const rootModel = getTrimmedString(root.model); + if (rootModel) return rootModel; + + // 2) streaming choice (delta) or final response (message) + const firstChoice = Array.isArray(root.choices) ? asRecord(root.choices[0]) : undefined; + if (!firstChoice) return undefined; + + // priority: delta.model (first chunk) else message.model (final response) + const deltaModel = getTrimmedString(asRecord(firstChoice.delta)?.model); + if (deltaModel) return deltaModel; + + const messageModel = getTrimmedString(asRecord(firstChoice.message)?.model); + if (messageModel) return messageModel; + + // avoid guessing from non-standard locations (metadata, etc.) + return undefined; + } + + /** + * Calls the onTimings callback with timing data from streaming response. 
+ * + * @param timings - Timing information from the Chat Completions API response + * @param promptProgress - Prompt processing progress data + * @param onTimingsCallback - Callback function to invoke with timing data + * @private + */ + private static notifyTimings( + timings: ChatMessageTimings | undefined, + promptProgress: ChatMessagePromptProgress | undefined, + onTimingsCallback: + | ((timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => void) + | undefined + ): void { + if (!onTimingsCallback || (!timings && !promptProgress)) return; + + onTimingsCallback(timings, promptProgress); + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/services/database.ts b/llama.cpp/tools/server/webui/src/lib/services/database.ts new file mode 100644 index 0000000..3b24628 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/database.ts @@ -0,0 +1,400 @@ +import Dexie, { type EntityTable } from 'dexie'; +import { findDescendantMessages } from '$lib/utils'; + +class LlamacppDatabase extends Dexie { + conversations!: EntityTable; + messages!: EntityTable; + + constructor() { + super('LlamacppWebui'); + + this.version(1).stores({ + conversations: 'id, lastModified, currNode, name', + messages: 'id, convId, type, role, timestamp, parent, children' + }); + } +} + +const db = new LlamacppDatabase(); +import { v4 as uuid } from 'uuid'; + +/** + * DatabaseService - Stateless IndexedDB communication layer + * + * **Terminology - Chat vs Conversation:** + * - **Chat**: The active interaction space with the Chat Completions API (ephemeral, runtime). + * - **Conversation**: The persistent database entity storing all messages and metadata. + * This service handles raw database operations for conversations - the lowest layer + * in the persistence stack. + * + * This service provides a stateless data access layer built on IndexedDB using Dexie ORM. + * It handles all low-level storage operations for conversations and messages with support + * for complex branching and message threading. All methods are static - no instance state. 
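+ *
+ * Minimal usage sketch (illustrative only; it mirrors the message shape used by
+ * callers of this service elsewhere in the webui):
+ * ```ts
+ * const conv = await DatabaseService.createConversation('New chat');
+ * const rootId = await DatabaseService.createRootMessage(conv.id);
+ * const firstMsg = await DatabaseService.createMessageBranch(
+ *   {
+ *     convId: conv.id,
+ *     type: 'text',
+ *     role: 'user',
+ *     content: 'Hello!',
+ *     timestamp: Date.now(),
+ *     thinking: '',
+ *     toolCalls: '',
+ *     children: []
+ *   },
+ *   rootId
+ * );
+ * ```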
+ * + * **Architecture & Relationships (bottom to top):** + * - **DatabaseService** (this class): Stateless IndexedDB operations + * - Lowest layer - direct Dexie/IndexedDB communication + * - Pure CRUD operations without business logic + * - Handles branching tree structure (parent-child relationships) + * - Provides transaction safety for multi-table operations + * + * - **ConversationsService**: Stateless business logic layer + * - Uses DatabaseService for all persistence operations + * - Adds import/export, navigation, and higher-level operations + * + * - **conversationsStore**: Reactive state management for conversations + * - Uses ConversationsService for database operations + * - Manages conversation list, active conversation, and messages in memory + * + * - **chatStore**: Active AI interaction management + * - Uses conversationsStore for conversation context + * - Directly uses DatabaseService for message CRUD during streaming + * + * **Key Features:** + * - **Conversation CRUD**: Create, read, update, delete conversations + * - **Message CRUD**: Add, update, delete messages with branching support + * - **Branch Operations**: Create branches, find descendants, cascade deletions + * - **Transaction Safety**: Atomic operations for data consistency + * + * **Database Schema:** + * - `conversations`: id, lastModified, currNode, name + * - `messages`: id, convId, type, role, timestamp, parent, children + * + * **Branching Model:** + * Messages form a tree structure where each message can have multiple children, + * enabling conversation branching and alternative response paths. The conversation's + * `currNode` tracks the currently active branch endpoint. + */ +export class DatabaseService { + // ───────────────────────────────────────────────────────────────────────────── + // Conversations + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Creates a new conversation. + * + * @param name - Name of the conversation + * @returns The created conversation + */ + static async createConversation(name: string): Promise { + const conversation: DatabaseConversation = { + id: uuid(), + name, + lastModified: Date.now(), + currNode: '' + }; + + await db.conversations.add(conversation); + return conversation; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Messages + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Creates a new message branch by adding a message and updating parent/child relationships. + * Also updates the conversation's currNode to point to the new message. + * + * @param message - Message to add (without id) + * @param parentId - Parent message ID to attach to + * @returns The created message + */ + static async createMessageBranch( + message: Omit, + parentId: string | null + ): Promise { + return await db.transaction('rw', [db.conversations, db.messages], async () => { + // Handle null parent (root message case) + if (parentId !== null) { + const parentMessage = await db.messages.get(parentId); + if (!parentMessage) { + throw new Error(`Parent message ${parentId} not found`); + } + } + + const newMessage: DatabaseMessage = { + ...message, + id: uuid(), + parent: parentId, + toolCalls: message.toolCalls ?? 
'', + children: [] + }; + + await db.messages.add(newMessage); + + // Update parent's children array if parent exists + if (parentId !== null) { + const parentMessage = await db.messages.get(parentId); + if (parentMessage) { + await db.messages.update(parentId, { + children: [...parentMessage.children, newMessage.id] + }); + } + } + + await this.updateConversation(message.convId, { + currNode: newMessage.id + }); + + return newMessage; + }); + } + + /** + * Creates a root message for a new conversation. + * Root messages are not displayed but serve as the tree root for branching. + * + * @param convId - Conversation ID + * @returns The created root message + */ + static async createRootMessage(convId: string): Promise { + const rootMessage: DatabaseMessage = { + id: uuid(), + convId, + type: 'root', + timestamp: Date.now(), + role: 'system', + content: '', + parent: null, + thinking: '', + toolCalls: '', + children: [] + }; + + await db.messages.add(rootMessage); + return rootMessage.id; + } + + /** + * Creates a system prompt message for a conversation. + * + * @param convId - Conversation ID + * @param systemPrompt - The system prompt content (must be non-empty) + * @param parentId - Parent message ID (typically the root message) + * @returns The created system message + * @throws Error if systemPrompt is empty + */ + static async createSystemMessage( + convId: string, + systemPrompt: string, + parentId: string + ): Promise { + const trimmedPrompt = systemPrompt.trim(); + if (!trimmedPrompt) { + throw new Error('Cannot create system message with empty content'); + } + + const systemMessage: DatabaseMessage = { + id: uuid(), + convId, + type: 'system', + timestamp: Date.now(), + role: 'system', + content: trimmedPrompt, + parent: parentId, + thinking: '', + children: [] + }; + + await db.messages.add(systemMessage); + + const parentMessage = await db.messages.get(parentId); + if (parentMessage) { + await db.messages.update(parentId, { + children: [...parentMessage.children, systemMessage.id] + }); + } + + return systemMessage; + } + + /** + * Deletes a conversation and all its messages. + * + * @param id - Conversation ID + */ + static async deleteConversation(id: string): Promise { + await db.transaction('rw', [db.conversations, db.messages], async () => { + await db.conversations.delete(id); + await db.messages.where('convId').equals(id).delete(); + }); + } + + /** + * Deletes a message and removes it from its parent's children array. + * + * @param messageId - ID of the message to delete + */ + static async deleteMessage(messageId: string): Promise { + await db.transaction('rw', db.messages, async () => { + const message = await db.messages.get(messageId); + if (!message) return; + + // Remove this message from its parent's children array + if (message.parent) { + const parent = await db.messages.get(message.parent); + if (parent) { + parent.children = parent.children.filter((childId: string) => childId !== messageId); + await db.messages.put(parent); + } + } + + // Delete the message + await db.messages.delete(messageId); + }); + } + + /** + * Deletes a message and all its descendant messages (cascading deletion). + * This removes the entire branch starting from the specified message. 
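+ *
+ * Illustrative call (identifiers are placeholders):
+ * ```ts
+ * const deletedIds = await DatabaseService.deleteMessageCascading(convId, messageId);
+ * console.debug(`Pruned ${deletedIds.length} message(s) from the branch`);
+ * ```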
+ * + * @param conversationId - ID of the conversation containing the message + * @param messageId - ID of the root message to delete (along with all descendants) + * @returns Array of all deleted message IDs + */ + static async deleteMessageCascading( + conversationId: string, + messageId: string + ): Promise { + return await db.transaction('rw', db.messages, async () => { + // Get all messages in the conversation to find descendants + const allMessages = await db.messages.where('convId').equals(conversationId).toArray(); + + // Find all descendant messages + const descendants = findDescendantMessages(allMessages, messageId); + const allToDelete = [messageId, ...descendants]; + + // Get the message to delete for parent cleanup + const message = await db.messages.get(messageId); + if (message && message.parent) { + const parent = await db.messages.get(message.parent); + if (parent) { + parent.children = parent.children.filter((childId: string) => childId !== messageId); + await db.messages.put(parent); + } + } + + // Delete all messages in the branch + await db.messages.bulkDelete(allToDelete); + + return allToDelete; + }); + } + + /** + * Gets all conversations, sorted by last modified time (newest first). + * + * @returns Array of conversations + */ + static async getAllConversations(): Promise { + return await db.conversations.orderBy('lastModified').reverse().toArray(); + } + + /** + * Gets a conversation by ID. + * + * @param id - Conversation ID + * @returns The conversation if found, otherwise undefined + */ + static async getConversation(id: string): Promise { + return await db.conversations.get(id); + } + + /** + * Gets all messages in a conversation, sorted by timestamp (oldest first). + * + * @param convId - Conversation ID + * @returns Array of messages in the conversation + */ + static async getConversationMessages(convId: string): Promise { + return await db.messages.where('convId').equals(convId).sortBy('timestamp'); + } + + /** + * Updates a conversation. + * + * @param id - Conversation ID + * @param updates - Partial updates to apply + * @returns Promise that resolves when the conversation is updated + */ + static async updateConversation( + id: string, + updates: Partial> + ): Promise { + await db.conversations.update(id, { + ...updates, + lastModified: Date.now() + }); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Navigation + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Updates the conversation's current node (active branch). + * This determines which conversation path is currently being viewed. + * + * @param convId - Conversation ID + * @param nodeId - Message ID to set as current node + */ + static async updateCurrentNode(convId: string, nodeId: string): Promise { + await this.updateConversation(convId, { + currNode: nodeId + }); + } + + /** + * Updates a message. + * + * @param id - Message ID + * @param updates - Partial updates to apply + * @returns Promise that resolves when the message is updated + */ + static async updateMessage( + id: string, + updates: Partial> + ): Promise { + await db.messages.update(id, updates); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Import + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Imports multiple conversations and their messages. + * Skips conversations that already exist. 
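+ *
+ * Sketch of the expected shape (illustrative; `loadBackup` is a stand-in for
+ * whatever produced the exported data):
+ * ```ts
+ * const exported: { conv: DatabaseConversation; messages: DatabaseMessage[] }[] = loadBackup();
+ * const { imported, skipped } = await DatabaseService.importConversations(exported);
+ * console.info(`Imported ${imported} conversation(s), skipped ${skipped}`);
+ * ```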
+ * + * @param data - Array of { conv, messages } objects + */ + static async importConversations( + data: { conv: DatabaseConversation; messages: DatabaseMessage[] }[] + ): Promise<{ imported: number; skipped: number }> { + let importedCount = 0; + let skippedCount = 0; + + return await db.transaction('rw', [db.conversations, db.messages], async () => { + for (const item of data) { + const { conv, messages } = item; + + const existing = await db.conversations.get(conv.id); + if (existing) { + console.warn(`Conversation "${conv.name}" already exists, skipping...`); + skippedCount++; + continue; + } + + await db.conversations.add(conv); + for (const msg of messages) { + await db.messages.put(msg); + } + + importedCount++; + } + + return { imported: importedCount, skipped: skippedCount }; + }); + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/services/index.ts b/llama.cpp/tools/server/webui/src/lib/services/index.ts new file mode 100644 index 0000000..c36c64a --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/index.ts @@ -0,0 +1,5 @@ +export { ChatService } from './chat'; +export { DatabaseService } from './database'; +export { ModelsService } from './models'; +export { PropsService } from './props'; +export { ParameterSyncService } from './parameter-sync'; diff --git a/llama.cpp/tools/server/webui/src/lib/services/models.ts b/llama.cpp/tools/server/webui/src/lib/services/models.ts new file mode 100644 index 0000000..eecb7fa --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/models.ts @@ -0,0 +1,124 @@ +import { base } from '$app/paths'; +import { ServerModelStatus } from '$lib/enums'; +import { getJsonHeaders } from '$lib/utils'; + +/** + * ModelsService - Stateless service for model management API communication + * + * This service handles communication with model-related endpoints: + * - `/v1/models` - OpenAI-compatible model list (MODEL + ROUTER mode) + * - `/models/load`, `/models/unload` - Router-specific model management (ROUTER mode only) + * + * **Responsibilities:** + * - List available models + * - Load/unload models (ROUTER mode) + * - Check model status (ROUTER mode) + * + * **Used by:** + * - modelsStore: Primary consumer for model state management + */ +export class ModelsService { + // ───────────────────────────────────────────────────────────────────────────── + // Listing + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Fetch list of models from OpenAI-compatible endpoint + * Works in both MODEL and ROUTER modes + */ + static async list(): Promise { + const response = await fetch(`${base}/v1/models`, { + headers: getJsonHeaders() + }); + + if (!response.ok) { + throw new Error(`Failed to fetch model list (status ${response.status})`); + } + + return response.json() as Promise; + } + + /** + * Fetch list of all models with detailed metadata (ROUTER mode) + * Returns models with load status, paths, and other metadata + */ + static async listRouter(): Promise { + const response = await fetch(`${base}/v1/models`, { + headers: getJsonHeaders() + }); + + if (!response.ok) { + throw new Error(`Failed to fetch router models list (status ${response.status})`); + } + + return response.json() as Promise; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Load/Unload + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Load a model (ROUTER mode) + * POST /models/load + * @param modelId - Model identifier to load + * 
@param extraArgs - Optional additional arguments to pass to the model instance + */ + static async load(modelId: string, extraArgs?: string[]): Promise { + const payload: { model: string; extra_args?: string[] } = { model: modelId }; + if (extraArgs && extraArgs.length > 0) { + payload.extra_args = extraArgs; + } + + const response = await fetch(`${base}/models/load`, { + method: 'POST', + headers: getJsonHeaders(), + body: JSON.stringify(payload) + }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({})); + throw new Error(errorData.error || `Failed to load model (status ${response.status})`); + } + + return response.json() as Promise; + } + + /** + * Unload a model (ROUTER mode) + * POST /models/unload + * @param modelId - Model identifier to unload + */ + static async unload(modelId: string): Promise { + const response = await fetch(`${base}/models/unload`, { + method: 'POST', + headers: getJsonHeaders(), + body: JSON.stringify({ model: modelId }) + }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({})); + throw new Error(errorData.error || `Failed to unload model (status ${response.status})`); + } + + return response.json() as Promise; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Status + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Check if a model is loaded based on its metadata + */ + static isModelLoaded(model: ApiModelDataEntry): boolean { + return model.status.value === ServerModelStatus.LOADED; + } + + /** + * Check if a model is currently loading + */ + static isModelLoading(model: ApiModelDataEntry): boolean { + return model.status.value === ServerModelStatus.LOADING; + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.spec.ts b/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.spec.ts new file mode 100644 index 0000000..6b5c58a --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.spec.ts @@ -0,0 +1,148 @@ +import { describe, it, expect } from 'vitest'; +import { ParameterSyncService } from './parameter-sync'; + +describe('ParameterSyncService', () => { + describe('roundFloatingPoint', () => { + it('should fix JavaScript floating-point precision issues', () => { + // Test the specific values from the screenshot + const mockServerParams = { + top_p: 0.949999988079071, + min_p: 0.009999999776482582, + temperature: 0.800000011920929, + top_k: 40, + samplers: ['top_k', 'typ_p', 'top_p', 'min_p', 'temperature'] + }; + + const result = ParameterSyncService.extractServerDefaults({ + ...mockServerParams, + // Add other required fields to match the API type + n_predict: 512, + seed: -1, + dynatemp_range: 0.0, + dynatemp_exponent: 1.0, + xtc_probability: 0.0, + xtc_threshold: 0.1, + typ_p: 1.0, + repeat_last_n: 64, + repeat_penalty: 1.0, + presence_penalty: 0.0, + frequency_penalty: 0.0, + dry_multiplier: 0.0, + dry_base: 1.75, + dry_allowed_length: 2, + dry_penalty_last_n: -1, + mirostat: 0, + mirostat_tau: 5.0, + mirostat_eta: 0.1, + stop: [], + max_tokens: -1, + n_keep: 0, + n_discard: 0, + ignore_eos: false, + stream: true, + logit_bias: [], + n_probs: 0, + min_keep: 0, + grammar: '', + grammar_lazy: false, + grammar_triggers: [], + preserved_tokens: [], + chat_format: '', + reasoning_format: '', + reasoning_in_content: false, + thinking_forced_open: false, + 'speculative.n_max': 0, + 'speculative.n_min': 0, + 'speculative.p_min': 0.0, + timings_per_token: 
false, + post_sampling_probs: false, + lora: [], + top_n_sigma: 0.0, + dry_sequence_breakers: [] + } as ApiLlamaCppServerProps['default_generation_settings']['params']); + + // Check that the problematic floating-point values are rounded correctly + expect(result.top_p).toBe(0.95); + expect(result.min_p).toBe(0.01); + expect(result.temperature).toBe(0.8); + expect(result.top_k).toBe(40); // Integer should remain unchanged + expect(result.samplers).toBe('top_k;typ_p;top_p;min_p;temperature'); + }); + + it('should preserve non-numeric values', () => { + const mockServerParams = { + samplers: ['top_k', 'temperature'], + max_tokens: -1, + temperature: 0.7 + }; + + const result = ParameterSyncService.extractServerDefaults({ + ...mockServerParams, + // Minimal required fields + n_predict: 512, + seed: -1, + dynatemp_range: 0.0, + dynatemp_exponent: 1.0, + top_k: 40, + top_p: 0.95, + min_p: 0.05, + xtc_probability: 0.0, + xtc_threshold: 0.1, + typ_p: 1.0, + repeat_last_n: 64, + repeat_penalty: 1.0, + presence_penalty: 0.0, + frequency_penalty: 0.0, + dry_multiplier: 0.0, + dry_base: 1.75, + dry_allowed_length: 2, + dry_penalty_last_n: -1, + mirostat: 0, + mirostat_tau: 5.0, + mirostat_eta: 0.1, + stop: [], + n_keep: 0, + n_discard: 0, + ignore_eos: false, + stream: true, + logit_bias: [], + n_probs: 0, + min_keep: 0, + grammar: '', + grammar_lazy: false, + grammar_triggers: [], + preserved_tokens: [], + chat_format: '', + reasoning_format: '', + reasoning_in_content: false, + thinking_forced_open: false, + 'speculative.n_max': 0, + 'speculative.n_min': 0, + 'speculative.p_min': 0.0, + timings_per_token: false, + post_sampling_probs: false, + lora: [], + top_n_sigma: 0.0, + dry_sequence_breakers: [] + } as ApiLlamaCppServerProps['default_generation_settings']['params']); + + expect(result.samplers).toBe('top_k;temperature'); + expect(result.max_tokens).toBe(-1); + expect(result.temperature).toBe(0.7); + }); + + it('should merge webui settings from props when provided', () => { + const result = ParameterSyncService.extractServerDefaults(null, { + pasteLongTextToFileLen: 0, + pdfAsImage: true, + renderUserContentAsMarkdown: false, + theme: 'dark' + }); + + expect(result.pasteLongTextToFileLen).toBe(0); + expect(result.pdfAsImage).toBe(true); + expect(result.renderUserContentAsMarkdown).toBe(false); + expect(result.theme).toBeUndefined(); + }); + }); +}); diff --git a/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.ts b/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.ts new file mode 100644 index 0000000..d124cf5 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/parameter-sync.ts @@ -0,0 +1,279 @@ +/** + * ParameterSyncService - Handles synchronization between server defaults and user settings + * + * This service manages the complex logic of merging server-provided default parameters + * with user-configured overrides, ensuring the UI reflects the actual server state + * while preserving user customizations. 
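+ *
+ * Typical flow (illustrative sketch; `props` is assumed to come from the /props
+ * endpoint via PropsService and `userOverrides` from the settings store):
+ * ```ts
+ * const serverDefaults = ParameterSyncService.extractServerDefaults(
+ *   props.default_generation_settings.params
+ * );
+ * const merged = ParameterSyncService.mergeWithServerDefaults(
+ *   currentSettings,
+ *   serverDefaults,
+ *   userOverrides // set of keys the user explicitly changed
+ * );
+ * ```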
+ * + * **Key Responsibilities:** + * - Extract syncable parameters from server props + * - Merge server defaults with user overrides + * - Track parameter sources (server, user, default) + * - Provide sync utilities for settings store integration + */ + +import { normalizeFloatingPoint } from '$lib/utils'; + +export type ParameterSource = 'default' | 'custom'; +export type ParameterValue = string | number | boolean; +export type ParameterRecord = Record; + +export interface ParameterInfo { + value: string | number | boolean; + source: ParameterSource; + serverDefault?: string | number | boolean; + userOverride?: string | number | boolean; +} + +export interface SyncableParameter { + key: string; + serverKey: string; + type: 'number' | 'string' | 'boolean'; + canSync: boolean; +} + +/** + * Mapping of webui setting keys to server parameter keys + * Only parameters that should be synced from server are included + */ +export const SYNCABLE_PARAMETERS: SyncableParameter[] = [ + { key: 'temperature', serverKey: 'temperature', type: 'number', canSync: true }, + { key: 'top_k', serverKey: 'top_k', type: 'number', canSync: true }, + { key: 'top_p', serverKey: 'top_p', type: 'number', canSync: true }, + { key: 'min_p', serverKey: 'min_p', type: 'number', canSync: true }, + { key: 'dynatemp_range', serverKey: 'dynatemp_range', type: 'number', canSync: true }, + { key: 'dynatemp_exponent', serverKey: 'dynatemp_exponent', type: 'number', canSync: true }, + { key: 'xtc_probability', serverKey: 'xtc_probability', type: 'number', canSync: true }, + { key: 'xtc_threshold', serverKey: 'xtc_threshold', type: 'number', canSync: true }, + { key: 'typ_p', serverKey: 'typ_p', type: 'number', canSync: true }, + { key: 'repeat_last_n', serverKey: 'repeat_last_n', type: 'number', canSync: true }, + { key: 'repeat_penalty', serverKey: 'repeat_penalty', type: 'number', canSync: true }, + { key: 'presence_penalty', serverKey: 'presence_penalty', type: 'number', canSync: true }, + { key: 'frequency_penalty', serverKey: 'frequency_penalty', type: 'number', canSync: true }, + { key: 'dry_multiplier', serverKey: 'dry_multiplier', type: 'number', canSync: true }, + { key: 'dry_base', serverKey: 'dry_base', type: 'number', canSync: true }, + { key: 'dry_allowed_length', serverKey: 'dry_allowed_length', type: 'number', canSync: true }, + { key: 'dry_penalty_last_n', serverKey: 'dry_penalty_last_n', type: 'number', canSync: true }, + { key: 'max_tokens', serverKey: 'max_tokens', type: 'number', canSync: true }, + { key: 'samplers', serverKey: 'samplers', type: 'string', canSync: true }, + { + key: 'pasteLongTextToFileLen', + serverKey: 'pasteLongTextToFileLen', + type: 'number', + canSync: true + }, + { key: 'pdfAsImage', serverKey: 'pdfAsImage', type: 'boolean', canSync: true }, + { + key: 'showThoughtInProgress', + serverKey: 'showThoughtInProgress', + type: 'boolean', + canSync: true + }, + { key: 'showToolCalls', serverKey: 'showToolCalls', type: 'boolean', canSync: true }, + { + key: 'disableReasoningFormat', + serverKey: 'disableReasoningFormat', + type: 'boolean', + canSync: true + }, + { key: 'keepStatsVisible', serverKey: 'keepStatsVisible', type: 'boolean', canSync: true }, + { key: 'showMessageStats', serverKey: 'showMessageStats', type: 'boolean', canSync: true }, + { + key: 'askForTitleConfirmation', + serverKey: 'askForTitleConfirmation', + type: 'boolean', + canSync: true + }, + { key: 'disableAutoScroll', serverKey: 'disableAutoScroll', type: 'boolean', canSync: true }, + { + key: 
'renderUserContentAsMarkdown', + serverKey: 'renderUserContentAsMarkdown', + type: 'boolean', + canSync: true + }, + { key: 'autoMicOnEmpty', serverKey: 'autoMicOnEmpty', type: 'boolean', canSync: true }, + { + key: 'pyInterpreterEnabled', + serverKey: 'pyInterpreterEnabled', + type: 'boolean', + canSync: true + }, + { + key: 'enableContinueGeneration', + serverKey: 'enableContinueGeneration', + type: 'boolean', + canSync: true + } +]; + +export class ParameterSyncService { + // ───────────────────────────────────────────────────────────────────────────── + // Extraction + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Round floating-point numbers to avoid JavaScript precision issues + */ + private static roundFloatingPoint(value: ParameterValue): ParameterValue { + return normalizeFloatingPoint(value) as ParameterValue; + } + + /** + * Extract server default parameters that can be synced + */ + static extractServerDefaults( + serverParams: ApiLlamaCppServerProps['default_generation_settings']['params'] | null, + webuiSettings?: Record + ): ParameterRecord { + const extracted: ParameterRecord = {}; + + if (serverParams) { + for (const param of SYNCABLE_PARAMETERS) { + if (param.canSync && param.serverKey in serverParams) { + const value = (serverParams as unknown as Record)[ + param.serverKey + ]; + if (value !== undefined) { + // Apply precision rounding to avoid JavaScript floating-point issues + extracted[param.key] = this.roundFloatingPoint(value); + } + } + } + + // Handle samplers array conversion to string + if (serverParams.samplers && Array.isArray(serverParams.samplers)) { + extracted.samplers = serverParams.samplers.join(';'); + } + } + + if (webuiSettings) { + for (const param of SYNCABLE_PARAMETERS) { + if (param.canSync && param.serverKey in webuiSettings) { + const value = webuiSettings[param.serverKey]; + if (value !== undefined) { + extracted[param.key] = this.roundFloatingPoint(value); + } + } + } + } + + return extracted; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Merging + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Merge server defaults with current user settings + * Returns updated settings that respect user overrides while using server defaults + */ + static mergeWithServerDefaults( + currentSettings: ParameterRecord, + serverDefaults: ParameterRecord, + userOverrides: Set = new Set() + ): ParameterRecord { + const merged = { ...currentSettings }; + + for (const [key, serverValue] of Object.entries(serverDefaults)) { + // Only update if user hasn't explicitly overridden this parameter + if (!userOverrides.has(key)) { + merged[key] = this.roundFloatingPoint(serverValue); + } + } + + return merged; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Info + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Get parameter information including source and values + */ + static getParameterInfo( + key: string, + currentValue: ParameterValue, + propsDefaults: ParameterRecord, + userOverrides: Set + ): ParameterInfo { + const hasPropsDefault = propsDefaults[key] !== undefined; + const isUserOverride = userOverrides.has(key); + + // Simple logic: either using default (from props) or custom (user override) + const source: ParameterSource = isUserOverride ? 
'custom' : 'default'; + + return { + value: currentValue, + source, + serverDefault: hasPropsDefault ? propsDefaults[key] : undefined, // Keep same field name for compatibility + userOverride: isUserOverride ? currentValue : undefined + }; + } + + /** + * Check if a parameter can be synced from server + */ + static canSyncParameter(key: string): boolean { + return SYNCABLE_PARAMETERS.some((param) => param.key === key && param.canSync); + } + + /** + * Get all syncable parameter keys + */ + static getSyncableParameterKeys(): string[] { + return SYNCABLE_PARAMETERS.filter((param) => param.canSync).map((param) => param.key); + } + + /** + * Validate server parameter value + */ + static validateServerParameter(key: string, value: ParameterValue): boolean { + const param = SYNCABLE_PARAMETERS.find((p) => p.key === key); + if (!param) return false; + + switch (param.type) { + case 'number': + return typeof value === 'number' && !isNaN(value); + case 'string': + return typeof value === 'string'; + case 'boolean': + return typeof value === 'boolean'; + default: + return false; + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Diff + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Create a diff between current settings and server defaults + */ + static createParameterDiff( + currentSettings: ParameterRecord, + serverDefaults: ParameterRecord + ): Record { + const diff: Record< + string, + { current: ParameterValue; server: ParameterValue; differs: boolean } + > = {}; + + for (const key of this.getSyncableParameterKeys()) { + const currentValue = currentSettings[key]; + const serverValue = serverDefaults[key]; + + if (serverValue !== undefined) { + diff[key] = { + current: currentValue, + server: serverValue, + differs: currentValue !== serverValue + }; + } + } + + return diff; + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/services/props.ts b/llama.cpp/tools/server/webui/src/lib/services/props.ts new file mode 100644 index 0000000..01fead9 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/services/props.ts @@ -0,0 +1,77 @@ +import { getAuthHeaders } from '$lib/utils'; + +/** + * PropsService - Server properties management + * + * This service handles communication with the /props endpoint to retrieve + * server configuration, model information, and capabilities. 
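+ *
+ * Usage sketch (illustrative; the model id is a placeholder):
+ * ```ts
+ * // Read server props without triggering a model load
+ * const props = await PropsService.fetch();
+ * // ROUTER mode: props for a specific model
+ * const modelProps = await PropsService.fetchForModel('some-model-id');
+ * ```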
+ * + * **Responsibilities:** + * - Fetch server properties from /props endpoint + * - Handle API authentication + * - Parse and validate server response + * + * **Used by:** + * - serverStore: Primary consumer for server state management + */ +export class PropsService { + // ───────────────────────────────────────────────────────────────────────────── + // Fetching + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Fetches server properties from the /props endpoint + * + * @param autoload - If false, prevents automatic model loading (default: false) + * @returns {Promise} Server properties + * @throws {Error} If the request fails or returns invalid data + */ + static async fetch(autoload = false): Promise { + const url = new URL('./props', window.location.href); + if (!autoload) { + url.searchParams.set('autoload', 'false'); + } + + const response = await fetch(url.toString(), { + headers: getAuthHeaders() + }); + + if (!response.ok) { + throw new Error( + `Failed to fetch server properties: ${response.status} ${response.statusText}` + ); + } + + const data = await response.json(); + return data as ApiLlamaCppServerProps; + } + + /** + * Fetches server properties for a specific model (ROUTER mode) + * + * @param modelId - The model ID to fetch properties for + * @param autoload - If false, prevents automatic model loading (default: false) + * @returns {Promise} Server properties for the model + * @throws {Error} If the request fails or returns invalid data + */ + static async fetchForModel(modelId: string, autoload = false): Promise { + const url = new URL('./props', window.location.href); + url.searchParams.set('model', modelId); + if (!autoload) { + url.searchParams.set('autoload', 'false'); + } + + const response = await fetch(url.toString(), { + headers: getAuthHeaders() + }); + + if (!response.ok) { + throw new Error( + `Failed to fetch model properties: ${response.status} ${response.statusText}` + ); + } + + const data = await response.json(); + return data as ApiLlamaCppServerProps; + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/stores/chat.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/chat.svelte.ts new file mode 100644 index 0000000..879b2f3 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/stores/chat.svelte.ts @@ -0,0 +1,1487 @@ +import { DatabaseService, ChatService } from '$lib/services'; +import { conversationsStore } from '$lib/stores/conversations.svelte'; +import { config } from '$lib/stores/settings.svelte'; +import { contextSize, isRouterMode } from '$lib/stores/server.svelte'; +import { + selectedModelName, + modelsStore, + selectedModelContextSize +} from '$lib/stores/models.svelte'; +import { + normalizeModelName, + filterByLeafNodeId, + findDescendantMessages, + findLeafNode +} from '$lib/utils'; +import { SvelteMap } from 'svelte/reactivity'; +import { DEFAULT_CONTEXT } from '$lib/constants/default-context'; + +/** + * chatStore - Active AI interaction and streaming state management + * + * **Terminology - Chat vs Conversation:** + * - **Chat**: The active interaction space with the Chat Completions API. Represents the + * real-time streaming session, loading states, and UI visualization of AI communication. + * A "chat" is ephemeral - it exists only while the user is actively interacting with the AI. + * - **Conversation**: The persistent database entity storing all messages and metadata. + * Managed by conversationsStore, conversations persist across sessions and page reloads. 
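+ *
+ * Illustrative sketch (assumes the `chatStore` singleton this module exports):
+ * ```ts
+ * // Keep the global loading/streaming UI in sync when the user switches chats
+ * chatStore.setActiveProcessingConversation(conv.id);
+ * chatStore.syncLoadingStateForChat(conv.id);
+ * ```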
+ * + * This store manages all active AI interactions including real-time streaming, response + * generation, and per-chat loading states. It handles the runtime layer between UI and + * AI backend, supporting concurrent streaming across multiple conversations. + * + * **Architecture & Relationships:** + * - **chatStore** (this class): Active AI session and streaming management + * - Manages real-time AI response streaming via ChatService + * - Tracks per-chat loading and streaming states for concurrent sessions + * - Handles message operations (send, edit, regenerate, branch) + * - Coordinates with conversationsStore for persistence + * + * - **conversationsStore**: Provides conversation data and message arrays for chat context + * - **ChatService**: Low-level API communication with llama.cpp server + * - **DatabaseService**: Message persistence and retrieval + * + * **Key Features:** + * - **AI Streaming**: Real-time token streaming with abort support + * - **Concurrent Chats**: Independent loading/streaming states per conversation + * - **Message Branching**: Edit, regenerate, and branch conversation trees + * - **Error Handling**: Timeout and server error recovery with user feedback + * - **Graceful Stop**: Save partial responses when stopping generation + * + * **State Management:** + * - Global `isLoading` and `currentResponse` for active chat UI + * - `chatLoadingStates` Map for per-conversation streaming tracking + * - `chatStreamingStates` Map for per-conversation streaming content + * - `processingStates` Map for per-conversation processing state (timing/context info) + * - Automatic state sync when switching between conversations + */ +class ChatStore { + // ───────────────────────────────────────────────────────────────────────────── + // State + // ───────────────────────────────────────────────────────────────────────────── + + activeProcessingState = $state(null); + currentResponse = $state(''); + errorDialogState = $state<{ + type: 'timeout' | 'server'; + message: string; + contextInfo?: { n_prompt_tokens: number; n_ctx: number }; + } | null>(null); + isLoading = $state(false); + chatLoadingStates = new SvelteMap(); + chatStreamingStates = new SvelteMap(); + private abortControllers = new SvelteMap(); + private processingStates = new SvelteMap(); + private activeConversationId = $state(null); + private isStreamingActive = $state(false); + private isEditModeActive = $state(false); + private addFilesHandler: ((files: File[]) => void) | null = $state(null); + + // ───────────────────────────────────────────────────────────────────────────── + // Loading State + // ───────────────────────────────────────────────────────────────────────────── + + private setChatLoading(convId: string, loading: boolean): void { + if (loading) { + this.chatLoadingStates.set(convId, true); + if (conversationsStore.activeConversation?.id === convId) this.isLoading = true; + } else { + this.chatLoadingStates.delete(convId); + if (conversationsStore.activeConversation?.id === convId) this.isLoading = false; + } + } + + private isChatLoading(convId: string): boolean { + return this.chatLoadingStates.get(convId) || false; + } + + private setChatStreaming(convId: string, response: string, messageId: string): void { + this.chatStreamingStates.set(convId, { response, messageId }); + if (conversationsStore.activeConversation?.id === convId) this.currentResponse = response; + } + + private clearChatStreaming(convId: string): void { + this.chatStreamingStates.delete(convId); + if 
(conversationsStore.activeConversation?.id === convId) this.currentResponse = ''; + } + + private getChatStreaming(convId: string): { response: string; messageId: string } | undefined { + return this.chatStreamingStates.get(convId); + } + + syncLoadingStateForChat(convId: string): void { + this.isLoading = this.isChatLoading(convId); + const streamingState = this.getChatStreaming(convId); + this.currentResponse = streamingState?.response || ''; + } + + /** + * Clears global UI state without affecting background streaming. + * Used when navigating to empty/new chat while other chats stream in background. + */ + clearUIState(): void { + this.isLoading = false; + this.currentResponse = ''; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Processing State + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Set the active conversation for statistics display + */ + setActiveProcessingConversation(conversationId: string | null): void { + this.activeConversationId = conversationId; + + if (conversationId) { + this.activeProcessingState = this.processingStates.get(conversationId) || null; + } else { + this.activeProcessingState = null; + } + } + + /** + * Get processing state for a specific conversation + */ + getProcessingState(conversationId: string): ApiProcessingState | null { + return this.processingStates.get(conversationId) || null; + } + + /** + * Clear processing state for a specific conversation + */ + clearProcessingState(conversationId: string): void { + this.processingStates.delete(conversationId); + + if (conversationId === this.activeConversationId) { + this.activeProcessingState = null; + } + } + + /** + * Get the current processing state for the active conversation (reactive) + * Returns the direct reactive state for UI binding + */ + getActiveProcessingState(): ApiProcessingState | null { + return this.activeProcessingState; + } + + /** + * Updates processing state with timing data from streaming response + */ + updateProcessingStateFromTimings( + timingData: { + prompt_n: number; + prompt_ms?: number; + predicted_n: number; + predicted_per_second: number; + cache_n: number; + prompt_progress?: ChatMessagePromptProgress; + }, + conversationId?: string + ): void { + const processingState = this.parseTimingData(timingData); + + if (processingState === null) { + console.warn('Failed to parse timing data - skipping update'); + return; + } + + const targetId = conversationId || this.activeConversationId; + if (targetId) { + this.processingStates.set(targetId, processingState); + + if (targetId === this.activeConversationId) { + this.activeProcessingState = processingState; + } + } + } + + /** + * Get current processing state (sync version for reactive access) + */ + getCurrentProcessingStateSync(): ApiProcessingState | null { + return this.activeProcessingState; + } + + /** + * Restore processing state from last assistant message timings + * Call this when keepStatsVisible is enabled and we need to show last known stats + */ + restoreProcessingStateFromMessages(messages: DatabaseMessage[], conversationId: string): void { + for (let i = messages.length - 1; i >= 0; i--) { + const message = messages[i]; + if (message.role === 'assistant' && message.timings) { + const restoredState = this.parseTimingData({ + prompt_n: message.timings.prompt_n || 0, + prompt_ms: message.timings.prompt_ms, + predicted_n: message.timings.predicted_n || 0, + predicted_per_second: + message.timings.predicted_n && 
message.timings.predicted_ms + ? (message.timings.predicted_n / message.timings.predicted_ms) * 1000 + : 0, + cache_n: message.timings.cache_n || 0 + }); + + if (restoredState) { + this.processingStates.set(conversationId, restoredState); + + if (conversationId === this.activeConversationId) { + this.activeProcessingState = restoredState; + } + + return; + } + } + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Streaming + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Start streaming session tracking + */ + startStreaming(): void { + this.isStreamingActive = true; + } + + /** + * Stop streaming session tracking + */ + stopStreaming(): void { + this.isStreamingActive = false; + } + + /** + * Check if currently in a streaming session + */ + isStreaming(): boolean { + return this.isStreamingActive; + } + + private getContextTotal(): number { + const activeState = this.getActiveProcessingState(); + + if (activeState && activeState.contextTotal > 0) { + return activeState.contextTotal; + } + + if (isRouterMode()) { + const modelContextSize = selectedModelContextSize(); + if (modelContextSize && modelContextSize > 0) { + return modelContextSize; + } + } + + const propsContextSize = contextSize(); + if (propsContextSize && propsContextSize > 0) { + return propsContextSize; + } + + return DEFAULT_CONTEXT; + } + + private parseTimingData(timingData: Record): ApiProcessingState | null { + const promptTokens = (timingData.prompt_n as number) || 0; + const promptMs = (timingData.prompt_ms as number) || undefined; + const predictedTokens = (timingData.predicted_n as number) || 0; + const tokensPerSecond = (timingData.predicted_per_second as number) || 0; + const cacheTokens = (timingData.cache_n as number) || 0; + const promptProgress = timingData.prompt_progress as + | { + total: number; + cache: number; + processed: number; + time_ms: number; + } + | undefined; + + const contextTotal = this.getContextTotal(); + const currentConfig = config(); + const outputTokensMax = currentConfig.max_tokens || -1; + + // Note: for timings data, the n_prompt does NOT include cache tokens + const contextUsed = promptTokens + cacheTokens + predictedTokens; + const outputTokensUsed = predictedTokens; + + // Note: for prompt progress, the "processed" DOES include cache tokens + // we need to exclude them to get the real prompt tokens processed count + const progressCache = promptProgress?.cache || 0; + const progressActualDone = (promptProgress?.processed ?? 0) - progressCache; + const progressActualTotal = (promptProgress?.total ?? 0) - progressCache; + const progressPercent = promptProgress + ? Math.round((progressActualDone / progressActualTotal) * 100) + : undefined; + + return { + status: predictedTokens > 0 ? 'generating' : promptProgress ? 'preparing' : 'idle', + tokensDecoded: predictedTokens, + tokensRemaining: outputTokensMax - predictedTokens, + contextUsed, + contextTotal, + outputTokensUsed, + outputTokensMax, + hasNextToken: predictedTokens > 0, + tokensPerSecond, + temperature: currentConfig.temperature ?? 0.8, + topP: currentConfig.top_p ?? 0.95, + speculative: false, + progressPercent, + promptProgress, + promptTokens, + promptMs, + cacheTokens + }; + } + + /** + * Gets the model used in a conversation based on the latest assistant message. + * Returns the model from the most recent assistant message that has a model field set. 
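+ *
+ * Illustrative usage (a sketch; this call site is an assumption, not part of this change):
+ * @example
+ * const model = chatStore.getConversationModel(conversationsStore.activeMessages);
+ * if (model) console.log(`Last assistant response came from ${model}`);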
+ * + * @param messages - Array of messages to search through + * @returns The model name or null if no model found + */ + getConversationModel(messages: DatabaseMessage[]): string | null { + // Search backwards through messages to find most recent assistant message with model + for (let i = messages.length - 1; i >= 0; i--) { + const message = messages[i]; + if (message.role === 'assistant' && message.model) { + return message.model; + } + } + return null; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Error Handling + // ───────────────────────────────────────────────────────────────────────────── + + private isAbortError(error: unknown): boolean { + return error instanceof Error && (error.name === 'AbortError' || error instanceof DOMException); + } + + private showErrorDialog( + type: 'timeout' | 'server', + message: string, + contextInfo?: { n_prompt_tokens: number; n_ctx: number } + ): void { + this.errorDialogState = { type, message, contextInfo }; + } + + dismissErrorDialog(): void { + this.errorDialogState = null; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Message Operations + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Finds a message by ID and optionally validates its role. + * Returns message and index, or null if not found or role doesn't match. + */ + private getMessageByIdWithRole( + messageId: string, + expectedRole?: ChatRole + ): { message: DatabaseMessage; index: number } | null { + const index = conversationsStore.findMessageIndex(messageId); + if (index === -1) return null; + + const message = conversationsStore.activeMessages[index]; + if (expectedRole && message.role !== expectedRole) return null; + + return { message, index }; + } + + async addMessage( + role: ChatRole, + content: string, + type: ChatMessageType = 'text', + parent: string = '-1', + extras?: DatabaseMessageExtra[] + ): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) { + console.error('No active conversation when trying to add message'); + return null; + } + + try { + let parentId: string | null = null; + + if (parent === '-1') { + const activeMessages = conversationsStore.activeMessages; + if (activeMessages.length > 0) { + parentId = activeMessages[activeMessages.length - 1].id; + } else { + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const rootMessage = allMessages.find((m) => m.parent === null && m.type === 'root'); + if (!rootMessage) { + parentId = await DatabaseService.createRootMessage(activeConv.id); + } else { + parentId = rootMessage.id; + } + } + } else { + parentId = parent; + } + + const message = await DatabaseService.createMessageBranch( + { + convId: activeConv.id, + role, + content, + type, + timestamp: Date.now(), + thinking: '', + toolCalls: '', + children: [], + extra: extras + }, + parentId + ); + + conversationsStore.addMessageToActive(message); + await conversationsStore.updateCurrentNode(message.id); + conversationsStore.updateConversationTimestamp(); + + return message; + } catch (error) { + console.error('Failed to add message:', error); + return null; + } + } + + private async createAssistantMessage(parentId?: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) return null; + + return await DatabaseService.createMessageBranch( + { + convId: activeConv.id, + type: 'text', + role: 'assistant', + content: 
'', + timestamp: Date.now(), + thinking: '', + toolCalls: '', + children: [], + model: null + }, + parentId || null + ); + } + + private async streamChatCompletion( + allMessages: DatabaseMessage[], + assistantMessage: DatabaseMessage, + onComplete?: (content: string) => Promise, + onError?: (error: Error) => void, + modelOverride?: string | null + ): Promise { + // Ensure model props are cached before streaming (for correct n_ctx in processing info) + if (isRouterMode()) { + const modelName = modelOverride || selectedModelName(); + if (modelName && !modelsStore.getModelProps(modelName)) { + await modelsStore.fetchModelProps(modelName); + } + } + + let streamedContent = ''; + let streamedReasoningContent = ''; + let streamedToolCallContent = ''; + let resolvedModel: string | null = null; + let modelPersisted = false; + + const recordModel = (modelName: string | null | undefined, persistImmediately = true): void => { + if (!modelName) return; + const normalizedModel = normalizeModelName(modelName); + if (!normalizedModel || normalizedModel === resolvedModel) return; + resolvedModel = normalizedModel; + const messageIndex = conversationsStore.findMessageIndex(assistantMessage.id); + conversationsStore.updateMessageAtIndex(messageIndex, { model: normalizedModel }); + if (persistImmediately && !modelPersisted) { + modelPersisted = true; + DatabaseService.updateMessage(assistantMessage.id, { model: normalizedModel }).catch(() => { + modelPersisted = false; + resolvedModel = null; + }); + } + }; + + this.startStreaming(); + this.setActiveProcessingConversation(assistantMessage.convId); + + const abortController = this.getOrCreateAbortController(assistantMessage.convId); + + await ChatService.sendMessage( + allMessages, + { + ...this.getApiOptions(), + ...(modelOverride ? { model: modelOverride } : {}), + onChunk: (chunk: string) => { + streamedContent += chunk; + this.setChatStreaming(assistantMessage.convId, streamedContent, assistantMessage.id); + const idx = conversationsStore.findMessageIndex(assistantMessage.id); + conversationsStore.updateMessageAtIndex(idx, { content: streamedContent }); + }, + onReasoningChunk: (reasoningChunk: string) => { + streamedReasoningContent += reasoningChunk; + const idx = conversationsStore.findMessageIndex(assistantMessage.id); + conversationsStore.updateMessageAtIndex(idx, { thinking: streamedReasoningContent }); + }, + onToolCallChunk: (toolCallChunk: string) => { + const chunk = toolCallChunk.trim(); + if (!chunk) return; + streamedToolCallContent = chunk; + const idx = conversationsStore.findMessageIndex(assistantMessage.id); + conversationsStore.updateMessageAtIndex(idx, { toolCalls: streamedToolCallContent }); + }, + onModel: (modelName: string) => recordModel(modelName), + onTimings: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => { + const tokensPerSecond = + timings?.predicted_ms && timings?.predicted_n + ? 
(timings.predicted_n / timings.predicted_ms) * 1000 + : 0; + this.updateProcessingStateFromTimings( + { + prompt_n: timings?.prompt_n || 0, + prompt_ms: timings?.prompt_ms, + predicted_n: timings?.predicted_n || 0, + predicted_per_second: tokensPerSecond, + cache_n: timings?.cache_n || 0, + prompt_progress: promptProgress + }, + assistantMessage.convId + ); + }, + onComplete: async ( + finalContent?: string, + reasoningContent?: string, + timings?: ChatMessageTimings, + toolCallContent?: string + ) => { + this.stopStreaming(); + + const updateData: Record = { + content: finalContent || streamedContent, + thinking: reasoningContent || streamedReasoningContent, + toolCalls: toolCallContent || streamedToolCallContent, + timings + }; + if (resolvedModel && !modelPersisted) { + updateData.model = resolvedModel; + } + await DatabaseService.updateMessage(assistantMessage.id, updateData); + + const idx = conversationsStore.findMessageIndex(assistantMessage.id); + const uiUpdate: Partial = { + content: updateData.content as string, + toolCalls: updateData.toolCalls as string + }; + if (timings) uiUpdate.timings = timings; + if (resolvedModel) uiUpdate.model = resolvedModel; + + conversationsStore.updateMessageAtIndex(idx, uiUpdate); + await conversationsStore.updateCurrentNode(assistantMessage.id); + + if (onComplete) await onComplete(streamedContent); + this.setChatLoading(assistantMessage.convId, false); + this.clearChatStreaming(assistantMessage.convId); + this.clearProcessingState(assistantMessage.convId); + + if (isRouterMode()) { + modelsStore.fetchRouterModels().catch(console.error); + } + }, + onError: (error: Error) => { + this.stopStreaming(); + + if (this.isAbortError(error)) { + this.setChatLoading(assistantMessage.convId, false); + this.clearChatStreaming(assistantMessage.convId); + this.clearProcessingState(assistantMessage.convId); + + return; + } + + console.error('Streaming error:', error); + + this.setChatLoading(assistantMessage.convId, false); + this.clearChatStreaming(assistantMessage.convId); + this.clearProcessingState(assistantMessage.convId); + + const idx = conversationsStore.findMessageIndex(assistantMessage.id); + + if (idx !== -1) { + const failedMessage = conversationsStore.removeMessageAtIndex(idx); + if (failedMessage) DatabaseService.deleteMessage(failedMessage.id).catch(console.error); + } + + const contextInfo = ( + error as Error & { contextInfo?: { n_prompt_tokens: number; n_ctx: number } } + ).contextInfo; + + this.showErrorDialog( + error.name === 'TimeoutError' ? 
'timeout' : 'server', + error.message, + contextInfo + ); + + if (onError) onError(error); + } + }, + assistantMessage.convId, + abortController.signal + ); + } + + async sendMessage(content: string, extras?: DatabaseMessageExtra[]): Promise { + if (!content.trim() && (!extras || extras.length === 0)) return; + const activeConv = conversationsStore.activeConversation; + if (activeConv && this.isChatLoading(activeConv.id)) return; + + let isNewConversation = false; + if (!activeConv) { + await conversationsStore.createConversation(); + isNewConversation = true; + } + const currentConv = conversationsStore.activeConversation; + if (!currentConv) return; + + this.errorDialogState = null; + this.setChatLoading(currentConv.id, true); + this.clearChatStreaming(currentConv.id); + + try { + if (isNewConversation) { + const rootId = await DatabaseService.createRootMessage(currentConv.id); + const currentConfig = config(); + const systemPrompt = currentConfig.systemMessage?.toString().trim(); + + if (systemPrompt) { + const systemMessage = await DatabaseService.createSystemMessage( + currentConv.id, + systemPrompt, + rootId + ); + + conversationsStore.addMessageToActive(systemMessage); + } + } + + const userMessage = await this.addMessage('user', content, 'text', '-1', extras); + if (!userMessage) throw new Error('Failed to add user message'); + if (isNewConversation && content) + await conversationsStore.updateConversationName(currentConv.id, content.trim()); + + const assistantMessage = await this.createAssistantMessage(userMessage.id); + + if (!assistantMessage) throw new Error('Failed to create assistant message'); + + conversationsStore.addMessageToActive(assistantMessage); + await this.streamChatCompletion( + conversationsStore.activeMessages.slice(0, -1), + assistantMessage + ); + } catch (error) { + if (this.isAbortError(error)) { + this.setChatLoading(currentConv.id, false); + return; + } + console.error('Failed to send message:', error); + this.setChatLoading(currentConv.id, false); + if (!this.errorDialogState) { + const dialogType = + error instanceof Error && error.name === 'TimeoutError' ? 'timeout' : 'server'; + const contextInfo = ( + error as Error & { contextInfo?: { n_prompt_tokens: number; n_ctx: number } } + ).contextInfo; + + this.showErrorDialog( + dialogType, + error instanceof Error ? 
error.message : 'Unknown error', + contextInfo + ); + } + } + } + + async stopGeneration(): Promise { + const activeConv = conversationsStore.activeConversation; + + if (!activeConv) return; + + await this.stopGenerationForChat(activeConv.id); + } + + async stopGenerationForChat(convId: string): Promise { + await this.savePartialResponseIfNeeded(convId); + + this.stopStreaming(); + this.abortRequest(convId); + this.setChatLoading(convId, false); + this.clearChatStreaming(convId); + this.clearProcessingState(convId); + } + + /** + * Gets or creates an AbortController for a conversation + */ + private getOrCreateAbortController(convId: string): AbortController { + let controller = this.abortControllers.get(convId); + if (!controller || controller.signal.aborted) { + controller = new AbortController(); + this.abortControllers.set(convId, controller); + } + return controller; + } + + /** + * Aborts any ongoing request for a conversation + */ + private abortRequest(convId?: string): void { + if (convId) { + const controller = this.abortControllers.get(convId); + if (controller) { + controller.abort(); + this.abortControllers.delete(convId); + } + } else { + for (const controller of this.abortControllers.values()) { + controller.abort(); + } + this.abortControllers.clear(); + } + } + + private async savePartialResponseIfNeeded(convId?: string): Promise { + const conversationId = convId || conversationsStore.activeConversation?.id; + + if (!conversationId) return; + + const streamingState = this.chatStreamingStates.get(conversationId); + + if (!streamingState || !streamingState.response.trim()) return; + + const messages = + conversationId === conversationsStore.activeConversation?.id + ? conversationsStore.activeMessages + : await conversationsStore.getConversationMessages(conversationId); + + if (!messages.length) return; + + const lastMessage = messages[messages.length - 1]; + + if (lastMessage?.role === 'assistant') { + try { + const updateData: { content: string; thinking?: string; timings?: ChatMessageTimings } = { + content: streamingState.response + }; + if (lastMessage.thinking?.trim()) updateData.thinking = lastMessage.thinking; + const lastKnownState = this.getProcessingState(conversationId); + if (lastKnownState) { + updateData.timings = { + prompt_n: lastKnownState.promptTokens || 0, + prompt_ms: lastKnownState.promptMs, + predicted_n: lastKnownState.tokensDecoded || 0, + cache_n: lastKnownState.cacheTokens || 0, + predicted_ms: + lastKnownState.tokensPerSecond && lastKnownState.tokensDecoded + ? 
(lastKnownState.tokensDecoded / lastKnownState.tokensPerSecond) * 1000 + : undefined + }; + } + + await DatabaseService.updateMessage(lastMessage.id, updateData); + + lastMessage.content = this.currentResponse; + + if (updateData.thinking) lastMessage.thinking = updateData.thinking; + + if (updateData.timings) lastMessage.timings = updateData.timings; + } catch (error) { + lastMessage.content = this.currentResponse; + console.error('Failed to save partial response:', error); + } + } + } + + async updateMessage(messageId: string, newContent: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) return; + if (this.isLoading) this.stopGeneration(); + + const result = this.getMessageByIdWithRole(messageId, 'user'); + if (!result) return; + const { message: messageToUpdate, index: messageIndex } = result; + const originalContent = messageToUpdate.content; + + try { + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const rootMessage = allMessages.find((m) => m.type === 'root' && m.parent === null); + const isFirstUserMessage = rootMessage && messageToUpdate.parent === rootMessage.id; + + conversationsStore.updateMessageAtIndex(messageIndex, { content: newContent }); + await DatabaseService.updateMessage(messageId, { content: newContent }); + + if (isFirstUserMessage && newContent.trim()) { + await conversationsStore.updateConversationTitleWithConfirmation( + activeConv.id, + newContent.trim(), + conversationsStore.titleUpdateConfirmationCallback + ); + } + + const messagesToRemove = conversationsStore.activeMessages.slice(messageIndex + 1); + + for (const message of messagesToRemove) await DatabaseService.deleteMessage(message.id); + + conversationsStore.sliceActiveMessages(messageIndex + 1); + conversationsStore.updateConversationTimestamp(); + + this.setChatLoading(activeConv.id, true); + this.clearChatStreaming(activeConv.id); + + const assistantMessage = await this.createAssistantMessage(); + + if (!assistantMessage) throw new Error('Failed to create assistant message'); + + conversationsStore.addMessageToActive(assistantMessage); + + await conversationsStore.updateCurrentNode(assistantMessage.id); + await this.streamChatCompletion( + conversationsStore.activeMessages.slice(0, -1), + assistantMessage, + undefined, + () => { + conversationsStore.updateMessageAtIndex(conversationsStore.findMessageIndex(messageId), { + content: originalContent + }); + } + ); + } catch (error) { + if (!this.isAbortError(error)) console.error('Failed to update message:', error); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Regeneration + // ───────────────────────────────────────────────────────────────────────────── + + async regenerateMessage(messageId: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv || this.isLoading) return; + + const result = this.getMessageByIdWithRole(messageId, 'assistant'); + if (!result) return; + const { index: messageIndex } = result; + + try { + const messagesToRemove = conversationsStore.activeMessages.slice(messageIndex); + for (const message of messagesToRemove) await DatabaseService.deleteMessage(message.id); + conversationsStore.sliceActiveMessages(messageIndex); + conversationsStore.updateConversationTimestamp(); + + this.setChatLoading(activeConv.id, true); + this.clearChatStreaming(activeConv.id); + + const parentMessageId = + conversationsStore.activeMessages.length > 0 + ? 
conversationsStore.activeMessages[conversationsStore.activeMessages.length - 1].id + : undefined; + const assistantMessage = await this.createAssistantMessage(parentMessageId); + if (!assistantMessage) throw new Error('Failed to create assistant message'); + conversationsStore.addMessageToActive(assistantMessage); + await this.streamChatCompletion( + conversationsStore.activeMessages.slice(0, -1), + assistantMessage + ); + } catch (error) { + if (!this.isAbortError(error)) console.error('Failed to regenerate message:', error); + this.setChatLoading(activeConv?.id || '', false); + } + } + + async getDeletionInfo(messageId: string): Promise<{ + totalCount: number; + userMessages: number; + assistantMessages: number; + messageTypes: string[]; + }> { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) + return { totalCount: 0, userMessages: 0, assistantMessages: 0, messageTypes: [] }; + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const descendants = findDescendantMessages(allMessages, messageId); + const allToDelete = [messageId, ...descendants]; + const messagesToDelete = allMessages.filter((m) => allToDelete.includes(m.id)); + let userMessages = 0, + assistantMessages = 0; + const messageTypes: string[] = []; + for (const msg of messagesToDelete) { + if (msg.role === 'user') { + userMessages++; + if (!messageTypes.includes('user message')) messageTypes.push('user message'); + } else if (msg.role === 'assistant') { + assistantMessages++; + if (!messageTypes.includes('assistant response')) messageTypes.push('assistant response'); + } + } + return { totalCount: allToDelete.length, userMessages, assistantMessages, messageTypes }; + } + + async deleteMessage(messageId: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) return; + try { + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const messageToDelete = allMessages.find((m) => m.id === messageId); + if (!messageToDelete) return; + + const currentPath = filterByLeafNodeId(allMessages, activeConv.currNode || '', false); + const isInCurrentPath = currentPath.some((m) => m.id === messageId); + + if (isInCurrentPath && messageToDelete.parent) { + const siblings = allMessages.filter( + (m) => m.parent === messageToDelete.parent && m.id !== messageId + ); + + if (siblings.length > 0) { + const latestSibling = siblings.reduce((latest, sibling) => + sibling.timestamp > latest.timestamp ? 
sibling : latest + ); + await conversationsStore.updateCurrentNode(findLeafNode(allMessages, latestSibling.id)); + } else if (messageToDelete.parent) { + await conversationsStore.updateCurrentNode( + findLeafNode(allMessages, messageToDelete.parent) + ); + } + } + await DatabaseService.deleteMessageCascading(activeConv.id, messageId); + await conversationsStore.refreshActiveMessages(); + + conversationsStore.updateConversationTimestamp(); + } catch (error) { + console.error('Failed to delete message:', error); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Editing + // ───────────────────────────────────────────────────────────────────────────── + + clearEditMode(): void { + this.isEditModeActive = false; + this.addFilesHandler = null; + } + + async continueAssistantMessage(messageId: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv || this.isLoading) return; + + const result = this.getMessageByIdWithRole(messageId, 'assistant'); + if (!result) return; + const { message: msg, index: idx } = result; + + if (this.isChatLoading(activeConv.id)) return; + + try { + this.errorDialogState = null; + this.setChatLoading(activeConv.id, true); + this.clearChatStreaming(activeConv.id); + + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const dbMessage = allMessages.find((m) => m.id === messageId); + + if (!dbMessage) { + this.setChatLoading(activeConv.id, false); + + return; + } + + const originalContent = dbMessage.content; + const originalThinking = dbMessage.thinking || ''; + + const conversationContext = conversationsStore.activeMessages.slice(0, idx); + const contextWithContinue = [ + ...conversationContext, + { role: 'assistant' as const, content: originalContent } + ]; + + let appendedContent = '', + appendedThinking = '', + hasReceivedContent = false; + + const abortController = this.getOrCreateAbortController(msg.convId); + + await ChatService.sendMessage( + contextWithContinue, + { + ...this.getApiOptions(), + + onChunk: (chunk: string) => { + hasReceivedContent = true; + appendedContent += chunk; + const fullContent = originalContent + appendedContent; + this.setChatStreaming(msg.convId, fullContent, msg.id); + conversationsStore.updateMessageAtIndex(idx, { content: fullContent }); + }, + + onReasoningChunk: (reasoningChunk: string) => { + hasReceivedContent = true; + appendedThinking += reasoningChunk; + conversationsStore.updateMessageAtIndex(idx, { + thinking: originalThinking + appendedThinking + }); + }, + + onTimings: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => { + const tokensPerSecond = + timings?.predicted_ms && timings?.predicted_n + ? 
(timings.predicted_n / timings.predicted_ms) * 1000 + : 0; + this.updateProcessingStateFromTimings( + { + prompt_n: timings?.prompt_n || 0, + prompt_ms: timings?.prompt_ms, + predicted_n: timings?.predicted_n || 0, + predicted_per_second: tokensPerSecond, + cache_n: timings?.cache_n || 0, + prompt_progress: promptProgress + }, + msg.convId + ); + }, + + onComplete: async ( + finalContent?: string, + reasoningContent?: string, + timings?: ChatMessageTimings + ) => { + const fullContent = originalContent + (finalContent || appendedContent); + const fullThinking = originalThinking + (reasoningContent || appendedThinking); + await DatabaseService.updateMessage(msg.id, { + content: fullContent, + thinking: fullThinking, + timestamp: Date.now(), + timings + }); + conversationsStore.updateMessageAtIndex(idx, { + content: fullContent, + thinking: fullThinking, + timestamp: Date.now(), + timings + }); + conversationsStore.updateConversationTimestamp(); + this.setChatLoading(msg.convId, false); + this.clearChatStreaming(msg.convId); + this.clearProcessingState(msg.convId); + }, + + onError: async (error: Error) => { + if (this.isAbortError(error)) { + if (hasReceivedContent && appendedContent) { + await DatabaseService.updateMessage(msg.id, { + content: originalContent + appendedContent, + thinking: originalThinking + appendedThinking, + timestamp: Date.now() + }); + conversationsStore.updateMessageAtIndex(idx, { + content: originalContent + appendedContent, + thinking: originalThinking + appendedThinking, + timestamp: Date.now() + }); + } + this.setChatLoading(msg.convId, false); + this.clearChatStreaming(msg.convId); + this.clearProcessingState(msg.convId); + return; + } + console.error('Continue generation error:', error); + conversationsStore.updateMessageAtIndex(idx, { + content: originalContent, + thinking: originalThinking + }); + await DatabaseService.updateMessage(msg.id, { + content: originalContent, + thinking: originalThinking + }); + this.setChatLoading(msg.convId, false); + this.clearChatStreaming(msg.convId); + this.clearProcessingState(msg.convId); + this.showErrorDialog( + error.name === 'TimeoutError' ? 'timeout' : 'server', + error.message + ); + } + }, + msg.convId, + abortController.signal + ); + } catch (error) { + if (!this.isAbortError(error)) console.error('Failed to continue message:', error); + if (activeConv) this.setChatLoading(activeConv.id, false); + } + } + + async editAssistantMessage( + messageId: string, + newContent: string, + shouldBranch: boolean + ): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv || this.isLoading) return; + + const result = this.getMessageByIdWithRole(messageId, 'assistant'); + if (!result) return; + const { message: msg, index: idx } = result; + + try { + if (shouldBranch) { + const newMessage = await DatabaseService.createMessageBranch( + { + convId: msg.convId, + type: msg.type, + timestamp: Date.now(), + role: msg.role, + content: newContent, + thinking: msg.thinking || '', + toolCalls: msg.toolCalls || '', + children: [], + model: msg.model + }, + msg.parent! 
+ ); + await conversationsStore.updateCurrentNode(newMessage.id); + } else { + await DatabaseService.updateMessage(msg.id, { content: newContent }); + await conversationsStore.updateCurrentNode(msg.id); + conversationsStore.updateMessageAtIndex(idx, { + content: newContent + }); + } + conversationsStore.updateConversationTimestamp(); + await conversationsStore.refreshActiveMessages(); + } catch (error) { + console.error('Failed to edit assistant message:', error); + } + } + + async editUserMessagePreserveResponses( + messageId: string, + newContent: string, + newExtras?: DatabaseMessageExtra[] + ): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv) return; + + const result = this.getMessageByIdWithRole(messageId, 'user'); + if (!result) return; + const { message: msg, index: idx } = result; + + try { + const updateData: Partial = { + content: newContent + }; + + // Update extras if provided (including empty array to clear attachments) + // Deep clone to avoid Proxy objects from Svelte reactivity + if (newExtras !== undefined) { + updateData.extra = JSON.parse(JSON.stringify(newExtras)); + } + + await DatabaseService.updateMessage(messageId, updateData); + conversationsStore.updateMessageAtIndex(idx, updateData); + + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const rootMessage = allMessages.find((m) => m.type === 'root' && m.parent === null); + + if (rootMessage && msg.parent === rootMessage.id && newContent.trim()) { + await conversationsStore.updateConversationTitleWithConfirmation( + activeConv.id, + newContent.trim(), + conversationsStore.titleUpdateConfirmationCallback + ); + } + conversationsStore.updateConversationTimestamp(); + } catch (error) { + console.error('Failed to edit user message:', error); + } + } + + async editMessageWithBranching( + messageId: string, + newContent: string, + newExtras?: DatabaseMessageExtra[] + ): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv || this.isLoading) return; + + let result = this.getMessageByIdWithRole(messageId, 'user'); + + if (!result) { + result = this.getMessageByIdWithRole(messageId, 'system'); + } + + if (!result) return; + const { message: msg } = result; + + try { + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const rootMessage = allMessages.find((m) => m.type === 'root' && m.parent === null); + const isFirstUserMessage = + msg.role === 'user' && rootMessage && msg.parent === rootMessage.id; + + const parentId = msg.parent || rootMessage?.id; + if (!parentId) return; + + // Use newExtras if provided, otherwise copy existing extras + // Deep clone to avoid Proxy objects from Svelte reactivity + const extrasToUse = + newExtras !== undefined + ? JSON.parse(JSON.stringify(newExtras)) + : msg.extra + ? 
JSON.parse(JSON.stringify(msg.extra)) + : undefined; + + const newMessage = await DatabaseService.createMessageBranch( + { + convId: msg.convId, + type: msg.type, + timestamp: Date.now(), + role: msg.role, + content: newContent, + thinking: msg.thinking || '', + toolCalls: msg.toolCalls || '', + children: [], + extra: extrasToUse, + model: msg.model + }, + parentId + ); + await conversationsStore.updateCurrentNode(newMessage.id); + conversationsStore.updateConversationTimestamp(); + + if (isFirstUserMessage && newContent.trim()) { + await conversationsStore.updateConversationTitleWithConfirmation( + activeConv.id, + newContent.trim(), + conversationsStore.titleUpdateConfirmationCallback + ); + } + await conversationsStore.refreshActiveMessages(); + + if (msg.role === 'user') { + await this.generateResponseForMessage(newMessage.id); + } + } catch (error) { + console.error('Failed to edit message with branching:', error); + } + } + + async regenerateMessageWithBranching(messageId: string, modelOverride?: string): Promise { + const activeConv = conversationsStore.activeConversation; + if (!activeConv || this.isLoading) return; + try { + const idx = conversationsStore.findMessageIndex(messageId); + if (idx === -1) return; + const msg = conversationsStore.activeMessages[idx]; + if (msg.role !== 'assistant') return; + + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const parentMessage = allMessages.find((m) => m.id === msg.parent); + if (!parentMessage) return; + + this.setChatLoading(activeConv.id, true); + this.clearChatStreaming(activeConv.id); + + const newAssistantMessage = await DatabaseService.createMessageBranch( + { + convId: activeConv.id, + type: 'text', + timestamp: Date.now(), + role: 'assistant', + content: '', + thinking: '', + toolCalls: '', + children: [], + model: null + }, + parentMessage.id + ); + await conversationsStore.updateCurrentNode(newAssistantMessage.id); + conversationsStore.updateConversationTimestamp(); + await conversationsStore.refreshActiveMessages(); + + const conversationPath = filterByLeafNodeId( + allMessages, + parentMessage.id, + false + ) as DatabaseMessage[]; + // Use modelOverride if provided, otherwise use the original message's model + // If neither is available, don't pass model (will use global selection) + const modelToUse = modelOverride || msg.model || undefined; + await this.streamChatCompletion( + conversationPath, + newAssistantMessage, + undefined, + undefined, + modelToUse + ); + } catch (error) { + if (!this.isAbortError(error)) + console.error('Failed to regenerate message with branching:', error); + this.setChatLoading(activeConv?.id || '', false); + } + } + + private async generateResponseForMessage(userMessageId: string): Promise { + const activeConv = conversationsStore.activeConversation; + + if (!activeConv) return; + + this.errorDialogState = null; + this.setChatLoading(activeConv.id, true); + this.clearChatStreaming(activeConv.id); + + try { + const allMessages = await conversationsStore.getConversationMessages(activeConv.id); + const conversationPath = filterByLeafNodeId( + allMessages, + userMessageId, + false + ) as DatabaseMessage[]; + const assistantMessage = await DatabaseService.createMessageBranch( + { + convId: activeConv.id, + type: 'text', + timestamp: Date.now(), + role: 'assistant', + content: '', + thinking: '', + toolCalls: '', + children: [], + model: null + }, + userMessageId + ); + conversationsStore.addMessageToActive(assistantMessage); + await 
this.streamChatCompletion(conversationPath, assistantMessage); + } catch (error) { + console.error('Failed to generate response:', error); + this.setChatLoading(activeConv.id, false); + } + } + + getAddFilesHandler(): ((files: File[]) => void) | null { + return this.addFilesHandler; + } + + public getAllLoadingChats(): string[] { + return Array.from(this.chatLoadingStates.keys()); + } + + public getAllStreamingChats(): string[] { + return Array.from(this.chatStreamingStates.keys()); + } + + public getChatStreamingPublic( + convId: string + ): { response: string; messageId: string } | undefined { + return this.getChatStreaming(convId); + } + + public isChatLoadingPublic(convId: string): boolean { + return this.isChatLoading(convId); + } + + isEditing(): boolean { + return this.isEditModeActive; + } + + setEditModeActive(handler: (files: File[]) => void): void { + this.isEditModeActive = true; + this.addFilesHandler = handler; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Utilities + // ───────────────────────────────────────────────────────────────────────────── + + private getApiOptions(): Record { + const currentConfig = config(); + const hasValue = (value: unknown): boolean => + value !== undefined && value !== null && value !== ''; + + const apiOptions: Record = { stream: true, timings_per_token: true }; + + // Model selection (required in ROUTER mode) + if (isRouterMode()) { + const modelName = selectedModelName(); + if (modelName) apiOptions.model = modelName; + } + + // Config options needed by ChatService + if (currentConfig.systemMessage) apiOptions.systemMessage = currentConfig.systemMessage; + if (currentConfig.disableReasoningFormat) apiOptions.disableReasoningFormat = true; + + if (hasValue(currentConfig.temperature)) + apiOptions.temperature = Number(currentConfig.temperature); + if (hasValue(currentConfig.max_tokens)) + apiOptions.max_tokens = Number(currentConfig.max_tokens); + if (hasValue(currentConfig.dynatemp_range)) + apiOptions.dynatemp_range = Number(currentConfig.dynatemp_range); + if (hasValue(currentConfig.dynatemp_exponent)) + apiOptions.dynatemp_exponent = Number(currentConfig.dynatemp_exponent); + if (hasValue(currentConfig.top_k)) apiOptions.top_k = Number(currentConfig.top_k); + if (hasValue(currentConfig.top_p)) apiOptions.top_p = Number(currentConfig.top_p); + if (hasValue(currentConfig.min_p)) apiOptions.min_p = Number(currentConfig.min_p); + if (hasValue(currentConfig.xtc_probability)) + apiOptions.xtc_probability = Number(currentConfig.xtc_probability); + if (hasValue(currentConfig.xtc_threshold)) + apiOptions.xtc_threshold = Number(currentConfig.xtc_threshold); + if (hasValue(currentConfig.typ_p)) apiOptions.typ_p = Number(currentConfig.typ_p); + if (hasValue(currentConfig.repeat_last_n)) + apiOptions.repeat_last_n = Number(currentConfig.repeat_last_n); + if (hasValue(currentConfig.repeat_penalty)) + apiOptions.repeat_penalty = Number(currentConfig.repeat_penalty); + if (hasValue(currentConfig.presence_penalty)) + apiOptions.presence_penalty = Number(currentConfig.presence_penalty); + if (hasValue(currentConfig.frequency_penalty)) + apiOptions.frequency_penalty = Number(currentConfig.frequency_penalty); + if (hasValue(currentConfig.dry_multiplier)) + apiOptions.dry_multiplier = Number(currentConfig.dry_multiplier); + if (hasValue(currentConfig.dry_base)) apiOptions.dry_base = Number(currentConfig.dry_base); + if (hasValue(currentConfig.dry_allowed_length)) + apiOptions.dry_allowed_length = 
Number(currentConfig.dry_allowed_length); + if (hasValue(currentConfig.dry_penalty_last_n)) + apiOptions.dry_penalty_last_n = Number(currentConfig.dry_penalty_last_n); + if (currentConfig.samplers) apiOptions.samplers = currentConfig.samplers; + if (currentConfig.backend_sampling) + apiOptions.backend_sampling = currentConfig.backend_sampling; + if (currentConfig.custom) apiOptions.custom = currentConfig.custom; + + return apiOptions; + } +} + +export const chatStore = new ChatStore(); + +export const activeProcessingState = () => chatStore.activeProcessingState; +export const clearEditMode = () => chatStore.clearEditMode(); +export const currentResponse = () => chatStore.currentResponse; +export const errorDialog = () => chatStore.errorDialogState; +export const getAddFilesHandler = () => chatStore.getAddFilesHandler(); +export const getAllLoadingChats = () => chatStore.getAllLoadingChats(); +export const getAllStreamingChats = () => chatStore.getAllStreamingChats(); +export const getChatStreaming = (convId: string) => chatStore.getChatStreamingPublic(convId); +export const isChatLoading = (convId: string) => chatStore.isChatLoadingPublic(convId); +export const isChatStreaming = () => chatStore.isStreaming(); +export const isEditing = () => chatStore.isEditing(); +export const isLoading = () => chatStore.isLoading; +export const setEditModeActive = (handler: (files: File[]) => void) => + chatStore.setEditModeActive(handler); diff --git a/llama.cpp/tools/server/webui/src/lib/stores/conversations.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/conversations.svelte.ts new file mode 100644 index 0000000..3300eb3 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/stores/conversations.svelte.ts @@ -0,0 +1,662 @@ +import { browser } from '$app/environment'; +import { goto } from '$app/navigation'; +import { toast } from 'svelte-sonner'; +import { DatabaseService } from '$lib/services/database'; +import { config } from '$lib/stores/settings.svelte'; +import { filterByLeafNodeId, findLeafNode } from '$lib/utils'; +import { AttachmentType } from '$lib/enums'; + +/** + * conversationsStore - Persistent conversation data and lifecycle management + * + * **Terminology - Chat vs Conversation:** + * - **Chat**: The active interaction space with the Chat Completions API. Represents the + * real-time streaming session, loading states, and UI visualization of AI communication. + * Managed by chatStore, a "chat" is ephemeral and exists during active AI interactions. + * - **Conversation**: The persistent database entity storing all messages and metadata. + * A "conversation" survives across sessions, page reloads, and browser restarts. + * It contains the complete message history, branching structure, and conversation metadata. + * + * This store manages all conversation-level data and operations including creation, loading, + * deletion, and navigation. It maintains the list of conversations and the currently active + * conversation with its message history, providing reactive state for UI components. 
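+ *
+ * A minimal usage sketch (illustrative; the call site is an assumption, while the methods shown
+ * are defined in this class):
+ * @example
+ * const convId = await conversationsStore.createConversation('New chat');
+ * const loaded = await conversationsStore.loadConversation(convId);
+ * if (loaded) console.log(conversationsStore.activeMessages.length); // 0 for a fresh conversation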
+ * + * **Architecture & Relationships:** + * - **conversationsStore** (this class): Persistent conversation data management + * - Manages conversation list and active conversation state + * - Handles conversation CRUD operations via DatabaseService + * - Maintains active message array for current conversation + * - Coordinates branching navigation (currNode tracking) + * + * - **chatStore**: Uses conversation data as context for active AI streaming + * - **DatabaseService**: Low-level IndexedDB storage for conversations and messages + * + * **Key Features:** + * - **Conversation Lifecycle**: Create, load, update, delete conversations + * - **Message Management**: Active message array with branching support + * - **Import/Export**: JSON-based conversation backup and restore + * - **Branch Navigation**: Navigate between message tree branches + * - **Title Management**: Auto-update titles with confirmation dialogs + * - **Reactive State**: Svelte 5 runes for automatic UI updates + * + * **State Properties:** + * - `conversations`: All conversations sorted by last modified + * - `activeConversation`: Currently viewed conversation + * - `activeMessages`: Messages in current conversation path + * - `isInitialized`: Store initialization status + */ +class ConversationsStore { + // ───────────────────────────────────────────────────────────────────────────── + // State + // ───────────────────────────────────────────────────────────────────────────── + + /** List of all conversations */ + conversations = $state([]); + + /** Currently active conversation */ + activeConversation = $state(null); + + /** Messages in the active conversation (filtered by currNode path) */ + activeMessages = $state([]); + + /** Whether the store has been initialized */ + isInitialized = $state(false); + + /** Callback for title update confirmation dialog */ + titleUpdateConfirmationCallback?: (currentTitle: string, newTitle: string) => Promise; + + // ───────────────────────────────────────────────────────────────────────────── + // Modalities + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Modalities used in the active conversation. + * Computed from attachments in activeMessages. + * Used to filter available models - models must support all used modalities. + */ + usedModalities: ModelModalities = $derived.by(() => { + return this.calculateModalitiesFromMessages(this.activeMessages); + }); + + /** + * Calculate modalities from a list of messages. + * Helper method used by both usedModalities and getModalitiesUpToMessage. + */ + private calculateModalitiesFromMessages(messages: DatabaseMessage[]): ModelModalities { + const modalities: ModelModalities = { vision: false, audio: false }; + + for (const message of messages) { + if (!message.extra) continue; + + for (const extra of message.extra) { + if (extra.type === AttachmentType.IMAGE) { + modalities.vision = true; + } + + // PDF only requires vision if processed as images + if (extra.type === AttachmentType.PDF) { + const pdfExtra = extra as DatabaseMessageExtraPdfFile; + + if (pdfExtra.processedAsImages) { + modalities.vision = true; + } + } + + if (extra.type === AttachmentType.AUDIO) { + modalities.audio = true; + } + } + + if (modalities.vision && modalities.audio) break; + } + + return modalities; + } + + /** + * Get modalities used in messages BEFORE the specified message. + * Used for regeneration - only consider context that was available when generating this message. 
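+ *
+ * Illustrative usage (a sketch; `messageId` would be the assistant message being regenerated):
+ * @example
+ * const needed = conversationsStore.getModalitiesUpToMessage(messageId);
+ * // A model picker could then exclude models that lack `vision` or `audio` when `needed` requires them.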
+ */ + getModalitiesUpToMessage(messageId: string): ModelModalities { + const messageIndex = this.activeMessages.findIndex((m) => m.id === messageId); + + if (messageIndex === -1) { + return this.usedModalities; + } + + const messagesBefore = this.activeMessages.slice(0, messageIndex); + return this.calculateModalitiesFromMessages(messagesBefore); + } + + constructor() { + if (browser) { + this.initialize(); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Lifecycle + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Initializes the conversations store by loading conversations from the database + */ + async initialize(): Promise { + try { + await this.loadConversations(); + this.isInitialized = true; + } catch (error) { + console.error('Failed to initialize conversations store:', error); + } + } + + /** + * Loads all conversations from the database + */ + async loadConversations(): Promise { + this.conversations = await DatabaseService.getAllConversations(); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Conversation CRUD + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Creates a new conversation and navigates to it + * @param name - Optional name for the conversation + * @returns The ID of the created conversation + */ + async createConversation(name?: string): Promise { + const conversationName = name || `Chat ${new Date().toLocaleString()}`; + const conversation = await DatabaseService.createConversation(conversationName); + + this.conversations.unshift(conversation); + this.activeConversation = conversation; + this.activeMessages = []; + + await goto(`#/chat/${conversation.id}`); + + return conversation.id; + } + + /** + * Loads a specific conversation and its messages + * @param convId - The conversation ID to load + * @returns True if conversation was loaded successfully + */ + async loadConversation(convId: string): Promise { + try { + const conversation = await DatabaseService.getConversation(convId); + + if (!conversation) { + return false; + } + + this.activeConversation = conversation; + + if (conversation.currNode) { + const allMessages = await DatabaseService.getConversationMessages(convId); + this.activeMessages = filterByLeafNodeId( + allMessages, + conversation.currNode, + false + ) as DatabaseMessage[]; + } else { + this.activeMessages = await DatabaseService.getConversationMessages(convId); + } + + return true; + } catch (error) { + console.error('Failed to load conversation:', error); + return false; + } + } + + /** + * Clears the active conversation and messages + * Used when navigating away from chat or starting fresh + */ + clearActiveConversation(): void { + this.activeConversation = null; + this.activeMessages = []; + // Active processing conversation is now managed by chatStore + } + + // ───────────────────────────────────────────────────────────────────────────── + // Message Management + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Refreshes active messages based on currNode after branch navigation + */ + async refreshActiveMessages(): Promise { + if (!this.activeConversation) return; + + const allMessages = await DatabaseService.getConversationMessages(this.activeConversation.id); + + if (allMessages.length === 0) { + this.activeMessages = []; + return; + } + + const leafNodeId = + this.activeConversation.currNode || + 
allMessages.reduce((latest, msg) => (msg.timestamp > latest.timestamp ? msg : latest)).id; + + const currentPath = filterByLeafNodeId(allMessages, leafNodeId, false) as DatabaseMessage[]; + + this.activeMessages.length = 0; + this.activeMessages.push(...currentPath); + } + + /** + * Updates the name of a conversation + * @param convId - The conversation ID to update + * @param name - The new name for the conversation + */ + async updateConversationName(convId: string, name: string): Promise { + try { + await DatabaseService.updateConversation(convId, { name }); + + const convIndex = this.conversations.findIndex((c) => c.id === convId); + + if (convIndex !== -1) { + this.conversations[convIndex].name = name; + } + + if (this.activeConversation?.id === convId) { + this.activeConversation.name = name; + } + } catch (error) { + console.error('Failed to update conversation name:', error); + } + } + + /** + * Updates conversation title with optional confirmation dialog based on settings + * @param convId - The conversation ID to update + * @param newTitle - The new title content + * @param onConfirmationNeeded - Callback when user confirmation is needed + * @returns True if title was updated, false if cancelled + */ + async updateConversationTitleWithConfirmation( + convId: string, + newTitle: string, + onConfirmationNeeded?: (currentTitle: string, newTitle: string) => Promise + ): Promise { + try { + const currentConfig = config(); + + if (currentConfig.askForTitleConfirmation && onConfirmationNeeded) { + const conversation = await DatabaseService.getConversation(convId); + if (!conversation) return false; + + const shouldUpdate = await onConfirmationNeeded(conversation.name, newTitle); + if (!shouldUpdate) return false; + } + + await this.updateConversationName(convId, newTitle); + return true; + } catch (error) { + console.error('Failed to update conversation title with confirmation:', error); + return false; + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Navigation + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Updates the current node of the active conversation + * @param nodeId - The new current node ID + */ + async updateCurrentNode(nodeId: string): Promise { + if (!this.activeConversation) return; + + await DatabaseService.updateCurrentNode(this.activeConversation.id, nodeId); + this.activeConversation.currNode = nodeId; + } + + /** + * Updates conversation lastModified timestamp and moves it to top of list + */ + updateConversationTimestamp(): void { + if (!this.activeConversation) return; + + const chatIndex = this.conversations.findIndex((c) => c.id === this.activeConversation!.id); + + if (chatIndex !== -1) { + this.conversations[chatIndex].lastModified = Date.now(); + const updatedConv = this.conversations.splice(chatIndex, 1)[0]; + this.conversations.unshift(updatedConv); + } + } + + /** + * Navigates to a specific sibling branch by updating currNode and refreshing messages + * @param siblingId - The sibling message ID to navigate to + */ + async navigateToSibling(siblingId: string): Promise { + if (!this.activeConversation) return; + + const allMessages = await DatabaseService.getConversationMessages(this.activeConversation.id); + const rootMessage = allMessages.find((m) => m.type === 'root' && m.parent === null); + const currentFirstUserMessage = this.activeMessages.find( + (m) => m.role === 'user' && m.parent === rootMessage?.id + ); + + const currentLeafNodeId = 
findLeafNode(allMessages, siblingId); + + await DatabaseService.updateCurrentNode(this.activeConversation.id, currentLeafNodeId); + this.activeConversation.currNode = currentLeafNodeId; + await this.refreshActiveMessages(); + + // Only show title dialog if we're navigating between different first user message siblings + if (rootMessage && this.activeMessages.length > 0) { + const newFirstUserMessage = this.activeMessages.find( + (m) => m.role === 'user' && m.parent === rootMessage.id + ); + + if ( + newFirstUserMessage && + newFirstUserMessage.content.trim() && + (!currentFirstUserMessage || + newFirstUserMessage.id !== currentFirstUserMessage.id || + newFirstUserMessage.content.trim() !== currentFirstUserMessage.content.trim()) + ) { + await this.updateConversationTitleWithConfirmation( + this.activeConversation.id, + newFirstUserMessage.content.trim(), + this.titleUpdateConfirmationCallback + ); + } + } + } + + /** + * Deletes a conversation and all its messages + * @param convId - The conversation ID to delete + */ + async deleteConversation(convId: string): Promise { + try { + await DatabaseService.deleteConversation(convId); + + this.conversations = this.conversations.filter((c) => c.id !== convId); + + if (this.activeConversation?.id === convId) { + this.clearActiveConversation(); + await goto(`?new_chat=true#/`); + } + } catch (error) { + console.error('Failed to delete conversation:', error); + } + } + + /** + * Deletes all conversations and their messages + */ + async deleteAll(): Promise { + try { + const allConversations = await DatabaseService.getAllConversations(); + + for (const conv of allConversations) { + await DatabaseService.deleteConversation(conv.id); + } + + this.clearActiveConversation(); + this.conversations = []; + + toast.success('All conversations deleted'); + + await goto(`?new_chat=true#/`); + } catch (error) { + console.error('Failed to delete all conversations:', error); + toast.error('Failed to delete conversations'); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Import/Export + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Downloads a conversation as JSON file + * @param convId - The conversation ID to download + */ + async downloadConversation(convId: string): Promise { + let conversation: DatabaseConversation | null; + let messages: DatabaseMessage[]; + + if (this.activeConversation?.id === convId) { + conversation = this.activeConversation; + messages = this.activeMessages; + } else { + conversation = await DatabaseService.getConversation(convId); + if (!conversation) return; + messages = await DatabaseService.getConversationMessages(convId); + } + + this.triggerDownload({ conv: conversation, messages }); + } + + /** + * Exports all conversations with their messages as a JSON file + * @returns The list of exported conversations + */ + async exportAllConversations(): Promise { + const allConversations = await DatabaseService.getAllConversations(); + + if (allConversations.length === 0) { + throw new Error('No conversations to export'); + } + + const allData = await Promise.all( + allConversations.map(async (conv) => { + const messages = await DatabaseService.getConversationMessages(conv.id); + return { conv, messages }; + }) + ); + + const blob = new Blob([JSON.stringify(allData, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `all_conversations_${new 
Date().toISOString().split('T')[0]}.json`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + + toast.success(`All conversations (${allConversations.length}) prepared for download`); + + return allConversations; + } + + /** + * Imports conversations from a JSON file + * Opens file picker and processes the selected file + * @returns The list of imported conversations + */ + async importConversations(): Promise { + return new Promise((resolve, reject) => { + const input = document.createElement('input'); + input.type = 'file'; + input.accept = '.json'; + + input.onchange = async (e) => { + const file = (e.target as HTMLInputElement)?.files?.[0]; + + if (!file) { + reject(new Error('No file selected')); + return; + } + + try { + const text = await file.text(); + const parsedData = JSON.parse(text); + let importedData: ExportedConversations; + + if (Array.isArray(parsedData)) { + importedData = parsedData; + } else if ( + parsedData && + typeof parsedData === 'object' && + 'conv' in parsedData && + 'messages' in parsedData + ) { + importedData = [parsedData]; + } else { + throw new Error('Invalid file format'); + } + + const result = await DatabaseService.importConversations(importedData); + toast.success(`Imported ${result.imported} conversation(s), skipped ${result.skipped}`); + + await this.loadConversations(); + + const importedConversations = ( + Array.isArray(importedData) ? importedData : [importedData] + ).map((item) => item.conv); + + resolve(importedConversations); + } catch (err: unknown) { + const message = err instanceof Error ? err.message : 'Unknown error'; + console.error('Failed to import conversations:', err); + toast.error('Import failed', { description: message }); + reject(new Error(`Import failed: ${message}`)); + } + }; + + input.click(); + }); + } + + /** + * Gets all messages for a specific conversation + * @param convId - The conversation ID + * @returns Array of messages + */ + async getConversationMessages(convId: string): Promise { + return await DatabaseService.getConversationMessages(convId); + } + + /** + * Imports conversations from provided data (without file picker) + * @param data - Array of conversation data with messages + * @returns Import result with counts + */ + async importConversationsData( + data: ExportedConversations + ): Promise<{ imported: number; skipped: number }> { + const result = await DatabaseService.importConversations(data); + await this.loadConversations(); + return result; + } + + /** + * Adds a message to the active messages array + * Used by chatStore when creating new messages + * @param message - The message to add + */ + addMessageToActive(message: DatabaseMessage): void { + this.activeMessages.push(message); + } + + /** + * Updates a message at a specific index in active messages + * Creates a new object to trigger Svelte 5 reactivity + * @param index - The index of the message to update + * @param updates - Partial message data to update + */ + updateMessageAtIndex(index: number, updates: Partial): void { + if (index !== -1 && this.activeMessages[index]) { + // Create new object to trigger Svelte 5 reactivity + this.activeMessages[index] = { ...this.activeMessages[index], ...updates }; + } + } + + /** + * Finds the index of a message in active messages + * @param messageId - The message ID to find + * @returns The index of the message, or -1 if not found + */ + findMessageIndex(messageId: string): number { + return this.activeMessages.findIndex((m) => m.id === messageId); + 
} + + /** + * Removes messages from active messages starting at an index + * @param startIndex - The index to start removing from + */ + sliceActiveMessages(startIndex: number): void { + this.activeMessages = this.activeMessages.slice(0, startIndex); + } + + /** + * Removes a message from active messages by index + * @param index - The index to remove + * @returns The removed message or undefined + */ + removeMessageAtIndex(index: number): DatabaseMessage | undefined { + if (index !== -1) { + return this.activeMessages.splice(index, 1)[0]; + } + return undefined; + } + + /** + * Triggers file download in browser + * @param data - The data to download + * @param filename - Optional filename for the download + */ + private triggerDownload(data: ExportedConversations, filename?: string): void { + const conversation = + 'conv' in data ? data.conv : Array.isArray(data) ? data[0]?.conv : undefined; + + if (!conversation) { + console.error('Invalid data: missing conversation'); + return; + } + + const conversationName = conversation.name?.trim() || ''; + const truncatedSuffix = conversationName + .toLowerCase() + .replace(/[^a-z0-9]/gi, '_') + .replace(/_+/g, '_') + .substring(0, 20); + const downloadFilename = filename || `conversation_${conversation.id}_${truncatedSuffix}.json`; + + const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = downloadFilename; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Utilities + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Sets the callback function for title update confirmations + * @param callback - Function to call when confirmation is needed + */ + setTitleUpdateConfirmationCallback( + callback: (currentTitle: string, newTitle: string) => Promise + ): void { + this.titleUpdateConfirmationCallback = callback; + } +} + +export const conversationsStore = new ConversationsStore(); + +export const conversations = () => conversationsStore.conversations; +export const activeConversation = () => conversationsStore.activeConversation; +export const activeMessages = () => conversationsStore.activeMessages; +export const isConversationsInitialized = () => conversationsStore.isInitialized; +export const usedModalities = () => conversationsStore.usedModalities; diff --git a/llama.cpp/tools/server/webui/src/lib/stores/models.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/models.svelte.ts new file mode 100644 index 0000000..34b2640 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/stores/models.svelte.ts @@ -0,0 +1,605 @@ +import { SvelteSet } from 'svelte/reactivity'; +import { ModelsService } from '$lib/services/models'; +import { PropsService } from '$lib/services/props'; +import { ServerModelStatus, ModelModality } from '$lib/enums'; +import { serverStore } from '$lib/stores/server.svelte'; + +/** + * modelsStore - Reactive store for model management in both MODEL and ROUTER modes + * + * This store manages: + * - Available models list + * - Selected model for new conversations + * - Loaded models tracking (ROUTER mode) + * - Model usage tracking per conversation + * - Automatic unloading of unused models + * + * **Architecture & Relationships:** + * - **ModelsService**: Stateless service for model API 
communication
+ * - **PropsService**: Stateless service for props/modalities fetching
+ * - **modelsStore** (this class): Reactive store for model state
+ * - **conversationsStore**: Tracks which conversations use which models
+ *
+ * **API Inconsistency Workaround:**
+ * In MODEL mode, `/props` returns modalities for the single model.
+ * In ROUTER mode, `/props` has no modalities - must use `/props?model=` per model.
+ * This store normalizes this behavior so consumers don't need to know the server mode.
+ *
+ * **Key Features:**
+ * - **MODEL mode**: Single model, always loaded
+ * - **ROUTER mode**: Multi-model with load/unload capability
+ * - **Auto-unload**: Automatically unloads models not used by any conversation
+ * - **Lazy loading**: ensureModelLoaded() loads models on demand
+ */
+class ModelsStore {
+  // ─────────────────────────────────────────────────────────────────────────────
+  // State
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  models = $state<ModelOption[]>([]);
+  routerModels = $state<ApiModelDataEntry[]>([]);
+  loading = $state(false);
+  updating = $state(false);
+  error = $state<string | null>(null);
+  selectedModelId = $state<string | null>(null);
+  selectedModelName = $state<string | null>(null);
+
+  private modelUsage = $state<Map<string, SvelteSet<string>>>(new Map());
+  private modelLoadingStates = $state<Map<string, boolean>>(new Map());
+
+  /**
+   * Model-specific props cache
+   * Key: modelId, Value: props data including modalities
+   */
+  private modelPropsCache = $state<Map<string, ApiLlamaCppServerProps>>(new Map());
+  private modelPropsFetching = $state<Set<string>>(new Set());
+
+  /**
+   * Version counter for props cache - used to trigger reactivity when props are updated
+   */
+  propsCacheVersion = $state(0);
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Computed Getters
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  get selectedModel(): ModelOption | null {
+    if (!this.selectedModelId) return null;
+    return this.models.find((model) => model.id === this.selectedModelId) ?? null;
+  }
+
+  get loadedModelIds(): string[] {
+    return this.routerModels
+      .filter((m) => m.status.value === ServerModelStatus.LOADED)
+      .map((m) => m.id);
+  }
+
+  get loadingModelIds(): string[] {
+    return Array.from(this.modelLoadingStates.entries())
+      .filter(([, loading]) => loading)
+      .map(([id]) => id);
+  }
+
+  /**
+   * Get model name in MODEL mode (single model).
+   * Extracts from model_path or model_alias from server props.
+   * In ROUTER mode, returns null (model is per-conversation).
+   */
+  get singleModelName(): string | null {
+    if (serverStore.isRouterMode) return null;
+
+    const props = serverStore.props;
+    if (props?.model_alias) return props.model_alias;
+    if (!props?.model_path) return null;
+
+    return props.model_path.split(/(\\|\/)/).pop() || null;
+  }
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Modalities
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  /**
+   * Get modalities for a specific model
+   * Returns cached modalities from model props
+   */
+  getModelModalities(modelId: string): ModelModalities | null {
+    // First check if modalities are stored in the model option
+    const model = this.models.find((m) => m.model === modelId || m.id === modelId);
+    if (model?.modalities) {
+      return model.modalities;
+    }
+
+    // Fall back to props cache
+    const props = this.modelPropsCache.get(modelId);
+    if (props?.modalities) {
+      return {
+        vision: props.modalities.vision ?? false,
+        audio: props.modalities.audio ??
false + }; + } + + return null; + } + + /** + * Check if a model supports vision modality + */ + modelSupportsVision(modelId: string): boolean { + return this.getModelModalities(modelId)?.vision ?? false; + } + + /** + * Check if a model supports audio modality + */ + modelSupportsAudio(modelId: string): boolean { + return this.getModelModalities(modelId)?.audio ?? false; + } + + /** + * Get model modalities as an array of ModelModality enum values + */ + getModelModalitiesArray(modelId: string): ModelModality[] { + const modalities = this.getModelModalities(modelId); + if (!modalities) return []; + + const result: ModelModality[] = []; + + if (modalities.vision) result.push(ModelModality.VISION); + if (modalities.audio) result.push(ModelModality.AUDIO); + + return result; + } + + /** + * Get props for a specific model (from cache) + */ + getModelProps(modelId: string): ApiLlamaCppServerProps | null { + return this.modelPropsCache.get(modelId) ?? null; + } + + /** + * Get context size (n_ctx) for a specific model from cached props + */ + getModelContextSize(modelId: string): number | null { + const props = this.modelPropsCache.get(modelId); + return props?.default_generation_settings?.n_ctx ?? null; + } + + /** + * Get context size for the currently selected model or null if no model is selected + */ + get selectedModelContextSize(): number | null { + if (!this.selectedModelName) return null; + return this.getModelContextSize(this.selectedModelName); + } + + /** + * Check if props are being fetched for a model + */ + isModelPropsFetching(modelId: string): boolean { + return this.modelPropsFetching.has(modelId); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Status Queries + // ───────────────────────────────────────────────────────────────────────────── + + isModelLoaded(modelId: string): boolean { + const model = this.routerModels.find((m) => m.id === modelId); + return model?.status.value === ServerModelStatus.LOADED || false; + } + + isModelOperationInProgress(modelId: string): boolean { + return this.modelLoadingStates.get(modelId) ?? false; + } + + getModelStatus(modelId: string): ServerModelStatus | null { + const model = this.routerModels.find((m) => m.id === modelId); + return model?.status.value ?? null; + } + + getModelUsage(modelId: string): SvelteSet { + return this.modelUsage.get(modelId) ?? new SvelteSet(); + } + + isModelInUse(modelId: string): boolean { + const usage = this.modelUsage.get(modelId); + return usage !== undefined && usage.size > 0; + } + + // ───────────────────────────────────────────────────────────────────────────── + // Data Fetching + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Fetch list of models from server and detect server role + * Also fetches modalities for MODEL mode (single model) + */ + async fetch(force = false): Promise { + if (this.loading) return; + if (this.models.length > 0 && !force) return; + + this.loading = true; + this.error = null; + + try { + // Ensure server props are loaded (for role detection and MODEL mode modalities) + if (!serverStore.props) { + await serverStore.fetch(); + } + + const response = await ModelsService.list(); + + const models: ModelOption[] = response.data.map((item: ApiModelDataEntry, index: number) => { + const details = response.models?.[index]; + const rawCapabilities = Array.isArray(details?.capabilities) ? details?.capabilities : []; + const displayNameSource = + details?.name && details.name.trim().length > 0 ? 
details.name : item.id; + const displayName = this.toDisplayName(displayNameSource); + + return { + id: item.id, + name: displayName, + model: details?.model || item.id, + description: details?.description, + capabilities: rawCapabilities.filter((value: unknown): value is string => Boolean(value)), + details: details?.details, + meta: item.meta ?? null + } satisfies ModelOption; + }); + + this.models = models; + + // In MODEL mode, populate modalities from serverStore.props (single model) + // WORKAROUND: In MODEL mode, /props returns modalities for the single model, + // but /v1/models doesn't include modalities. We bridge this gap here. + const serverProps = serverStore.props; + if (serverStore.isModelMode && this.models.length > 0 && serverProps?.modalities) { + const modalities: ModelModalities = { + vision: serverProps.modalities.vision ?? false, + audio: serverProps.modalities.audio ?? false + }; + // Cache props for the single model + this.modelPropsCache.set(this.models[0].model, serverProps); + // Update model with modalities + this.models = this.models.map((model, index) => + index === 0 ? { ...model, modalities } : model + ); + } + } catch (error) { + this.models = []; + this.error = error instanceof Error ? error.message : 'Failed to load models'; + throw error; + } finally { + this.loading = false; + } + } + + /** + * Fetch router models with full metadata (ROUTER mode only) + * This fetches the /models endpoint which returns status info for each model + */ + async fetchRouterModels(): Promise { + try { + const response = await ModelsService.listRouter(); + this.routerModels = response.data; + await this.fetchModalitiesForLoadedModels(); + } catch (error) { + console.warn('Failed to fetch router models:', error); + this.routerModels = []; + } + } + + /** + * Fetch props for a specific model from /props endpoint + * Uses caching to avoid redundant requests + * + * In ROUTER mode, this will only fetch props if the model is loaded, + * since unloaded models return 400 from /props endpoint. 
+ * + * @param modelId - Model identifier to fetch props for + * @returns Props data or null if fetch failed or model not loaded + */ + async fetchModelProps(modelId: string): Promise { + // Return cached props if available + const cached = this.modelPropsCache.get(modelId); + if (cached) return cached; + + if (serverStore.isRouterMode && !this.isModelLoaded(modelId)) { + return null; + } + + // Avoid duplicate fetches + if (this.modelPropsFetching.has(modelId)) return null; + + this.modelPropsFetching.add(modelId); + + try { + const props = await PropsService.fetchForModel(modelId); + this.modelPropsCache.set(modelId, props); + return props; + } catch (error) { + console.warn(`Failed to fetch props for model ${modelId}:`, error); + return null; + } finally { + this.modelPropsFetching.delete(modelId); + } + } + + /** + * Fetch modalities for all loaded models from /props endpoint + * This updates the modalities field in models array + */ + async fetchModalitiesForLoadedModels(): Promise { + const loadedModelIds = this.loadedModelIds; + if (loadedModelIds.length === 0) return; + + // Fetch props for each loaded model in parallel + const propsPromises = loadedModelIds.map((modelId) => this.fetchModelProps(modelId)); + + try { + const results = await Promise.all(propsPromises); + + // Update models with modalities + this.models = this.models.map((model) => { + const modelIndex = loadedModelIds.indexOf(model.model); + if (modelIndex === -1) return model; + + const props = results[modelIndex]; + if (!props?.modalities) return model; + + const modalities: ModelModalities = { + vision: props.modalities.vision ?? false, + audio: props.modalities.audio ?? false + }; + + return { ...model, modalities }; + }); + + // Increment version to trigger reactivity + this.propsCacheVersion++; + } catch (error) { + console.warn('Failed to fetch modalities for loaded models:', error); + } + } + + /** + * Update modalities for a specific model + * Called when a model is loaded or when we need fresh modality data + */ + async updateModelModalities(modelId: string): Promise { + try { + const props = await this.fetchModelProps(modelId); + if (!props?.modalities) return; + + const modalities: ModelModalities = { + vision: props.modalities.vision ?? false, + audio: props.modalities.audio ?? false + }; + + this.models = this.models.map((model) => + model.model === modelId ? 
{ ...model, modalities } : model + ); + + // Increment version to trigger reactivity + this.propsCacheVersion++; + } catch (error) { + console.warn(`Failed to update modalities for model ${modelId}:`, error); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Model Selection + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Select a model for new conversations + */ + async selectModelById(modelId: string): Promise { + if (!modelId || this.updating) return; + if (this.selectedModelId === modelId) return; + + const option = this.models.find((model) => model.id === modelId); + if (!option) throw new Error('Selected model is not available'); + + this.updating = true; + this.error = null; + + try { + this.selectedModelId = option.id; + this.selectedModelName = option.model; + } finally { + this.updating = false; + } + } + + /** + * Select a model by its model name (used for syncing with conversation model) + * @param modelName - Model name to select (e.g., "unsloth/gemma-3-12b-it-GGUF:latest") + */ + selectModelByName(modelName: string): void { + const option = this.models.find((model) => model.model === modelName); + if (option) { + this.selectedModelId = option.id; + this.selectedModelName = option.model; + } + } + + clearSelection(): void { + this.selectedModelId = null; + this.selectedModelName = null; + } + + findModelByName(modelName: string): ModelOption | null { + return this.models.find((model) => model.model === modelName) ?? null; + } + + findModelById(modelId: string): ModelOption | null { + return this.models.find((model) => model.id === modelId) ?? null; + } + + hasModel(modelName: string): boolean { + return this.models.some((model) => model.model === modelName); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Loading/Unloading Models + // ───────────────────────────────────────────────────────────────────────────── + + /** + * WORKAROUND: Polling for model status after load/unload operations. + * + * Currently, the `/models/load` and `/models/unload` endpoints return success + * before the operation actually completes on the server. This means an immediate + * request to `/models` returns stale status (e.g., "loading" after load request, + * "loaded" after unload request). + * + * TODO: Remove this polling once llama-server properly waits for the operation + * to complete before returning success from `/load` and `/unload` endpoints. + * At that point, a single `fetchRouterModels()` call after the operation will + * be sufficient to get the correct status. + */ + + /** Polling interval in ms for checking model status */ + private static readonly STATUS_POLL_INTERVAL = 500; + /** Maximum polling attempts before giving up */ + private static readonly STATUS_POLL_MAX_ATTEMPTS = 60; // 30 seconds max + + /** + * Poll for expected model status after load/unload operation. + * Keeps polling until the model reaches the expected status or max attempts reached. 
+ * + * @param modelId - Model identifier to check + * @param expectedStatus - Expected status to wait for + * @returns Promise that resolves when expected status is reached + */ + private async pollForModelStatus( + modelId: string, + expectedStatus: ServerModelStatus + ): Promise { + for (let attempt = 0; attempt < ModelsStore.STATUS_POLL_MAX_ATTEMPTS; attempt++) { + await this.fetchRouterModels(); + + const currentStatus = this.getModelStatus(modelId); + if (currentStatus === expectedStatus) { + return; + } + + // Wait before next poll + await new Promise((resolve) => setTimeout(resolve, ModelsStore.STATUS_POLL_INTERVAL)); + } + + console.warn( + `Model ${modelId} did not reach expected status ${expectedStatus} after ${ModelsStore.STATUS_POLL_MAX_ATTEMPTS} attempts` + ); + } + + /** + * Load a model (ROUTER mode) + * @param modelId - Model identifier to load + */ + async loadModel(modelId: string): Promise { + if (this.isModelLoaded(modelId)) { + return; + } + + if (this.modelLoadingStates.get(modelId)) return; + + this.modelLoadingStates.set(modelId, true); + this.error = null; + + try { + await ModelsService.load(modelId); + + // Poll until model is loaded + await this.pollForModelStatus(modelId, ServerModelStatus.LOADED); + + await this.updateModelModalities(modelId); + } catch (error) { + this.error = error instanceof Error ? error.message : 'Failed to load model'; + throw error; + } finally { + this.modelLoadingStates.set(modelId, false); + } + } + + /** + * Unload a model (ROUTER mode) + * @param modelId - Model identifier to unload + */ + async unloadModel(modelId: string): Promise { + if (!this.isModelLoaded(modelId)) { + return; + } + + if (this.modelLoadingStates.get(modelId)) return; + + this.modelLoadingStates.set(modelId, true); + this.error = null; + + try { + await ModelsService.unload(modelId); + + await this.pollForModelStatus(modelId, ServerModelStatus.UNLOADED); + } catch (error) { + this.error = error instanceof Error ? error.message : 'Failed to unload model'; + throw error; + } finally { + this.modelLoadingStates.set(modelId, false); + } + } + + /** + * Ensure a model is loaded before use + * @param modelId - Model identifier to ensure is loaded + */ + async ensureModelLoaded(modelId: string): Promise { + if (this.isModelLoaded(modelId)) { + return; + } + + await this.loadModel(modelId); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Utilities + // ───────────────────────────────────────────────────────────────────────────── + + private toDisplayName(id: string): string { + const segments = id.split(/\\|\//); + const candidate = segments.pop(); + + return candidate && candidate.trim().length > 0 ? 
candidate : id;
+  }
+
+  clear(): void {
+    this.models = [];
+    this.routerModels = [];
+    this.loading = false;
+    this.updating = false;
+    this.error = null;
+    this.selectedModelId = null;
+    this.selectedModelName = null;
+    this.modelUsage.clear();
+    this.modelLoadingStates.clear();
+    this.modelPropsCache.clear();
+    this.modelPropsFetching.clear();
+  }
+}
+
+export const modelsStore = new ModelsStore();
+
+export const modelOptions = () => modelsStore.models;
+export const routerModels = () => modelsStore.routerModels;
+export const modelsLoading = () => modelsStore.loading;
+export const modelsUpdating = () => modelsStore.updating;
+export const modelsError = () => modelsStore.error;
+export const selectedModelId = () => modelsStore.selectedModelId;
+export const selectedModelName = () => modelsStore.selectedModelName;
+export const selectedModelOption = () => modelsStore.selectedModel;
+export const loadedModelIds = () => modelsStore.loadedModelIds;
+export const loadingModelIds = () => modelsStore.loadingModelIds;
+export const propsCacheVersion = () => modelsStore.propsCacheVersion;
+export const singleModelName = () => modelsStore.singleModelName;
+export const selectedModelContextSize = () => modelsStore.selectedModelContextSize;
diff --git a/llama.cpp/tools/server/webui/src/lib/stores/persisted.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/persisted.svelte.ts
new file mode 100644
index 0000000..1e07f80
--- /dev/null
+++ b/llama.cpp/tools/server/webui/src/lib/stores/persisted.svelte.ts
@@ -0,0 +1,50 @@
+import { browser } from '$app/environment';
+
+type PersistedValue<T> = {
+  get value(): T;
+  set value(newValue: T);
+};
+
+export function persisted<T>(key: string, initialValue: T): PersistedValue<T> {
+  let value = initialValue;
+
+  if (browser) {
+    try {
+      const stored = localStorage.getItem(key);
+
+      if (stored !== null) {
+        value = JSON.parse(stored) as T;
+      }
+    } catch (error) {
+      console.warn(`Failed to load ${key}:`, error);
+    }
+  }
+
+  const persist = (next: T) => {
+    if (!browser) {
+      return;
+    }
+
+    try {
+      if (next === null || next === undefined) {
+        localStorage.removeItem(key);
+        return;
+      }
+
+      localStorage.setItem(key, JSON.stringify(next));
+    } catch (error) {
+      console.warn(`Failed to persist ${key}:`, error);
+    }
+  };
+
+  return {
+    get value() {
+      return value;
+    },
+
+    set value(newValue: T) {
+      value = newValue;
+      persist(newValue);
+    }
+  };
+}
diff --git a/llama.cpp/tools/server/webui/src/lib/stores/server.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/server.svelte.ts
new file mode 100644
index 0000000..facfd33
--- /dev/null
+++ b/llama.cpp/tools/server/webui/src/lib/stores/server.svelte.ts
@@ -0,0 +1,140 @@
+import { PropsService } from '$lib/services/props';
+import { ServerRole } from '$lib/enums';
+
+/**
+ * serverStore - Server connection state, configuration, and role detection
+ *
+ * This store manages the server connection state and properties fetched from `/props`.
+ * It provides reactive state for server configuration and role detection.
+ *
+ * **Architecture & Relationships:**
+ * - **PropsService**: Stateless service for fetching `/props` data
+ * - **serverStore** (this class): Reactive store for server state
+ * - **modelsStore**: Independent store for model management (uses PropsService directly)
+ *
+ * **Key Features:**
+ * - **Server State**: Connection status, loading, error handling
+ * - **Role Detection**: MODEL (single model) vs ROUTER (multi-model)
+ * - **Default Params**: Server-wide generation defaults
+ */
+class ServerStore {
+  // ─────────────────────────────────────────────────────────────────────────────
+  // State
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  props = $state<ApiLlamaCppServerProps | null>(null);
+  loading = $state(false);
+  error = $state<string | null>(null);
+  role = $state<ServerRole | null>(null);
+  private fetchPromise: Promise<void> | null = null;
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Getters
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  get defaultParams(): ApiLlamaCppServerProps['default_generation_settings']['params'] | null {
+    return this.props?.default_generation_settings?.params || null;
+  }
+
+  get contextSize(): number | null {
+    return this.props?.default_generation_settings?.n_ctx ?? null;
+  }
+
+  get webuiSettings(): Record<string, unknown> | undefined {
+    return this.props?.webui_settings;
+  }
+
+  get isRouterMode(): boolean {
+    return this.role === ServerRole.ROUTER;
+  }
+
+  get isModelMode(): boolean {
+    return this.role === ServerRole.MODEL;
+  }
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Data Handling
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  async fetch(): Promise<void> {
+    if (this.fetchPromise) return this.fetchPromise;
+
+    this.loading = true;
+    this.error = null;
+
+    const fetchPromise = (async () => {
+      try {
+        const props = await PropsService.fetch();
+        this.props = props;
+        this.error = null;
+        this.detectRole(props);
+      } catch (error) {
+        this.error = this.getErrorMessage(error);
+        console.error('Error fetching server properties:', error);
+      } finally {
+        this.loading = false;
+        this.fetchPromise = null;
+      }
+    })();
+
+    this.fetchPromise = fetchPromise;
+    await fetchPromise;
+  }
+
+  private getErrorMessage(error: unknown): string {
+    if (error instanceof Error) {
+      const message = error.message || '';
+
+      if (error.name === 'TypeError' && message.includes('fetch')) {
+        return 'Server is not running or unreachable';
+      } else if (message.includes('ECONNREFUSED')) {
+        return 'Connection refused - server may be offline';
+      } else if (message.includes('ENOTFOUND')) {
+        return 'Server not found - check server address';
+      } else if (message.includes('ETIMEDOUT')) {
+        return 'Request timed out';
+      } else if (message.includes('503')) {
+        return 'Server temporarily unavailable';
+      } else if (message.includes('500')) {
+        return 'Server error - check server logs';
+      } else if (message.includes('404')) {
+        return 'Server endpoint not found';
+      } else if (message.includes('403') || message.includes('401')) {
+        return 'Access denied';
+      }
+    }
+
+    return 'Failed to connect to server';
+  }
+
+  clear(): void {
+    this.props = null;
+    this.error = null;
+    this.loading = false;
+    this.role = null;
+    this.fetchPromise = null;
+  }
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Utilities
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  private detectRole(props: ApiLlamaCppServerProps): void {
+    const newRole = props?.role === ServerRole.ROUTER ? ServerRole.ROUTER : ServerRole.MODEL;
+    if (this.role !== newRole) {
+      this.role = newRole;
+      console.info(`Server running in ${newRole === ServerRole.ROUTER ? 'ROUTER' : 'MODEL'} mode`);
+    }
+  }
+}
+
+export const serverStore = new ServerStore();
+
+export const serverProps = () => serverStore.props;
+export const serverLoading = () => serverStore.loading;
+export const serverError = () => serverStore.error;
+export const serverRole = () => serverStore.role;
+export const defaultParams = () => serverStore.defaultParams;
+export const contextSize = () => serverStore.contextSize;
+export const isRouterMode = () => serverStore.isRouterMode;
+export const isModelMode = () => serverStore.isModelMode;
diff --git a/llama.cpp/tools/server/webui/src/lib/stores/settings.svelte.ts b/llama.cpp/tools/server/webui/src/lib/stores/settings.svelte.ts
new file mode 100644
index 0000000..cda940b
--- /dev/null
+++ b/llama.cpp/tools/server/webui/src/lib/stores/settings.svelte.ts
@@ -0,0 +1,421 @@
+/**
+ * settingsStore - Application configuration and theme management
+ *
+ * This store manages all application settings including AI model parameters, UI preferences,
+ * and theme configuration. It provides persistent storage through localStorage with reactive
+ * state management using Svelte 5 runes.
+ *
+ * **Architecture & Relationships:**
+ * - **settingsStore** (this class): Configuration state management
+ *   - Manages AI model parameters (temperature, max tokens, etc.)
+ *   - Handles theme switching and persistence
+ *   - Provides localStorage synchronization
+ *   - Offers reactive configuration access
+ *
+ * - **ChatService**: Reads model parameters for API requests
+ * - **UI Components**: Subscribe to theme and configuration changes
+ *
+ * **Key Features:**
+ * - **Model Parameters**: Temperature, max tokens, top-p, top-k, repeat penalty
+ * - **Theme Management**: Auto, light, dark theme switching
+ * - **Persistence**: Automatic localStorage synchronization
+ * - **Reactive State**: Svelte 5 runes for automatic UI updates
+ * - **Default Handling**: Graceful fallback to defaults for missing settings
+ * - **Batch Updates**: Efficient multi-setting updates
+ * - **Reset Functionality**: Restore defaults for individual or all settings
+ *
+ * **Configuration Categories:**
+ * - Generation parameters (temperature, tokens, sampling)
+ * - UI preferences (theme, display options)
+ * - System settings (model selection, prompts)
+ * - Advanced options (seed, penalties, context handling)
+ */
+
+import { browser } from '$app/environment';
+import { SETTING_CONFIG_DEFAULT } from '$lib/constants/settings-config';
+import { ParameterSyncService } from '$lib/services/parameter-sync';
+import { serverStore } from '$lib/stores/server.svelte';
+import {
+  configToParameterRecord,
+  normalizeFloatingPoint,
+  getConfigValue,
+  setConfigValue
+} from '$lib/utils';
+import {
+  CONFIG_LOCALSTORAGE_KEY,
+  USER_OVERRIDES_LOCALSTORAGE_KEY
+} from '$lib/constants/localstorage-keys';
+
+class SettingsStore {
+  // ─────────────────────────────────────────────────────────────────────────────
+  // State
+  // ─────────────────────────────────────────────────────────────────────────────
+
+  config = $state({ ...SETTING_CONFIG_DEFAULT });
+  theme = $state('auto');
+  isInitialized = $state(false);
+  userOverrides = $state<Set<string>>(new Set());
+
+  // ─────────────────────────────────────────────────────────────────────────────
+  // Utilities (private
helpers) + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Helper method to get server defaults with null safety + * Centralizes the pattern of getting and extracting server defaults + */ + private getServerDefaults(): Record { + const serverParams = serverStore.defaultParams; + const webuiSettings = serverStore.webuiSettings; + return ParameterSyncService.extractServerDefaults(serverParams, webuiSettings); + } + + constructor() { + if (browser) { + this.initialize(); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Lifecycle + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Initialize the settings store by loading from localStorage + */ + initialize() { + try { + this.loadConfig(); + this.loadTheme(); + this.isInitialized = true; + } catch (error) { + console.error('Failed to initialize settings store:', error); + } + } + + /** + * Load configuration from localStorage + * Returns default values for missing keys to prevent breaking changes + */ + private loadConfig() { + if (!browser) return; + + try { + const storedConfigRaw = localStorage.getItem(CONFIG_LOCALSTORAGE_KEY); + const savedVal = JSON.parse(storedConfigRaw || '{}'); + + // Merge with defaults to prevent breaking changes + this.config = { + ...SETTING_CONFIG_DEFAULT, + ...savedVal + }; + + // Load user overrides + const savedOverrides = JSON.parse( + localStorage.getItem(USER_OVERRIDES_LOCALSTORAGE_KEY) || '[]' + ); + this.userOverrides = new Set(savedOverrides); + } catch (error) { + console.warn('Failed to parse config from localStorage, using defaults:', error); + this.config = { ...SETTING_CONFIG_DEFAULT }; + this.userOverrides = new Set(); + } + } + + /** + * Load theme from localStorage + */ + private loadTheme() { + if (!browser) return; + + this.theme = localStorage.getItem('theme') || 'auto'; + } + // ───────────────────────────────────────────────────────────────────────────── + // Config Updates + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Update a specific configuration setting + * @param key - The configuration key to update + * @param value - The new value for the configuration key + */ + updateConfig(key: K, value: SettingsConfigType[K]): void { + this.config[key] = value; + + if (ParameterSyncService.canSyncParameter(key as string)) { + const propsDefaults = this.getServerDefaults(); + const propsDefault = propsDefaults[key as string]; + + if (propsDefault !== undefined) { + const normalizedValue = normalizeFloatingPoint(value); + const normalizedDefault = normalizeFloatingPoint(propsDefault); + + if (normalizedValue === normalizedDefault) { + this.userOverrides.delete(key as string); + } else { + this.userOverrides.add(key as string); + } + } + } + + this.saveConfig(); + } + + /** + * Update multiple configuration settings at once + * @param updates - Object containing the configuration updates + */ + updateMultipleConfig(updates: Partial) { + Object.assign(this.config, updates); + + const propsDefaults = this.getServerDefaults(); + + for (const [key, value] of Object.entries(updates)) { + if (ParameterSyncService.canSyncParameter(key)) { + const propsDefault = propsDefaults[key]; + + if (propsDefault !== undefined) { + const normalizedValue = normalizeFloatingPoint(value); + const normalizedDefault = normalizeFloatingPoint(propsDefault); + + if (normalizedValue === normalizedDefault) { + this.userOverrides.delete(key); + } 
else { + this.userOverrides.add(key); + } + } + } + } + + this.saveConfig(); + } + + /** + * Save the current configuration to localStorage + */ + private saveConfig() { + if (!browser) return; + + try { + localStorage.setItem(CONFIG_LOCALSTORAGE_KEY, JSON.stringify(this.config)); + + localStorage.setItem( + USER_OVERRIDES_LOCALSTORAGE_KEY, + JSON.stringify(Array.from(this.userOverrides)) + ); + } catch (error) { + console.error('Failed to save config to localStorage:', error); + } + } + + /** + * Update the theme setting + * @param newTheme - The new theme value + */ + updateTheme(newTheme: string) { + this.theme = newTheme; + this.saveTheme(); + } + + /** + * Save the current theme to localStorage + */ + private saveTheme() { + if (!browser) return; + + try { + if (this.theme === 'auto') { + localStorage.removeItem('theme'); + } else { + localStorage.setItem('theme', this.theme); + } + } catch (error) { + console.error('Failed to save theme to localStorage:', error); + } + } + + // ───────────────────────────────────────────────────────────────────────────── + // Reset + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Reset configuration to defaults + */ + resetConfig() { + this.config = { ...SETTING_CONFIG_DEFAULT }; + this.saveConfig(); + } + + /** + * Reset theme to auto + */ + resetTheme() { + this.theme = 'auto'; + this.saveTheme(); + } + + /** + * Reset all settings to defaults + */ + resetAll() { + this.resetConfig(); + this.resetTheme(); + } + + /** + * Reset a parameter to server default (or webui default if no server default) + */ + resetParameterToServerDefault(key: string): void { + const serverDefaults = this.getServerDefaults(); + + if (serverDefaults[key] !== undefined) { + const value = normalizeFloatingPoint(serverDefaults[key]); + + this.config[key as keyof SettingsConfigType] = + value as SettingsConfigType[keyof SettingsConfigType]; + } else { + if (key in SETTING_CONFIG_DEFAULT) { + const defaultValue = getConfigValue(SETTING_CONFIG_DEFAULT, key); + + setConfigValue(this.config, key, defaultValue); + } + } + + this.userOverrides.delete(key); + this.saveConfig(); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Server Sync + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Initialize settings with props defaults when server properties are first loaded + * This sets up the default values from /props endpoint + */ + syncWithServerDefaults(): void { + const propsDefaults = this.getServerDefaults(); + + if (Object.keys(propsDefaults).length === 0) { + console.warn('No server defaults available for initialization'); + + return; + } + + for (const [key, propsValue] of Object.entries(propsDefaults)) { + const currentValue = getConfigValue(this.config, key); + + const normalizedCurrent = normalizeFloatingPoint(currentValue); + const normalizedDefault = normalizeFloatingPoint(propsValue); + + if (normalizedCurrent === normalizedDefault) { + this.userOverrides.delete(key); + setConfigValue(this.config, key, propsValue); + } else if (!this.userOverrides.has(key)) { + setConfigValue(this.config, key, propsValue); + } + } + + this.saveConfig(); + console.log('Settings initialized with props defaults:', propsDefaults); + console.log('Current user overrides after sync:', Array.from(this.userOverrides)); + } + + /** + * Reset all parameters to their default values (from props) + * This is used by the "Reset to Default" functionality + * Prioritizes 
server defaults from /props, falls back to webui defaults + */ + forceSyncWithServerDefaults(): void { + const propsDefaults = this.getServerDefaults(); + const syncableKeys = ParameterSyncService.getSyncableParameterKeys(); + + for (const key of syncableKeys) { + if (propsDefaults[key] !== undefined) { + const normalizedValue = normalizeFloatingPoint(propsDefaults[key]); + + setConfigValue(this.config, key, normalizedValue); + } else { + if (key in SETTING_CONFIG_DEFAULT) { + const defaultValue = getConfigValue(SETTING_CONFIG_DEFAULT, key); + + setConfigValue(this.config, key, defaultValue); + } + } + + this.userOverrides.delete(key); + } + + this.saveConfig(); + } + + // ───────────────────────────────────────────────────────────────────────────── + // Utilities + // ───────────────────────────────────────────────────────────────────────────── + + /** + * Get a specific configuration value + * @param key - The configuration key to get + * @returns The configuration value + */ + getConfig(key: K): SettingsConfigType[K] { + return this.config[key]; + } + + /** + * Get the entire configuration object + * @returns The complete configuration object + */ + getAllConfig(): SettingsConfigType { + return { ...this.config }; + } + + canSyncParameter(key: string): boolean { + return ParameterSyncService.canSyncParameter(key); + } + + /** + * Get parameter information including source for a specific parameter + */ + getParameterInfo(key: string) { + const propsDefaults = this.getServerDefaults(); + const currentValue = getConfigValue(this.config, key); + + return ParameterSyncService.getParameterInfo( + key, + currentValue ?? '', + propsDefaults, + this.userOverrides + ); + } + + /** + * Get diff between current settings and server defaults + */ + getParameterDiff() { + const serverDefaults = this.getServerDefaults(); + if (Object.keys(serverDefaults).length === 0) return {}; + + const configAsRecord = configToParameterRecord( + this.config, + ParameterSyncService.getSyncableParameterKeys() + ); + + return ParameterSyncService.createParameterDiff(configAsRecord, serverDefaults); + } + + /** + * Clear all user overrides (for debugging) + */ + clearAllUserOverrides(): void { + this.userOverrides.clear(); + this.saveConfig(); + console.log('Cleared all user overrides'); + } +} + +export const settingsStore = new SettingsStore(); + +export const config = () => settingsStore.config; +export const theme = () => settingsStore.theme; +export const isInitialized = () => settingsStore.isInitialized; diff --git a/llama.cpp/tools/server/webui/src/lib/types/api.d.ts b/llama.cpp/tools/server/webui/src/lib/types/api.d.ts new file mode 100644 index 0000000..714509f --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/api.d.ts @@ -0,0 +1,430 @@ +import type { ServerModelStatus, ServerRole } from '$lib/enums'; +import type { ChatMessagePromptProgress } from './chat'; + +export interface ApiChatMessageContentPart { + type: 'text' | 'image_url' | 'input_audio'; + text?: string; + image_url?: { + url: string; + }; + input_audio?: { + data: string; + format: 'wav' | 'mp3'; + }; +} + +export interface ApiContextSizeError { + code: number; + message: string; + type: 'exceed_context_size_error'; + n_prompt_tokens: number; + n_ctx: number; +} + +export interface ApiErrorResponse { + error: + | ApiContextSizeError + | { + code: number; + message: string; + type?: string; + }; +} + +export interface ApiChatMessageData { + role: ChatRole; + content: string | ApiChatMessageContentPart[]; + timestamp?: number; +} + +/** 
+ * Model status object from /models endpoint + */ +export interface ApiModelStatus { + /** Status value: loaded, unloaded, loading, failed */ + value: ServerModelStatus; + /** Command line arguments used when loading (only for loaded models) */ + args?: string[]; +} + +/** + * Model entry from /models endpoint (ROUTER mode) + * Based on actual API response structure + */ +export interface ApiModelDataEntry { + /** Model identifier (e.g., "ggml-org/Qwen2.5-Omni-7B-GGUF:latest") */ + id: string; + /** Model name (optional, usually same as id - not always returned by API) */ + name?: string; + /** Object type, always "model" */ + object: string; + /** Owner, usually "llamacpp" */ + owned_by: string; + /** Creation timestamp */ + created: number; + /** Whether model files are in HuggingFace cache */ + in_cache: boolean; + /** Path to model manifest file */ + path: string; + /** Current status of the model */ + status: ApiModelStatus; + /** Legacy meta field (may be present in older responses) */ + meta?: Record | null; +} + +export interface ApiModelDetails { + name: string; + model: string; + modified_at?: string; + size?: string | number; + digest?: string; + type?: string; + description?: string; + tags?: string[]; + capabilities?: string[]; + parameters?: string; + details?: { + parent_model?: string; + format?: string; + family?: string; + families?: string[]; + parameter_size?: string; + quantization_level?: string; + }; +} + +export interface ApiModelListResponse { + object: string; + data: ApiModelDataEntry[]; + models?: ApiModelDetails[]; +} + +export interface ApiLlamaCppServerProps { + default_generation_settings: { + id: number; + id_task: number; + n_ctx: number; + speculative: boolean; + is_processing: boolean; + params: { + n_predict: number; + seed: number; + temperature: number; + dynatemp_range: number; + dynatemp_exponent: number; + top_k: number; + top_p: number; + min_p: number; + top_n_sigma: number; + xtc_probability: number; + xtc_threshold: number; + typ_p: number; + repeat_last_n: number; + repeat_penalty: number; + presence_penalty: number; + frequency_penalty: number; + dry_multiplier: number; + dry_base: number; + dry_allowed_length: number; + dry_penalty_last_n: number; + dry_sequence_breakers: string[]; + mirostat: number; + mirostat_tau: number; + mirostat_eta: number; + stop: string[]; + max_tokens: number; + n_keep: number; + n_discard: number; + ignore_eos: boolean; + stream: boolean; + logit_bias: Array<[number, number]>; + n_probs: number; + min_keep: number; + grammar: string; + grammar_lazy: boolean; + grammar_triggers: string[]; + preserved_tokens: number[]; + chat_format: string; + reasoning_format: string; + reasoning_in_content: boolean; + thinking_forced_open: boolean; + samplers: string[]; + backend_sampling: boolean; + 'speculative.n_max': number; + 'speculative.n_min': number; + 'speculative.p_min': number; + timings_per_token: boolean; + post_sampling_probs: boolean; + lora: Array<{ name: string; scale: number }>; + }; + prompt: string; + next_token: { + has_next_token: boolean; + has_new_line: boolean; + n_remain: number; + n_decoded: number; + stopping_word: string; + }; + }; + total_slots: number; + model_path: string; + role: ServerRole; + modalities: { + vision: boolean; + audio: boolean; + }; + chat_template: string; + bos_token: string; + eos_token: string; + build_info: string; + webui_settings?: Record; +} + +export interface ApiChatCompletionRequest { + messages: Array<{ + role: ChatRole; + content: string | 
ApiChatMessageContentPart[]; + }>; + stream?: boolean; + model?: string; + return_progress?: boolean; + // Reasoning parameters + reasoning_format?: string; + // Generation parameters + temperature?: number; + max_tokens?: number; + // Sampling parameters + dynatemp_range?: number; + dynatemp_exponent?: number; + top_k?: number; + top_p?: number; + min_p?: number; + xtc_probability?: number; + xtc_threshold?: number; + typ_p?: number; + // Penalty parameters + repeat_last_n?: number; + repeat_penalty?: number; + presence_penalty?: number; + frequency_penalty?: number; + dry_multiplier?: number; + dry_base?: number; + dry_allowed_length?: number; + dry_penalty_last_n?: number; + // Sampler configuration + samplers?: string[]; + backend_sampling?: boolean; + // Custom parameters (JSON string) + custom?: Record; + timings_per_token?: boolean; +} + +export interface ApiChatCompletionToolCallFunctionDelta { + name?: string; + arguments?: string; +} + +export interface ApiChatCompletionToolCallDelta { + index?: number; + id?: string; + type?: string; + function?: ApiChatCompletionToolCallFunctionDelta; +} + +export interface ApiChatCompletionToolCall extends ApiChatCompletionToolCallDelta { + function?: ApiChatCompletionToolCallFunctionDelta & { arguments?: string }; +} + +export interface ApiChatCompletionStreamChunk { + object?: string; + model?: string; + choices: Array<{ + model?: string; + metadata?: { model?: string }; + delta: { + content?: string; + reasoning_content?: string; + model?: string; + tool_calls?: ApiChatCompletionToolCallDelta[]; + }; + }>; + timings?: { + prompt_n?: number; + prompt_ms?: number; + predicted_n?: number; + predicted_ms?: number; + cache_n?: number; + }; + prompt_progress?: ChatMessagePromptProgress; +} + +export interface ApiChatCompletionResponse { + model?: string; + choices: Array<{ + model?: string; + metadata?: { model?: string }; + message: { + content: string; + reasoning_content?: string; + model?: string; + tool_calls?: ApiChatCompletionToolCallDelta[]; + }; + }>; +} + +export interface ApiSlotData { + id: number; + id_task: number; + n_ctx: number; + speculative: boolean; + is_processing: boolean; + params: { + n_predict: number; + seed: number; + temperature: number; + dynatemp_range: number; + dynatemp_exponent: number; + top_k: number; + top_p: number; + min_p: number; + top_n_sigma: number; + xtc_probability: number; + xtc_threshold: number; + typical_p: number; + repeat_last_n: number; + repeat_penalty: number; + presence_penalty: number; + frequency_penalty: number; + dry_multiplier: number; + dry_base: number; + dry_allowed_length: number; + dry_penalty_last_n: number; + mirostat: number; + mirostat_tau: number; + mirostat_eta: number; + max_tokens: number; + n_keep: number; + n_discard: number; + ignore_eos: boolean; + stream: boolean; + n_probs: number; + min_keep: number; + chat_format: string; + reasoning_format: string; + reasoning_in_content: boolean; + thinking_forced_open: boolean; + samplers: string[]; + backend_sampling: boolean; + 'speculative.n_max': number; + 'speculative.n_min': number; + 'speculative.p_min': number; + timings_per_token: boolean; + post_sampling_probs: boolean; + lora: Array<{ name: string; scale: number }>; + }; + next_token: { + has_next_token: boolean; + has_new_line: boolean; + n_remain: number; + n_decoded: number; + }; +} + +export interface ApiProcessingState { + status: 'initializing' | 'generating' | 'preparing' | 'idle'; + tokensDecoded: number; + tokensRemaining: number; + contextUsed: number; + 
contextTotal: number; + outputTokensUsed: number; // Total output tokens (thinking + regular content) + outputTokensMax: number; // Max output tokens allowed + temperature: number; + topP: number; + speculative: boolean; + hasNextToken: boolean; + tokensPerSecond?: number; + // Progress information from prompt_progress + progressPercent?: number; + promptProgress?: ChatMessagePromptProgress; + promptTokens?: number; + promptMs?: number; + cacheTokens?: number; +} + +/** + * Router model metadata - extended from ApiModelDataEntry with additional router-specific fields + * @deprecated Use ApiModelDataEntry instead - the /models endpoint returns this structure directly + */ +export interface ApiRouterModelMeta { + /** Model identifier (e.g., "ggml-org/Qwen2.5-Omni-7B-GGUF:latest") */ + name: string; + /** Path to model file or manifest */ + path: string; + /** Optional path to multimodal projector */ + path_mmproj?: string; + /** Whether model is in HuggingFace cache */ + in_cache: boolean; + /** Port where model instance is running (0 if not loaded) */ + port?: number; + /** Current status of the model */ + status: ApiModelStatus; + /** Error message if status is FAILED */ + error?: string; +} + +/** + * Request to load a model + */ +export interface ApiRouterModelsLoadRequest { + model: string; +} + +/** + * Response from loading a model + */ +export interface ApiRouterModelsLoadResponse { + success: boolean; + error?: string; +} + +/** + * Request to check model status + */ +export interface ApiRouterModelsStatusRequest { + model: string; +} + +/** + * Response with model status + */ +export interface ApiRouterModelsStatusResponse { + model: string; + status: ModelStatus; + port?: number; + error?: string; +} + +/** + * Response with list of all models from /models endpoint + * Note: This is the same as ApiModelListResponse - the endpoint returns the same structure + * regardless of server mode (MODEL or ROUTER) + */ +export interface ApiRouterModelsListResponse { + object: string; + data: ApiModelDataEntry[]; +} + +/** + * Request to unload a model + */ +export interface ApiRouterModelsUnloadRequest { + model: string; +} + +/** + * Response from unloading a model + */ +export interface ApiRouterModelsUnloadResponse { + success: boolean; + error?: string; +} diff --git a/llama.cpp/tools/server/webui/src/lib/types/chat.d.ts b/llama.cpp/tools/server/webui/src/lib/types/chat.d.ts new file mode 100644 index 0000000..0e706b7 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/chat.d.ts @@ -0,0 +1,55 @@ +export type ChatMessageType = 'root' | 'text' | 'think' | 'system'; +export type ChatRole = 'user' | 'assistant' | 'system'; + +export interface ChatUploadedFile { + id: string; + name: string; + size: number; + type: string; + file: File; + preview?: string; + textContent?: string; +} + +export interface ChatAttachmentDisplayItem { + id: string; + name: string; + size?: number; + preview?: string; + isImage: boolean; + uploadedFile?: ChatUploadedFile; + attachment?: DatabaseMessageExtra; + attachmentIndex?: number; + textContent?: string; +} + +export interface ChatAttachmentPreviewItem { + uploadedFile?: ChatUploadedFile; + attachment?: DatabaseMessageExtra; + preview?: string; + name?: string; + size?: number; + textContent?: string; +} + +export interface ChatMessageSiblingInfo { + message: DatabaseMessage; + siblingIds: string[]; + currentIndex: number; + totalSiblings: number; +} + +export interface ChatMessagePromptProgress { + cache: number; + processed: number; + time_ms: 
number; + total: number; +} + +export interface ChatMessageTimings { + cache_n?: number; + predicted_ms?: number; + predicted_n?: number; + prompt_ms?: number; + prompt_n?: number; +} diff --git a/llama.cpp/tools/server/webui/src/lib/types/database.d.ts b/llama.cpp/tools/server/webui/src/lib/types/database.d.ts new file mode 100644 index 0000000..1a336e0 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/database.d.ts @@ -0,0 +1,85 @@ +import type { ChatMessageTimings, ChatRole, ChatMessageType } from '$lib/types/chat'; +import { AttachmentType } from '$lib/enums'; + +export interface DatabaseConversation { + currNode: string | null; + id: string; + lastModified: number; + name: string; +} + +export interface DatabaseMessageExtraAudioFile { + type: AttachmentType.AUDIO; + name: string; + base64Data: string; + mimeType: string; +} + +export interface DatabaseMessageExtraImageFile { + type: AttachmentType.IMAGE; + name: string; + base64Url: string; +} + +/** + * Legacy format from old webui - pasted content was stored as "context" type + * @deprecated Use DatabaseMessageExtraTextFile instead + */ +export interface DatabaseMessageExtraLegacyContext { + type: AttachmentType.LEGACY_CONTEXT; + name: string; + content: string; +} + +export interface DatabaseMessageExtraPdfFile { + type: AttachmentType.PDF; + base64Data: string; + name: string; + content: string; // Text content extracted from PDF + images?: string[]; // Optional: PDF pages as base64 images + processedAsImages: boolean; // Whether PDF was processed as images +} + +export interface DatabaseMessageExtraTextFile { + type: AttachmentType.TEXT; + name: string; + content: string; +} + +export type DatabaseMessageExtra = + | DatabaseMessageExtraImageFile + | DatabaseMessageExtraTextFile + | DatabaseMessageExtraAudioFile + | DatabaseMessageExtraPdfFile + | DatabaseMessageExtraLegacyContext; + +export interface DatabaseMessage { + id: string; + convId: string; + type: ChatMessageType; + timestamp: number; + role: ChatRole; + content: string; + parent: string; + thinking: string; + toolCalls?: string; + children: string[]; + extra?: DatabaseMessageExtra[]; + timings?: ChatMessageTimings; + model?: string; +} + +/** + * Represents a single conversation with its associated messages, + * typically used for import/export operations. + */ +export type ExportedConversation = { + conv: DatabaseConversation; + messages: DatabaseMessage[]; +}; + +/** + * Type representing one or more exported conversations. + * Can be a single conversation object or an array of them. 
+ */ +export type ExportedConversations = ExportedConversation | ExportedConversation[]; diff --git a/llama.cpp/tools/server/webui/src/lib/types/index.ts b/llama.cpp/tools/server/webui/src/lib/types/index.ts new file mode 100644 index 0000000..2a21c6d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/index.ts @@ -0,0 +1,70 @@ +/** + * Unified exports for all type definitions + * Import types from '$lib/types' for cleaner imports + */ + +// API types +export type { + ApiChatMessageContentPart, + ApiContextSizeError, + ApiErrorResponse, + ApiChatMessageData, + ApiModelStatus, + ApiModelDataEntry, + ApiModelDetails, + ApiModelListResponse, + ApiLlamaCppServerProps, + ApiChatCompletionRequest, + ApiChatCompletionToolCallFunctionDelta, + ApiChatCompletionToolCallDelta, + ApiChatCompletionToolCall, + ApiChatCompletionStreamChunk, + ApiChatCompletionResponse, + ApiSlotData, + ApiProcessingState, + ApiRouterModelMeta, + ApiRouterModelsLoadRequest, + ApiRouterModelsLoadResponse, + ApiRouterModelsStatusRequest, + ApiRouterModelsStatusResponse, + ApiRouterModelsListResponse, + ApiRouterModelsUnloadRequest, + ApiRouterModelsUnloadResponse +} from './api'; + +// Chat types +export type { + ChatMessageType, + ChatRole, + ChatUploadedFile, + ChatAttachmentDisplayItem, + ChatAttachmentPreviewItem, + ChatMessageSiblingInfo, + ChatMessagePromptProgress, + ChatMessageTimings +} from './chat'; + +// Database types +export type { + DatabaseConversation, + DatabaseMessageExtraAudioFile, + DatabaseMessageExtraImageFile, + DatabaseMessageExtraLegacyContext, + DatabaseMessageExtraPdfFile, + DatabaseMessageExtraTextFile, + DatabaseMessageExtra, + DatabaseMessage, + ExportedConversation, + ExportedConversations +} from './database'; + +// Model types +export type { ModelModalities, ModelOption } from './models'; + +// Settings types +export type { + SettingsConfigValue, + SettingsFieldConfig, + SettingsChatServiceOptions, + SettingsConfigType +} from './settings'; diff --git a/llama.cpp/tools/server/webui/src/lib/types/models.d.ts b/llama.cpp/tools/server/webui/src/lib/types/models.d.ts new file mode 100644 index 0000000..ef44a2c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/models.d.ts @@ -0,0 +1,21 @@ +import type { ApiModelDataEntry, ApiModelDetails } from '$lib/types/api'; + +/** + * Model modalities - vision and audio capabilities + */ +export interface ModelModalities { + vision: boolean; + audio: boolean; +} + +export interface ModelOption { + id: string; + name: string; + model: string; + description?: string; + capabilities: string[]; + /** Model modalities from /props endpoint */ + modalities?: ModelModalities; + details?: ApiModelDetails['details']; + meta?: ApiModelDataEntry['meta']; +} diff --git a/llama.cpp/tools/server/webui/src/lib/types/settings.d.ts b/llama.cpp/tools/server/webui/src/lib/types/settings.d.ts new file mode 100644 index 0000000..38b3047 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/types/settings.d.ts @@ -0,0 +1,67 @@ +import type { SETTING_CONFIG_DEFAULT } from '$lib/constants/settings-config'; +import type { ChatMessageTimings } from './chat'; + +export type SettingsConfigValue = string | number | boolean; + +export interface SettingsFieldConfig { + key: string; + label: string; + type: 'input' | 'textarea' | 'checkbox' | 'select'; + isExperimental?: boolean; + help?: string; + options?: Array<{ value: string; label: string; icon?: typeof import('@lucide/svelte').Icon }>; +} + +export interface SettingsChatServiceOptions { + stream?: boolean; 
+ // Model (required in ROUTER mode, optional in MODEL mode) + model?: string; + // System message to inject + systemMessage?: string; + // Disable reasoning format (use 'none' instead of 'auto') + disableReasoningFormat?: boolean; + // Generation parameters + temperature?: number; + max_tokens?: number; + // Sampling parameters + dynatemp_range?: number; + dynatemp_exponent?: number; + top_k?: number; + top_p?: number; + min_p?: number; + xtc_probability?: number; + xtc_threshold?: number; + typ_p?: number; + // Penalty parameters + repeat_last_n?: number; + repeat_penalty?: number; + presence_penalty?: number; + frequency_penalty?: number; + dry_multiplier?: number; + dry_base?: number; + dry_allowed_length?: number; + dry_penalty_last_n?: number; + // Sampler configuration + samplers?: string | string[]; + backend_sampling?: boolean; + // Custom parameters + custom?: string; + timings_per_token?: boolean; + // Callbacks + onChunk?: (chunk: string) => void; + onReasoningChunk?: (chunk: string) => void; + onToolCallChunk?: (chunk: string) => void; + onModel?: (model: string) => void; + onTimings?: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => void; + onComplete?: ( + response: string, + reasoningContent?: string, + timings?: ChatMessageTimings, + toolCalls?: string + ) => void; + onError?: (error: Error) => void; +} + +export type SettingsConfigType = typeof SETTING_CONFIG_DEFAULT & { + [key: string]: SettingsConfigValue; +}; diff --git a/llama.cpp/tools/server/webui/src/lib/utils/api-headers.ts b/llama.cpp/tools/server/webui/src/lib/utils/api-headers.ts new file mode 100644 index 0000000..77ce3e8 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/api-headers.ts @@ -0,0 +1,22 @@ +import { config } from '$lib/stores/settings.svelte'; + +/** + * Get authorization headers for API requests + * Includes Bearer token if API key is configured + */ +export function getAuthHeaders(): Record { + const currentConfig = config(); + const apiKey = currentConfig.apiKey?.toString().trim(); + + return apiKey ? 
{ Authorization: `Bearer ${apiKey}` } : {}; +} + +/** + * Get standard JSON headers with optional authorization + */ +export function getJsonHeaders(): Record<string, string> { + return { + 'Content-Type': 'application/json', + ...getAuthHeaders() + }; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/api-key-validation.ts b/llama.cpp/tools/server/webui/src/lib/utils/api-key-validation.ts new file mode 100644 index 0000000..948b7d7 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/api-key-validation.ts @@ -0,0 +1,45 @@ +import { base } from '$app/paths'; +import { error } from '@sveltejs/kit'; +import { browser } from '$app/environment'; +import { config } from '$lib/stores/settings.svelte'; + +/** + * Validates API key by making a request to the server props endpoint + * Throws SvelteKit errors for authentication failures or server issues + */ +export async function validateApiKey(fetch: typeof globalThis.fetch): Promise<void> { + if (!browser) { + return; + } + + try { + const apiKey = config().apiKey; + + const headers: Record<string, string> = { + 'Content-Type': 'application/json' + }; + + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + + const response = await fetch(`${base}/props`, { headers }); + + if (!response.ok) { + if (response.status === 401 || response.status === 403) { + throw error(401, 'Access denied'); + } + + console.warn(`Server responded with status ${response.status} during API key validation`); + return; + } + } catch (err) { + // If it's already a SvelteKit error, re-throw it + if (err && typeof err === 'object' && 'status' in err) { + throw err; + } + + // Network or other errors + console.warn('Cannot connect to server for API key validation:', err); + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/attachment-display.ts b/llama.cpp/tools/server/webui/src/lib/utils/attachment-display.ts new file mode 100644 index 0000000..750aaa3 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/attachment-display.ts @@ -0,0 +1,61 @@ +import { FileTypeCategory } from '$lib/enums'; +import { getFileTypeCategory, getFileTypeCategoryByExtension, isImageFile } from '$lib/utils'; + +export interface AttachmentDisplayItemsOptions { + uploadedFiles?: ChatUploadedFile[]; + attachments?: DatabaseMessageExtra[]; +} + +/** + * Gets the file type category from an uploaded file, checking both MIME type and extension + */ +function getUploadedFileCategory(file: ChatUploadedFile): FileTypeCategory | null { + const categoryByMime = getFileTypeCategory(file.type); + + if (categoryByMime) { + return categoryByMime; + } + + return getFileTypeCategoryByExtension(file.name); +} + +/** + * Creates a unified list of display items from uploaded files and stored attachments. + * Items are returned in reverse order (newest first).
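+ *
+ * Minimal usage sketch (variable names are illustrative, types as defined above):
+ * @example
+ * const items = getAttachmentDisplayItems({ uploadedFiles, attachments });
+ * // items[0] is the newest entry; stored attachments expose `attachmentIndex`,
+ * // while in-progress uploads expose `uploadedFile`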
+ */ +export function getAttachmentDisplayItems( + options: AttachmentDisplayItemsOptions +): ChatAttachmentDisplayItem[] { + const { uploadedFiles = [], attachments = [] } = options; + const items: ChatAttachmentDisplayItem[] = []; + + // Add uploaded files (ChatForm) + for (const file of uploadedFiles) { + items.push({ + id: file.id, + name: file.name, + size: file.size, + preview: file.preview, + isImage: getUploadedFileCategory(file) === FileTypeCategory.IMAGE, + uploadedFile: file, + textContent: file.textContent + }); + } + + // Add stored attachments (ChatMessage) + for (const [index, attachment] of attachments.entries()) { + const isImage = isImageFile(attachment); + + items.push({ + id: `attachment-${index}`, + name: attachment.name, + preview: isImage && 'base64Url' in attachment ? attachment.base64Url : undefined, + isImage, + attachment, + attachmentIndex: index, + textContent: 'content' in attachment ? attachment.content : undefined + }); + } + + return items.reverse(); +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/attachment-type.ts b/llama.cpp/tools/server/webui/src/lib/utils/attachment-type.ts new file mode 100644 index 0000000..9e9f096 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/attachment-type.ts @@ -0,0 +1,105 @@ +import { AttachmentType, FileTypeCategory } from '$lib/enums'; +import { getFileTypeCategory, getFileTypeCategoryByExtension } from '$lib/utils'; + +/** + * Gets the file type category from an uploaded file, checking both MIME type and extension + * @param uploadedFile - The uploaded file to check + * @returns The file type category or null if not recognized + */ +function getUploadedFileCategory(uploadedFile: ChatUploadedFile): FileTypeCategory | null { + // First try MIME type + const categoryByMime = getFileTypeCategory(uploadedFile.type); + + if (categoryByMime) { + return categoryByMime; + } + + // Fallback to extension (browsers don't always provide correct MIME types) + return getFileTypeCategoryByExtension(uploadedFile.name); +} + +/** + * Determines if an attachment or uploaded file is a text file + * @param uploadedFile - Optional uploaded file + * @param attachment - Optional database attachment + * @returns true if the file is a text file + */ +export function isTextFile( + attachment?: DatabaseMessageExtra, + uploadedFile?: ChatUploadedFile +): boolean { + if (uploadedFile) { + return getUploadedFileCategory(uploadedFile) === FileTypeCategory.TEXT; + } + + if (attachment) { + return ( + attachment.type === AttachmentType.TEXT || attachment.type === AttachmentType.LEGACY_CONTEXT + ); + } + + return false; +} + +/** + * Determines if an attachment or uploaded file is an image + * @param uploadedFile - Optional uploaded file + * @param attachment - Optional database attachment + * @returns true if the file is an image + */ +export function isImageFile( + attachment?: DatabaseMessageExtra, + uploadedFile?: ChatUploadedFile +): boolean { + if (uploadedFile) { + return getUploadedFileCategory(uploadedFile) === FileTypeCategory.IMAGE; + } + + if (attachment) { + return attachment.type === AttachmentType.IMAGE; + } + + return false; +} + +/** + * Determines if an attachment or uploaded file is a PDF + * @param uploadedFile - Optional uploaded file + * @param attachment - Optional database attachment + * @returns true if the file is a PDF + */ +export function isPdfFile( + attachment?: DatabaseMessageExtra, + uploadedFile?: ChatUploadedFile +): boolean { + if (uploadedFile) { + return getUploadedFileCategory(uploadedFile) === 
FileTypeCategory.PDF; + } + + if (attachment) { + return attachment.type === AttachmentType.PDF; + } + + return false; +} + +/** + * Determines if an attachment or uploaded file is an audio file + * @param uploadedFile - Optional uploaded file + * @param attachment - Optional database attachment + * @returns true if the file is an audio file + */ +export function isAudioFile( + attachment?: DatabaseMessageExtra, + uploadedFile?: ChatUploadedFile +): boolean { + if (uploadedFile) { + return getUploadedFileCategory(uploadedFile) === FileTypeCategory.AUDIO; + } + + if (attachment) { + return attachment.type === AttachmentType.AUDIO; + } + + return false; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/audio-recording.ts b/llama.cpp/tools/server/webui/src/lib/utils/audio-recording.ts new file mode 100644 index 0000000..2a21985 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/audio-recording.ts @@ -0,0 +1,226 @@ +import { MimeTypeAudio } from '$lib/enums'; + +/** + * AudioRecorder - Browser-based audio recording with MediaRecorder API + * + * This class provides a complete audio recording solution using the browser's MediaRecorder API. + * It handles microphone access, recording state management, and audio format optimization. + * + * **Features:** + * - Automatic microphone permission handling + * - Audio enhancement (echo cancellation, noise suppression, auto gain) + * - Multiple format support with fallback (WAV, WebM, MP4, AAC) + * - Real-time recording state tracking + * - Proper cleanup and resource management + */ +export class AudioRecorder { + private mediaRecorder: MediaRecorder | null = null; + private audioChunks: Blob[] = []; + private stream: MediaStream | null = null; + private recordingState: boolean = false; + + async startRecording(): Promise { + try { + this.stream = await navigator.mediaDevices.getUserMedia({ + audio: { + echoCancellation: true, + noiseSuppression: true, + autoGainControl: true + } + }); + + this.initializeRecorder(this.stream); + + this.audioChunks = []; + // Start recording with a small timeslice to ensure we get data + this.mediaRecorder!.start(100); + this.recordingState = true; + } catch (error) { + console.error('Failed to start recording:', error); + throw new Error('Failed to access microphone. 
Please check permissions.'); + } + } + + async stopRecording(): Promise { + return new Promise((resolve, reject) => { + if (!this.mediaRecorder || this.mediaRecorder.state === 'inactive') { + reject(new Error('No active recording to stop')); + return; + } + + this.mediaRecorder.onstop = () => { + const mimeType = this.mediaRecorder?.mimeType || MimeTypeAudio.WAV; + const audioBlob = new Blob(this.audioChunks, { type: mimeType }); + + this.cleanup(); + + resolve(audioBlob); + }; + + this.mediaRecorder.onerror = (event) => { + console.error('Recording error:', event); + this.cleanup(); + reject(new Error('Recording failed')); + }; + + this.mediaRecorder.stop(); + }); + } + + isRecording(): boolean { + return this.recordingState; + } + + cancelRecording(): void { + if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') { + this.mediaRecorder.stop(); + } + this.cleanup(); + } + + private initializeRecorder(stream: MediaStream): void { + const options: MediaRecorderOptions = {}; + + if (MediaRecorder.isTypeSupported(MimeTypeAudio.WAV)) { + options.mimeType = MimeTypeAudio.WAV; + } else if (MediaRecorder.isTypeSupported(MimeTypeAudio.WEBM_OPUS)) { + options.mimeType = MimeTypeAudio.WEBM_OPUS; + } else if (MediaRecorder.isTypeSupported(MimeTypeAudio.WEBM)) { + options.mimeType = MimeTypeAudio.WEBM; + } else if (MediaRecorder.isTypeSupported(MimeTypeAudio.MP4)) { + options.mimeType = MimeTypeAudio.MP4; + } else { + console.warn('No preferred audio format supported, using default'); + } + + this.mediaRecorder = new MediaRecorder(stream, options); + + this.mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0) { + this.audioChunks.push(event.data); + } + }; + + this.mediaRecorder.onstop = () => { + this.recordingState = false; + }; + + this.mediaRecorder.onerror = (event) => { + console.error('MediaRecorder error:', event); + this.recordingState = false; + }; + } + + private cleanup(): void { + if (this.stream) { + for (const track of this.stream.getTracks()) { + track.stop(); + } + + this.stream = null; + } + this.mediaRecorder = null; + this.audioChunks = []; + this.recordingState = false; + } +} + +export async function convertToWav(audioBlob: Blob): Promise { + try { + if (audioBlob.type.includes('wav')) { + return audioBlob; + } + + const arrayBuffer = await audioBlob.arrayBuffer(); + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)(); + + const audioBuffer = await audioContext.decodeAudioData(arrayBuffer); + + const wavBlob = audioBufferToWav(audioBuffer); + + audioContext.close(); + + return wavBlob; + } catch (error) { + console.error('Failed to convert audio to WAV:', error); + return audioBlob; + } +} + +function audioBufferToWav(buffer: AudioBuffer): Blob { + const length = buffer.length; + const numberOfChannels = buffer.numberOfChannels; + const sampleRate = buffer.sampleRate; + const bytesPerSample = 2; // 16-bit + const blockAlign = numberOfChannels * bytesPerSample; + const byteRate = sampleRate * blockAlign; + const dataSize = length * blockAlign; + const bufferSize = 44 + dataSize; + + const arrayBuffer = new ArrayBuffer(bufferSize); + const view = new DataView(arrayBuffer); + + const writeString = (offset: number, string: string) => { + for (let i = 0; i < string.length; i++) { + view.setUint8(offset + i, string.charCodeAt(i)); + } + }; + + writeString(0, 'RIFF'); // ChunkID + view.setUint32(4, bufferSize - 8, true); // ChunkSize + writeString(8, 
'WAVE'); // Format + writeString(12, 'fmt '); // Subchunk1ID + view.setUint32(16, 16, true); // Subchunk1Size + view.setUint16(20, 1, true); // AudioFormat (PCM) + view.setUint16(22, numberOfChannels, true); // NumChannels + view.setUint32(24, sampleRate, true); // SampleRate + view.setUint32(28, byteRate, true); // ByteRate + view.setUint16(32, blockAlign, true); // BlockAlign + view.setUint16(34, 16, true); // BitsPerSample + writeString(36, 'data'); // Subchunk2ID + view.setUint32(40, dataSize, true); // Subchunk2Size + + let offset = 44; + for (let i = 0; i < length; i++) { + for (let channel = 0; channel < numberOfChannels; channel++) { + const sample = Math.max(-1, Math.min(1, buffer.getChannelData(channel)[i])); + view.setInt16(offset, sample * 0x7fff, true); + offset += 2; + } + } + + return new Blob([arrayBuffer], { type: MimeTypeAudio.WAV }); +} + +/** + * Create a File object from audio blob with timestamp-based naming + * @param audioBlob - The audio blob to wrap + * @param filename - Optional custom filename + * @returns File object with appropriate name and metadata + */ +export function createAudioFile(audioBlob: Blob, filename?: string): File { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const extension = audioBlob.type.includes('wav') ? 'wav' : 'mp3'; + const defaultFilename = `recording-${timestamp}.${extension}`; + + return new File([audioBlob], filename || defaultFilename, { + type: audioBlob.type, + lastModified: Date.now() + }); +} + +/** + * Check if audio recording is supported in the current browser + * @returns True if MediaRecorder and getUserMedia are available + */ +export function isAudioRecordingSupported(): boolean { + return !!( + typeof navigator !== 'undefined' && + navigator.mediaDevices && + typeof navigator.mediaDevices.getUserMedia === 'function' && + typeof window !== 'undefined' && + window.MediaRecorder + ); +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/autoresize-textarea.ts b/llama.cpp/tools/server/webui/src/lib/utils/autoresize-textarea.ts new file mode 100644 index 0000000..cfee5ec --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/autoresize-textarea.ts @@ -0,0 +1,10 @@ +/** + * Automatically resizes a textarea element to fit its content + * @param textareaElement - The textarea element to resize + */ +export default function autoResizeTextarea(textareaElement: HTMLTextAreaElement | null): void { + if (textareaElement) { + textareaElement.style.height = '1rem'; + textareaElement.style.height = textareaElement.scrollHeight + 'px'; + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/branching.ts b/llama.cpp/tools/server/webui/src/lib/utils/branching.ts new file mode 100644 index 0000000..3be5604 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/branching.ts @@ -0,0 +1,283 @@ +/** + * Message branching utilities for conversation tree navigation. + * + * Conversation branching allows users to edit messages and create alternate paths + * while preserving the original conversation flow. Each message has parent/children + * relationships forming a tree structure. + * + * Example tree: + * root + * ├── message 1 (user) + * │ └── message 2 (assistant) + * │ ├── message 3 (user) + * │ └── message 6 (user) ← new branch + * └── message 4 (user) + * └── message 5 (assistant) + */ + +/** + * Filters messages to get the conversation path from root to a specific leaf node. + * If the leafNodeId doesn't exist, returns the path with the latest timestamp. 
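+ * For example, with the tree in the module comment above, passing the id of
+ * message 3 as leafNodeId returns [message 1, message 2, message 3]; with
+ * includeRoot = true the root node is included as well.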
+ * + * @param messages - All messages in the conversation + * @param leafNodeId - The target leaf node ID to trace back from + * @param includeRoot - Whether to include root messages in the result + * @returns Array of messages from root to leaf, sorted by timestamp + */ +export function filterByLeafNodeId( + messages: readonly DatabaseMessage[], + leafNodeId: string, + includeRoot: boolean = false +): readonly DatabaseMessage[] { + const result: DatabaseMessage[] = []; + const nodeMap = new Map(); + + // Build node map for quick lookups + for (const msg of messages) { + nodeMap.set(msg.id, msg); + } + + // Find the starting node (leaf node or latest if not found) + let startNode: DatabaseMessage | undefined = nodeMap.get(leafNodeId); + if (!startNode) { + // If leaf node not found, use the message with latest timestamp + let latestTime = -1; + for (const msg of messages) { + if (msg.timestamp > latestTime) { + startNode = msg; + latestTime = msg.timestamp; + } + } + } + + // Traverse from leaf to root, collecting messages + let currentNode: DatabaseMessage | undefined = startNode; + while (currentNode) { + // Include message if it's not root, or if we want to include root + if (currentNode.type !== 'root' || includeRoot) { + result.push(currentNode); + } + + // Stop traversal if parent is null (reached root) + if (currentNode.parent === null) { + break; + } + currentNode = nodeMap.get(currentNode.parent); + } + + // Sort by timestamp to get chronological order (root to leaf) + result.sort((a, b) => a.timestamp - b.timestamp); + return result; +} + +/** + * Finds the leaf node (message with no children) for a given message branch. + * Traverses down the tree following the last child until reaching a leaf. + * + * @param messages - All messages in the conversation + * @param messageId - Starting message ID to find leaf for + * @returns The leaf node ID, or the original messageId if no children + */ +export function findLeafNode(messages: readonly DatabaseMessage[], messageId: string): string { + const nodeMap = new Map(); + + // Build node map for quick lookups + for (const msg of messages) { + nodeMap.set(msg.id, msg); + } + + let currentNode: DatabaseMessage | undefined = nodeMap.get(messageId); + while (currentNode && currentNode.children.length > 0) { + // Follow the last child (most recent branch) + const lastChildId = currentNode.children[currentNode.children.length - 1]; + currentNode = nodeMap.get(lastChildId); + } + + return currentNode?.id ?? messageId; +} + +/** + * Finds all descendant messages (children, grandchildren, etc.) of a given message. + * This is used for cascading deletion to remove all messages in a branch. 
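+ * For example, with the tree in the module comment above, passing the id of
+ * message 2 returns the ids of message 3 and message 6.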
+ * + * @param messages - All messages in the conversation + * @param messageId - The root message ID to find descendants for + * @returns Array of all descendant message IDs + */ +export function findDescendantMessages( + messages: readonly DatabaseMessage[], + messageId: string +): string[] { + const nodeMap = new Map(); + + // Build node map for quick lookups + for (const msg of messages) { + nodeMap.set(msg.id, msg); + } + + const descendants: string[] = []; + const queue: string[] = [messageId]; + + while (queue.length > 0) { + const currentId = queue.shift()!; + const currentNode = nodeMap.get(currentId); + + if (currentNode) { + // Add all children to the queue and descendants list + for (const childId of currentNode.children) { + descendants.push(childId); + queue.push(childId); + } + } + } + + return descendants; +} + +/** + * Gets sibling information for a message, including all sibling IDs and current position. + * Siblings are messages that share the same parent. + * + * @param messages - All messages in the conversation + * @param messageId - The message to get sibling info for + * @returns Sibling information including leaf node IDs for navigation + */ +export function getMessageSiblings( + messages: readonly DatabaseMessage[], + messageId: string +): ChatMessageSiblingInfo | null { + const nodeMap = new Map(); + + // Build node map for quick lookups + for (const msg of messages) { + nodeMap.set(msg.id, msg); + } + + const message = nodeMap.get(messageId); + if (!message) { + return null; + } + + // Handle null parent (root message) case + if (message.parent === null) { + // No parent means this is likely a root node with no siblings + return { + message, + siblingIds: [messageId], + currentIndex: 0, + totalSiblings: 1 + }; + } + + const parentNode = nodeMap.get(message.parent); + if (!parentNode) { + // Parent not found - treat as single message + return { + message, + siblingIds: [messageId], + currentIndex: 0, + totalSiblings: 1 + }; + } + + // Get all sibling IDs (including self) + const siblingIds = parentNode.children; + + // Convert sibling message IDs to their corresponding leaf node IDs + // This allows navigation between different conversation branches + const siblingLeafIds = siblingIds.map((siblingId: string) => findLeafNode(messages, siblingId)); + + // Find current message's position among siblings + const currentIndex = siblingIds.indexOf(messageId); + + return { + message, + siblingIds: siblingLeafIds, + currentIndex, + totalSiblings: siblingIds.length + }; +} + +/** + * Creates a display-ready list of messages with sibling information for UI rendering. + * This is the main function used by chat components to render conversation branches. 
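+ * For example, with the tree in the module comment above and message 6 as the
+ * current leaf, the returned list covers message 1, message 2 and message 6,
+ * and the entry for message 6 reports two siblings (message 3 and message 6).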
+ * + * @param messages - All messages in the conversation + * @param leafNodeId - Current leaf node being viewed + * @returns Array of messages with sibling navigation info + */ +export function getMessageDisplayList( + messages: readonly DatabaseMessage[], + leafNodeId: string +): ChatMessageSiblingInfo[] { + // Get the current conversation path + const currentPath = filterByLeafNodeId(messages, leafNodeId, true); + const result: ChatMessageSiblingInfo[] = []; + + // Add sibling info for each message in the current path + for (const message of currentPath) { + if (message.type === 'root') { + continue; // Skip root messages in display + } + + const siblingInfo = getMessageSiblings(messages, message.id); + if (siblingInfo) { + result.push(siblingInfo); + } + } + + return result; +} + +/** + * Checks if a message has multiple siblings (indicating branching at that point). + * + * @param messages - All messages in the conversation + * @param messageId - The message to check + * @returns True if the message has siblings + */ +export function hasMessageSiblings( + messages: readonly DatabaseMessage[], + messageId: string +): boolean { + const siblingInfo = getMessageSiblings(messages, messageId); + return siblingInfo ? siblingInfo.totalSiblings > 1 : false; +} + +/** + * Gets the next sibling message ID for navigation. + * + * @param messages - All messages in the conversation + * @param messageId - Current message ID + * @returns Next sibling's leaf node ID, or null if at the end + */ +export function getNextSibling( + messages: readonly DatabaseMessage[], + messageId: string +): string | null { + const siblingInfo = getMessageSiblings(messages, messageId); + if (!siblingInfo || siblingInfo.currentIndex >= siblingInfo.totalSiblings - 1) { + return null; + } + + return siblingInfo.siblingIds[siblingInfo.currentIndex + 1]; +} + +/** + * Gets the previous sibling message ID for navigation. + * + * @param messages - All messages in the conversation + * @param messageId - Current message ID + * @returns Previous sibling's leaf node ID, or null if at the beginning + */ +export function getPreviousSibling( + messages: readonly DatabaseMessage[], + messageId: string +): string | null { + const siblingInfo = getMessageSiblings(messages, messageId); + if (!siblingInfo || siblingInfo.currentIndex <= 0) { + return null; + } + + return siblingInfo.siblingIds[siblingInfo.currentIndex - 1]; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/browser-only.ts b/llama.cpp/tools/server/webui/src/lib/utils/browser-only.ts new file mode 100644 index 0000000..0af8006 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/browser-only.ts @@ -0,0 +1,35 @@ +/** + * Browser-only utility exports + * + * These utilities require browser APIs (DOM, Canvas, MediaRecorder, etc.) + * and cannot be imported during SSR. Import from '$lib/utils/browser-only' + * only in client-side code or components that are not server-rendered. 
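+ *
+ * Illustrative pattern for keeping these imports client-side (a sketch, not part of this module):
+ * @example
+ * import { onMount } from 'svelte';
+ * onMount(async () => {
+ *   const { AudioRecorder } = await import('$lib/utils/browser-only');
+ *   const recorder = new AudioRecorder(); // browser APIs are available here
+ * });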
+ */ + +// Audio utilities (MediaRecorder API) +export { + AudioRecorder, + convertToWav, + createAudioFile, + isAudioRecordingSupported +} from './audio-recording'; + +// PDF processing utilities (pdfjs-dist with DOMMatrix) +export { + convertPDFToText, + convertPDFToImage, + isPdfFile as isPdfFileFromFile, + isApplicationMimeType +} from './pdf-processing'; + +// File conversion utilities (depends on pdf-processing) +export { parseFilesToMessageExtras, type FileProcessingResult } from './convert-files-to-extra'; + +// File upload processing utilities (depends on pdf-processing, svg-to-png, webp-to-png) +export { processFilesToChatUploaded } from './process-uploaded-files'; + +// SVG utilities (Canvas/Image API) +export { svgBase64UrlToPngDataURL, isSvgFile, isSvgMimeType } from './svg-to-png'; + +// WebP utilities (Canvas/Image API) +export { webpBase64UrlToPngDataURL, isWebpFile, isWebpMimeType } from './webp-to-png'; diff --git a/llama.cpp/tools/server/webui/src/lib/utils/clipboard.ts b/llama.cpp/tools/server/webui/src/lib/utils/clipboard.ts new file mode 100644 index 0000000..940e64c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/clipboard.ts @@ -0,0 +1,259 @@ +import { toast } from 'svelte-sonner'; +import { AttachmentType } from '$lib/enums'; +import type { + DatabaseMessageExtra, + DatabaseMessageExtraTextFile, + DatabaseMessageExtraLegacyContext +} from '$lib/types/database'; + +/** + * Copy text to clipboard with toast notification + * Uses modern clipboard API when available, falls back to legacy method for non-secure contexts + * @param text - Text to copy to clipboard + * @param successMessage - Custom success message (optional) + * @param errorMessage - Custom error message (optional) + * @returns Promise - True if successful, false otherwise + */ +export async function copyToClipboard( + text: string, + successMessage = 'Copied to clipboard', + errorMessage = 'Failed to copy to clipboard' +): Promise { + try { + // Try modern clipboard API first (secure contexts only) + if (navigator.clipboard && navigator.clipboard.writeText) { + await navigator.clipboard.writeText(text); + toast.success(successMessage); + return true; + } + + // Fallback for non-secure contexts + const textArea = document.createElement('textarea'); + textArea.value = text; + textArea.style.position = 'fixed'; + textArea.style.left = '-999999px'; + textArea.style.top = '-999999px'; + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + + const successful = document.execCommand('copy'); + document.body.removeChild(textArea); + + if (successful) { + toast.success(successMessage); + return true; + } else { + throw new Error('execCommand failed'); + } + } catch (error) { + console.error('Failed to copy to clipboard:', error); + toast.error(errorMessage); + return false; + } +} + +/** + * Copy code with HTML entity decoding and toast notification + * @param rawCode - Raw code string that may contain HTML entities + * @param successMessage - Custom success message (optional) + * @param errorMessage - Custom error message (optional) + * @returns Promise - True if successful, false otherwise + */ +export async function copyCodeToClipboard( + rawCode: string, + successMessage = 'Code copied to clipboard', + errorMessage = 'Failed to copy code' +): Promise { + return copyToClipboard(rawCode, successMessage, errorMessage); +} + +/** + * Format for text attachments when copied to clipboard + */ +export interface ClipboardTextAttachment { + type: typeof AttachmentType.TEXT; + 
name: string; + content: string; +} + +/** + * Parsed result from clipboard content + */ +export interface ParsedClipboardContent { + message: string; + textAttachments: ClipboardTextAttachment[]; +} + +/** + * Formats a message with text attachments for clipboard copying. + * + * Default format (asPlainText = false): + * ``` + * "Text message content" + * [ + * {"type":"TEXT","name":"filename.txt","content":"..."}, + * {"type":"TEXT","name":"another.txt","content":"..."} + * ] + * ``` + * + * Plain text format (asPlainText = true): + * ``` + * Text message content + * + * file content here + * + * another file content + * ``` + * + * @param content - The message text content + * @param extras - Optional array of message attachments + * @param asPlainText - If true, format as plain text without JSON structure + * @returns Formatted string for clipboard + */ +export function formatMessageForClipboard( + content: string, + extras?: DatabaseMessageExtra[], + asPlainText: boolean = false +): string { + // Filter only text attachments (TEXT type and legacy CONTEXT type) + const textAttachments = + extras?.filter( + (extra): extra is DatabaseMessageExtraTextFile | DatabaseMessageExtraLegacyContext => + extra.type === AttachmentType.TEXT || extra.type === AttachmentType.LEGACY_CONTEXT + ) ?? []; + + if (textAttachments.length === 0) { + return content; + } + + if (asPlainText) { + const parts = [content]; + for (const att of textAttachments) { + parts.push(att.content); + } + return parts.join('\n\n'); + } + + const clipboardAttachments: ClipboardTextAttachment[] = textAttachments.map((att) => ({ + type: AttachmentType.TEXT, + name: att.name, + content: att.content + })); + + return `${JSON.stringify(content)}\n${JSON.stringify(clipboardAttachments, null, 2)}`; +} + +/** + * Parses clipboard content to extract message and text attachments. + * Supports both plain text and the special format with attachments. 
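+ * For example, a string such as
+ * '"Hello"\n[{"type":"TEXT","name":"filename.txt","content":"..."}]'
+ * parses to { message: 'Hello' } with one TEXT attachment named 'filename.txt'.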
+ * + * @param clipboardText - Raw text from clipboard + * @returns Parsed content with message and attachments + */ +export function parseClipboardContent(clipboardText: string): ParsedClipboardContent { + const defaultResult: ParsedClipboardContent = { + message: clipboardText, + textAttachments: [] + }; + + if (!clipboardText.startsWith('"')) { + return defaultResult; + } + + try { + let stringEndIndex = -1; + let escaped = false; + + for (let i = 1; i < clipboardText.length; i++) { + const char = clipboardText[i]; + + if (escaped) { + escaped = false; + continue; + } + + if (char === '\\') { + escaped = true; + continue; + } + + if (char === '"') { + stringEndIndex = i; + break; + } + } + + if (stringEndIndex === -1) { + return defaultResult; + } + + const jsonStringPart = clipboardText.substring(0, stringEndIndex + 1); + const remainingPart = clipboardText.substring(stringEndIndex + 1).trim(); + + const message = JSON.parse(jsonStringPart) as string; + + if (!remainingPart || !remainingPart.startsWith('[')) { + return { + message, + textAttachments: [] + }; + } + + const attachments = JSON.parse(remainingPart) as unknown[]; + + const validAttachments: ClipboardTextAttachment[] = []; + + for (const att of attachments) { + if (isValidTextAttachment(att)) { + validAttachments.push({ + type: AttachmentType.TEXT, + name: att.name, + content: att.content + }); + } + } + + return { + message, + textAttachments: validAttachments + }; + } catch { + return defaultResult; + } +} + +/** + * Type guard to validate a text attachment object + * @param obj The object to validate + * @returns true if the object is a valid text attachment + */ +function isValidTextAttachment( + obj: unknown +): obj is { type: string; name: string; content: string } { + if (typeof obj !== 'object' || obj === null) { + return false; + } + + const record = obj as Record; + + return ( + (record.type === AttachmentType.TEXT || record.type === 'TEXT') && + typeof record.name === 'string' && + typeof record.content === 'string' + ); +} + +/** + * Checks if clipboard content contains our special format with attachments + * @param clipboardText - Raw text from clipboard + * @returns true if the clipboard content contains our special format with attachments + */ +export function hasClipboardAttachments(clipboardText: string): boolean { + if (!clipboardText.startsWith('"')) { + return false; + } + + const parsed = parseClipboardContent(clipboardText); + return parsed.textAttachments.length > 0; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/config-helpers.ts b/llama.cpp/tools/server/webui/src/lib/utils/config-helpers.ts new file mode 100644 index 0000000..b85242d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/config-helpers.ts @@ -0,0 +1,51 @@ +/** + * Type-safe configuration helpers + * + * Provides utilities for safely accessing and modifying configuration objects + * with dynamic keys while maintaining TypeScript type safety. 
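+ *
+ * Illustrative usage (assumes a settings object exposing a `temperature` key):
+ * @example
+ * setConfigValue(config, 'temperature', 0.8);
+ * getConfigValue(config, 'temperature'); // 0.8
+ * configToParameterRecord(config, ['temperature', 'top_k']); // { temperature: 0.8, ... }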
+ */ + +/** + * Type-safe helper to access config properties dynamically + * Provides better type safety than direct casting to Record<string, unknown> + */ +export function setConfigValue<T extends object>( + config: T, + key: string, + value: unknown +): void { + if (key in config) { + (config as Record<string, unknown>)[key] = value; + } +} + +/** + * Type-safe helper to get config values dynamically + */ +export function getConfigValue<T extends object>( + config: T, + key: string +): string | number | boolean | undefined { + const value = (config as Record<string, unknown>)[key]; + return value as string | number | boolean | undefined; +} + +/** + * Convert a SettingsConfigType to a ParameterRecord for specific keys + * Useful for parameter synchronization operations + */ +export function configToParameterRecord<T extends object>( + config: T, + keys: string[] +): Record<string, string | number | boolean> { + const record: Record<string, string | number | boolean> = {}; + + for (const key of keys) { + const value = getConfigValue(config, key); + if (value !== undefined) { + record[key] = value; + } + } + + return record; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/conversation-utils.ts b/llama.cpp/tools/server/webui/src/lib/utils/conversation-utils.ts new file mode 100644 index 0000000..aee244a --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/conversation-utils.ts @@ -0,0 +1,30 @@ +/** + * Utility functions for conversation data manipulation + */ + +/** + * Creates a map of conversation IDs to their message counts from exported conversation data + * @param exportedData - Array of exported conversations with their messages + * @returns Map of conversation ID to message count + */ +export function createMessageCountMap( + exportedData: Array<{ conv: DatabaseConversation; messages: DatabaseMessage[] }> +): Map<string, number> { + const countMap = new Map<string, number>(); + + for (const item of exportedData) { + countMap.set(item.conv.id, item.messages.length); + } + + return countMap; +} + +/** + * Gets the message count for a specific conversation from the count map + * @param conversationId - The ID of the conversation + * @param countMap - Map of conversation IDs to message counts + * @returns The message count, or 0 if not found + */ +export function getMessageCount(conversationId: string, countMap: Map<string, number>): number { + return countMap.get(conversationId) ??
0; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/convert-files-to-extra.ts b/llama.cpp/tools/server/webui/src/lib/utils/convert-files-to-extra.ts new file mode 100644 index 0000000..6eb50f6 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/convert-files-to-extra.ts @@ -0,0 +1,192 @@ +import { convertPDFToImage, convertPDFToText } from './pdf-processing'; +import { isSvgMimeType, svgBase64UrlToPngDataURL } from './svg-to-png'; +import { isWebpMimeType, webpBase64UrlToPngDataURL } from './webp-to-png'; +import { FileTypeCategory, AttachmentType } from '$lib/enums'; +import { config, settingsStore } from '$lib/stores/settings.svelte'; +import { modelsStore } from '$lib/stores/models.svelte'; +import { getFileTypeCategory } from '$lib/utils'; +import { readFileAsText, isLikelyTextFile } from './text-files'; +import { toast } from 'svelte-sonner'; + +function readFileAsBase64(file: File): Promise { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + + reader.onload = () => { + // Extract base64 data without the data URL prefix + const dataUrl = reader.result as string; + const base64 = dataUrl.split(',')[1]; + resolve(base64); + }; + + reader.onerror = () => reject(reader.error); + + reader.readAsDataURL(file); + }); +} + +export interface FileProcessingResult { + extras: DatabaseMessageExtra[]; + emptyFiles: string[]; +} + +export async function parseFilesToMessageExtras( + files: ChatUploadedFile[], + activeModelId?: string +): Promise { + const extras: DatabaseMessageExtra[] = []; + const emptyFiles: string[] = []; + + for (const file of files) { + if (getFileTypeCategory(file.type) === FileTypeCategory.IMAGE) { + if (file.preview) { + let base64Url = file.preview; + + if (isSvgMimeType(file.type)) { + try { + base64Url = await svgBase64UrlToPngDataURL(base64Url); + } catch (error) { + console.error('Failed to convert SVG to PNG for database storage:', error); + } + } else if (isWebpMimeType(file.type)) { + try { + base64Url = await webpBase64UrlToPngDataURL(base64Url); + } catch (error) { + console.error('Failed to convert WebP to PNG for database storage:', error); + } + } + + extras.push({ + type: AttachmentType.IMAGE, + name: file.name, + base64Url + }); + } + } else if (getFileTypeCategory(file.type) === FileTypeCategory.AUDIO) { + // Process audio files (MP3 and WAV) + try { + const base64Data = await readFileAsBase64(file.file); + + extras.push({ + type: AttachmentType.AUDIO, + name: file.name, + base64Data: base64Data, + mimeType: file.type + }); + } catch (error) { + console.error(`Failed to process audio file ${file.name}:`, error); + } + } else if (getFileTypeCategory(file.type) === FileTypeCategory.PDF) { + try { + // Always get base64 data for preview functionality + const base64Data = await readFileAsBase64(file.file); + const currentConfig = config(); + // Use per-model vision check for router mode + const hasVisionSupport = activeModelId + ? 
modelsStore.modelSupportsVision(activeModelId) + : false; + + // Force PDF-to-text for non-vision models + let shouldProcessAsImages = Boolean(currentConfig.pdfAsImage) && hasVisionSupport; + + // If user had pdfAsImage enabled but model doesn't support vision, update setting and notify + if (currentConfig.pdfAsImage && !hasVisionSupport) { + console.log('Non-vision model detected: forcing PDF-to-text mode and updating settings'); + + // Update the setting in localStorage + settingsStore.updateConfig('pdfAsImage', false); + + // Show toast notification to user + toast.warning( + 'PDF setting changed: Non-vision model detected, PDFs will be processed as text instead of images.', + { + duration: 5000 + } + ); + + shouldProcessAsImages = false; + } + + if (shouldProcessAsImages) { + // Process PDF as images (only for vision models) + try { + const images = await convertPDFToImage(file.file); + + // Show success toast for PDF image processing + toast.success( + `PDF "${file.name}" processed as ${images.length} images for vision model.`, + { + duration: 3000 + } + ); + + extras.push({ + type: AttachmentType.PDF, + name: file.name, + content: `PDF file with ${images.length} pages`, + images: images, + processedAsImages: true, + base64Data: base64Data + }); + } catch (imageError) { + console.warn( + `Failed to process PDF ${file.name} as images, falling back to text:`, + imageError + ); + + // Fallback to text processing + const content = await convertPDFToText(file.file); + + extras.push({ + type: AttachmentType.PDF, + name: file.name, + content: content, + processedAsImages: false, + base64Data: base64Data + }); + } + } else { + // Process PDF as text (default or forced for non-vision models) + const content = await convertPDFToText(file.file); + + // Show success toast for PDF text processing + toast.success(`PDF "${file.name}" processed as text content.`, { + duration: 3000 + }); + + extras.push({ + type: AttachmentType.PDF, + name: file.name, + content: content, + processedAsImages: false, + base64Data: base64Data + }); + } + } catch (error) { + console.error(`Failed to process PDF file ${file.name}:`, error); + } + } else { + try { + const content = await readFileAsText(file.file); + + // Check if file is empty + if (content.trim() === '') { + console.warn(`File ${file.name} is empty and will be skipped`); + emptyFiles.push(file.name); + } else if (isLikelyTextFile(content)) { + extras.push({ + type: AttachmentType.TEXT, + name: file.name, + content: content + }); + } else { + console.warn(`File ${file.name} appears to be binary and will be skipped`); + } + } catch (error) { + console.error(`Failed to read file ${file.name}:`, error); + } + } + } + + return { extras, emptyFiles }; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/file-preview.ts b/llama.cpp/tools/server/webui/src/lib/utils/file-preview.ts new file mode 100644 index 0000000..26a6053 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/file-preview.ts @@ -0,0 +1,36 @@ +/** + * Gets a display label for a file type from various input formats + * + * Handles: + * - MIME types: 'application/pdf' → 'PDF' + * - AttachmentType values: 'PDF', 'AUDIO' → 'PDF', 'AUDIO' + * - File names: 'document.pdf' → 'PDF' + * - Unknown: returns 'FILE' + * + * @param input - MIME type, AttachmentType value, or file name + * @returns Formatted file type label (uppercase) + */ +export function getFileTypeLabel(input: string | undefined): string { + if (!input) return 'FILE'; + + // Handle MIME types (contains '/') + if 
(input.includes('/')) { + const subtype = input.split('/').pop(); + if (subtype) { + // Handle special cases like 'vnd.ms-excel' → 'EXCEL' + if (subtype.includes('.')) { + return subtype.split('.').pop()?.toUpperCase() || 'FILE'; + } + return subtype.toUpperCase(); + } + } + + // Handle file names (contains '.') + if (input.includes('.')) { + const ext = input.split('.').pop(); + if (ext) return ext.toUpperCase(); + } + + // Handle AttachmentType or other plain strings + return input.toUpperCase(); +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/file-type.ts b/llama.cpp/tools/server/webui/src/lib/utils/file-type.ts new file mode 100644 index 0000000..9a9996d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/file-type.ts @@ -0,0 +1,222 @@ +import { + AUDIO_FILE_TYPES, + IMAGE_FILE_TYPES, + PDF_FILE_TYPES, + TEXT_FILE_TYPES +} from '$lib/constants/supported-file-types'; +import { + FileExtensionAudio, + FileExtensionImage, + FileExtensionPdf, + FileExtensionText, + FileTypeCategory, + MimeTypeApplication, + MimeTypeAudio, + MimeTypeImage, + MimeTypeText +} from '$lib/enums'; + +export function getFileTypeCategory(mimeType: string): FileTypeCategory | null { + switch (mimeType) { + // Images + case MimeTypeImage.JPEG: + case MimeTypeImage.PNG: + case MimeTypeImage.GIF: + case MimeTypeImage.WEBP: + case MimeTypeImage.SVG: + return FileTypeCategory.IMAGE; + + // Audio + case MimeTypeAudio.MP3_MPEG: + case MimeTypeAudio.MP3: + case MimeTypeAudio.MP4: + case MimeTypeAudio.WAV: + case MimeTypeAudio.WEBM: + case MimeTypeAudio.WEBM_OPUS: + return FileTypeCategory.AUDIO; + + // PDF + case MimeTypeApplication.PDF: + return FileTypeCategory.PDF; + + // Text + case MimeTypeText.PLAIN: + case MimeTypeText.MARKDOWN: + case MimeTypeText.ASCIIDOC: + case MimeTypeText.JAVASCRIPT: + case MimeTypeText.JAVASCRIPT_APP: + case MimeTypeText.TYPESCRIPT: + case MimeTypeText.JSX: + case MimeTypeText.TSX: + case MimeTypeText.CSS: + case MimeTypeText.HTML: + case MimeTypeText.JSON: + case MimeTypeText.XML_TEXT: + case MimeTypeText.XML_APP: + case MimeTypeText.YAML_TEXT: + case MimeTypeText.YAML_APP: + case MimeTypeText.CSV: + case MimeTypeText.PYTHON: + case MimeTypeText.JAVA: + case MimeTypeText.CPP_SRC: + case MimeTypeText.C_SRC: + case MimeTypeText.C_HDR: + case MimeTypeText.PHP: + case MimeTypeText.RUBY: + case MimeTypeText.GO: + case MimeTypeText.RUST: + case MimeTypeText.SHELL: + case MimeTypeText.BAT: + case MimeTypeText.SQL: + case MimeTypeText.R: + case MimeTypeText.SCALA: + case MimeTypeText.KOTLIN: + case MimeTypeText.SWIFT: + case MimeTypeText.DART: + case MimeTypeText.VUE: + case MimeTypeText.SVELTE: + case MimeTypeText.LATEX: + case MimeTypeText.BIBTEX: + case MimeTypeText.CUDA: + case MimeTypeText.CPP_HDR: + case MimeTypeText.CSHARP: + case MimeTypeText.HASKELL: + case MimeTypeText.PROPERTIES: + case MimeTypeText.TEX: + case MimeTypeText.TEX_APP: + return FileTypeCategory.TEXT; + + default: + return null; + } +} + +export function getFileTypeCategoryByExtension(filename: string): FileTypeCategory | null { + const extension = filename.toLowerCase().substring(filename.lastIndexOf('.')); + + switch (extension) { + // Images + case FileExtensionImage.JPG: + case FileExtensionImage.JPEG: + case FileExtensionImage.PNG: + case FileExtensionImage.GIF: + case FileExtensionImage.WEBP: + case FileExtensionImage.SVG: + return FileTypeCategory.IMAGE; + + // Audio + case FileExtensionAudio.MP3: + case FileExtensionAudio.WAV: + return FileTypeCategory.AUDIO; + + // PDF + case 
FileExtensionPdf.PDF: + return FileTypeCategory.PDF; + + // Text + case FileExtensionText.TXT: + case FileExtensionText.MD: + case FileExtensionText.ADOC: + case FileExtensionText.JS: + case FileExtensionText.TS: + case FileExtensionText.JSX: + case FileExtensionText.TSX: + case FileExtensionText.CSS: + case FileExtensionText.HTML: + case FileExtensionText.HTM: + case FileExtensionText.JSON: + case FileExtensionText.XML: + case FileExtensionText.YAML: + case FileExtensionText.YML: + case FileExtensionText.CSV: + case FileExtensionText.LOG: + case FileExtensionText.PY: + case FileExtensionText.JAVA: + case FileExtensionText.CPP: + case FileExtensionText.C: + case FileExtensionText.H: + case FileExtensionText.PHP: + case FileExtensionText.RB: + case FileExtensionText.GO: + case FileExtensionText.RS: + case FileExtensionText.SH: + case FileExtensionText.BAT: + case FileExtensionText.SQL: + case FileExtensionText.R: + case FileExtensionText.SCALA: + case FileExtensionText.KT: + case FileExtensionText.SWIFT: + case FileExtensionText.DART: + case FileExtensionText.VUE: + case FileExtensionText.SVELTE: + case FileExtensionText.TEX: + case FileExtensionText.BIB: + case FileExtensionText.COMP: + case FileExtensionText.CU: + case FileExtensionText.CUH: + case FileExtensionText.HPP: + case FileExtensionText.HS: + case FileExtensionText.PROPERTIES: + return FileTypeCategory.TEXT; + + default: + return null; + } +} + +export function getFileTypeByExtension(filename: string): string | null { + const extension = filename.toLowerCase().substring(filename.lastIndexOf('.')); + + for (const [key, type] of Object.entries(IMAGE_FILE_TYPES)) { + if ((type.extensions as readonly string[]).includes(extension)) { + return `${FileTypeCategory.IMAGE}:${key}`; + } + } + + for (const [key, type] of Object.entries(AUDIO_FILE_TYPES)) { + if ((type.extensions as readonly string[]).includes(extension)) { + return `${FileTypeCategory.AUDIO}:${key}`; + } + } + + for (const [key, type] of Object.entries(PDF_FILE_TYPES)) { + if ((type.extensions as readonly string[]).includes(extension)) { + return `${FileTypeCategory.PDF}:${key}`; + } + } + + for (const [key, type] of Object.entries(TEXT_FILE_TYPES)) { + if ((type.extensions as readonly string[]).includes(extension)) { + return `${FileTypeCategory.TEXT}:${key}`; + } + } + + return null; +} + +export function isFileTypeSupported(filename: string, mimeType?: string): boolean { + // Images are detected and handled separately for vision models + if (mimeType) { + const category = getFileTypeCategory(mimeType); + if ( + category === FileTypeCategory.IMAGE || + category === FileTypeCategory.AUDIO || + category === FileTypeCategory.PDF + ) { + return true; + } + } + + // Check extension for known types (especially images without MIME) + const extCategory = getFileTypeCategoryByExtension(filename); + if ( + extCategory === FileTypeCategory.IMAGE || + extCategory === FileTypeCategory.AUDIO || + extCategory === FileTypeCategory.PDF + ) { + return true; + } + + // Fallback: treat everything else as text (inclusive by default) + return true; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/formatters.ts b/llama.cpp/tools/server/webui/src/lib/utils/formatters.ts new file mode 100644 index 0000000..ae9f59a --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/formatters.ts @@ -0,0 +1,53 @@ +/** + * Formats file size in bytes to human readable format + * Supports Bytes, KB, MB, and GB + * + * @param bytes - File size in bytes (or unknown for safety) + * @returns 
Formatted file size string + */ +export function formatFileSize(bytes: number | unknown): string { + if (typeof bytes !== 'number') return 'Unknown'; + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const sizes = ['Bytes', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; +} + +/** + * Format parameter count to human-readable format (B, M, K) + * + * @param params - Parameter count + * @returns Human-readable parameter count + */ +export function formatParameters(params: number | unknown): string { + if (typeof params !== 'number') return 'Unknown'; + + if (params >= 1e9) { + return `${(params / 1e9).toFixed(2)}B`; + } + + if (params >= 1e6) { + return `${(params / 1e6).toFixed(2)}M`; + } + + if (params >= 1e3) { + return `${(params / 1e3).toFixed(2)}K`; + } + + return params.toString(); +} + +/** + * Format number with locale-specific thousands separators + * + * @param num - Number to format + * @returns Human-readable number + */ +export function formatNumber(num: number | unknown): string { + if (typeof num !== 'number') return 'Unknown'; + + return num.toLocaleString(); +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/index.ts b/llama.cpp/tools/server/webui/src/lib/utils/index.ts new file mode 100644 index 0000000..588167b --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/index.ts @@ -0,0 +1,95 @@ +/** + * Unified exports for all utility functions + * Import utilities from '$lib/utils' for cleaner imports + * + * For browser-only utilities (pdf-processing, audio-recording, svg-to-png, + * webp-to-png, process-uploaded-files, convert-files-to-extra), use: + * import { ... } from '$lib/utils/browser-only' + */ + +// API utilities +export { getAuthHeaders, getJsonHeaders } from './api-headers'; +export { validateApiKey } from './api-key-validation'; + +// Attachment utilities +export { + getAttachmentDisplayItems, + type AttachmentDisplayItemsOptions +} from './attachment-display'; +export { isTextFile, isImageFile, isPdfFile, isAudioFile } from './attachment-type'; + +// Textarea utilities +export { default as autoResizeTextarea } from './autoresize-textarea'; + +// Branching utilities +export { + filterByLeafNodeId, + findLeafNode, + findDescendantMessages, + getMessageSiblings, + getMessageDisplayList, + hasMessageSiblings, + getNextSibling, + getPreviousSibling +} from './branching'; + +// Config helpers +export { setConfigValue, getConfigValue, configToParameterRecord } from './config-helpers'; + +// Conversation utilities +export { createMessageCountMap, getMessageCount } from './conversation-utils'; + +// Clipboard utilities +export { + copyToClipboard, + copyCodeToClipboard, + formatMessageForClipboard, + parseClipboardContent, + hasClipboardAttachments, + type ClipboardTextAttachment, + type ParsedClipboardContent +} from './clipboard'; + +// File preview utilities +export { getFileTypeLabel } from './file-preview'; +export { getPreviewText } from './text'; + +// File type utilities +export { + getFileTypeCategory, + getFileTypeCategoryByExtension, + getFileTypeByExtension, + isFileTypeSupported +} from './file-type'; + +// Formatting utilities +export { formatFileSize, formatParameters, formatNumber } from './formatters'; + +// IME utilities +export { isIMEComposing } from './is-ime-composing'; + +// LaTeX utilities +export { maskInlineLaTeX, preprocessLaTeX } from './latex-protection'; + +// Modality file validation utilities +export { + 
isFileTypeSupportedByModel, + filterFilesByModalities, + generateModalityErrorMessage, + type ModalityCapabilities +} from './modality-file-validation'; + +// Model name utilities +export { normalizeModelName, isValidModelName } from './model-names'; + +// Portal utilities +export { portalToBody } from './portal-to-body'; + +// Precision utilities +export { normalizeFloatingPoint, normalizeNumber } from './precision'; + +// Syntax highlighting utilities +export { getLanguageFromFilename } from './syntax-highlight-language'; + +// Text file utilities +export { isTextFileByName, readFileAsText, isLikelyTextFile } from './text-files'; diff --git a/llama.cpp/tools/server/webui/src/lib/utils/is-ime-composing.ts b/llama.cpp/tools/server/webui/src/lib/utils/is-ime-composing.ts new file mode 100644 index 0000000..9182ea4 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/is-ime-composing.ts @@ -0,0 +1,5 @@ +export function isIMEComposing(event: KeyboardEvent) { + // Check for IME composition using isComposing property and keyCode 229 (specifically for IME composition on Safari, which is notorious for not supporting KeyboardEvent.isComposing) + // This prevents form submission when confirming IME word selection (e.g., Japanese/Chinese input) + return event.isComposing || event.keyCode === 229; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/latex-protection.ts b/llama.cpp/tools/server/webui/src/lib/utils/latex-protection.ts new file mode 100644 index 0000000..cafa2d4 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/latex-protection.ts @@ -0,0 +1,270 @@ +import { + CODE_BLOCK_REGEXP, + LATEX_MATH_AND_CODE_PATTERN, + LATEX_LINEBREAK_REGEXP, + MHCHEM_PATTERN_MAP +} from '$lib/constants/latex-protection'; + +/** + * Replaces inline LaTeX expressions enclosed in `$...$` with placeholders, avoiding dollar signs + * that appear to be part of monetary values or identifiers. + * + * This function processes the input line by line and skips `$` sequences that are likely + * part of money amounts (e.g., `$5`, `$100.99`) or code-like tokens (e.g., `var$`, `$var`). + * Valid LaTeX inline math is replaced with a placeholder like `<>`, and the + * actual LaTeX content is stored in the provided `latexExpressions` array. + * + * @param content - The input text potentially containing LaTeX expressions. + * @param latexExpressions - An array used to collect extracted LaTeX expressions. + * @returns The processed string with LaTeX replaced by placeholders. + */ +export function maskInlineLaTeX(content: string, latexExpressions: string[]): string { + if (!content.includes('$')) { + return content; + } + return content + .split('\n') + .map((line) => { + if (line.indexOf('$') == -1) { + return line; + } + + let processedLine = ''; + let currentPosition = 0; + + while (currentPosition < line.length) { + const openDollarIndex = line.indexOf('$', currentPosition); + + if (openDollarIndex == -1) { + processedLine += line.slice(currentPosition); + break; + } + + // Is there a next $-sign? + const closeDollarIndex = line.indexOf('$', openDollarIndex + 1); + + if (closeDollarIndex == -1) { + processedLine += line.slice(currentPosition); + break; + } + + const charBeforeOpen = openDollarIndex > 0 ? line[openDollarIndex - 1] : ''; + const charAfterOpen = line[openDollarIndex + 1]; + const charBeforeClose = + openDollarIndex + 1 < closeDollarIndex ? line[closeDollarIndex - 1] : ''; + const charAfterClose = closeDollarIndex + 1 < line.length ? 
line[closeDollarIndex + 1] : ''; + + let shouldSkipAsNonLatex = false; + + if (closeDollarIndex == currentPosition + 1) { + // No content + shouldSkipAsNonLatex = true; + } + + if (/[A-Za-z0-9_$-]/.test(charBeforeOpen)) { + // Character, digit, $, _ or - before first '$', no TeX. + shouldSkipAsNonLatex = true; + } + + if ( + /[0-9]/.test(charAfterOpen) && + (/[A-Za-z0-9_$-]/.test(charAfterClose) || ' ' == charBeforeClose) + ) { + // First $ seems to belong to an amount. + shouldSkipAsNonLatex = true; + } + + if (shouldSkipAsNonLatex) { + processedLine += line.slice(currentPosition, openDollarIndex + 1); + currentPosition = openDollarIndex + 1; + + continue; + } + + // Treat as LaTeX + processedLine += line.slice(currentPosition, openDollarIndex); + const latexContent = line.slice(openDollarIndex, closeDollarIndex + 1); + latexExpressions.push(latexContent); + processedLine += `<>`; + currentPosition = closeDollarIndex + 1; + } + + return processedLine; + }) + .join('\n'); +} + +function escapeBrackets(text: string): string { + return text.replace( + LATEX_MATH_AND_CODE_PATTERN, + ( + match: string, + codeBlock: string | undefined, + squareBracket: string | undefined, + roundBracket: string | undefined + ): string => { + if (codeBlock != null) { + return codeBlock; + } else if (squareBracket != null) { + return `$$${squareBracket}$$`; + } else if (roundBracket != null) { + return `$${roundBracket}$`; + } + + return match; + } + ); +} + +// Escape $\\ce{...} → $\\ce{...} but with proper handling +function escapeMhchem(text: string): string { + return MHCHEM_PATTERN_MAP.reduce((result, [pattern, replacement]) => { + return result.replace(pattern, replacement); + }, text); +} + +const doEscapeMhchem = false; + +/** + * Preprocesses markdown content to safely handle LaTeX math expressions while protecting + * against false positives (e.g., dollar amounts like $5.99) and ensuring proper rendering. + * + * This function: + * - Protects code blocks (```) and inline code (`...`) + * - Safeguards block and inline LaTeX: \(...\), \[...\], $$...$$, and selective $...$ + * - Escapes standalone dollar signs before numbers (e.g., $5 → \$5) to prevent misinterpretation + * - Restores protected LaTeX and code blocks after processing + * - Converts \(...\) → $...$ and \[...\] → $$...$$ for compatibility with math renderers + * - Applies additional escaping for brackets and mhchem syntax if needed + * + * @param content - The raw text (e.g., markdown) that may contain LaTeX or code blocks. + * @returns The preprocessed string with properly escaped and normalized LaTeX. + * + * @example + * preprocessLaTeX("Price: $10. The equation is \\(x^2\\).") + * // → "Price: $10. The equation is $x^2$." 
+ */ +export function preprocessLaTeX(content: string): string { + // See also: + // https://github.com/danny-avila/LibreChat/blob/main/client/src/utils/latex.ts + + // Step 0: Temporarily remove blockquote markers (>) to process LaTeX correctly + // Store the structure so we can restore it later + const blockquoteMarkers: Map = new Map(); + const lines = content.split('\n'); + const processedLines = lines.map((line, index) => { + const match = line.match(/^(>\s*)/); + if (match) { + blockquoteMarkers.set(index, match[1]); + return line.slice(match[1].length); + } + return line; + }); + content = processedLines.join('\n'); + + // Step 1: Protect code blocks + const codeBlocks: string[] = []; + + content = content.replace(CODE_BLOCK_REGEXP, (match) => { + codeBlocks.push(match); + + return `<>`; + }); + + // Step 2: Protect existing LaTeX expressions + const latexExpressions: string[] = []; + + // Match \S...\[...\] and protect them and insert a line-break. + content = content.replace(/([\S].*?)\\\[([\s\S]*?)\\\](.*)/g, (match, group1, group2, group3) => { + // Check if there are characters following the formula (display-formula in a table-cell?) + if (group1.endsWith('\\')) { + return match; // Backslash before \[, do nothing. + } + const hasSuffix = /\S/.test(group3); + let optBreak; + + if (hasSuffix) { + latexExpressions.push(`\\(${group2.trim()}\\)`); // Convert into inline. + optBreak = ''; + } else { + latexExpressions.push(`\\[${group2}\\]`); + optBreak = '\n'; + } + + return `${group1}${optBreak}<>${optBreak}${group3}`; + }); + + // Match \(...\), \[...\], $$...$$ and protect them + content = content.replace( + /(\$\$[\s\S]*?\$\$|(? { + latexExpressions.push(match); + + return `<>`; + } + ); + + // Protect inline $...$ but NOT if it looks like money (e.g., $10, $3.99) + content = maskInlineLaTeX(content, latexExpressions); + + // Step 3: Escape standalone $ before digits (currency like $5 → \$5) + // (Now that inline math is protected, this will only escape dollars not already protected) + content = content.replace(/\$(?=\d)/g, '\\$'); + + // Step 4: Restore protected LaTeX expressions (they are valid) + content = content.replace(/<>/g, (_, index) => { + let expr = latexExpressions[parseInt(index)]; + const match = expr.match(LATEX_LINEBREAK_REGEXP); + if (match) { + // Katex: The $$-delimiters should be in their own line + // if there are \\-line-breaks. + const formula = match[1]; + const prefix = formula.startsWith('\n') ? '' : '\n'; + const suffix = formula.endsWith('\n') ? '' : '\n'; + expr = '$$' + prefix + formula + suffix + '$$'; + } + return expr; + }); + + // Step 5: Apply additional escaping functions (brackets and mhchem) + // This must happen BEFORE restoring code blocks to avoid affecting code content + content = escapeBrackets(content); + + if (doEscapeMhchem && (content.includes('\\ce{') || content.includes('\\pu{'))) { + content = escapeMhchem(content); + } + + // Step 6: Convert remaining \(...\) → $...$, \[...\] → $$...$$ + // This must happen BEFORE restoring code blocks to avoid affecting code content + content = content + // Using the look‑behind pattern `(? 
{ + return `$$${content}$$`; + } + ); + + // Step 7: Restore code blocks + // This happens AFTER all LaTeX conversions to preserve code content + content = content.replace(/<>/g, (_, index) => { + return codeBlocks[parseInt(index)]; + }); + + // Step 8: Restore blockquote markers + if (blockquoteMarkers.size > 0) { + const finalLines = content.split('\n'); + const restoredLines = finalLines.map((line, index) => { + const marker = blockquoteMarkers.get(index); + return marker ? marker + line : line; + }); + content = restoredLines.join('\n'); + } + + return content; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/modality-file-validation.ts b/llama.cpp/tools/server/webui/src/lib/utils/modality-file-validation.ts new file mode 100644 index 0000000..136c084 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/modality-file-validation.ts @@ -0,0 +1,162 @@ +/** + * File validation utilities based on model modalities + * Ensures only compatible file types are processed based on model capabilities + */ + +import { getFileTypeCategory } from '$lib/utils'; +import { FileTypeCategory } from '$lib/enums'; + +/** Modality capabilities for file validation */ +export interface ModalityCapabilities { + hasVision: boolean; + hasAudio: boolean; +} + +/** + * Check if a file type is supported by the given modalities + * @param filename - The filename to check + * @param mimeType - The MIME type of the file + * @param capabilities - The modality capabilities to check against + * @returns true if the file type is supported + */ +export function isFileTypeSupportedByModel( + filename: string, + mimeType: string | undefined, + capabilities: ModalityCapabilities +): boolean { + const category = mimeType ? getFileTypeCategory(mimeType) : null; + + // If we can't determine the category from MIME type, fall back to general support check + if (!category) { + // For unknown types, only allow if they might be text files + // This is a conservative approach for edge cases + return true; // Let the existing isFileTypeSupported handle this + } + + switch (category) { + case FileTypeCategory.TEXT: + // Text files are always supported + return true; + + case FileTypeCategory.PDF: + // PDFs are always supported (will be processed as text for non-vision models) + return true; + + case FileTypeCategory.IMAGE: + // Images require vision support + return capabilities.hasVision; + + case FileTypeCategory.AUDIO: + // Audio files require audio support + return capabilities.hasAudio; + + default: + // Unknown categories - be conservative and allow + return true; + } +} + +/** + * Filter files based on model modalities and return supported/unsupported lists + * @param files - Array of files to filter + * @param capabilities - The modality capabilities to check against + * @returns Object with supportedFiles and unsupportedFiles arrays + */ +export function filterFilesByModalities( + files: File[], + capabilities: ModalityCapabilities +): { + supportedFiles: File[]; + unsupportedFiles: File[]; + modalityReasons: Record; +} { + const supportedFiles: File[] = []; + const unsupportedFiles: File[] = []; + const modalityReasons: Record = {}; + + const { hasVision, hasAudio } = capabilities; + + for (const file of files) { + const category = getFileTypeCategory(file.type); + let isSupported = true; + let reason = ''; + + switch (category) { + case FileTypeCategory.IMAGE: + if (!hasVision) { + isSupported = false; + reason = 'Images require a vision-capable model'; + } + break; + + case FileTypeCategory.AUDIO: + if 
(!hasAudio) { + isSupported = false; + reason = 'Audio files require an audio-capable model'; + } + break; + + case FileTypeCategory.TEXT: + case FileTypeCategory.PDF: + // Always supported + break; + + default: + // For unknown types, check if it's a generally supported file type + // This handles edge cases and maintains backward compatibility + break; + } + + if (isSupported) { + supportedFiles.push(file); + } else { + unsupportedFiles.push(file); + modalityReasons[file.name] = reason; + } + } + + return { supportedFiles, unsupportedFiles, modalityReasons }; +} + +/** + * Generate a user-friendly error message for unsupported files + * @param unsupportedFiles - Array of unsupported files + * @param modalityReasons - Reasons why files are unsupported + * @param capabilities - The modality capabilities to check against + * @returns Formatted error message + */ +export function generateModalityErrorMessage( + unsupportedFiles: File[], + modalityReasons: Record, + capabilities: ModalityCapabilities +): string { + if (unsupportedFiles.length === 0) return ''; + + const { hasVision, hasAudio } = capabilities; + + let message = ''; + + if (unsupportedFiles.length === 1) { + const file = unsupportedFiles[0]; + const reason = modalityReasons[file.name]; + message = `The file "${file.name}" cannot be uploaded: ${reason}.`; + } else { + const fileNames = unsupportedFiles.map((f) => f.name).join(', '); + message = `The following files cannot be uploaded: ${fileNames}.`; + } + + // Add helpful information about what is supported + const supportedTypes: string[] = ['text files', 'PDFs']; + if (hasVision) supportedTypes.push('images'); + if (hasAudio) supportedTypes.push('audio files'); + + message += ` This model supports: ${supportedTypes.join(', ')}.`; + + return message; +} + +/** + * Generate file input accept string based on model modalities + * @param capabilities - The modality capabilities to check against + * @returns Accept string for HTML file input element + */ diff --git a/llama.cpp/tools/server/webui/src/lib/utils/model-names.ts b/llama.cpp/tools/server/webui/src/lib/utils/model-names.ts new file mode 100644 index 0000000..c0a1e1c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/model-names.ts @@ -0,0 +1,56 @@ +/** + * Normalizes a model name by extracting the filename from a path, but preserves Hugging Face repository format. + * + * Handles both forward slashes (/) and backslashes (\) as path separators. + * - If the model name has exactly one slash (org/model format), preserves the full "org/model" name + * - If the model name has no slash or multiple slashes, extracts just the filename + * - If the model name is just a filename (no path), returns it as-is. 
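+ * - Leading and trailing whitespace is trimmed before any path handling.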
+ * + * @param modelName - The model name or path to normalize + * @returns The normalized model name + * + * @example + * normalizeModelName('models/llama-3.1-8b') // Returns: 'llama-3.1-8b' (multiple slashes -> filename) + * normalizeModelName('C:\\Models\\gpt-4') // Returns: 'gpt-4' (multiple slashes -> filename) + * normalizeModelName('meta-llama/Llama-3.1-8B') // Returns: 'meta-llama/Llama-3.1-8B' (Hugging Face format) + * normalizeModelName('simple-model') // Returns: 'simple-model' (no slash) + * normalizeModelName(' spaced ') // Returns: 'spaced' + * normalizeModelName('') // Returns: '' + */ +export function normalizeModelName(modelName: string): string { + const trimmed = modelName.trim(); + + if (!trimmed) { + return ''; + } + + const segments = trimmed.split(/[\\/]/); + + // If we have exactly 2 segments (one slash), treat it as Hugging Face repo format + // and preserve the full "org/model" format + if (segments.length === 2) { + const [org, model] = segments; + const trimmedOrg = org?.trim(); + const trimmedModel = model?.trim(); + + if (trimmedOrg && trimmedModel) { + return `${trimmedOrg}/${trimmedModel}`; + } + } + + // For other cases (no slash, or multiple slashes), extract just the filename + const candidate = segments.pop(); + const normalized = candidate?.trim(); + + return normalized && normalized.length > 0 ? normalized : trimmed; +} + +/** + * Validates if a model name is valid (non-empty after normalization). + * + * @param modelName - The model name to validate + * @returns true if valid, false otherwise + */ +export function isValidModelName(modelName: string): boolean { + return normalizeModelName(modelName).length > 0; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/pdf-processing.ts b/llama.cpp/tools/server/webui/src/lib/utils/pdf-processing.ts new file mode 100644 index 0000000..84c456d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/pdf-processing.ts @@ -0,0 +1,150 @@ +/** + * PDF processing utilities using PDF.js + * Handles PDF text extraction and image conversion in the browser + */ + +import { browser } from '$app/environment'; +import { MimeTypeApplication, MimeTypeImage } from '$lib/enums'; +import * as pdfjs from 'pdfjs-dist'; + +type TextContent = { + items: Array<{ str: string }>; +}; + +if (browser) { + // Import worker as text and create blob URL for inline bundling + import('pdfjs-dist/build/pdf.worker.min.mjs?raw') + .then((workerModule) => { + const workerBlob = new Blob([workerModule.default], { type: 'application/javascript' }); + pdfjs.GlobalWorkerOptions.workerSrc = URL.createObjectURL(workerBlob); + }) + .catch(() => { + console.warn('Failed to load PDF.js worker, PDF processing may not work'); + }); +} + +/** + * Convert a File object to ArrayBuffer for PDF.js processing + * @param file - The PDF file to convert + * @returns Promise resolving to the file's ArrayBuffer + */ +async function getFileAsBuffer(file: File): Promise { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onload = (event) => { + if (event.target?.result) { + resolve(event.target.result as ArrayBuffer); + } else { + reject(new Error('Failed to read file.')); + } + }; + reader.onerror = () => { + reject(new Error('Failed to read file.')); + }; + reader.readAsArrayBuffer(file); + }); +} + +/** + * Extract text content from a PDF file + * @param file - The PDF file to process + * @returns Promise resolving to the extracted text content + */ +export async function convertPDFToText(file: File): Promise { + if 
(!browser) { + throw new Error('PDF processing is only available in the browser'); + } + + try { + const buffer = await getFileAsBuffer(file); + const pdf = await pdfjs.getDocument(buffer).promise; + const numPages = pdf.numPages; + + const textContentPromises: Promise[] = []; + + for (let i = 1; i <= numPages; i++) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + textContentPromises.push(pdf.getPage(i).then((page: any) => page.getTextContent())); + } + + const textContents = await Promise.all(textContentPromises); + const textItems = textContents.flatMap((textContent: TextContent) => + textContent.items.map((item) => item.str ?? '') + ); + + return textItems.join('\n'); + } catch (error) { + console.error('Error converting PDF to text:', error); + throw new Error( + `Failed to convert PDF to text: ${error instanceof Error ? error.message : 'Unknown error'}` + ); + } +} + +/** + * Convert PDF pages to PNG images as data URLs + * @param file - The PDF file to convert + * @param scale - Rendering scale factor (default: 1.5) + * @returns Promise resolving to array of PNG data URLs + */ +export async function convertPDFToImage(file: File, scale: number = 1.5): Promise { + if (!browser) { + throw new Error('PDF processing is only available in the browser'); + } + + try { + const buffer = await getFileAsBuffer(file); + const doc = await pdfjs.getDocument(buffer).promise; + const pages: Promise[] = []; + + for (let i = 1; i <= doc.numPages; i++) { + const page = await doc.getPage(i); + const viewport = page.getViewport({ scale }); + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + canvas.width = viewport.width; + canvas.height = viewport.height; + + if (!ctx) { + throw new Error('Failed to get 2D context from canvas'); + } + + const task = page.render({ + canvasContext: ctx, + viewport: viewport, + canvas: canvas + }); + pages.push( + task.promise.then(() => { + return canvas.toDataURL(MimeTypeImage.PNG); + }) + ); + } + + return await Promise.all(pages); + } catch (error) { + console.error('Error converting PDF to images:', error); + throw new Error( + `Failed to convert PDF to images: ${error instanceof Error ? 
error.message : 'Unknown error'}` + ); + } +} + +/** + * Check if a file is a PDF based on its MIME type + * @param file - The file to check + * @returns True if the file is a PDF + */ +export function isPdfFile(file: File): boolean { + return file.type === MimeTypeApplication.PDF; +} + +/** + * Check if a MIME type represents a PDF + * @param mimeType - The MIME type to check + * @returns True if the MIME type is application/pdf + */ +export function isApplicationMimeType(mimeType: string): boolean { + return mimeType === MimeTypeApplication.PDF; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/portal-to-body.ts b/llama.cpp/tools/server/webui/src/lib/utils/portal-to-body.ts new file mode 100644 index 0000000..bffbe89 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/portal-to-body.ts @@ -0,0 +1,20 @@ +export function portalToBody(node: HTMLElement) { + if (typeof document === 'undefined') { + return; + } + + const target = document.body; + if (!target) { + return; + } + + target.appendChild(node); + + return { + destroy() { + if (node.parentNode === target) { + target.removeChild(node); + } + } + }; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/precision.ts b/llama.cpp/tools/server/webui/src/lib/utils/precision.ts new file mode 100644 index 0000000..6da200c --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/precision.ts @@ -0,0 +1,25 @@ +/** + * Floating-point precision utilities + * + * Provides functions to normalize floating-point numbers for consistent comparison + * and display, addressing JavaScript's floating-point precision issues. + */ + +import { PRECISION_MULTIPLIER } from '$lib/constants/precision'; + +/** + * Normalize floating-point numbers for consistent comparison + * Addresses JavaScript floating-point precision issues (e.g., 0.949999988079071 → 0.95) + */ +export function normalizeFloatingPoint(value: unknown): unknown { + return typeof value === 'number' + ? 
Math.round(value * PRECISION_MULTIPLIER) / PRECISION_MULTIPLIER + : value; +} + +/** + * Type-safe version that only accepts numbers + */ +export function normalizeNumber(value: number): number { + return Math.round(value * PRECISION_MULTIPLIER) / PRECISION_MULTIPLIER; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/process-uploaded-files.ts b/llama.cpp/tools/server/webui/src/lib/utils/process-uploaded-files.ts new file mode 100644 index 0000000..0342dce --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/process-uploaded-files.ts @@ -0,0 +1,136 @@ +import { isSvgMimeType, svgBase64UrlToPngDataURL } from './svg-to-png'; +import { isWebpMimeType, webpBase64UrlToPngDataURL } from './webp-to-png'; +import { FileTypeCategory } from '$lib/enums'; +import { modelsStore } from '$lib/stores/models.svelte'; +import { settingsStore } from '$lib/stores/settings.svelte'; +import { toast } from 'svelte-sonner'; +import { getFileTypeCategory } from '$lib/utils'; +import { convertPDFToText } from './pdf-processing'; + +/** + * Read a file as a data URL (base64 encoded) + * @param file - The file to read + * @returns Promise resolving to the data URL string + */ +function readFileAsDataURL(file: File): Promise { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onload = () => resolve(reader.result as string); + reader.onerror = () => reject(reader.error); + reader.readAsDataURL(file); + }); +} + +/** + * Read a file as UTF-8 text + * @param file - The file to read + * @returns Promise resolving to the text content + */ +function readFileAsUTF8(file: File): Promise { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onload = () => resolve(reader.result as string); + reader.onerror = () => reject(reader.error); + reader.readAsText(file); + }); +} + +/** + * Process uploaded files into ChatUploadedFile format with previews and content + * + * This function processes various file types and generates appropriate previews: + * - Images: Base64 data URLs with format normalization (SVG/WebP → PNG) + * - Text files: UTF-8 content extraction + * - PDFs: Metadata only (processed later in conversion pipeline) + * - Audio: Base64 data URLs for preview + * + * @param files - Array of File objects to process + * @returns Promise resolving to array of ChatUploadedFile objects + */ +export async function processFilesToChatUploaded( + files: File[], + activeModelId?: string +): Promise { + const results: ChatUploadedFile[] = []; + + for (const file of files) { + const id = Date.now().toString() + Math.random().toString(36).substr(2, 9); + const base: ChatUploadedFile = { + id, + name: file.name, + size: file.size, + type: file.type, + file + }; + + try { + if (getFileTypeCategory(file.type) === FileTypeCategory.IMAGE) { + let preview = await readFileAsDataURL(file); + + // Normalize SVG and WebP to PNG in previews + if (isSvgMimeType(file.type)) { + try { + preview = await svgBase64UrlToPngDataURL(preview); + } catch (err) { + console.error('Failed to convert SVG to PNG:', err); + } + } else if (isWebpMimeType(file.type)) { + try { + preview = await webpBase64UrlToPngDataURL(preview); + } catch (err) { + console.error('Failed to convert WebP to PNG:', err); + } + } + + results.push({ ...base, preview }); + } else if (getFileTypeCategory(file.type) === FileTypeCategory.PDF) { + // Extract text content from PDF for preview + try { + const textContent = await convertPDFToText(file); + results.push({ ...base, textContent }); + } 
catch (err) { + console.warn('Failed to extract text from PDF, adding without content:', err); + results.push(base); + } + + // Show suggestion toast if vision model is available but PDF as image is disabled + const hasVisionSupport = activeModelId + ? modelsStore.modelSupportsVision(activeModelId) + : false; + const currentConfig = settingsStore.config; + if (hasVisionSupport && !currentConfig.pdfAsImage) { + toast.info(`You can enable parsing PDF as images with vision models.`, { + duration: 8000, + action: { + label: 'Enable PDF as Images', + onClick: () => { + settingsStore.updateConfig('pdfAsImage', true); + toast.success('PDF parsing as images enabled!', { + duration: 3000 + }); + } + } + }); + } + } else if (getFileTypeCategory(file.type) === FileTypeCategory.AUDIO) { + // Generate preview URL for audio files + const preview = await readFileAsDataURL(file); + results.push({ ...base, preview }); + } else { + // Fallback: treat unknown files as text + try { + const textContent = await readFileAsUTF8(file); + results.push({ ...base, textContent }); + } catch (err) { + console.warn('Failed to read file as text, adding without content:', err); + results.push(base); + } + } + } catch (error) { + console.error('Error processing file', file.name, error); + results.push(base); + } + } + + return results; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/svg-to-png.ts b/llama.cpp/tools/server/webui/src/lib/utils/svg-to-png.ts new file mode 100644 index 0000000..d5a7f7d --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/svg-to-png.ts @@ -0,0 +1,71 @@ +import { MimeTypeImage } from '$lib/enums'; + +/** + * Convert an SVG base64 data URL to a PNG data URL + * @param base64UrlSvg - The SVG base64 data URL to convert + * @param backgroundColor - Background color for the PNG (default: 'white') + * @returns Promise resolving to PNG data URL + */ +export function svgBase64UrlToPngDataURL( + base64UrlSvg: string, + backgroundColor: string = 'white' +): Promise { + return new Promise((resolve, reject) => { + try { + const img = new Image(); + + img.onload = () => { + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + if (!ctx) { + reject(new Error('Failed to get 2D canvas context.')); + return; + } + + const targetWidth = img.naturalWidth || 300; + const targetHeight = img.naturalHeight || 300; + + canvas.width = targetWidth; + canvas.height = targetHeight; + + if (backgroundColor) { + ctx.fillStyle = backgroundColor; + ctx.fillRect(0, 0, canvas.width, canvas.height); + } + ctx.drawImage(img, 0, 0, targetWidth, targetHeight); + + resolve(canvas.toDataURL(MimeTypeImage.PNG)); + }; + + img.onerror = () => { + reject(new Error('Failed to load SVG image. Ensure the SVG data is valid.')); + }; + + img.src = base64UrlSvg; + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + const errorMessage = `Error converting SVG to PNG: ${message}`; + console.error(errorMessage, error); + reject(new Error(errorMessage)); + } + }); +} + +/** + * Check if a file is an SVG based on its MIME type + * @param file - The file to check + * @returns True if the file is an SVG + */ +export function isSvgFile(file: File): boolean { + return file.type === MimeTypeImage.SVG; +} + +/** + * Check if a MIME type represents an SVG + * @param mimeType - The MIME type to check + * @returns True if the MIME type is image/svg+xml + */ +export function isSvgMimeType(mimeType: string): boolean { + return mimeType === MimeTypeImage.SVG; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/syntax-highlight-language.ts b/llama.cpp/tools/server/webui/src/lib/utils/syntax-highlight-language.ts new file mode 100644 index 0000000..5384291 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/syntax-highlight-language.ts @@ -0,0 +1,145 @@ +/** + * Maps file extensions to highlight.js language identifiers + */ +export function getLanguageFromFilename(filename: string): string { + const extension = filename.toLowerCase().substring(filename.lastIndexOf('.')); + + switch (extension) { + // JavaScript / TypeScript + case '.js': + case '.mjs': + case '.cjs': + return 'javascript'; + case '.ts': + case '.mts': + case '.cts': + return 'typescript'; + case '.jsx': + return 'javascript'; + case '.tsx': + return 'typescript'; + + // Web + case '.html': + case '.htm': + return 'html'; + case '.css': + return 'css'; + case '.scss': + return 'scss'; + case '.less': + return 'less'; + case '.vue': + return 'html'; + case '.svelte': + return 'html'; + + // Data formats + case '.json': + return 'json'; + case '.xml': + return 'xml'; + case '.yaml': + case '.yml': + return 'yaml'; + case '.toml': + return 'ini'; + case '.csv': + return 'plaintext'; + + // Programming languages + case '.py': + return 'python'; + case '.java': + return 'java'; + case '.kt': + case '.kts': + return 'kotlin'; + case '.scala': + return 'scala'; + case '.cpp': + case '.cc': + case '.cxx': + case '.c++': + return 'cpp'; + case '.c': + return 'c'; + case '.h': + case '.hpp': + return 'cpp'; + case '.cs': + return 'csharp'; + case '.go': + return 'go'; + case '.rs': + return 'rust'; + case '.rb': + return 'ruby'; + case '.php': + return 'php'; + case '.swift': + return 'swift'; + case '.dart': + return 'dart'; + case '.r': + return 'r'; + case '.lua': + return 'lua'; + case '.pl': + case '.pm': + return 'perl'; + + // Shell + case '.sh': + case '.bash': + case '.zsh': + return 'bash'; + case '.bat': + case '.cmd': + return 'dos'; + case '.ps1': + return 'powershell'; + + // Database + case '.sql': + return 'sql'; + + // Markup / Documentation + case '.md': + case '.markdown': + return 'markdown'; + case '.tex': + case '.latex': + return 'latex'; + case '.adoc': + case '.asciidoc': + return 'asciidoc'; + + // Config + case '.ini': + case '.cfg': + case '.conf': + return 'ini'; + case '.dockerfile': + return 'dockerfile'; + case '.nginx': + return 'nginx'; + + // Other + case '.graphql': + case '.gql': + return 'graphql'; + case '.proto': + return 'protobuf'; + case '.diff': + case '.patch': + return 'diff'; + case '.log': + return 'plaintext'; + case '.txt': + return 'plaintext'; + + default: + return 'plaintext'; + } +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/text-files.ts b/llama.cpp/tools/server/webui/src/lib/utils/text-files.ts new file mode 100644 index 0000000..e8006de --- 
/dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/text-files.ts @@ -0,0 +1,97 @@ +/** + * Text file processing utilities + * Handles text file detection, reading, and validation + */ + +import { + DEFAULT_BINARY_DETECTION_OPTIONS, + type BinaryDetectionOptions +} from '$lib/constants/binary-detection'; +import { FileExtensionText } from '$lib/enums'; + +/** + * Check if a filename indicates a text file based on its extension + * @param filename - The filename to check + * @returns True if the filename has a recognized text file extension + */ +export function isTextFileByName(filename: string): boolean { + const textExtensions = Object.values(FileExtensionText); + + return textExtensions.some((ext: FileExtensionText) => filename.toLowerCase().endsWith(ext)); +} + +/** + * Read a file's content as text + * @param file - The file to read + * @returns Promise resolving to the file's text content + */ +export async function readFileAsText(file: File): Promise { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + + reader.onload = (event) => { + if (event.target?.result !== null && event.target?.result !== undefined) { + resolve(event.target.result as string); + } else { + reject(new Error('Failed to read file')); + } + }; + + reader.onerror = () => reject(new Error('File reading error')); + + reader.readAsText(file); + }); +} + +/** + * Heuristic check to determine if content is likely from a text file + * Detects binary files by counting suspicious characters and null bytes + * @param content - The file content to analyze + * @param options - Optional configuration for detection parameters + * @returns True if the content appears to be text-based + */ +export function isLikelyTextFile( + content: string, + options: Partial = {} +): boolean { + if (!content) return true; + + const config = { ...DEFAULT_BINARY_DETECTION_OPTIONS, ...options }; + const sample = content.substring(0, config.prefixLength); + + let nullCount = 0; + let suspiciousControlCount = 0; + + for (let i = 0; i < sample.length; i++) { + const charCode = sample.charCodeAt(i); + + // Count null bytes - these are strong indicators of binary files + if (charCode === 0) { + nullCount++; + + continue; + } + + // Count suspicious control characters + // Allow common whitespace characters: tab (9), newline (10), carriage return (13) + if (charCode < 32 && charCode !== 9 && charCode !== 10 && charCode !== 13) { + // Count most suspicious control characters + if (charCode < 8 || (charCode > 13 && charCode < 27)) { + suspiciousControlCount++; + } + } + + // Count replacement characters (indicates encoding issues) + if (charCode === 0xfffd) { + suspiciousControlCount++; + } + } + + // Reject if too many null bytes + if (nullCount > config.maxAbsoluteNullBytes) return false; + + // Reject if too many suspicious characters + if (suspiciousControlCount / sample.length > config.suspiciousCharThresholdRatio) return false; + + return true; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/text.ts b/llama.cpp/tools/server/webui/src/lib/utils/text.ts new file mode 100644 index 0000000..5c5dd0f --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/text.ts @@ -0,0 +1,7 @@ +/** + * Returns a shortened preview of the provided content capped at the given length. + * Appends an ellipsis when the content exceeds the maximum. + */ +export function getPreviewText(content: string, max = 150): string { + return content.length > max ? content.slice(0, max) + '...' 
: content; +} diff --git a/llama.cpp/tools/server/webui/src/lib/utils/webp-to-png.ts b/llama.cpp/tools/server/webui/src/lib/utils/webp-to-png.ts new file mode 100644 index 0000000..ea51838 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/lib/utils/webp-to-png.ts @@ -0,0 +1,73 @@ +import { FileExtensionImage, MimeTypeImage } from '$lib/enums'; + +/** + * Convert a WebP base64 data URL to a PNG data URL + * @param base64UrlWebp - The WebP base64 data URL to convert + * @param backgroundColor - Background color for the PNG (default: 'white') + * @returns Promise resolving to PNG data URL + */ +export function webpBase64UrlToPngDataURL( + base64UrlWebp: string, + backgroundColor: string = 'white' +): Promise { + return new Promise((resolve, reject) => { + try { + const img = new Image(); + + img.onload = () => { + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + if (!ctx) { + reject(new Error('Failed to get 2D canvas context.')); + return; + } + + const targetWidth = img.naturalWidth || 300; + const targetHeight = img.naturalHeight || 300; + + canvas.width = targetWidth; + canvas.height = targetHeight; + + if (backgroundColor) { + ctx.fillStyle = backgroundColor; + ctx.fillRect(0, 0, canvas.width, canvas.height); + } + ctx.drawImage(img, 0, 0, targetWidth, targetHeight); + + resolve(canvas.toDataURL(MimeTypeImage.PNG)); + }; + + img.onerror = () => { + reject(new Error('Failed to load WebP image. Ensure the WebP data is valid.')); + }; + + img.src = base64UrlWebp; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + const errorMessage = `Error converting WebP to PNG: ${message}`; + console.error(errorMessage, error); + reject(new Error(errorMessage)); + } + }); +} + +/** + * Check if a file is a WebP based on its MIME type + * @param file - The file to check + * @returns True if the file is a WebP + */ +export function isWebpFile(file: File): boolean { + return ( + file.type === MimeTypeImage.WEBP || file.name.toLowerCase().endsWith(FileExtensionImage.WEBP) + ); +} + +/** + * Check if a MIME type represents a WebP + * @param mimeType - The MIME type to check + * @returns True if the MIME type is image/webp + */ +export function isWebpMimeType(mimeType: string): boolean { + return mimeType === MimeTypeImage.WEBP; +} diff --git a/llama.cpp/tools/server/webui/src/routes/+error.svelte b/llama.cpp/tools/server/webui/src/routes/+error.svelte new file mode 100644 index 0000000..faddf0b --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/+error.svelte @@ -0,0 +1,70 @@ + + + + Error {status} - WebUI + + +{#if isApiKeyError} + +{:else} + +
            +
            +
            +
            + + + +
            +

            Error {status}

            +

            + {error?.message || 'Something went wrong'} +

            +
            + +
            +
            +{/if} diff --git a/llama.cpp/tools/server/webui/src/routes/+layout.svelte b/llama.cpp/tools/server/webui/src/routes/+layout.svelte new file mode 100644 index 0000000..095827b --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/+layout.svelte @@ -0,0 +1,223 @@ + + + + + + + + + + +
            + + + + + {#if !(alwaysShowSidebarOnDesktop && isDesktop)} + + {/if} + + + {@render children?.()} + +
            +
            +
            + + diff --git a/llama.cpp/tools/server/webui/src/routes/+page.svelte b/llama.cpp/tools/server/webui/src/routes/+page.svelte new file mode 100644 index 0000000..32a7c2e --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/+page.svelte @@ -0,0 +1,91 @@ + + + + llama.cpp - AI Chat Interface + + + + + diff --git a/llama.cpp/tools/server/webui/src/routes/+page.ts b/llama.cpp/tools/server/webui/src/routes/+page.ts new file mode 100644 index 0000000..7905af6 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/+page.ts @@ -0,0 +1,6 @@ +import type { PageLoad } from './$types'; +import { validateApiKey } from '$lib/utils'; + +export const load: PageLoad = async ({ fetch }) => { + await validateApiKey(fetch); +}; diff --git a/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.svelte b/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.svelte new file mode 100644 index 0000000..b897ef5 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.svelte @@ -0,0 +1,176 @@ + + + + {activeConversation()?.name || 'Chat'} - llama.cpp + + + + + diff --git a/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.ts b/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.ts new file mode 100644 index 0000000..7905af6 --- /dev/null +++ b/llama.cpp/tools/server/webui/src/routes/chat/[id]/+page.ts @@ -0,0 +1,6 @@ +import type { PageLoad } from './$types'; +import { validateApiKey } from '$lib/utils'; + +export const load: PageLoad = async ({ fetch }) => { + await validateApiKey(fetch); +}; diff --git a/llama.cpp/tools/server/webui/src/styles/katex-custom.scss b/llama.cpp/tools/server/webui/src/styles/katex-custom.scss new file mode 100644 index 0000000..9c8b96e --- /dev/null +++ b/llama.cpp/tools/server/webui/src/styles/katex-custom.scss @@ -0,0 +1,13 @@ +// Override KaTeX SCSS variables to disable ttf and woff fonts +// Only use woff2 format which is embedded in the bundle +$use-woff2: true; +$use-woff: false; +$use-ttf: false; + +// Use Vite alias for font folder +$font-folder: 'katex-fonts'; + +// Import KaTeX SCSS with overridden variables +// Note: @import is deprecated but required because KaTeX uses @import internally +// The deprecation warnings are from KaTeX's code and cannot be avoided +@import 'katex/src/styles/katex.scss'; diff --git a/llama.cpp/tools/server/webui/static/favicon.svg b/llama.cpp/tools/server/webui/static/favicon.svg new file mode 100644 index 0000000..a7ae136 --- /dev/null +++ b/llama.cpp/tools/server/webui/static/favicon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/llama.cpp/tools/server/webui/static/loading.html b/llama.cpp/tools/server/webui/static/loading.html new file mode 100644 index 0000000..c3fd19a --- /dev/null +++ b/llama.cpp/tools/server/webui/static/loading.html @@ -0,0 +1,12 @@ + + + + + + +
            + The model is loading. Please wait.
            + The user interface will appear soon. +
            + + diff --git a/llama.cpp/tools/server/webui/svelte.config.js b/llama.cpp/tools/server/webui/svelte.config.js new file mode 100644 index 0000000..9474993 --- /dev/null +++ b/llama.cpp/tools/server/webui/svelte.config.js @@ -0,0 +1,34 @@ +import { mdsvex } from 'mdsvex'; +import adapter from '@sveltejs/adapter-static'; +import { vitePreprocess } from '@sveltejs/vite-plugin-svelte'; + +/** @type {import('@sveltejs/kit').Config} */ +const config = { + // Consult https://svelte.dev/docs/kit/integrations + // for more information about preprocessors + preprocess: [vitePreprocess(), mdsvex()], + + kit: { + paths: { + relative: true + }, + router: { type: 'hash' }, + adapter: adapter({ + pages: '../public', + assets: '../public', + fallback: 'index.html', + precompress: false, + strict: true + }), + output: { + bundleStrategy: 'inline' + }, + alias: { + $styles: 'src/styles' + } + }, + + extensions: ['.svelte', '.svx'] +}; + +export default config; diff --git a/llama.cpp/tools/server/webui/tests/client/components/TestWrapper.svelte b/llama.cpp/tools/server/webui/tests/client/components/TestWrapper.svelte new file mode 100644 index 0000000..4bbb8e8 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/client/components/TestWrapper.svelte @@ -0,0 +1,17 @@ + + + + + + + + diff --git a/llama.cpp/tools/server/webui/tests/client/page.svelte.test.ts b/llama.cpp/tools/server/webui/tests/client/page.svelte.test.ts new file mode 100644 index 0000000..6849beb --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/client/page.svelte.test.ts @@ -0,0 +1,11 @@ +import { describe, it, expect } from 'vitest'; +import { render } from 'vitest-browser-svelte'; +import TestWrapper from './components/TestWrapper.svelte'; + +describe('/+page.svelte', () => { + it('should render page without throwing', async () => { + // Basic smoke test - page should render without throwing errors + // API calls will fail in test environment but component should still mount + expect(() => render(TestWrapper)).not.toThrow(); + }); +}); diff --git a/llama.cpp/tools/server/webui/tests/e2e/demo.test.ts b/llama.cpp/tools/server/webui/tests/e2e/demo.test.ts new file mode 100644 index 0000000..9985ce1 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/e2e/demo.test.ts @@ -0,0 +1,6 @@ +import { expect, test } from '@playwright/test'; + +test('home page has expected h1', async ({ page }) => { + await page.goto('/'); + await expect(page.locator('h1')).toBeVisible(); +}); diff --git a/llama.cpp/tools/server/webui/tests/stories/ChatForm.stories.svelte b/llama.cpp/tools/server/webui/tests/stories/ChatForm.stories.svelte new file mode 100644 index 0000000..18319e8 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/ChatForm.stories.svelte @@ -0,0 +1,161 @@ + + + { + mockServerProps(mockConfigs.noModalities); + + const textarea = await canvas.findByRole('textbox'); + const submitButton = await canvas.findByRole('button', { name: 'Send' }); + + // Expect the input to be focused after the component is mounted + await expect(textarea).toHaveFocus(); + + // Expect the submit button to be disabled + await expect(submitButton).toBeDisabled(); + + const text = 'What is the meaning of life?'; + + await userEvent.clear(textarea); + await userEvent.type(textarea, text); + + await expect(textarea).toHaveValue(text); + + const fileInput = document.querySelector('input[type="file"]'); + await expect(fileInput).not.toHaveAttribute('accept'); + + // Open file attachments dropdown + const fileUploadButton = canvas.getByText('Attach 
files'); + await userEvent.click(fileUploadButton); + + // Check dropdown menu items are disabled (no modalities) + const imagesButton = document.querySelector('.images-button'); + const audioButton = document.querySelector('.audio-button'); + + await expect(imagesButton).toHaveAttribute('data-disabled'); + await expect(audioButton).toHaveAttribute('data-disabled'); + + // Close dropdown by pressing Escape + await userEvent.keyboard('{Escape}'); + }} +/> + + + + { + mockServerProps(mockConfigs.visionOnly); + + // Open file attachments dropdown and verify it works + const fileUploadButton = canvas.getByText('Attach files'); + await userEvent.click(fileUploadButton); + + // Verify dropdown menu items exist + const imagesButton = document.querySelector('.images-button'); + const audioButton = document.querySelector('.audio-button'); + + await expect(imagesButton).toBeInTheDocument(); + await expect(audioButton).toBeInTheDocument(); + + // Close dropdown by pressing Escape + await userEvent.keyboard('{Escape}'); + + console.log('✅ Vision modality: Dropdown menu verified'); + }} +/> + + { + mockServerProps(mockConfigs.audioOnly); + + // Open file attachments dropdown and verify it works + const fileUploadButton = canvas.getByText('Attach files'); + await userEvent.click(fileUploadButton); + + // Verify dropdown menu items exist + const imagesButton = document.querySelector('.images-button'); + const audioButton = document.querySelector('.audio-button'); + + await expect(imagesButton).toBeInTheDocument(); + await expect(audioButton).toBeInTheDocument(); + + // Close dropdown by pressing Escape + await userEvent.keyboard('{Escape}'); + + console.log('✅ Audio modality: Dropdown menu verified'); + }} +/> + + { + mockServerProps(mockConfigs.bothModalities); + + const jpgAttachment = canvas.getByAltText('1.jpg'); + const svgAttachment = canvas.getByAltText('hf-logo.svg'); + const pdfFileExtension = canvas.getByText('PDF'); + const pdfAttachment = canvas.getByText('example.pdf'); + const pdfSize = canvas.getByText('342.82 KB'); + + await expect(jpgAttachment).toBeInTheDocument(); + await expect(jpgAttachment).toHaveAttribute('src', jpgAsset); + + await expect(svgAttachment).toBeInTheDocument(); + await expect(svgAttachment).toHaveAttribute('src', svgAsset); + + await expect(pdfFileExtension).toBeInTheDocument(); + await expect(pdfAttachment).toBeInTheDocument(); + await expect(pdfSize).toBeInTheDocument(); + }} +/> diff --git a/llama.cpp/tools/server/webui/tests/stories/ChatMessage.stories.svelte b/llama.cpp/tools/server/webui/tests/stories/ChatMessage.stories.svelte new file mode 100644 index 0000000..5f4de7d --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/ChatMessage.stories.svelte @@ -0,0 +1,207 @@ + + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', false); + }} +/> + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', false); + }} +/> + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', false); + }} +/> + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', true); + }} +/> + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', false); + // Phase 1: Stream reasoning content in chunks + 
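// The two streaming loops below append random 3-7 character chunks every 50 ms,
+ // first to `streamingMessage.thinking` and then to `streamingMessage.content`,
+ // mimicking token-by-token streaming from the server.
+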
let reasoningText = + 'I need to think about this carefully. Let me break down the problem:\n\n1. The user is asking for help with something complex\n2. I should provide a thorough and helpful response\n3. I need to consider multiple approaches\n4. The best solution would be to explain step by step\n\nThis approach will ensure clarity and understanding.'; + + let reasoningChunk = 'I'; + let i = 0; + while (i < reasoningText.length) { + const chunkSize = Math.floor(Math.random() * 5) + 3; // Random 3-7 characters + const chunk = reasoningText.slice(i, i + chunkSize); + reasoningChunk += chunk; + + // Update the reactive state directly + streamingMessage.thinking = reasoningChunk; + + i += chunkSize; + await new Promise((resolve) => setTimeout(resolve, 50)); + } + + const regularText = + "Based on my analysis, here's the solution:\n\n**Step 1:** First, we need to understand the requirements clearly.\n\n**Step 2:** Then we can implement the solution systematically.\n\n**Step 3:** Finally, we test and validate the results.\n\nThis approach ensures we cover all aspects of the problem effectively."; + + let contentChunk = ''; + i = 0; + + while (i < regularText.length) { + const chunkSize = Math.floor(Math.random() * 5) + 3; // Random 3-7 characters + const chunk = regularText.slice(i, i + chunkSize); + contentChunk += chunk; + + // Update the reactive state directly + streamingMessage.content = contentChunk; + + i += chunkSize; + await new Promise((resolve) => setTimeout(resolve, 50)); + } + + streamingMessage.timestamp = Date.now(); + }} +> +
            + +
            +
            + + { + const { settingsStore } = await import('$lib/stores/settings.svelte'); + settingsStore.updateConfig('disableReasoningFormat', false); + // Import the chat store to simulate loading state + const { chatStore } = await import('$lib/stores/chat.svelte'); + + // Set loading state to true to trigger the processing UI + chatStore.isLoading = true; + + // Simulate the processing state hook behavior + // This will show the "Generating..." text and parameter details + await new Promise((resolve) => setTimeout(resolve, 100)); + }} +/> diff --git a/llama.cpp/tools/server/webui/tests/stories/ChatSettings.stories.svelte b/llama.cpp/tools/server/webui/tests/stories/ChatSettings.stories.svelte new file mode 100644 index 0000000..4d8dbe5 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/ChatSettings.stories.svelte @@ -0,0 +1,19 @@ + + + diff --git a/llama.cpp/tools/server/webui/tests/stories/ChatSidebar.stories.svelte b/llama.cpp/tools/server/webui/tests/stories/ChatSidebar.stories.svelte new file mode 100644 index 0000000..42cea87 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/ChatSidebar.stories.svelte @@ -0,0 +1,97 @@ + + + { + const { conversationsStore } = await import('$lib/stores/conversations.svelte'); + + waitFor(() => setTimeout(() => { + conversationsStore.conversations = mockConversations; + }, 0)); + }} +> +
            + +
            +
            + + { + const { conversationsStore } = await import('$lib/stores/conversations.svelte'); + + waitFor(() => setTimeout(() => { + conversationsStore.conversations = mockConversations; + }, 0)); + + const searchTrigger = screen.getByText('Search conversations'); + userEvent.click(searchTrigger); + }} +> +
            + +
            +
            + + { + // Mock empty conversations store + const { conversationsStore } = await import('$lib/stores/conversations.svelte'); + conversationsStore.conversations = []; + }} +> +
            + +
            +
            diff --git a/llama.cpp/tools/server/webui/tests/stories/Introduction.mdx b/llama.cpp/tools/server/webui/tests/stories/Introduction.mdx new file mode 100644 index 0000000..19d0b28 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/Introduction.mdx @@ -0,0 +1,44 @@ +import { Meta } from '@storybook/addon-docs/blocks'; + + + +# llama.cpp Web UI + +Welcome to the **llama.cpp Web UI** component library! This Storybook showcases the components used in the modern web interface for the llama.cpp server. + +## 🚀 About This Project + +WebUI is a modern web interface for the llama.cpp server, built with SvelteKit and ShadCN UI. Features include: + +- **Real-time chat conversations** with AI assistants +- **Multi-conversation management** with persistent storage +- **Advanced parameter tuning** for model behavior +- **File upload support** for multimodal interactions +- **Responsive design** that works on desktop and mobile + +## 🎨 Design System + +The UI is built using: + +- **SvelteKit** - Modern web framework with excellent performance +- **Tailwind CSS** - Utility-first CSS framework for rapid styling +- **ShadCN/UI** - High-quality, accessible component library +- **Lucide Icons** - Beautiful, consistent icon set + +## 🔧 Development + +This Storybook serves as both documentation and a development environment for the UI components. Each story demonstrates: + +- **Component variations** - Different states and configurations +- **Interactive examples** - Live components you can interact with +- **Usage patterns** - How components work together +- **Styling consistency** - Unified design language + +## 🚀 Getting Started + +To explore the components: + +1. **Browse the sidebar** to see all available components +2. **Click on stories** to see different component states +3. **Use the controls panel** to interact with component props +4. 
**Check the docs tab** for detailed component information diff --git a/llama.cpp/tools/server/webui/tests/stories/MarkdownContent.stories.svelte b/llama.cpp/tools/server/webui/tests/stories/MarkdownContent.stories.svelte new file mode 100644 index 0000000..90aa90b --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/MarkdownContent.stories.svelte @@ -0,0 +1,130 @@ + + + + + + + + + + + + + + + + + { + // Wait for component to render + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Find all links in the rendered content + const links = canvasElement.querySelectorAll('a[href]'); + + // Test that we have the expected number of links + expect(links.length).toBeGreaterThan(0); + + // Test each link for proper attributes + links.forEach((link) => { + const href = link.getAttribute('href'); + + // Test that external links have proper security attributes + if (href && (href.startsWith('http://') || href.startsWith('https://'))) { + expect(link.getAttribute('target')).toBe('_blank'); + expect(link.getAttribute('rel')).toBe('noopener noreferrer'); + } + }); + + // Test specific links exist + const hugginFaceLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://huggingface.co' + ); + expect(hugginFaceLink).toBeTruthy(); + expect(hugginFaceLink?.textContent).toBe('Hugging Face Homepage'); + + const githubLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://github.com/ggml-org/llama.cpp' + ); + expect(githubLink).toBeTruthy(); + expect(githubLink?.textContent).toBe('GitHub Repository'); + + const openaiLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://openai.com' + ); + expect(openaiLink).toBeTruthy(); + expect(openaiLink?.textContent).toBe('OpenAI Website'); + + const googleLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://www.google.com' + ); + expect(googleLink).toBeTruthy(); + expect(googleLink?.textContent).toBe('Google Search'); + + // Test inline links (auto-linked URLs) + const exampleLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://example.com' + ); + expect(exampleLink).toBeTruthy(); + + const pythonDocsLink = Array.from(links).find( + (link) => link.getAttribute('href') === 'https://docs.python.org' + ); + expect(pythonDocsLink).toBeTruthy(); + + console.log(`✅ URL Links test passed - Found ${links.length} links with proper attributes`); + }} +/> diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/ai-tutorial.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/ai-tutorial.ts new file mode 100644 index 0000000..b3b1c24 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/ai-tutorial.ts @@ -0,0 +1,164 @@ +// AI Assistant Tutorial Response +export const AI_TUTORIAL_MD = String.raw` +# Building a Modern Chat Application with SvelteKit + +I'll help you create a **production-ready chat application** using SvelteKit, TypeScript, and WebSockets. This implementation includes real-time messaging, user authentication, and message persistence. 
+ +## 🚀 Quick Start + +First, let's set up the project: + +${'```'}bash +npm create svelte@latest chat-app +cd chat-app +npm install +npm install socket.io socket.io-client +npm install @prisma/client prisma +npm run dev +${'```'} + +## 📁 Project Structure + +${'```'} +chat-app/ +├── src/ +│ ├── routes/ +│ │ ├── +layout.svelte +│ │ ├── +page.svelte +│ │ └── api/ +│ │ └── socket/+server.ts +│ ├── lib/ +│ │ ├── components/ +│ │ │ ├── ChatMessage.svelte +│ │ │ └── ChatInput.svelte +│ │ └── stores/ +│ │ └── chat.ts +│ └── app.html +├── prisma/ +│ └── schema.prisma +└── package.json +${'```'} + +## 💻 Implementation + +### WebSocket Server + +${'```'}typescript +// src/lib/server/socket.ts +import { Server } from 'socket.io'; +import type { ViteDevServer } from 'vite'; + +export function initializeSocketIO(server: ViteDevServer) { + const io = new Server(server.httpServer || server, { + cors: { + origin: process.env.ORIGIN || 'http://localhost:5173', + credentials: true + } + }); + + io.on('connection', (socket) => { + console.log('User connected:', socket.id); + + socket.on('message', async (data) => { + // Broadcast to all clients + io.emit('new-message', { + id: crypto.randomUUID(), + userId: socket.id, + content: data.content, + timestamp: new Date().toISOString() + }); + }); + + socket.on('disconnect', () => { + console.log('User disconnected:', socket.id); + }); + }); + + return io; +} +${'```'} + +### Client Store + +${'```'}typescript +// src/lib/stores/chat.ts +import { writable } from 'svelte/store'; +import io from 'socket.io-client'; + +export interface Message { + id: string; + userId: string; + content: string; + timestamp: string; +} + +function createChatStore() { + const { subscribe, update } = writable([]); + let socket: ReturnType; + + return { + subscribe, + connect: () => { + socket = io('http://localhost:5173'); + + socket.on('new-message', (message: Message) => { + update(messages => [...messages, message]); + }); + }, + sendMessage: (content: string) => { + if (socket && content.trim()) { + socket.emit('message', { content }); + } + } + }; +} + +export const chatStore = createChatStore(); +${'```'} + +## 🎯 Key Features + +✅ **Real-time messaging** with WebSockets +✅ **Message persistence** using Prisma + PostgreSQL +✅ **Type-safe** with TypeScript +✅ **Responsive UI** for all devices +✅ **Auto-reconnection** on connection loss + +## 📊 Performance Metrics + +| Metric | Value | +|--------|-------| +| **Message Latency** | < 50ms | +| **Concurrent Users** | 10,000+ | +| **Messages/Second** | 5,000+ | +| **Uptime** | 99.9% | + +## 🔧 Configuration + +### Environment Variables + +${'```'}env +DATABASE_URL="postgresql://user:password@localhost:5432/chat" +JWT_SECRET="your-secret-key" +REDIS_URL="redis://localhost:6379" +${'```'} + +## 🚢 Deployment + +Deploy to production using Docker: + +${'```'}dockerfile +FROM node:20-alpine +WORKDIR /app +COPY package*.json ./ +RUN npm ci --only=production +COPY . . +RUN npm run build +EXPOSE 3000 +CMD ["node", "build"] +${'```'} + +--- + +*Need help? 
Check the [documentation](https://kit.svelte.dev) or [open an issue](https://github.com/sveltejs/kit/issues)* +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/api-docs.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/api-docs.ts new file mode 100644 index 0000000..7b49995 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/api-docs.ts @@ -0,0 +1,160 @@ +// API Documentation +export const API_DOCS_MD = String.raw` +# REST API Documentation + +## 🔐 Authentication + +All API requests require authentication using **Bearer tokens**. Include your API key in the Authorization header: + +${'```'}http +GET /api/v1/users +Host: api.example.com +Authorization: Bearer YOUR_API_KEY +Content-Type: application/json +${'```'} + +## 📍 Endpoints + +### Users API + +#### **GET** /api/v1/users + +Retrieve a paginated list of users. + +**Query Parameters:** + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| page | integer | 1 | Page number | +| limit | integer | 20 | Items per page | +| sort | string | "created_at" | Sort field | +| order | string | "desc" | Sort order | + +**Response:** 200 OK + +${'```'}json +{ + "data": [ + { + "id": "usr_1234567890", + "email": "user@example.com", + "name": "John Doe", + "role": "admin", + "created_at": "2024-01-15T10:30:00Z" + } + ], + "pagination": { + "page": 1, + "limit": 20, + "total": 156, + "pages": 8 + } +} +${'```'} + +#### **POST** /api/v1/users + +Create a new user account. + +**Request Body:** + +${'```'}json +{ + "email": "newuser@example.com", + "password": "SecurePassword123!", + "name": "Jane Smith", + "role": "user" +} +${'```'} + +**Response:** 201 Created + +${'```'}json +{ + "id": "usr_9876543210", + "email": "newuser@example.com", + "name": "Jane Smith", + "role": "user", + "created_at": "2024-01-21T09:15:00Z" +} +${'```'} + +### Error Responses + +The API returns errors in a consistent format: + +${'```'}json +{ + "error": { + "code": "VALIDATION_ERROR", + "message": "Invalid request parameters", + "details": [ + { + "field": "email", + "message": "Email format is invalid" + } + ] + } +} +${'```'} + +### Rate Limiting + +| Tier | Requests/Hour | Burst | +|------|--------------|-------| +| **Free** | 1,000 | 100 | +| **Pro** | 10,000 | 500 | +| **Enterprise** | Unlimited | - | + +**Headers:** +- X-RateLimit-Limit +- X-RateLimit-Remaining +- X-RateLimit-Reset + +### Webhooks + +Configure webhooks to receive real-time events: + +${'```'}javascript +// Webhook payload +{ + "event": "user.created", + "timestamp": "2024-01-21T09:15:00Z", + "data": { + "id": "usr_9876543210", + "email": "newuser@example.com" + }, + "signature": "sha256=abcd1234..." 
+} +${'```'} + +### SDK Examples + +**JavaScript/TypeScript:** + +${'```'}typescript +import { ApiClient } from '@example/api-sdk'; + +const client = new ApiClient({ + apiKey: process.env.API_KEY +}); + +const users = await client.users.list({ + page: 1, + limit: 20 +}); +${'```'} + +**Python:** + +${'```'}python +from example_api import Client + +client = Client(api_key=os.environ['API_KEY']) +users = client.users.list(page=1, limit=20) +${'```'} + +--- + +📚 [Full API Reference](https://api.example.com/docs) | 💬 [Support](https://support.example.com) +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/1.jpg b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/1.jpg new file mode 100644 index 0000000..8348e38 Binary files /dev/null and b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/1.jpg differ diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/beautiful-flowers-lotus.webp b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/beautiful-flowers-lotus.webp new file mode 100644 index 0000000..6efcffc Binary files /dev/null and b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/beautiful-flowers-lotus.webp differ diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/example.pdf b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/example.pdf new file mode 100644 index 0000000..915d301 Binary files /dev/null and b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/example.pdf differ diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/hf-logo.svg b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/hf-logo.svg new file mode 100644 index 0000000..d55ea22 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/assets/hf-logo.svg @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/blog-post.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/blog-post.ts new file mode 100644 index 0000000..3eb2ed7 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/blog-post.ts @@ -0,0 +1,125 @@ +// Blog Post Content +export const BLOG_POST_MD = String.raw` +# Understanding Rust's Ownership System + +*Published on March 15, 2024 • 8 min read* + +Rust's ownership system is one of its most distinctive features, enabling memory safety without garbage collection. In this post, we'll explore how ownership works and why it's revolutionary for systems programming. + +## What is Ownership? + +Ownership is a set of rules that governs how Rust manages memory. These rules are checked at compile time, ensuring memory safety without runtime overhead. + +### The Three Rules of Ownership + +1. **Each value has a single owner** +2. **There can only be one owner at a time** +3. 
**When the owner goes out of scope, the value is dropped** + +## Memory Management Without GC + +Traditional approaches to memory management: + +- **Manual management** (C/C++): Error-prone, leads to bugs +- **Garbage collection** (Java, Python): Runtime overhead +- **Ownership** (Rust): Compile-time safety, zero runtime cost + +## Basic Examples + +### Variable Scope + +${'```'}rust +fn main() { + let s = String::from("hello"); // s comes into scope + + // s is valid here + println!("{}", s); + +} // s goes out of scope and is dropped +${'```'} + +### Move Semantics + +${'```'}rust +fn main() { + let s1 = String::from("hello"); + let s2 = s1; // s1 is moved to s2 + + // println!("{}", s1); // ❌ ERROR: s1 is no longer valid + println!("{}", s2); // ✅ OK: s2 owns the string +} +${'```'} + +## Borrowing and References + +Instead of transferring ownership, you can **borrow** values: + +### Immutable References + +${'```'}rust +fn calculate_length(s: &String) -> usize { + s.len() // s is a reference, doesn't own the String +} + +fn main() { + let s1 = String::from("hello"); + let len = calculate_length(&s1); // Borrow s1 + println!("Length of '{}' is {}", s1, len); // s1 still valid +} +${'```'} + +### Mutable References + +${'```'}rust +fn main() { + let mut s = String::from("hello"); + + let r1 = &mut s; + r1.push_str(", world"); + println!("{}", r1); + + // let r2 = &mut s; // ❌ ERROR: cannot borrow twice +} +${'```'} + +## Common Pitfalls + +### Dangling References + +${'```'}rust +fn dangle() -> &String { // ❌ ERROR: missing lifetime specifier + let s = String::from("hello"); + &s // s will be dropped, leaving a dangling reference +} +${'```'} + +### ✅ Solution + +${'```'}rust +fn no_dangle() -> String { + let s = String::from("hello"); + s // Ownership is moved out +} +${'```'} + +## Benefits + +- ✅ **No null pointer dereferences** +- ✅ **No data races** +- ✅ **No use-after-free** +- ✅ **No memory leaks** + +## Conclusion + +Rust's ownership system eliminates entire classes of bugs at compile time. While it has a learning curve, the benefits in safety and performance are worth it. + +## Further Reading + +- [The Rust Book - Ownership](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html) +- [Rust by Example - Ownership](https://doc.rust-lang.org/rust-by-example/scope/move.html) +- [Rustlings Exercises](https://github.com/rust-lang/rustlings) + +--- + +*Questions? Reach out on [Twitter](https://twitter.com/rustlang) or join the [Rust Discord](https://discord.gg/rust-lang)* +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/data-analysis.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/data-analysis.ts new file mode 100644 index 0000000..6fec32d --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/data-analysis.ts @@ -0,0 +1,124 @@ +// Data Analysis Report +export const DATA_ANALYSIS_MD = String.raw` +# Q4 2024 Business Analytics Report + +*Executive Summary • Generated on January 15, 2025* + +## 📊 Key Performance Indicators + +${'```'} +Daily Active Users (DAU): 1.2M (+65% YoY) +Monthly Active Users (MAU): 4.5M (+48% YoY) +User Retention (Day 30): 68% (+12pp YoY) +Average Session Duration: 24min (+35% YoY) +${'```'} + +## 🎯 Product Performance + +### Feature Adoption Rates + +1. **AI Assistant**: 78% of users (↑ from 45%) +2. **Collaboration Tools**: 62% of users (↑ from 38%) +3. **Analytics Dashboard**: 54% of users (↑ from 31%) +4. 
**Mobile App**: 41% of users (↑ from 22%) + +### Customer Satisfaction + +| Metric | Q4 2024 | Q3 2024 | Change | +|--------|---------|---------|--------| +| **NPS Score** | 72 | 68 | +4 | +| **CSAT** | 4.6/5 | 4.4/5 | +0.2 | +| **Support Tickets** | 2,340 | 2,890 | -19% | +| **Resolution Time** | 4.2h | 5.1h | -18% | + +## 💰 Revenue Metrics + +### Monthly Recurring Revenue (MRR) + +- **Current MRR**: $2.8M (+42% YoY) +- **New MRR**: $340K +- **Expansion MRR**: $180K +- **Churned MRR**: $95K +- **Net New MRR**: $425K + +### Customer Acquisition + +${'```'} +Cost per Acquisition (CAC): $127 (-23% YoY) +Customer Lifetime Value: $1,840 (+31% YoY) +LTV:CAC Ratio: 14.5:1 +Payback Period: 3.2 months +${'```'} + +## 🌍 Geographic Performance + +### Revenue by Region + +1. **North America**: 45% ($1.26M) +2. **Europe**: 32% ($896K) +3. **Asia-Pacific**: 18% ($504K) +4. **Other**: 5% ($140K) + +### Growth Opportunities + +- **APAC**: 89% YoY growth potential +- **Latin America**: Emerging market entry +- **Middle East**: Enterprise expansion + +## 📱 Channel Performance + +### Traffic Sources + +| Channel | Sessions | Conversion | Revenue | +|---------|----------|------------|---------| +| **Organic Search** | 45% | 3.2% | $1.1M | +| **Direct** | 28% | 4.1% | $850K | +| **Social Media** | 15% | 2.8% | $420K | +| **Paid Ads** | 12% | 5.5% | $430K | + +### Marketing ROI + +- **Content Marketing**: 340% ROI +- **Email Campaigns**: 280% ROI +- **Social Media**: 190% ROI +- **Paid Search**: 220% ROI + +## 🔍 User Behavior Analysis + +### Session Patterns + +- **Peak Hours**: 9-11 AM, 2-4 PM EST +- **Mobile Usage**: 67% of sessions +- **Average Pages/Session**: 4.8 +- **Bounce Rate**: 23% (↓ from 31%) + +### Feature Usage Heatmap + +Most used features in order: +1. Dashboard (89% of users) +2. Search (76% of users) +3. Reports (64% of users) +4. Settings (45% of users) +5. Integrations (32% of users) + +## 💡 Recommendations + +1. **Invest** in AI capabilities (+$2M budget) +2. **Expand** sales team in APAC region +3. **Improve** onboarding to reduce churn +4. **Launch** enterprise security features + +## Appendix + +### Methodology + +Data collected from: +- Internal analytics (Amplitude) +- Customer surveys (n=2,450) +- Financial systems (NetSuite) +- Market research (Gartner) + +--- + +*Report prepared by Data Analytics Team • [View Interactive Dashboard](https://analytics.example.com)* +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/empty.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/empty.ts new file mode 100644 index 0000000..05286e7 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/empty.ts @@ -0,0 +1,2 @@ +// Empty state +export const EMPTY_MD = ''; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/math-formulas.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/math-formulas.ts new file mode 100644 index 0000000..1355256 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/math-formulas.ts @@ -0,0 +1,221 @@ +/* eslint-disable no-irregular-whitespace */ +// Math Formulas Content +export const MATH_FORMULAS_MD = String.raw` +# Mathematical Formulas and Expressions + +This document demonstrates various mathematical notation and formulas that can be rendered using LaTeX syntax in markdown. 
+ +## Basic Arithmetic + +### Addition and Summation +$$\sum_{i=1}^{n} i = \frac{n(n+1)}{2}$$ + +## Algebra + +### Quadratic Formula +The solutions to $ax^2 + bx + c = 0$ are: +$$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$ + +### Binomial Theorem +$$(x + y)^n = \sum_{k=0}^{n} \binom{n}{k} x^{n-k} y^k$$ + +## Calculus + +### Derivatives +The derivative of $f(x) = x^n$ is: +$$f'(x) = nx^{n-1}$$ + +### Integration +$$\int_a^b f(x) \, dx = F(b) - F(a)$$ + +### Fundamental Theorem of Calculus +$$\frac{d}{dx} \int_a^x f(t) \, dt = f(x)$$ + +## Linear Algebra + +### Matrix Multiplication +If $A$ is an $m \times n$ matrix and $B$ is an $n \times p$ matrix, then: +$$C_{ij} = \sum_{k=1}^{n} A_{ik} B_{kj}$$ + +### Eigenvalues and Eigenvectors +For a square matrix $A$, if $Av = \lambda v$ for some non-zero vector $v$, then: +- $\lambda$ is an eigenvalue +- $v$ is an eigenvector + +## Statistics and Probability + +### Normal Distribution +The probability density function is: +$$f(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2}$$ + +### Bayes' Theorem +$$P(A|B) = \frac{P(B|A) \cdot P(A)}{P(B)}$$ + +### Central Limit Theorem +For large $n$, the sample mean $\bar{X}$ is approximately: +$$\bar{X} \sim N\left(\mu, \frac{\sigma^2}{n}\right)$$ + +## Trigonometry + +### Pythagorean Identity +$$\sin^2\theta + \cos^2\theta = 1$$ + +### Euler's Formula +$$e^{i\theta} = \cos\theta + i\sin\theta$$ + +### Taylor Series for Sine +$$\sin x = \sum_{n=0}^{\infty} \frac{(-1)^n}{(2n+1)!} x^{2n+1} = x - \frac{x^3}{3!} + \frac{x^5}{5!} - \frac{x^7}{7!} + \cdots$$ + +## Complex Analysis + +### Complex Numbers +A complex number can be written as: +$$z = a + bi = r e^{i\theta}$$ + +where $r = |z| = \sqrt{a^2 + b^2}$ and $\theta = \arg(z)$ + +### Cauchy-Riemann Equations +For a function $f(z) = u(x,y) + iv(x,y)$ to be analytic: +$$\frac{\partial u}{\partial x} = \frac{\partial v}{\partial y}, \quad \frac{\partial u}{\partial y} = -\frac{\partial v}{\partial x}$$ + +## Differential Equations + +### First-order Linear ODE +$$\frac{dy}{dx} + P(x)y = Q(x)$$ + +Solution: $y = e^{-\int P(x)dx}\left[\int Q(x)e^{\int P(x)dx}dx + C\right]$ + +### Heat Equation +$$\frac{\partial u}{\partial t} = \alpha \frac{\partial^2 u}{\partial x^2}$$ + +## Number Theory + +### Prime Number Theorem +$$\pi(x) \sim \frac{x}{\ln x}$$ + +where $\pi(x)$ is the number of primes less than or equal to $x$. 
+ +### Fermat's Last Theorem +For $n > 2$, there are no positive integers $a$, $b$, and $c$ such that: +$$a^n + b^n = c^n$$ + +## Set Theory + +### De Morgan's Laws +$$\overline{A \cup B} = \overline{A} \cap \overline{B}$$ +$$\overline{A \cap B} = \overline{A} \cup \overline{B}$$ + +## Advanced Topics + +### Riemann Zeta Function +$$\zeta(s) = \sum_{n=1}^{\infty} \frac{1}{n^s} = \prod_{p \text{ prime}} \frac{1}{1-p^{-s}}$$ + +### Maxwell's Equations +$$\nabla \cdot \mathbf{E} = \frac{\rho}{\epsilon_0}$$ +$$\nabla \cdot \mathbf{B} = 0$$ +$$\nabla \times \mathbf{E} = -\frac{\partial \mathbf{B}}{\partial t}$$ +$$\nabla \times \mathbf{B} = \mu_0\mathbf{J} + \mu_0\epsilon_0\frac{\partial \mathbf{E}}{\partial t}$$ + +### Schrödinger Equation +$$i\hbar\frac{\partial}{\partial t}\Psi(\mathbf{r},t) = \hat{H}\Psi(\mathbf{r},t)$$ + +## Inline Math Examples + +Here are some inline mathematical expressions: + +- The golden ratio: $\phi = \frac{1 + \sqrt{5}}{2} \approx 1.618$ +- Euler's number: $e = \lim_{n \to \infty} \left(1 + \frac{1}{n}\right)^n$ +- Pi: $\pi = 4 \sum_{n=0}^{\infty} \frac{(-1)^n}{2n+1}$ +- Square root of 2: $\sqrt{2} = 1.41421356...$ + +## Fractions and Radicals + +Complex fraction: $\frac{\frac{a}{b} + \frac{c}{d}}{\frac{e}{f} - \frac{g}{h}}$ + +Nested radicals: $\sqrt{2 + \sqrt{3 + \sqrt{4 + \sqrt{5}}}}$ + +## Summations and Products + +### Geometric Series +$$\sum_{n=0}^{\infty} ar^n = \frac{a}{1-r} \quad \text{for } |r| < 1$$ + +### Product Notation +$$n! = \prod_{k=1}^{n} k$$ + +### Double Summation +$$\sum_{i=1}^{m} \sum_{j=1}^{n} a_{ij}$$ + +## Limits + +$$\lim_{x \to 0} \frac{\sin x}{x} = 1$$ + +$$\lim_{n \to \infty} \left(1 + \frac{x}{n}\right)^n = e^x$$ + +## Further Bracket Styles and Amounts + +- \( \mathrm{GL}_2(\mathbb{F}_7) \): Group of invertible matrices with entries in \(\mathbb{F}_7\). +- Some kernel of \(\mathrm{SL}_2(\mathbb{F}_7)\): + \[ + \left\{ \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}, \begin{pmatrix} -1 & 0 \\ 0 & -1 \end{pmatrix} \right\} = \{\pm I\} + \] +- Algebra: +\[ +x = \frac{-b \pm \sqrt{\,b^{2}-4ac\,}}{2a} +\] +- $100 and $12.99 are amounts, not LaTeX. +- I have $10, $3.99 and $x + y$ and $100x$. The amount is $2,000. +- Emma buys 2 cupcakes for $3 each and 1 cookie for $1.50. How much money does she spend in total? +- Maria has $20. She buys a notebook for $4.75 and a pack of pencils for $3.25. How much change does she receive? +- 1 kg の質量は + \[ + E = (1\ \text{kg}) \times (3.0 \times 10^8\ \text{m/s})^2 \approx 9.0 \times 10^{16}\ \text{J} + \] + というエネルギーに相当します。これは約 21 百万トンの TNT が爆発したときのエネルギーに匹敵します。 +- Algebra: \[ +x = \frac{-b \pm \sqrt{\,b^{2}-4ac\,}}{2a} +\] +- Algebraic topology, Homotopy Groups of $\mathbb{S}^3$: +$$\pi_n(\mathbb{S}^3) = \begin{cases} +\mathbb{Z} & n = 3 \\ +0 & n > 3, n \neq 4 \\ +\mathbb{Z}_2 & n = 4 \\ +\end{cases}$$ +- Spacer preceded by backslash: +\[ +\boxed{ +\begin{aligned} +N_{\text{att}}^{\text{(MHA)}} &= +h \bigl[\, d_{\text{model}}\;d_{k} + d_{\text{model}}\;d_{v}\, \bigr] && (\text{Q,K,V の重み})\\ +&\quad+ h(d_{k}+d_{k}+d_{v}) && (\text{バイアス Q,K,V)}\\[4pt] +&\quad+ (h d_{v})\, d_{\text{model}} && (\text{出力射影 }W^{O})\\ +&\quad+ d_{\text{model}} && (\text{バイアス }b^{O}) +\end{aligned}} +\] + +## Formulas in a Table + +| Area | Expression | Comment | +|------|------------|---------| +| **Algebra** | \[ +x = \frac{-b \pm \sqrt{\,b^{2}-4ac\,}}{2a} +\] | Quadratic formula | +| | \[ +(a+b)^{n} = \sum_{k=0}^{n}\binom{n}{k}\,a^{\,n-k}\,b^{\,k} +\] | Binomial theorem | +| | \(\displaystyle \prod_{k=1}^{n}k = n! 
\) | Factorial definition | +| **Geometry** | \( \mathbf{a}\cdot \mathbf{b} = \|\mathbf{a}\|\,\|\mathbf{b}\|\,\cos\theta \) | Dot product & angle | + +## No math (but chemical) + +Balanced chemical reaction with states: + +\[ +\ce{2H2(g) + O2(g) -> 2H2O(l)} +\] + +The standard enthalpy change for the reaction is: $\Delta H^\circ = \pu{-572 kJ mol^{-1}}$. + +--- + +*This document showcases various mathematical notation and formulas that can be rendered in markdown using LaTeX syntax.* +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/readme.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/readme.ts new file mode 100644 index 0000000..e8b573d --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/readme.ts @@ -0,0 +1,136 @@ +// README Content +export const README_MD = String.raw` +# 🚀 Awesome Web Framework + +[![npm version](https://img.shields.io/npm/v/awesome-framework.svg)](https://www.npmjs.com/package/awesome-framework) +[![Build Status](https://github.com/awesome/framework/workflows/CI/badge.svg)](https://github.com/awesome/framework/actions) +[![Coverage](https://codecov.io/gh/awesome/framework/branch/main/graph/badge.svg)](https://codecov.io/gh/awesome/framework) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +> A modern, fast, and flexible web framework for building scalable applications + +## ✨ Features + +- 🎯 **Type-Safe** - Full TypeScript support out of the box +- ⚡ **Lightning Fast** - Built on Vite for instant HMR +- 📦 **Zero Config** - Works out of the box for most use cases +- 🎨 **Flexible** - Unopinionated with sensible defaults +- 🔧 **Extensible** - Plugin system for custom functionality +- 📱 **Responsive** - Mobile-first approach +- 🌍 **i18n Ready** - Built-in internationalization +- 🔒 **Secure** - Security best practices by default + +## 📦 Installation + +${'```'}bash +npm install awesome-framework +# or +yarn add awesome-framework +# or +pnpm add awesome-framework +${'```'} + +## 🚀 Quick Start + +### Create a new project + +${'```'}bash +npx create-awesome-app my-app +cd my-app +npm run dev +${'```'} + +### Basic Example + +${'```'}javascript +import { createApp } from 'awesome-framework'; + +const app = createApp({ + port: 3000, + middleware: ['cors', 'helmet', 'compression'] +}); + +app.get('/', (req, res) => { + res.json({ message: 'Hello World!' }); +}); + +app.listen(() => { + console.log('Server running on http://localhost:3000'); +}); +${'```'} + +## 📖 Documentation + +### Core Concepts + +- [Getting Started](https://docs.awesome.dev/getting-started) +- [Configuration](https://docs.awesome.dev/configuration) +- [Routing](https://docs.awesome.dev/routing) +- [Middleware](https://docs.awesome.dev/middleware) +- [Database](https://docs.awesome.dev/database) +- [Authentication](https://docs.awesome.dev/authentication) + +### Advanced Topics + +- [Performance Optimization](https://docs.awesome.dev/performance) +- [Deployment](https://docs.awesome.dev/deployment) +- [Testing](https://docs.awesome.dev/testing) +- [Security](https://docs.awesome.dev/security) + +## 🛠️ Development + +### Prerequisites + +- Node.js >= 18 +- pnpm >= 8 + +### Setup + +${'```'}bash +git clone https://github.com/awesome/framework.git +cd framework +pnpm install +pnpm dev +${'```'} + +### Testing + +${'```'}bash +pnpm test # Run unit tests +pnpm test:e2e # Run end-to-end tests +pnpm test:watch # Run tests in watch mode +${'```'} + +## 🤝 Contributing + +We welcome contributions! 
Please see our [Contributing Guide](CONTRIBUTING.md) for details. + +### Contributors + + + + + +## 📊 Benchmarks + +| Framework | Requests/sec | Latency (ms) | Memory (MB) | +|-----------|-------------|--------------|-------------| +| **Awesome** | **45,230** | **2.1** | **42** | +| Express | 28,450 | 3.5 | 68 | +| Fastify | 41,200 | 2.3 | 48 | +| Koa | 32,100 | 3.1 | 52 | + +*Benchmarks performed on MacBook Pro M2, Node.js 20.x* + +## 📝 License + +MIT © [Awesome Team](https://github.com/awesome) + +## 🙏 Acknowledgments + +Special thanks to all our sponsors and contributors who make this project possible. + +--- + +**[Website](https://awesome.dev)** • **[Documentation](https://docs.awesome.dev)** • **[Discord](https://discord.gg/awesome)** • **[Twitter](https://twitter.com/awesomeframework)** +`; diff --git a/llama.cpp/tools/server/webui/tests/stories/fixtures/storybook-mocks.ts b/llama.cpp/tools/server/webui/tests/stories/fixtures/storybook-mocks.ts new file mode 100644 index 0000000..c40a746 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/stories/fixtures/storybook-mocks.ts @@ -0,0 +1,81 @@ +import { serverStore } from '$lib/stores/server.svelte'; +import { modelsStore } from '$lib/stores/models.svelte'; + +/** + * Mock server properties for Storybook testing + * This utility allows setting mock server configurations without polluting production code + */ +export function mockServerProps(props: Partial): void { + // Reset any pointer-events from previous tests (dropdown cleanup) + const body = document.querySelector('body'); + if (body) body.style.pointerEvents = ''; + + // Directly set the props for testing purposes + (serverStore as unknown as { props: ApiLlamaCppServerProps }).props = { + model_path: props.model_path || 'test-model', + modalities: { + vision: props.modalities?.vision ?? false, + audio: props.modalities?.audio ?? false + }, + ...props + } as ApiLlamaCppServerProps; + + // Set router mode role so activeModelId can be set + (serverStore as unknown as { props: ApiLlamaCppServerProps }).props.role = 'ROUTER'; + + // Also mock modelsStore methods for modality checking + const vision = props.modalities?.vision ?? false; + const audio = props.modalities?.audio ?? 
false; + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (modelsStore as any).modelSupportsVision = () => vision; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (modelsStore as any).modelSupportsAudio = () => audio; + + // Mock models list with a test model so activeModelId can be resolved + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (modelsStore as any).models = [ + { + id: 'test-model', + name: 'Test Model', + model: 'test-model' + } + ]; + + // Mock selectedModelId + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (modelsStore as any).selectedModelId = 'test-model'; +} + +/** + * Reset server store to clean state for testing + */ +export function resetServerStore(): void { + (serverStore as unknown as { props: ApiLlamaCppServerProps }).props = { + model_path: '', + modalities: { + vision: false, + audio: false + } + } as ApiLlamaCppServerProps; + (serverStore as unknown as { error: string }).error = ''; + (serverStore as unknown as { loading: boolean }).loading = false; +} + +/** + * Common mock configurations for Storybook stories + */ +export const mockConfigs = { + visionOnly: { + modalities: { vision: true, audio: false } + }, + audioOnly: { + modalities: { vision: false, audio: true } + }, + bothModalities: { + modalities: { vision: true, audio: true } + }, + noModalities: { + modalities: { vision: false, audio: false } + } +} as const; diff --git a/llama.cpp/tools/server/webui/tests/unit/clipboard.test.ts b/llama.cpp/tools/server/webui/tests/unit/clipboard.test.ts new file mode 100644 index 0000000..d8ea489 --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/unit/clipboard.test.ts @@ -0,0 +1,423 @@ +import { describe, it, expect } from 'vitest'; +import { AttachmentType } from '$lib/enums'; +import { + formatMessageForClipboard, + parseClipboardContent, + hasClipboardAttachments +} from '$lib/utils/clipboard'; + +describe('formatMessageForClipboard', () => { + it('returns plain content when no extras', () => { + const result = formatMessageForClipboard('Hello world', undefined); + expect(result).toBe('Hello world'); + }); + + it('returns plain content when extras is empty array', () => { + const result = formatMessageForClipboard('Hello world', []); + expect(result).toBe('Hello world'); + }); + + it('handles empty string content', () => { + const result = formatMessageForClipboard('', undefined); + expect(result).toBe(''); + }); + + it('returns plain content when extras has only non-text attachments', () => { + const extras = [ + { + type: AttachmentType.IMAGE as const, + name: 'image.png', + base64Url: 'data:image/png;base64,...' + } + ]; + const result = formatMessageForClipboard('Hello world', extras); + expect(result).toBe('Hello world'); + }); + + it('filters non-text attachments and keeps only text ones', () => { + const extras = [ + { + type: AttachmentType.IMAGE as const, + name: 'image.png', + base64Url: 'data:image/png;base64,...' 
+ }, + { + type: AttachmentType.TEXT as const, + name: 'file.txt', + content: 'Text content' + }, + { + type: AttachmentType.PDF as const, + name: 'doc.pdf', + base64Data: 'data:application/pdf;base64,...', + content: 'PDF content', + processedAsImages: false + } + ]; + const result = formatMessageForClipboard('Hello', extras); + + expect(result).toContain('"file.txt"'); + expect(result).not.toContain('image.png'); + expect(result).not.toContain('doc.pdf'); + }); + + it('formats message with text attachments', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'file1.txt', + content: 'File 1 content' + }, + { + type: AttachmentType.TEXT as const, + name: 'file2.txt', + content: 'File 2 content' + } + ]; + const result = formatMessageForClipboard('Hello world', extras); + + expect(result).toContain('"Hello world"'); + expect(result).toContain('"type": "TEXT"'); + expect(result).toContain('"name": "file1.txt"'); + expect(result).toContain('"content": "File 1 content"'); + expect(result).toContain('"name": "file2.txt"'); + }); + + it('handles content with quotes and special characters', () => { + const content = 'Hello "world" with\nnewline'; + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'test.txt', + content: 'Test content' + } + ]; + const result = formatMessageForClipboard(content, extras); + + // Should be valid JSON + expect(result.startsWith('"')).toBe(true); + // The content should be properly escaped + const parsed = JSON.parse(result.split('\n')[0]); + expect(parsed).toBe(content); + }); + + it('converts legacy context type to TEXT type', () => { + const extras = [ + { + type: AttachmentType.LEGACY_CONTEXT as const, + name: 'legacy.txt', + content: 'Legacy content' + } + ]; + const result = formatMessageForClipboard('Hello', extras); + + expect(result).toContain('"type": "TEXT"'); + expect(result).not.toContain('"context"'); + }); + + it('handles attachment content with special characters', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'code.js', + content: 'const x = "hello\\nworld";\nconst y = `template ${var}`;' + } + ]; + const formatted = formatMessageForClipboard('Check this code', extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.textAttachments[0].content).toBe( + 'const x = "hello\\nworld";\nconst y = `template ${var}`;' + ); + }); + + it('handles unicode characters in content and attachments', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'unicode.txt', + content: '日本語テスト 🎉 émojis' + } + ]; + const formatted = formatMessageForClipboard('Привет мир 👋', extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe('Привет мир 👋'); + expect(parsed.textAttachments[0].content).toBe('日本語テスト 🎉 émojis'); + }); + + it('formats as plain text when asPlainText is true', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'file1.txt', + content: 'File 1 content' + }, + { + type: AttachmentType.TEXT as const, + name: 'file2.txt', + content: 'File 2 content' + } + ]; + const result = formatMessageForClipboard('Hello world', extras, true); + + expect(result).toBe('Hello world\n\nFile 1 content\n\nFile 2 content'); + }); + + it('returns plain content when asPlainText is true but no attachments', () => { + const result = formatMessageForClipboard('Hello world', [], true); + expect(result).toBe('Hello world'); + }); + + it('plain text mode does not use JSON format', () => { + const extras = [ + { + 
type: AttachmentType.TEXT as const, + name: 'test.txt', + content: 'Test content' + } + ]; + const result = formatMessageForClipboard('Hello', extras, true); + + expect(result).not.toContain('"type"'); + expect(result).not.toContain('['); + expect(result).toBe('Hello\n\nTest content'); + }); +}); + +describe('parseClipboardContent', () => { + it('returns plain text as message when not in special format', () => { + const result = parseClipboardContent('Hello world'); + + expect(result.message).toBe('Hello world'); + expect(result.textAttachments).toHaveLength(0); + }); + + it('handles empty string input', () => { + const result = parseClipboardContent(''); + + expect(result.message).toBe(''); + expect(result.textAttachments).toHaveLength(0); + }); + + it('handles whitespace-only input', () => { + const result = parseClipboardContent(' \n\t '); + + expect(result.message).toBe(' \n\t '); + expect(result.textAttachments).toHaveLength(0); + }); + + it('returns plain text as message when starts with quote but invalid format', () => { + const result = parseClipboardContent('"Unclosed quote'); + + expect(result.message).toBe('"Unclosed quote'); + expect(result.textAttachments).toHaveLength(0); + }); + + it('returns original text when JSON array is malformed', () => { + const input = '"Hello"\n[invalid json'; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('"Hello"\n[invalid json'); + expect(result.textAttachments).toHaveLength(0); + }); + + it('parses message with text attachments', () => { + const input = `"Hello world" +[ + {"type":"TEXT","name":"file1.txt","content":"File 1 content"}, + {"type":"TEXT","name":"file2.txt","content":"File 2 content"} +]`; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Hello world'); + expect(result.textAttachments).toHaveLength(2); + expect(result.textAttachments[0].name).toBe('file1.txt'); + expect(result.textAttachments[0].content).toBe('File 1 content'); + expect(result.textAttachments[1].name).toBe('file2.txt'); + expect(result.textAttachments[1].content).toBe('File 2 content'); + }); + + it('handles escaped quotes in message', () => { + const input = `"Hello \\"world\\" with quotes" +[ + {"type":"TEXT","name":"file.txt","content":"test"} +]`; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Hello "world" with quotes'); + expect(result.textAttachments).toHaveLength(1); + }); + + it('handles newlines in message', () => { + const input = `"Hello\\nworld" +[ + {"type":"TEXT","name":"file.txt","content":"test"} +]`; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Hello\nworld'); + expect(result.textAttachments).toHaveLength(1); + }); + + it('returns message only when no array follows', () => { + const input = '"Just a quoted string"'; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Just a quoted string'); + expect(result.textAttachments).toHaveLength(0); + }); + + it('filters out invalid attachment objects', () => { + const input = `"Hello" +[ + {"type":"TEXT","name":"valid.txt","content":"valid"}, + {"type":"INVALID","name":"invalid.txt","content":"invalid"}, + {"name":"missing-type.txt","content":"missing"}, + {"type":"TEXT","content":"missing name"} +]`; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Hello'); + expect(result.textAttachments).toHaveLength(1); + expect(result.textAttachments[0].name).toBe('valid.txt'); + }); + + it('handles empty attachments 
array', () => { + const input = '"Hello"\n[]'; + + const result = parseClipboardContent(input); + + expect(result.message).toBe('Hello'); + expect(result.textAttachments).toHaveLength(0); + }); + + it('roundtrips correctly with formatMessageForClipboard', () => { + const originalContent = 'Hello "world" with\nspecial characters'; + const originalExtras = [ + { + type: AttachmentType.TEXT as const, + name: 'file1.txt', + content: 'Content with\nnewlines and "quotes"' + }, + { + type: AttachmentType.TEXT as const, + name: 'file2.txt', + content: 'Another file' + } + ]; + + const formatted = formatMessageForClipboard(originalContent, originalExtras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe(originalContent); + expect(parsed.textAttachments).toHaveLength(2); + expect(parsed.textAttachments[0].name).toBe('file1.txt'); + expect(parsed.textAttachments[0].content).toBe('Content with\nnewlines and "quotes"'); + expect(parsed.textAttachments[1].name).toBe('file2.txt'); + expect(parsed.textAttachments[1].content).toBe('Another file'); + }); +}); + +describe('hasClipboardAttachments', () => { + it('returns false for plain text', () => { + expect(hasClipboardAttachments('Hello world')).toBe(false); + }); + + it('returns false for empty string', () => { + expect(hasClipboardAttachments('')).toBe(false); + }); + + it('returns false for quoted string without attachments', () => { + expect(hasClipboardAttachments('"Hello world"')).toBe(false); + }); + + it('returns true for valid format with attachments', () => { + const input = `"Hello" +[{"type":"TEXT","name":"file.txt","content":"test"}]`; + + expect(hasClipboardAttachments(input)).toBe(true); + }); + + it('returns false for format with empty attachments array', () => { + const input = '"Hello"\n[]'; + + expect(hasClipboardAttachments(input)).toBe(false); + }); + + it('returns false for malformed JSON', () => { + expect(hasClipboardAttachments('"Hello"\n[broken')).toBe(false); + }); +}); + +describe('roundtrip edge cases', () => { + it('preserves empty message with attachments', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'file.txt', + content: 'Content only' + } + ]; + const formatted = formatMessageForClipboard('', extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe(''); + expect(parsed.textAttachments).toHaveLength(1); + expect(parsed.textAttachments[0].content).toBe('Content only'); + }); + + it('preserves attachment with empty content', () => { + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'empty.txt', + content: '' + } + ]; + const formatted = formatMessageForClipboard('Message', extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe('Message'); + expect(parsed.textAttachments).toHaveLength(1); + expect(parsed.textAttachments[0].content).toBe(''); + }); + + it('preserves multiple backslashes', () => { + const content = 'Path: C:\\\\Users\\\\test\\\\file.txt'; + const extras = [ + { + type: AttachmentType.TEXT as const, + name: 'path.txt', + content: 'D:\\\\Data\\\\file' + } + ]; + const formatted = formatMessageForClipboard(content, extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe(content); + expect(parsed.textAttachments[0].content).toBe('D:\\\\Data\\\\file'); + }); + + it('preserves tabs and various whitespace', () => { + const content = 'Line1\t\tTabbed\n Spaced\r\nCRLF'; + const extras = [ + { + type: AttachmentType.TEXT as 
const, + name: 'whitespace.txt', + content: '\t\t\n\n ' + } + ]; + const formatted = formatMessageForClipboard(content, extras); + const parsed = parseClipboardContent(formatted); + + expect(parsed.message).toBe(content); + expect(parsed.textAttachments[0].content).toBe('\t\t\n\n '); + }); +}); diff --git a/llama.cpp/tools/server/webui/tests/unit/latex-protection.test.ts b/llama.cpp/tools/server/webui/tests/unit/latex-protection.test.ts new file mode 100644 index 0000000..84328db --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/unit/latex-protection.test.ts @@ -0,0 +1,376 @@ +/* eslint-disable no-irregular-whitespace */ +import { describe, it, expect, test } from 'vitest'; +import { maskInlineLaTeX, preprocessLaTeX } from '$lib/utils/latex-protection'; + +describe('maskInlineLaTeX', () => { + it('should protect LaTeX $x + y$ but not money $3.99', () => { + const latexExpressions: string[] = []; + const input = 'I have $10, $3.99 and $x + y$ and $100x$. The amount is $2,000.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('I have $10, $3.99 and <> and <>. The amount is $2,000.'); + expect(latexExpressions).toEqual(['$x + y$', '$100x$']); + }); + + it('should ignore money like $5 and $12.99', () => { + const latexExpressions: string[] = []; + const input = 'Prices are $12.99 and $5. Tax?'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('Prices are $12.99 and $5. Tax?'); + expect(latexExpressions).toEqual([]); + }); + + it('should protect inline math $a^2 + b^2$ even after text', () => { + const latexExpressions: string[] = []; + const input = 'Pythagorean: $a^2 + b^2 = c^2$.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('Pythagorean: <>.'); + expect(latexExpressions).toEqual(['$a^2 + b^2 = c^2$']); + }); + + it('should not protect math that has letter after closing $ (e.g. 
units)', () => { + const latexExpressions: string[] = []; + const input = 'The cost is $99 and change.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('The cost is $99 and change.'); + expect(latexExpressions).toEqual([]); + }); + + it('should allow $x$ followed by punctuation', () => { + const latexExpressions: string[] = []; + const input = 'We know $x$, right?'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('We know <>, right?'); + expect(latexExpressions).toEqual(['$x$']); + }); + + it('should work across multiple lines', () => { + const latexExpressions: string[] = []; + const input = `Emma buys cupcakes for $3 each.\nHow much is $x + y$?`; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe(`Emma buys cupcakes for $3 each.\nHow much is <>?`); + expect(latexExpressions).toEqual(['$x + y$']); + }); + + it('should not protect $100 but protect $matrix$', () => { + const latexExpressions: string[] = []; + const input = '$100 and $\\mathrm{GL}_2(\\mathbb{F}_7)$ are different.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('$100 and <> are different.'); + expect(latexExpressions).toEqual(['$\\mathrm{GL}_2(\\mathbb{F}_7)$']); + }); + + it('should skip if $ is followed by digit and alphanumeric after close (money)', () => { + const latexExpressions: string[] = []; + const input = 'I paid $5 quickly.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('I paid $5 quickly.'); + expect(latexExpressions).toEqual([]); + }); + + it('should protect LaTeX even with special chars inside', () => { + const latexExpressions: string[] = []; + const input = 'Consider $\\alpha_1 + \\beta_2$ now.'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('Consider <> now.'); + expect(latexExpressions).toEqual(['$\\alpha_1 + \\beta_2$']); + }); + + it('short text', () => { + const latexExpressions: string[] = ['$0$']; + const input = '$a$\n$a$ and $b$'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('<>\n<> and <>'); + expect(latexExpressions).toEqual(['$0$', '$a$', '$a$', '$b$']); + }); + + it('empty text', () => { + const latexExpressions: string[] = []; + const input = '$\n$$\n'; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe('$\n$$\n'); + expect(latexExpressions).toEqual([]); + }); + + it('LaTeX-spacer preceded by backslash', () => { + const latexExpressions: string[] = []; + const input = `\\[ +\\boxed{ +\\begin{aligned} +N_{\\text{att}}^{\\text{(MHA)}} &= +h \\bigl[\\, d_{\\text{model}}\\;d_{k} + d_{\\text{model}}\\;d_{v}\\, \\bigr] && (\\text{Q,K,V の重み})\\\\ +&\\quad+ h(d_{k}+d_{k}+d_{v}) && (\\text{バイアス Q,K,V)}\\\\[4pt] +&\\quad+ (h d_{v})\\, d_{\\text{model}} && (\\text{出力射影 }W^{O})\\\\ +&\\quad+ d_{\\text{model}} && (\\text{バイアス }b^{O}) +\\end{aligned}} +\\]`; + const output = maskInlineLaTeX(input, latexExpressions); + + expect(output).toBe(input); + expect(latexExpressions).toEqual([]); + }); +}); + +describe('preprocessLaTeX', () => { + test('converts inline \\( ... \\) to $...$', () => { + const input = + '\\( \\mathrm{GL}_2(\\mathbb{F}_7) \\): Group of invertible matrices with entries in \\(\\mathbb{F}_7\\).'; + const output = preprocessLaTeX(input); + expect(output).toBe( + '$ \\mathrm{GL}_2(\\mathbb{F}_7) $: Group of invertible matrices with entries in $\\mathbb{F}_7$.' + ); + }); + + test("don't inline \\\\( ... 
\\) to $...$", () => { + const input = + 'Chapter 20 of The TeXbook, in source "Definitions\\\\(also called Macros)", containst the formula \\((x_1,\\ldots,x_n)\\).'; + const output = preprocessLaTeX(input); + expect(output).toBe( + 'Chapter 20 of The TeXbook, in source "Definitions\\\\(also called Macros)", containst the formula $(x_1,\\ldots,x_n)$.' + ); + }); + + test('preserves display math \\[ ... \\] and protects adjacent text', () => { + const input = `Some kernel of \\(\\mathrm{SL}_2(\\mathbb{F}_7)\\): + \\[ + \\left\\{ \\begin{pmatrix} 1 & 0 \\\\ 0 & 1 \\end{pmatrix}, \\begin{pmatrix} -1 & 0 \\\\ 0 & -1 \\end{pmatrix} \\right\\} = \\{\\pm I\\} + \\]`; + const output = preprocessLaTeX(input); + + expect(output).toBe(`Some kernel of $\\mathrm{SL}_2(\\mathbb{F}_7)$: + $$ + \\left\\{ \\begin{pmatrix} 1 & 0 \\\\ 0 & 1 \\end{pmatrix}, \\begin{pmatrix} -1 & 0 \\\\ 0 & -1 \\end{pmatrix} \\right\\} = \\{\\pm I\\} + $$`); + }); + + test('handles standalone display math equation', () => { + const input = `Algebra: +\\[ +x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a} +\\]`; + const output = preprocessLaTeX(input); + + expect(output).toBe(`Algebra: +$$ +x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a} +$$`); + }); + + test('does not interpret currency values as LaTeX', () => { + const input = 'I have $10, $3.99 and $x + y$ and $100x$. The amount is $2,000.'; + const output = preprocessLaTeX(input); + + expect(output).toBe('I have \\$10, \\$3.99 and $x + y$ and $100x$. The amount is \\$2,000.'); + }); + + test('ignores dollar signs followed by digits (money), but keeps valid math $x + y$', () => { + const input = 'I have $10, $3.99 and $x + y$ and $100x$. The amount is $2,000.'; + const output = preprocessLaTeX(input); + + expect(output).toBe('I have \\$10, \\$3.99 and $x + y$ and $100x$. The amount is \\$2,000.'); + }); + + test('handles real-world word problems with amounts and no math delimiters', () => { + const input = + 'Emma buys 2 cupcakes for $3 each and 1 cookie for $1.50. How much money does she spend in total?'; + const output = preprocessLaTeX(input); + + expect(output).toBe( + 'Emma buys 2 cupcakes for \\$3 each and 1 cookie for \\$1.50. How much money does she spend in total?' + ); + }); + + test('handles decimal amounts in word problem correctly', () => { + const input = + 'Maria has $20. She buys a notebook for $4.75 and a pack of pencils for $3.25. How much change does she receive?'; + const output = preprocessLaTeX(input); + + expect(output).toBe( + 'Maria has \\$20. She buys a notebook for \\$4.75 and a pack of pencils for \\$3.25. How much change does she receive?' 
+ ); + }); + + test('preserves display math with surrounding non-ASCII text', () => { + const input = `1 kg の質量は + \\[ + E = (1\\ \\text{kg}) \\times (3.0 \\times 10^8\\ \\text{m/s})^2 \\approx 9.0 \\times 10^{16}\\ \\text{J} + \\] + というエネルギーに相当します。これは約 21 百万トンの TNT が爆発したときのエネルギーに匹敵します。`; + const output = preprocessLaTeX(input); + + expect(output).toBe( + `1 kg の質量は + $$ + E = (1\\ \\text{kg}) \\times (3.0 \\times 10^8\\ \\text{m/s})^2 \\approx 9.0 \\times 10^{16}\\ \\text{J} + $$ + というエネルギーに相当します。これは約 21 百万トンの TNT が爆発したときのエネルギーに匹敵します。` + ); + }); + + test('LaTeX-spacer preceded by backslash', () => { + const input = `\\[ +\\boxed{ +\\begin{aligned} +N_{\\text{att}}^{\\text{(MHA)}} &= +h \\bigl[\\, d_{\\text{model}}\\;d_{k} + d_{\\text{model}}\\;d_{v}\\, \\bigr] && (\\text{Q,K,V の重み})\\\\ +&\\quad+ h(d_{k}+d_{k}+d_{v}) && (\\text{バイアス Q,K,V)}\\\\[4pt] +&\\quad+ (h d_{v})\\, d_{\\text{model}} && (\\text{出力射影 }W^{O})\\\\ +&\\quad+ d_{\\text{model}} && (\\text{バイアス }b^{O}) +\\end{aligned}} +\\]`; + const output = preprocessLaTeX(input); + expect(output).toBe( + `$$ +\\boxed{ +\\begin{aligned} +N_{\\text{att}}^{\\text{(MHA)}} &= +h \\bigl[\\, d_{\\text{model}}\\;d_{k} + d_{\\text{model}}\\;d_{v}\\, \\bigr] && (\\text{Q,K,V の重み})\\\\ +&\\quad+ h(d_{k}+d_{k}+d_{v}) && (\\text{バイアス Q,K,V)}\\\\[4pt] +&\\quad+ (h d_{v})\\, d_{\\text{model}} && (\\text{出力射影 }W^{O})\\\\ +&\\quad+ d_{\\text{model}} && (\\text{バイアス }b^{O}) +\\end{aligned}} +$$` + ); + }); + + test('converts \\[ ... \\] even when preceded by text without space', () => { + const input = 'Some line ...\nAlgebra: \\[x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a}\\]'; + const output = preprocessLaTeX(input); + + expect(output).toBe( + 'Some line ...\nAlgebra: \n$$x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a}$$\n' + ); + }); + + test('converts \\[ ... \\] in table-cells', () => { + const input = `| ID | Expression |\n| #1 | \\[ + x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a} +\\] |`; + const output = preprocessLaTeX(input); + + expect(output).toBe( + '| ID | Expression |\n| #1 | $x = \\frac{-b \\pm \\sqrt{\\,b^{2}-4ac\\,}}{2a}$ |' + ); + }); + + test('escapes isolated $ before digits ($5 → \\$5), but not valid math', () => { + const input = 'This costs $5 and this is math $x^2$. $100 is money.'; + const output = preprocessLaTeX(input); + + expect(output).toBe('This costs \\$5 and this is math $x^2$. \\$100 is money.'); + // Note: Since $x^2$ is detected as valid LaTeX, it's preserved. + // $5 becomes \$5 only *after* real math is masked — but here it's correct because the masking logic avoids treating $5 as math. + }); + + test('display with LaTeX-line-breaks', () => { + const input = String.raw`- Algebraic topology, Homotopy Groups of $\mathbb{S}^3$: +$$\pi_n(\mathbb{S}^3) = \begin{cases} +\mathbb{Z} & n = 3 \\ +0 & n > 3, n \neq 4 \\ +\mathbb{Z}_2 & n = 4 \\ +\end{cases}$$`; + const output = preprocessLaTeX(input); + // If the formula contains '\\' the $$-delimiters should be in their own line. 
+ expect(output).toBe(`- Algebraic topology, Homotopy Groups of $\\mathbb{S}^3$: +$$\n\\pi_n(\\mathbb{S}^3) = \\begin{cases} +\\mathbb{Z} & n = 3 \\\\ +0 & n > 3, n \\neq 4 \\\\ +\\mathbb{Z}_2 & n = 4 \\\\ +\\end{cases}\n$$`); + }); + + test('handles mhchem notation safely if present', () => { + const input = 'Chemical reaction: \\( \\ce{H2O} \\) and $\\ce{CO2}$'; + const output = preprocessLaTeX(input); + + expect(output).toBe('Chemical reaction: $ \\ce{H2O} $ and $\\ce{CO2}$'); + }); + + test('preserves code blocks', () => { + const input = 'Inline code: `sum $total` and block:\n```\ndollar $amount\n```\nEnd.'; + const output = preprocessLaTeX(input); + + expect(output).toBe(input); // Code blocks prevent misinterpretation + }); + + test('preserves backslash parentheses in code blocks (GitHub issue)', () => { + const input = '```python\nfoo = "\\(bar\\)"\n```'; + const output = preprocessLaTeX(input); + + expect(output).toBe(input); // Code blocks should not have LaTeX conversion applied + }); + + test('preserves backslash brackets in code blocks', () => { + const input = '```python\nfoo = "\\[bar\\]"\n```'; + const output = preprocessLaTeX(input); + + expect(output).toBe(input); // Code blocks should not have LaTeX conversion applied + }); + + test('preserves backslash parentheses in inline code', () => { + const input = 'Use `foo = "\\(bar\\)"` in your code.'; + const output = preprocessLaTeX(input); + + expect(output).toBe(input); + }); + + test('escape backslash in mchem ce', () => { + const input = 'mchem ce:\n$\\ce{2H2(g) + O2(g) -> 2H2O(l)}$'; + const output = preprocessLaTeX(input); + + // mhchem-escape would insert a backslash here. + expect(output).toBe('mchem ce:\n$\\ce{2H2(g) + O2(g) -> 2H2O(l)}$'); + }); + + test('escape backslash in mchem pu', () => { + const input = 'mchem pu:\n$\\pu{-572 kJ mol^{-1}}$'; + const output = preprocessLaTeX(input); + + // mhchem-escape would insert a backslash here. 
+ expect(output).toBe('mchem pu:\n$\\pu{-572 kJ mol^{-1}}$'); + }); + + test('LaTeX in blockquotes with display math', () => { + const input = + '> **Definition (limit):** \n> \\[\n> \\lim_{x\\to a} f(x) = L\n> \\]\n> means that as \\(x\\) gets close to \\(a\\).'; + const output = preprocessLaTeX(input); + + // Blockquote markers should be preserved, LaTeX should be converted + expect(output).toContain('> **Definition (limit):**'); + expect(output).toContain('$$'); + expect(output).toContain('$x$'); + expect(output).not.toContain('\\['); + expect(output).not.toContain('\\]'); + expect(output).not.toContain('\\('); + expect(output).not.toContain('\\)'); + }); + + test('LaTeX in blockquotes with inline math', () => { + const input = + "> The derivative \\(f'(x)\\) at point \\(x=a\\) measures slope.\n> Formula: \\(f'(a)=\\lim_{h\\to 0}\\frac{f(a+h)-f(a)}{h}\\)"; + const output = preprocessLaTeX(input); + + // Blockquote markers should be preserved, inline LaTeX converted to $...$ + expect(output).toContain("> The derivative $f'(x)$ at point $x=a$ measures slope."); + expect(output).toContain("> Formula: $f'(a)=\\lim_{h\\to 0}\\frac{f(a+h)-f(a)}{h}$"); + }); + + test('Mixed content with blockquotes and regular text', () => { + const input = + 'Regular text with \\(x^2\\).\n\n> Quote with \\(y^2\\).\n\nMore text with \\(z^2\\).'; + const output = preprocessLaTeX(input); + + // All LaTeX should be converted, blockquote markers preserved + expect(output).toBe('Regular text with $x^2$.\n\n> Quote with $y^2$.\n\nMore text with $z^2$.'); + }); +}); diff --git a/llama.cpp/tools/server/webui/tests/unit/model-names.test.ts b/llama.cpp/tools/server/webui/tests/unit/model-names.test.ts new file mode 100644 index 0000000..40c5a0e --- /dev/null +++ b/llama.cpp/tools/server/webui/tests/unit/model-names.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from 'vitest'; +import { isValidModelName, normalizeModelName } from '$lib/utils/model-names'; + +describe('normalizeModelName', () => { + it('preserves Hugging Face org/model format (single slash)', () => { + // Single slash is treated as Hugging Face format and preserved + expect(normalizeModelName('meta-llama/Llama-3.1-8B')).toBe('meta-llama/Llama-3.1-8B'); + expect(normalizeModelName('models/model-name-1')).toBe('models/model-name-1'); + }); + + it('extracts filename from multi-segment paths', () => { + // Multiple slashes -> extract just the filename + expect(normalizeModelName('path/to/model/model-name-2')).toBe('model-name-2'); + expect(normalizeModelName('/absolute/path/to/model')).toBe('model'); + }); + + it('extracts filename from backslash paths', () => { + expect(normalizeModelName('C\\Models\\model-name-1')).toBe('model-name-1'); + expect(normalizeModelName('path\\to\\model\\model-name-2')).toBe('model-name-2'); + }); + + it('handles mixed path separators', () => { + expect(normalizeModelName('path/to\\model/model-name-2')).toBe('model-name-2'); + }); + + it('returns simple names as-is', () => { + expect(normalizeModelName('simple-model')).toBe('simple-model'); + expect(normalizeModelName('model-name-2')).toBe('model-name-2'); + }); + + it('trims whitespace', () => { + expect(normalizeModelName(' model-name ')).toBe('model-name'); + }); + + it('returns empty string for empty input', () => { + expect(normalizeModelName('')).toBe(''); + expect(normalizeModelName(' ')).toBe(''); + }); +}); + +describe('isValidModelName', () => { + it('returns true for valid names', () => { + expect(isValidModelName('model')).toBe(true); + 
expect(isValidModelName('path/to/model.bin')).toBe(true); + }); + + it('returns false for empty values', () => { + expect(isValidModelName('')).toBe(false); + expect(isValidModelName(' ')).toBe(false); + }); +}); diff --git a/llama.cpp/tools/server/webui/tsconfig.json b/llama.cpp/tools/server/webui/tsconfig.json new file mode 100644 index 0000000..0b2d886 --- /dev/null +++ b/llama.cpp/tools/server/webui/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./.svelte-kit/tsconfig.json", + "compilerOptions": { + "allowJs": true, + "checkJs": true, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "skipLibCheck": true, + "sourceMap": true, + "strict": true, + "moduleResolution": "bundler" + } + // Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias + // except $lib which is handled by https://svelte.dev/docs/kit/configuration#files + // + // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes + // from the referenced tsconfig.json - TypeScript does not merge them in +} diff --git a/llama.cpp/tools/server/webui/vite.config.ts b/llama.cpp/tools/server/webui/vite.config.ts new file mode 100644 index 0000000..5183c09 --- /dev/null +++ b/llama.cpp/tools/server/webui/vite.config.ts @@ -0,0 +1,166 @@ +import tailwindcss from '@tailwindcss/vite'; +import { sveltekit } from '@sveltejs/kit/vite'; +import * as fflate from 'fflate'; +import { readFileSync, writeFileSync, existsSync } from 'fs'; +import { resolve } from 'path'; +import { defineConfig } from 'vite'; +import devtoolsJson from 'vite-plugin-devtools-json'; +import { storybookTest } from '@storybook/addon-vitest/vitest-plugin'; + +const GUIDE_FOR_FRONTEND = ` + +`.trim(); + +const MAX_BUNDLE_SIZE = 2 * 1024 * 1024; + +/** + * the maximum size of an embedded asset in bytes, + * e.g. 
maximum size of embedded font (see node_modules/katex/dist/fonts/*.woff2) + */ +const MAX_ASSET_SIZE = 32000; + +/** public/index.html.gz minified flag */ +const ENABLE_JS_MINIFICATION = true; + +function llamaCppBuildPlugin() { + return { + name: 'llamacpp:build', + apply: 'build' as const, + closeBundle() { + // Ensure the SvelteKit adapter has finished writing to ../public + setTimeout(() => { + try { + const indexPath = resolve('../public/index.html'); + const gzipPath = resolve('../public/index.html.gz'); + + if (!existsSync(indexPath)) { + return; + } + + let content = readFileSync(indexPath, 'utf-8'); + + const faviconPath = resolve('static/favicon.svg'); + if (existsSync(faviconPath)) { + const faviconContent = readFileSync(faviconPath, 'utf-8'); + const faviconBase64 = Buffer.from(faviconContent).toString('base64'); + const faviconDataUrl = `data:image/svg+xml;base64,${faviconBase64}`; + + content = content.replace(/href="[^"]*favicon\.svg"/g, `href="${faviconDataUrl}"`); + + console.log('✓ Inlined favicon.svg as base64 data URL'); + } + + content = content.replace(/\r/g, ''); + content = GUIDE_FOR_FRONTEND + '\n' + content; + + const compressed = fflate.gzipSync(Buffer.from(content, 'utf-8'), { level: 9 }); + + compressed[0x4] = 0; + compressed[0x5] = 0; + compressed[0x6] = 0; + compressed[0x7] = 0; + compressed[0x9] = 0; + + if (compressed.byteLength > MAX_BUNDLE_SIZE) { + throw new Error( + `Bundle size is too large (${Math.ceil(compressed.byteLength / 1024)} KB).\n` + + `Please reduce the size of the frontend or increase MAX_BUNDLE_SIZE in vite.config.ts.\n` + ); + } + + writeFileSync(gzipPath, compressed); + console.log('✓ Created index.html.gz'); + } catch (error) { + console.error('Failed to create gzip file:', error); + } + }, 100); + } + }; +} + +export default defineConfig({ + resolve: { + alias: { + 'katex-fonts': resolve('node_modules/katex/dist/fonts') + } + }, + build: { + assetsInlineLimit: MAX_ASSET_SIZE, + chunkSizeWarningLimit: 3072, + minify: ENABLE_JS_MINIFICATION + }, + css: { + preprocessorOptions: { + scss: { + additionalData: ` + $use-woff2: true; + $use-woff: false; + $use-ttf: false; + ` + } + } + }, + plugins: [tailwindcss(), sveltekit(), devtoolsJson(), llamaCppBuildPlugin()], + test: { + projects: [ + { + extends: './vite.config.ts', + test: { + name: 'client', + environment: 'browser', + browser: { + enabled: true, + provider: 'playwright', + instances: [{ browser: 'chromium' }] + }, + include: ['tests/client/**/*.svelte.{test,spec}.{js,ts}'], + setupFiles: ['./vitest-setup-client.ts'] + } + }, + { + extends: './vite.config.ts', + test: { + name: 'unit', + environment: 'node', + include: ['tests/unit/**/*.{test,spec}.{js,ts}'] + } + }, + { + extends: './vite.config.ts', + test: { + name: 'ui', + environment: 'browser', + browser: { + enabled: true, + provider: 'playwright', + instances: [{ browser: 'chromium', headless: true }] + }, + include: ['tests/stories/**/*.stories.{js,ts,svelte}'], + setupFiles: ['./.storybook/vitest.setup.ts'] + }, + plugins: [ + storybookTest({ + storybookScript: 'pnpm run storybook --no-open' + }) + ] + } + ] + }, + + server: { + proxy: { + '/v1': 'http://localhost:8080', + '/props': 'http://localhost:8080', + '/models': 'http://localhost:8080' + }, + headers: { + 'Cross-Origin-Embedder-Policy': 'require-corp', + 'Cross-Origin-Opener-Policy': 'same-origin' + } + } +}); diff --git a/llama.cpp/tools/server/webui/vitest-setup-client.ts b/llama.cpp/tools/server/webui/vitest-setup-client.ts new file mode 100644 index 
0000000..570b9f0 --- /dev/null +++ b/llama.cpp/tools/server/webui/vitest-setup-client.ts @@ -0,0 +1,2 @@ +/// +///