Coverage for src / lilbee / cli / tui / screens / chat.py: 100%
788 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-04-29 19:16 +0000
1"""Chat screen — scrollable message log with streaming markdown responses."""
3from __future__ import annotations
5import asyncio
6import contextlib
7import logging
8import shutil
9import threading
10import time
11from collections.abc import Callable
12from pathlib import Path
13from typing import TYPE_CHECKING, Any, ClassVar
15from textual import on, work
16from textual.actions import SkipAction
17from textual.app import ComposeResult
18from textual.binding import Binding, BindingType
19from textual.containers import Vertical, VerticalScroll
20from textual.content import Content
21from textual.screen import Screen
22from textual.widgets import Footer, Input, Select, Static
24# Cancellation check for @work(thread=True) workers. Import at module level
25# since it's used in multiple methods.
26from textual.worker import get_current_worker as _get_worker
28from lilbee import asyncio_loop, settings
29from lilbee.cli.helpers import get_version
30from lilbee.cli.settings_map import SETTINGS_MAP
31from lilbee.cli.tui import messages as msg
32from lilbee.cli.tui.app import apply_active_model
33from lilbee.cli.tui.command_registry import build_dispatch_dict
34from lilbee.cli.tui.thread_safe import call_from_thread
35from lilbee.cli.tui.widgets.autocomplete import CompletionOverlay, get_completions
36from lilbee.cli.tui.widgets.message import AssistantMessage, UserMessage
37from lilbee.cli.tui.widgets.model_bar import ModelBar
38from lilbee.cli.tui.widgets.nav_aware_input import NavAwareInput
39from lilbee.cli.tui.widgets.status_bar import ViewTabs
40from lilbee.cli.tui.widgets.task_bar import ProgressReporter, TaskBar
41from lilbee.config import cfg
42from lilbee.crawler import crawler_available, is_url, require_valid_crawl_url
43from lilbee.embedder import is_model_available
44from lilbee.progress import EventType, ProgressEvent
45from lilbee.providers.base import ClosableIterator
46from lilbee.providers.model_ref import parse_model_ref
47from lilbee.query import ChatMessage
48from lilbee.services import get_services, reset_services
49from lilbee.store import scope_to_chunk_type
51if TYPE_CHECKING:
52 from lilbee.cli.tui.widgets.task_bar import TaskBarController
log = logging.getLogger(__name__)

# Slash-command string -> ChatScreen handler-method name (resolved via getattr
# in _handle_slash).
_DISPATCH = build_dispatch_dict()

# Hard cap on retained conversation turns; enforced by _trim_history.
_MAX_HISTORY_MESSAGES = 200

# Coalesce per-token UI updates into ~50 ms windows. Tiny reasoning models
# can emit 100+ tokens/sec; one call_from_thread per token saturates
# Textual's message queue and makes key events visibly lag.
_STREAM_FLUSH_INTERVAL = 0.05

# Auto-scroll during streaming at most this often (seconds).
_STREAM_SCROLL_INTERVAL = 0.15
def _close_stream(stream: Any) -> None:
    """Best-effort close of *stream* when it implements ClosableIterator."""
    if not isinstance(stream, ClosableIterator):
        return
    with contextlib.suppress(Exception):
        stream.close()
def _remove_copied_files(names: list[str]) -> None:
    """Best-effort removal of files a /add invocation copied into documents/.

    Runs when the add task is cancelled or fails so the cancelled file does
    not re-appear on the next sync. Missing entries are tolerated — the user
    may have removed them concurrently; the goal is only to prevent
    accidental indexing.
    """
    for name in names:
        victim = cfg.documents_dir / name
        try:
            if victim.is_dir():
                shutil.rmtree(victim, ignore_errors=True)
            elif victim.exists():
                victim.unlink()
        except OSError:
            log.debug("Could not remove copied file %s", victim, exc_info=True)
class ChatWelcome(Static):
    """Empty-state welcome posted into the chat log; removed on first message."""

    def __init__(self, *, id: str | None = None) -> None:
        # Assemble the styled title / tagline / hint stack once up front.
        title = Content.styled(msg.CHAT_WELCOME_TITLE, "bold $primary")
        tagline = Content.styled(msg.CHAT_WELCOME_TAGLINE, "$text-muted")
        hint = Content.styled(msg.CHAT_WELCOME_HINT, "$text-muted")
        super().__init__(
            Content.assemble(title, "\n", tagline, "\n\n", hint),
            id=id,
        )
class PromptArea(Vertical):
    """Container for chat input that highlights on focus-within."""
class ChatScreen(Screen[None]):
    """Primary chat interface with streaming LLM responses."""

    CSS_PATH = "chat.tcss"
    # Focus the chat input as soon as the screen mounts.
    AUTO_FOCUS = "#chat-input"

    HELP = (
        "# Chat\n\n"
        "Ask questions about your knowledge base.\n\n"
        "Press **Escape** for normal mode (vim keys), "
        "**i**/**a**/**o** to return to insert mode."
    )

    # Footer group that collapses the scroll bindings into one compact entry.
    _SCROLL_GROUP = Binding.Group("Scroll", compact=True)

    BINDINGS: ClassVar[list[BindingType]] = [
        Binding("slash", "focus_commands", "Commands", show=True),
        # priority=True so Tab completion wins over Textual's focus cycling.
        Binding("tab", "complete", "Tab models / complete", show=True, priority=True),
        Binding("ctrl+n", "complete_next", "^n next", show=False),
        Binding("ctrl+p", "complete_prev", "^p prev", show=False),
        Binding("pageup", "scroll_up", "PgUp", show=False, group=_SCROLL_GROUP),
        Binding("pagedown", "scroll_down", "PgDn", show=False, group=_SCROLL_GROUP),
        Binding("ctrl+d", "half_page_down", "^d half PgDn", show=False, group=_SCROLL_GROUP),
        Binding("ctrl+u", "half_page_up", "^u half PgUp", show=False, group=_SCROLL_GROUP),
        # Vim-style navigation; active in normal mode (see on_key).
        Binding("j", "vim_scroll_down", "j down", show=False, group=_SCROLL_GROUP),
        Binding("k", "vim_scroll_up", "k up", show=False, group=_SCROLL_GROUP),
        Binding("g", "vim_scroll_home", "g top", show=False, group=_SCROLL_GROUP),
        Binding("G", "vim_scroll_end", "G bottom", show=False, group=_SCROLL_GROUP),
        Binding("up", "history_prev", "Up", show=False),
        Binding("down", "history_next", "Down", show=False),
        # priority=True: Escape must work even while the Input has focus.
        Binding("escape", "enter_normal_mode", "Normal mode", show=True, priority=True),
        Binding("ctrl+r", "toggle_markdown", "Markdown", show=False),
        Binding("m", "focus_model_bar", "Models", show=True),
        Binding("f5", "open_setup", "Setup", show=False),
    ]
    def __init__(self, *, auto_sync: bool = False) -> None:
        """Create the chat screen.

        Args:
            auto_sync: When True, start a document sync on mount (skipped
                when setup is needed or embeddings are unavailable).
        """
        super().__init__()
        self._auto_sync = auto_sync
        # Conversation turns sent to the LLM. Guarded by _history_lock
        # because the streaming worker thread reads/appends concurrently.
        self._history: list[ChatMessage] = []
        self._history_lock = threading.Lock()
        # True while an LLM response is streaming; cleared by finalize/cancel.
        self.streaming = False
        # Vim-style mode flag: True = insert (typing), False = normal (nav).
        self._insert_mode: bool = True
        self._completing = False
        # Set while an add/sync task runs so concurrent adds are rejected.
        self._sync_active: bool = False
        # Submitted input lines for Up/Down recall; -1 means "not browsing".
        self._input_history: list[str] = []
        self._history_index: int = -1
    @property
    def _task_bar(self) -> TaskBarController:
        """The app-level TaskBarController (always set by LilbeeApp)."""
        # self.app is typed generically; task_bar is an attribute LilbeeApp
        # adds, hence the targeted ignore rather than a cast.
        return self.app.task_bar  # type: ignore[attr-defined,no-any-return]
    def compose(self) -> ComposeResult:
        """Build the widget tree: top bars, banner, log, prompt, bottom bars."""
        # Local imports keep compose-only widgets out of module import time
        # (and avoid import cycles through the widgets package).
        from lilbee.cli.tui.widgets.bottom_bars import BottomBars
        from lilbee.cli.tui.widgets.suggester import SlashSuggester
        from lilbee.cli.tui.widgets.top_bars import TopBars

        with TopBars():
            yield ViewTabs()
        # Hidden on mount; shown when embeddings are unavailable
        # (see on_mount / _show_chat_only_banner).
        yield Static(msg.CHAT_ONLY_BANNER, id="chat-only-banner")
        yield VerticalScroll(
            ChatWelcome(id="chat-welcome"),
            id="chat-log",
        )
        yield CompletionOverlay(id="completion-overlay")
        with BottomBars():
            with PromptArea(id="chat-prompt-area"):
                yield NavAwareInput(
                    placeholder=msg.CHAT_INPUT_PLACEHOLDER,
                    id="chat-input",
                    suggester=SlashSuggester(use_cache=False),
                )
            yield ModelBar(id="model-bar")
            yield TaskBar()
        yield Footer()
    def on_mount(self) -> None:
        """First-mount setup; picks exactly one startup path.

        Priority order: run the setup wizard when models/data are missing,
        else show the chat-only banner when embeddings can't resolve,
        else optionally start the initial document sync.
        """
        self._update_input_style()
        self.query_one("#chat-only-banner", Static).display = False
        if self._needs_setup():
            from lilbee.cli.tui.screens.setup import SetupWizard

            # _on_setup_complete decides banner/sync after the wizard closes.
            self.app.push_screen(SetupWizard(), self._on_setup_complete)
        elif not self._embedding_ready():
            self._show_chat_only_banner()
        elif self._auto_sync:
            self._run_sync()
200 def on_show(self) -> None:
201 """Called when screen becomes visible."""
202 from lilbee.splash import dismiss
204 dismiss()
205 self.refresh_model_bar()
    def _needs_setup(self) -> bool:
        """True when the setup wizard should run: fresh data dir or unresolved models.

        Remote-prefixed refs skip the native probe since they resolve
        through the SDK backend at call time.
        """
        if not cfg.lancedb_dir.is_dir():
            log.debug("_needs_setup: lancedb_dir missing (%s)", cfg.lancedb_dir)
            return True
        from lilbee.providers.base import ProviderError
        from lilbee.providers.llama_cpp_provider import resolve_model_path

        for label, model in (("chat", cfg.chat_model), ("embedding", cfg.embedding_model)):
            if parse_model_ref(model).is_remote:
                continue
            try:
                # Resolving to a local path means the model is installed;
                # the return value itself is not needed here.
                resolve_model_path(model)
            except (ProviderError, KeyError, ValueError) as exc:
                log.debug("_needs_setup: %s model %r unresolved: %s", label, model, exc)
                return True
        return False
229 def _embedding_ready(self) -> bool:
230 """Quick check if the embedding model resolves (no network calls)."""
231 return is_model_available(cfg.embedding_model, get_services().provider)
233 def _on_setup_complete(self, result: str | None) -> None:
234 """Called when wizard completes or is skipped."""
235 if result == "skipped" and not self._embedding_ready():
236 self._show_chat_only_banner()
237 elif self._embedding_ready():
238 self._hide_chat_only_banner()
239 if self._auto_sync:
240 self._run_sync()
241 self.refresh_model_bar()
243 def _show_chat_only_banner(self) -> None:
244 """Show the persistent chat-only banner."""
245 self.query_one("#chat-only-banner", Static).display = True
247 def _hide_chat_only_banner(self) -> None:
248 """Hide the chat-only banner."""
249 self.query_one("#chat-only-banner", Static).display = False
251 def action_open_setup(self) -> None:
252 """Open the setup wizard."""
253 self._cmd_setup("")
255 def _enter_insert_mode(self) -> None:
256 """Switch to insert mode: focus input, update border style."""
257 self._insert_mode = True
258 self.query_one("#chat-input", Input).focus()
259 self._update_input_style()
261 def _update_input_style(self) -> None:
262 """Toggle input opacity and mode indicator based on current mode."""
263 inp = self.query_one("#chat-input", Input)
264 if self._insert_mode:
265 inp.remove_class("normal-mode")
266 else:
267 inp.add_class("normal-mode")
268 self._update_mode_indicator()
270 def _update_mode_indicator(self) -> None:
271 """Update the ViewTabs mode text to reflect the current mode."""
272 from textual.css.query import NoMatches
274 with contextlib.suppress(NoMatches):
275 bar = self.query_one(ViewTabs)
276 bar.mode_text = msg.MODE_INSERT if self._insert_mode else msg.MODE_NORMAL
    def on_key(self, event: object) -> None:
        """Key handling for the vim-style modes.

        Insert mode: a printable key pressed while the input lacks focus is
        refocused and forwarded so no keystroke is lost. Normal mode:
        Enter / i / a / o return to insert mode, unless a Select has focus
        and should receive the key itself.
        """
        from textual.events import Key

        if not isinstance(event, Key):
            return
        inp = self.query_one("#chat-input", Input)
        if self._insert_mode:
            if not inp.has_focus and event.is_printable and event.character:
                inp.focus()
                inp.insert_text_at_cursor(event.character)
                # Consume the event so the character isn't delivered twice.
                event.prevent_default()
                event.stop()
            return
        if event.key == "enter" or (event.character and event.character in "iao"):
            # Let a focused Select handle Enter / i / a / o itself.
            if isinstance(self.focused, Select):
                return
            self._enter_insert_mode()
            event.prevent_default()
            event.stop()
            return
301 @on(Input.Submitted, "#chat-input")
302 def _on_chat_submitted(self, event: Input.Submitted) -> None:
303 text = event.value.strip()
304 if not text:
305 return
306 event.input.value = ""
307 self._input_history.append(text)
308 self._history_index = -1
310 if text.startswith("/"):
311 self._handle_slash(text)
312 return
314 self._send_message(text)
316 def _handle_slash(self, text: str) -> None:
317 """Dispatch slash commands via the command registry."""
318 cmd = text.split()[0].lower()
319 args = text[len(cmd) :].strip()
320 handler_name = _DISPATCH.get(cmd)
321 if handler_name:
322 getattr(self, handler_name)(args)
323 else:
324 self.notify(msg.CMD_UNKNOWN.format(cmd=cmd), severity="warning")
    def _cmd_add(self, args: str) -> None:
        """Add a file, directory, or URL to the knowledge base.

        URLs are routed to /crawl. A duplicate single file triggers an
        overwrite confirmation; directories skip that prompt (see comment
        below). No-ops while another add/sync task is active.
        """
        if not args:
            return
        if self._sync_active:
            self.notify(msg.SYNC_ALREADY_ACTIVE, severity="warning")
            return
        if is_url(args):
            self._cmd_crawl(args)
            return
        path = Path(args).expanduser()
        if not path.exists():
            self.notify(msg.CMD_ADD_NOT_FOUND.format(path=path), severity="error")
            return
        # Directory adds are whole-tree copies handled by copy_files'
        # recursion; a same-named subdir in documents_dir is not a clean
        # "duplicate file" signal, so skip the prompt there and let
        # copy_files emit its per-file skipped notices.
        dest = cfg.documents_dir / path.name
        if path.is_file() and dest.exists():
            self._prompt_overwrite(path)
            return
        self._submit_add(path, force=False)
349 def _prompt_overwrite(self, path: Path) -> None:
350 """Ask to overwrite an existing copy before re-syncing."""
351 from lilbee.cli.tui.widgets.confirm_dialog import ConfirmDialog
353 def _on_confirm(confirmed: bool | None) -> None:
354 if not confirmed:
355 self.notify(msg.CMD_ADD_SKIPPED_DUPLICATE.format(name=path.name))
356 return
357 self._submit_add(path, force=True)
359 self.app.push_screen(
360 ConfirmDialog(
361 msg.CMD_ADD_DUPLICATE_TITLE,
362 msg.CMD_ADD_DUPLICATE_MESSAGE.format(name=path.name),
363 ),
364 _on_confirm,
365 )
    def _submit_add(self, path: Path, *, force: bool) -> None:
        """Spawn the add worker. Separated so overwrite confirm can reuse it."""
        from lilbee.cli.tui.task_queue import TaskType

        # Set before the task starts so a second /add (or sync) is rejected
        # while this one is queued or running; cleared in _target's finally.
        self._sync_active = True

        def _target(reporter: ProgressReporter) -> None:
            try:
                self._do_add(path, reporter, force=force)
            finally:
                self._sync_active = False

        self._task_bar.start_task(f"Add {path.name}", TaskType.ADD, _target, indeterminate=True)
    def _do_add(self, path: Path, reporter: ProgressReporter, *, force: bool = False) -> None:
        """Copy files and run sync. Called on worker thread with a reporter.

        Args:
            path: File or directory to copy into documents/ and index.
            reporter: Task-bar progress/cancellation hook for this task.
            force: Overwrite already-copied files instead of skipping them.

        Raises:
            RuntimeError: When the sync reports failed files (after the
                copied files have been rolled back).
        """
        from lilbee.cli.helpers import copy_files
        from lilbee.ingest import sync
        from lilbee.progress import FileStartEvent

        reporter.update(0, f"Copying {path.name}...", indeterminate=True)
        copy_result = copy_files([path], force=force)
        copied = copy_result.copied
        for name in copy_result.skipped:
            call_from_thread(self, self.notify, f"{name} already exists (use --force to overwrite)")
        reporter.update(0, f"Copied {len(copied)} file(s), syncing...", indeterminate=True)

        def on_progress(event_type: EventType, data: ProgressEvent) -> None:
            # Polling point so /c in Task Center can stop a long ingest
            # between file boundaries without having to kill the thread.
            reporter.check_cancelled()
            if event_type == EventType.FILE_START and isinstance(data, FileStartEvent):
                reporter.update(0, f"Syncing {data.file}...", indeterminate=True)

        try:
            sync_result = asyncio_loop.run(sync(quiet=True, on_progress=on_progress))
        except BaseException:
            # On cancel or any failure, remove the files we copied into
            # documents/ so the next sync doesn't silently re-ingest the
            # file the user just cancelled. Only files copied by
            # this /add invocation are removed; pre-existing files the user
            # put in documents/ themselves are never touched.
            _remove_copied_files(copied)
            raise
        if sync_result.failed:
            _remove_copied_files(copied)
            raise RuntimeError(msg.SYNC_FAILED_FILES.format(files=", ".join(sync_result.failed)))
        call_from_thread(self, self.notify, msg.CMD_ADD_SUCCESS.format(count=len(copied)))
416 def _cmd_cancel(self, _args: str) -> None:
417 for worker in self.workers:
418 worker.cancel()
419 self.notify(msg.CMD_CANCEL)
421 def _cmd_clear(self, _args: str) -> None:
422 for worker in self.workers:
423 worker.cancel()
424 self.streaming = False
425 chat_log = self.query_one("#chat-log", VerticalScroll)
426 chat_log.remove_children()
427 with self._history_lock:
428 self._history.clear()
429 self.notify(msg.CMD_CLEAR)
431 def _cmd_crawl(self, args: str) -> None:
432 if not crawler_available():
433 self.notify(msg.CMD_CRAWL_UNAVAILABLE, severity="error")
434 return
435 if not args:
436 self._open_crawl_dialog()
437 return
438 parts = args.split()
439 url = parts[0]
440 if not is_url(url):
441 url = f"https://{url}"
442 try:
443 require_valid_crawl_url(url)
444 except ValueError as exc:
445 self.notify(str(exc), severity="error")
446 return
447 depth, max_pages, include_subdomains = self._parse_crawl_flags(parts[1:])
448 self._start_crawl(url, depth, max_pages, include_subdomains=include_subdomains)
450 def _open_crawl_dialog(self) -> None:
451 """Push the crawl modal and handle its result."""
452 from lilbee.cli.tui.widgets.crawl_dialog import CrawlDialog, CrawlParams
454 def _on_result(result: CrawlParams | None) -> None:
455 if result is not None:
456 self._start_crawl(result.url, result.depth, result.max_pages)
458 self.app.push_screen(CrawlDialog(), callback=_on_result)
    def _start_crawl(
        self,
        url: str,
        depth: int | None,
        max_pages: int | None,
        *,
        include_subdomains: bool = False,
    ) -> None:
        """Enqueue a crawl task and run it in the background.

        Bootstrap Chromium first via the controller helper. If the
        browser isn't installed yet, a SETUP task renders in the Task
        Center and the crawl kicks off from its on_success hook. On a
        machine where Chromium is already present this is a synchronous
        no-op and the crawl starts immediately (bb-wq8g).

        Args:
            url: Validated crawl root URL.
            depth: Max link depth, or None for crawl_and_save's default.
            max_pages: Page cap, or None for unbounded.
            include_subdomains: Widen scope beyond the exact host.
        """
        from lilbee.cli.tui.task_queue import TaskType

        def _kick_off_crawl() -> None:
            self._task_bar.start_task(
                msg.TASK_NAME_CRAWL.format(url=url),
                TaskType.CRAWL,
                lambda reporter: self._do_crawl(
                    url, depth, max_pages, reporter, include_subdomains=include_subdomains
                ),
                # A successful crawl triggers a sync; hop back to the UI
                # thread since on_success fires on the worker.
                on_success=lambda: call_from_thread(self, self._run_sync),
            )

        self.notify(msg.CMD_CRAWL_STARTED.format(url=url))
        self._task_bar.ensure_chromium(_kick_off_crawl)
491 @staticmethod
492 def _parse_crawl_flags(tokens: list[str]) -> tuple[int | None, int | None, bool]:
493 """Extract --depth, --max-pages, and --include-subdomains from tokens.
495 Numeric flags return None when absent so the caller inherits
496 crawl_and_save's unbounded-by-default semantics. The boolean
497 ``--include-subdomains`` flag defaults to False (exact-host scope).
498 """
499 flag_map = {"--depth": "depth", "--max-pages": "max_pages"}
500 parsed: dict[str, int | None] = {"depth": None, "max_pages": None}
501 include_subdomains = False
502 i = 0
503 while i < len(tokens):
504 if tokens[i] == "--include-subdomains":
505 include_subdomains = True
506 i += 1
507 continue
508 key = flag_map.get(tokens[i])
509 if key and i + 1 < len(tokens):
510 with contextlib.suppress(ValueError):
511 parsed[key] = int(tokens[i + 1])
512 i += 2
513 else:
514 i += 1
515 return parsed["depth"], parsed["max_pages"], include_subdomains
    def _do_crawl(
        self,
        url: str,
        depth: int | None,
        max_pages: int | None,
        reporter: ProgressReporter,
        *,
        include_subdomains: bool = False,
    ) -> None:
        """Crawl body. Runs on worker thread; reporter handles progress + cancel.

        Maps crawler progress events onto the task bar: Chromium setup
        events render a download percentage; per-page events render a
        page-count percentage (50% placeholder while the total is unknown).
        """
        from lilbee.crawler import crawl_and_save
        from lilbee.progress import CrawlPageEvent, SetupProgressEvent

        reporter.update(0, msg.CMD_CRAWL_STARTED.format(url=url))

        def on_progress(event_type: EventType, data: ProgressEvent) -> None:
            if event_type == EventType.SETUP_START:
                reporter.update(0, msg.SETUP_CHROMIUM_NAME)
            elif event_type == EventType.SETUP_PROGRESS and isinstance(data, SetupProgressEvent):
                if data.total_bytes:
                    pct = int(data.downloaded_bytes * 100 / data.total_bytes)
                    # Byte counts are rendered in whole MiB.
                    detail = msg.SETUP_CHROMIUM_DETAIL.format(
                        done=data.downloaded_bytes // (1024 * 1024),
                        total=data.total_bytes // (1024 * 1024),
                    )
                else:
                    # Unknown total: show progress as indeterminate 0%.
                    pct = 0
                    detail = msg.SETUP_CHROMIUM_DETAIL_UNKNOWN.format(
                        done=data.downloaded_bytes // (1024 * 1024),
                    )
                reporter.update(pct, detail)
            elif event_type == EventType.CRAWL_PAGE and isinstance(data, CrawlPageEvent):
                pct = int(data.current * 100 / data.total) if data.total > 0 else 50
                reporter.update(pct, f"[{data.current}/{data.total}]: {data.url}")

        paths = asyncio_loop.run(
            crawl_and_save(
                url,
                depth=depth,
                max_pages=max_pages,
                on_progress=on_progress,
                quiet=True,
                include_subdomains=include_subdomains,
            )
        )
        call_from_thread(self, self.notify, msg.CMD_CRAWL_SUCCESS.format(count=len(paths), url=url))
564 def _cmd_catalog(self, _args: str) -> None:
565 from lilbee.cli.tui.screens.catalog import CatalogScreen
567 self.app.push_screen(CatalogScreen())
    def _cmd_delete(self, args: str) -> None:
        """Delete an indexed document by name.

        With no name, lists the known document names. Unknown names and an
        unreadable store both produce a notification rather than an error.
        """
        try:
            sources = get_services().store.get_sources()
        except Exception:
            log.debug("Failed to list documents for /delete", exc_info=True)
            self.notify(msg.CMD_DELETE_NO_DOCS, severity="warning")
            return

        # Prefer the filename key; fall back to source, then a placeholder.
        known = {s.get("filename", s.get("source", "?")) for s in sources}
        if not known:
            self.notify(msg.CMD_DELETE_NO_DOCS, severity="warning")
            return

        name = args.strip()
        if not name:
            self.notify(msg.CMD_DELETE_USAGE.format(names=", ".join(sorted(known))))
            return

        if name not in known:
            self.notify(msg.CMD_DELETE_NOT_FOUND.format(name=name), severity="error")
            return

        store = get_services().store
        # Remove both the chunks and the source record for the document.
        store.delete_by_source(name)
        store.delete_source(name)
        self.notify(msg.CMD_DELETE_SUCCESS.format(name=name))
596 def _cmd_help(self, _args: str) -> None:
597 self.app.action_show_help_panel()
599 def _cmd_login(self, args: str) -> None:
600 token = args.strip()
601 if not token:
602 import webbrowser
604 webbrowser.open("https://huggingface.co/settings/tokens")
605 self.notify(msg.CHAT_LOGIN_PROMPT)
606 return
607 self._run_hf_login(token)
    @work(thread=True)
    def _run_hf_login(self, token: str) -> None:
        """Worker thread: authenticate with HuggingFace using *token*.

        The broad except intentionally also covers a missing
        huggingface_hub package (the import sits inside the try) and any
        failure in the UI callback; all paths notify instead of raising.
        """
        try:
            from huggingface_hub import login

            login(token=token, add_to_git_credential=False)
            call_from_thread(self, self.notify, msg.CHAT_LOGGED_IN)
        except Exception as exc:
            log.warning("HuggingFace login failed", exc_info=True)
            call_from_thread(
                self, self.notify, msg.CHAT_LOGIN_FAILED.format(error=exc), severity="error"
            )
622 def _cmd_model(self, args: str) -> None:
623 if args:
624 apply_active_model(self.app, "chat_model", args)
625 self.app.title = f"lilbee -- {cfg.chat_model}"
626 self.notify(msg.CMD_MODEL_SET.format(name=cfg.chat_model))
627 self._apply_model_change()
628 self.refresh_model_bar()
629 else:
630 from lilbee.cli.tui.screens.catalog import CatalogScreen
632 self.app.push_screen(CatalogScreen())
634 def _cmd_quit(self, _args: str) -> None:
635 self.app.exit()
637 def _cmd_remove(self, args: str) -> None:
638 name = args.strip()
639 if not name:
640 self.notify(msg.CMD_REMOVE_USAGE, severity="warning")
641 return
642 self._run_remove_model(name)
    @work(thread=True)
    def _run_remove_model(self, name: str) -> None:
        """Worker thread: uninstall model *name* via the model manager.

        All outcomes (not installed, removed, refused, raised) surface as
        notifications posted back to the UI thread; nothing propagates.
        """
        from lilbee.model_manager import get_model_manager

        mgr = get_model_manager()
        if not mgr.is_installed(name):
            call_from_thread(
                self, self.notify, msg.CMD_REMOVE_NOT_FOUND.format(name=name), severity="error"
            )
            return
        try:
            # remove() returning False means the manager declined without
            # raising; treat it the same as a failure.
            removed = mgr.remove(name)
            if removed:
                call_from_thread(self, self.notify, msg.CMD_REMOVE_SUCCESS.format(name=name))
            else:
                call_from_thread(
                    self, self.notify, msg.CMD_REMOVE_FAILED.format(name=name), severity="error"
                )
        except Exception:
            log.warning("Remove failed for %s", name, exc_info=True)
            call_from_thread(
                self, self.notify, msg.CMD_REMOVE_FAILED.format(name=name), severity="error"
            )
    def _cmd_reset(self, args: str) -> None:
        """Reset the knowledge base after an explicit confirmation dialog."""
        from lilbee.cli.tui.widgets.confirm_dialog import ConfirmDialog

        def _on_confirm(confirmed: bool | None) -> None:
            # Dismissed (None) and declined (False) both abort the reset.
            if not confirmed:
                return
            from lilbee.cli.helpers import perform_reset

            try:
                result = perform_reset()
                if result.skipped:
                    # Partial reset: some entries could not be removed.
                    self.notify(
                        msg.CMD_RESET_PARTIAL.format(skipped=len(result.skipped)),
                        severity="warning",
                    )
                else:
                    self.notify(msg.CMD_RESET_SUCCESS)
            except Exception as exc:
                log.warning("Reset failed", exc_info=True)
                self.notify(msg.CMD_RESET_FAILED.format(error=exc), severity="error")

        self.app.push_screen(
            ConfirmDialog("Reset Knowledge Base", "This will permanently delete all data."),
            _on_confirm,
        )
    def _cmd_set(self, args: str) -> None:
        """Set a configuration value: ``/set <key> [value]``.

        Parse order matters: the bool branch runs before the nullable one,
        so "none"/"" for a bool setting parses to False, not None.
        """
        if not args:
            return
        parts = args.split(None, 1)
        key = parts[0]
        value = parts[1] if len(parts) > 1 else ""

        if key not in SETTINGS_MAP:
            self.notify(msg.CMD_SET_UNKNOWN.format(key=key), severity="warning")
            return

        defn = SETTINGS_MAP[key]
        if not defn.writable:
            self.notify(msg.CMD_SET_READONLY.format(key=key), severity="warning")
            return
        try:
            if defn.type is bool:
                # Anything outside this whitelist is treated as False.
                parsed = value.lower() in ("true", "1", "yes", "on")
            elif defn.nullable and value.lower() in ("none", "null", ""):
                parsed = None
            else:
                parsed = defn.type(value)
            setattr(cfg, key, parsed)
            # None persists as an empty string in the settings file.
            persisted = str(parsed) if parsed is not None else ""
            settings.set_value(cfg.data_root, key, persisted)
            if key == "llm_provider":  # pragma: no cover
                # Provider changes invalidate cached services immediately.
                reset_services()
            self.notify(msg.CMD_SET_SUCCESS.format(key=key, value=parsed))
        except (ValueError, TypeError) as exc:
            self.notify(msg.CMD_SET_INVALID.format(key=key, error=exc), severity="error")
    def _cmd_settings(self, _args: str) -> None:
        """Open the settings screen."""
        from lilbee.cli.tui.screens.settings import SettingsScreen

        self.app.push_screen(SettingsScreen())

    def _cmd_setup(self, _args: str) -> None:
        """Re-run the setup wizard; _on_setup_complete handles the result."""
        from lilbee.cli.tui.screens.setup import SetupWizard

        self.app.push_screen(SetupWizard(), self._on_setup_complete)

    def _cmd_status(self, _args: str) -> None:
        """Open the status screen."""
        from lilbee.cli.tui.screens.status import StatusScreen

        self.app.push_screen(StatusScreen())
740 def _cmd_theme(self, args: str) -> None:
741 from lilbee.cli.tui.app import DARK_THEMES, LilbeeApp
743 if args and isinstance(self.app, LilbeeApp):
744 self.app.set_theme(args)
745 self.notify(msg.THEME_SET.format(name=args))
746 else:
747 theme_list = msg.CMD_THEME_LIST.format(names=", ".join(DARK_THEMES))
748 self.notify(theme_list, severity="information")
750 def _cmd_version(self, _args: str) -> None:
751 self.notify(msg.CHAT_VERSION.format(version=get_version()))
753 def _cmd_wiki(self, _args: str) -> None:
754 if not cfg.wiki:
755 self.notify(msg.CMD_WIKI_DISABLED, severity="warning")
756 return
757 from lilbee.cli.tui.app import LilbeeApp
759 if isinstance(self.app, LilbeeApp): # test apps aren't LilbeeApp
760 self.app.switch_view("Wiki")
762 def _send_message(self, text: str) -> None:
763 """Send a user message and stream the response."""
764 from textual.css.query import NoMatches
766 log = self.query_one("#chat-log", VerticalScroll)
767 with contextlib.suppress(NoMatches):
768 log.query_one("#chat-welcome", ChatWelcome).remove()
769 log.mount(UserMessage(text))
771 assistant_msg = AssistantMessage()
772 log.mount(assistant_msg)
773 log.scroll_end(animate=False)
775 with self._history_lock:
776 self._history.append({"role": "user", "content": text})
777 self.streaming = True
778 self._stream_response(text, assistant_msg, self._current_chunk_type())
780 def _current_chunk_type(self) -> str | None:
781 """Translate the ModelBar scope selection into a ``chunk_type`` arg.
783 Returns ``None`` for "both" (no filter) and the raw/wiki string
784 otherwise. Defaults to ``None`` when the ModelBar isn't mounted
785 (e.g. test apps).
786 """
787 from textual.css.query import NoMatches
789 try:
790 bar = self.query_one("#model-bar", ModelBar)
791 except NoMatches:
792 return None
793 return scope_to_chunk_type(bar.scope)
    @work(thread=True)
    def _stream_response(
        self, question: str, widget: AssistantMessage, chunk_type: str | None
    ) -> None:
        """Stream LLM response in a background thread, coalescing UI updates.

        The finally block always closes the stream and finalizes the
        widget, so history and UI state stay consistent on error/cancel.
        ``sources`` is never populated here; widget.finish currently
        receives an empty list.
        """
        response_parts: list[str] = []
        sources: list[str] = []
        stream: Any = None
        try:
            with self._history_lock:
                # Exclude the just-appended user turn; it is passed
                # separately as `question`.
                history_snapshot = self._history[:-1]
            stream = get_services().searcher.ask_stream(
                question, history=history_snapshot, chunk_type=chunk_type
            )
            self._consume_stream(stream, widget, response_parts)
        except Exception as exc:
            log.debug("Stream error", exc_info=True)
            # suppress: the app may already be shutting down.
            with contextlib.suppress(Exception):
                call_from_thread(self, widget.append_content, msg.STREAM_ERROR.format(error=exc))
        finally:
            _close_stream(stream)
            self._finalize_stream(widget, sources, response_parts)
    def _consume_stream(
        self, stream: Any, widget: AssistantMessage, response_parts: list[str]
    ) -> None:
        """Pull tokens off *stream*, batching UI updates to ~50 ms windows.

        Runs on the worker thread; reasoning and content tokens are
        buffered separately and flushed to the widget via call_from_thread.
        """
        worker = _get_worker()
        reason_buf: list[str] = []
        content_buf: list[str] = []
        # Two mutable slots shared with _maybe_flush_and_scroll:
        # [last_flush, last_scroll] timestamps.
        timings = [time.monotonic(), 0.0]  # [last_flush, last_scroll]

        def flush() -> None:
            # Ship buffered text to the UI thread and reset the buffers.
            if reason_buf:
                call_from_thread(self, widget.append_reasoning, "".join(reason_buf))
                reason_buf.clear()
            if content_buf:
                call_from_thread(self, widget.append_content, "".join(content_buf))
                content_buf.clear()

        for token in stream:
            # Cooperative cancellation: checked once per token.
            if worker.is_cancelled:
                break
            try:
                self._buffer_token(token, reason_buf, content_buf, response_parts)
                self._maybe_flush_and_scroll(flush, timings)
            except Exception:
                break  # App shutting down (Ctrl-C) -- stop streaming
        # Final flush of whatever is still buffered; best-effort because
        # the UI may already be gone.
        with contextlib.suppress(Exception):
            flush()
846 @staticmethod
847 def _buffer_token(
848 token: Any,
849 reason_buf: list[str],
850 content_buf: list[str],
851 response_parts: list[str],
852 ) -> None:
853 """Append *token* to the right buffer; record response content for history."""
854 if token.is_reasoning:
855 reason_buf.append(token.content)
856 elif token.content:
857 response_parts.append(token.content)
858 content_buf.append(token.content)
860 def _maybe_flush_and_scroll(self, flush: Callable[[], None], timings: list[float]) -> None:
861 """Run *flush* and the auto-scroll on their respective intervals."""
862 now = time.monotonic()
863 if now - timings[0] >= _STREAM_FLUSH_INTERVAL:
864 flush()
865 timings[0] = now
866 if now - timings[1] >= _STREAM_SCROLL_INTERVAL:
867 call_from_thread(self, self._scroll_to_bottom)
868 timings[1] = now
    def _finalize_stream(
        self, widget: AssistantMessage, sources: list[str], response_parts: list[str]
    ) -> None:
        """Persist the assistant turn and update the widget. Always runs.

        Called from the worker's finally block, so it must tolerate error
        and cancel paths (empty response_parts means nothing is recorded).
        """
        self.streaming = False
        full_response = "".join(response_parts)
        if full_response:
            with self._history_lock:
                self._history.append({"role": "assistant", "content": full_response})
                self._trim_history()
        # Hop back to the UI thread for the widget/scroll updates.
        call_from_thread(self, widget.finish, sources)
        call_from_thread(self, self._scroll_to_bottom)
883 def _trim_history(self) -> None:
884 """Trim history to max size, dropping oldest messages. Caller must hold _history_lock."""
885 if len(self._history) > _MAX_HISTORY_MESSAGES:
886 self._history[:] = self._history[-_MAX_HISTORY_MESSAGES:]
888 def _scroll_to_bottom(self) -> None:
889 log_widget = self.query_one("#chat-log", VerticalScroll)
890 # Only auto-scroll if user is near the bottom (within 5 lines).
891 # If they scrolled up to read, don't yank them back.
892 if log_widget.max_scroll_y - log_widget.scroll_y < 5:
893 log_widget.scroll_end(animate=False)
    def action_scroll_up(self) -> None:
        """Page the chat log up (PgUp)."""
        self.query_one("#chat-log", VerticalScroll).scroll_page_up()

    def action_scroll_down(self) -> None:
        """Page the chat log down (PgDn)."""
        self.query_one("#chat-log", VerticalScroll).scroll_page_down()
def action_enter_normal_mode(self) -> None:
    """Escape: cancel stream, return from model bar, or enter normal mode."""
    # Priority 1: a live stream — cancel every worker and stop streaming.
    if self.streaming:
        for active in self.workers:
            active.cancel()
        self.streaming = False
        return
    # Priority 2: focus sits on a model-bar dropdown — hop back to the input.
    if isinstance(self.focused, Select):
        self.query_one("#chat-input", Input).focus()
        return
    # Priority 3: leave insert mode and hand focus to the chat log.
    self._insert_mode = False
    self.query_one("#chat-log", VerticalScroll).focus()
    self._update_input_style()
def action_cancel_stream(self) -> None:
    """Context-aware Escape: cancel stream -> blur input -> no-op."""
    # A running stream takes precedence: stop all workers and bail out.
    if self.streaming:
        for pending in self.workers:
            pending.cancel()
        self.streaming = False
        return
    # No stream: if the input has focus, move focus to the chat log instead.
    chat_input = self.query_one("#chat-input", Input)
    if chat_input.has_focus:
        self.query_one("#chat-log", VerticalScroll).focus()
def _apply_model_change(self) -> None:
    """Cancel active stream (if any) and reset services for the new model."""
    if not self.streaming:
        # Nothing in flight — safe to reset immediately.
        reset_services()
        return
    # Cancel first, then reset once the workers have drained.
    self.action_cancel_stream()
    self.call_later(self._deferred_service_reset)
def _deferred_service_reset(self) -> None:
    """Reset services once workers have drained."""
    if not self.workers:
        reset_services()
        return
    # Workers still draining — reschedule ourselves for the next tick.
    self.call_later(self._deferred_service_reset)
async def action_toggle_markdown(self) -> None:
    """Toggle between Markdown and plain-text rendering for chat responses."""
    use_md = not cfg.markdown_rendering
    cfg.markdown_rendering = use_md
    # Rebuild every existing assistant bubble with the new rendering mode.
    chat_log = self.query_one("#chat-log", VerticalScroll)
    for bubble in chat_log.query(AssistantMessage):
        await bubble.rebuild_content_widget(use_md)
    label = "Markdown" if use_md else "Plain text"
    self.notify(msg.CHAT_RENDERING.format(label=label))
def _run_sync(self) -> None:
    """Enqueue a document sync in the task bar."""
    # Guard: only one sync may run at a time.
    if self._sync_active:
        self.notify(msg.SYNC_ALREADY_ACTIVE, severity="warning")
        return
    from lilbee.cli.tui.task_queue import TaskType

    self._sync_active = True

    def _target(reporter: ProgressReporter) -> None:
        # Always release the guard flag, even when the sync body raises.
        try:
            self._do_sync(reporter)
        finally:
            self._sync_active = False

    self._task_bar.start_task("Sync documents", TaskType.SYNC, _target, indeterminate=True)
def _do_sync(self, reporter: ProgressReporter) -> None:
    """Sync body. Runs on worker thread.

    Translates ingest progress events into task-bar updates via *reporter*
    and raises RuntimeError on cancellation or per-file failures so the
    task bar surfaces the error to the user.
    """
    from lilbee.ingest import sync
    from lilbee.progress import EmbedEvent, FileDoneEvent, FileStartEvent, SyncDoneEvent

    reporter.update(0, msg.SYNC_STATUS_SYNCING, indeterminate=True)

    # Embed events arrive per-chunk; throttle their UI updates to at most
    # one every 0.15s so the task bar isn't flooded.
    last_embed_update = 0.0
    _throttle_seconds = 0.15

    def on_progress(event_type: EventType, data: ProgressEvent) -> None:
        # Dispatch on (event type, payload class); unknown pairs are ignored.
        nonlocal last_embed_update
        if event_type == EventType.FILE_START and isinstance(data, FileStartEvent):
            # Percent based on fully finished files (current_file is 1-based).
            pct = int((data.current_file - 1) * 100 / data.total_files)
            status = msg.SYNC_FILE_PROGRESS.format(
                current=data.current_file, total=data.total_files, file=data.file
            )
            reporter.update(pct, status, indeterminate=False)
        elif event_type == EventType.FILE_DONE and isinstance(data, FileDoneEvent):
            reporter.update(0, msg.SYNC_FILE_DONE.format(file=data.file), indeterminate=False)
        elif event_type == EventType.EMBED and isinstance(data, EmbedEvent):
            now = time.monotonic()
            if now - last_embed_update < _throttle_seconds:
                return
            last_embed_update = now
            # Guard against total_chunks == 0 to avoid ZeroDivisionError.
            pct = int(data.chunk * 100 / data.total_chunks) if data.total_chunks else 0
            reporter.update(pct, msg.SYNC_EMBEDDING.format(file=data.file), indeterminate=False)
        elif event_type == EventType.DONE and isinstance(data, SyncDoneEvent):
            # Without this handler the task never ticks to 100% and the
            # Task Center row never flashes "just-completed" (bb-7enj).
            # "Synced (N docs)" means successfully synced, so failed is excluded.
            total = data.added + data.updated + data.removed
            reporter.update(100, msg.SYNC_STATUS_DONE.format(count=total), indeterminate=False)

    try:
        result = asyncio_loop.run(sync(quiet=True, on_progress=on_progress))
    except asyncio.CancelledError as exc:
        # Cancellation disables auto-sync and is re-raised as a RuntimeError
        # so the task bar reports it rather than swallowing the cancel.
        self._auto_sync = False
        raise RuntimeError("Sync cancelled. Use /sync to resume.") from exc
    if result.failed:
        raise RuntimeError(msg.SYNC_FAILED_FILES.format(files=", ".join(result.failed)))
def action_focus_commands(self) -> None:
    """Focus chat input and pre-fill with '/' for command entry."""
    command_input = self.query_one("#chat-input", Input)
    command_input.focus()
    # Don't clobber a command the user has already started typing.
    if not command_input.value.startswith("/"):
        command_input.value = "/"
    command_input.action_end()
def action_focus_model_bar(self) -> None:
    """Focus the first Select in the model bar (normal mode only).

    Raises:
        SkipAction: when in insert mode, so the keypress falls through.
    """
    if self._insert_mode:
        raise SkipAction()
    # The dropdown may not exist yet (e.g. before the bar has mounted);
    # swallow any lookup/focus failure. contextlib is already imported at
    # module level — the previous redundant function-level import is removed.
    with contextlib.suppress(Exception):
        self.query_one("#chat-model-select", Select).focus()
def action_complete(self) -> None:
    """Tab: cycle autocomplete in input, else focus the next model dropdown."""
    chat_input = self.query_one("#chat-input", Input)
    if not chat_input.has_focus:
        # Outside the input, Tab walks the model-bar dropdowns.
        if isinstance(self.focused, Select):
            self.screen.focus_next()
        else:
            self.query_one("#chat-model-select", Select).focus()
        return
    # In the input: try autocomplete first, fall back to focus cycling.
    if not self._cycle_completion_forward(chat_input):
        self.screen.focus_next()
def action_complete_next(self) -> None:
    """Ctrl+N: show completions or cycle forward."""
    chat_input = self.query_one("#chat-input", Input)
    if chat_input.has_focus:
        self._cycle_completion_forward(chat_input)
def _cycle_completion_forward(self, inp: Input) -> bool:
    """Show or cycle forward through autocomplete; returns True if it acted."""
    overlay = self.query_one("#completion-overlay", CompletionOverlay)

    # Overlay already open: advance the selection and apply it, keeping any
    # command word that precedes the argument being completed.
    if overlay.is_visible:
        picked = overlay.cycle_next()
        if picked:
            prefix = inp.value.split()[0] + " " if " " in inp.value else ""
            self._completing = True
            inp.value = prefix + picked
            self._completing = False
            inp.action_end()
            return True

    # Overlay closed (or cycling yielded nothing): open it fresh.
    options = get_completions(inp.value)
    if not options:
        return False

    overlay.show_completions(options)
    candidate = overlay.get_current()
    # _completing suppresses the Input.Changed handler while we edit the value.
    self._completing = True
    if candidate:
        prefix = inp.value.split()[0] + " " if " " in inp.value else ""
        inp.value = prefix + candidate
        inp.action_end()
    self._completing = False
    return True
def action_complete_prev(self) -> None:
    """Ctrl+P: cycle backward through completions."""
    overlay = self.query_one("#completion-overlay", CompletionOverlay)
    chat_input = self.query_one("#chat-input", Input)

    # Overlay open: step backward and apply, keeping any leading command word.
    if overlay.is_visible:
        picked = overlay.cycle_prev()
        if picked:
            prefix = chat_input.value.split()[0] + " " if " " in chat_input.value else ""
            self._completing = True
            chat_input.value = prefix + picked
            self._completing = False
            chat_input.action_end()
            return

    # Overlay closed (or cycling yielded nothing): open it fresh.
    options = get_completions(chat_input.value)
    if not options:
        return

    overlay.show_completions(options)
    candidate = overlay.get_current()
    # _completing suppresses the Input.Changed handler while we edit the value.
    self._completing = True
    if candidate:
        prefix = chat_input.value.split()[0] + " " if " " in chat_input.value else ""
        chat_input.value = prefix + candidate
        chat_input.action_end()
    self._completing = False
def action_history_prev(self) -> None:
    """Up arrow: recall previous input history entry."""
    if not self._insert_mode:
        raise SkipAction()
    chat_input = self.query_one("#chat-input", Input)
    if not chat_input.has_focus or not self._input_history:
        raise SkipAction()
    # _history_index == -1 means "not browsing"; start at the newest entry.
    if self._history_index == -1:
        self._history_index = len(self._input_history) - 1
    elif self._history_index > 0:
        self._history_index -= 1
    else:
        # Already at the oldest entry — nothing further back.
        return
    chat_input.value = self._input_history[self._history_index]
    chat_input.action_end()
def action_history_next(self) -> None:
    """Down arrow: recall next input history entry."""
    if not self._insert_mode:
        raise SkipAction()
    chat_input = self.query_one("#chat-input", Input)
    if not chat_input.has_focus or self._history_index == -1:
        raise SkipAction()
    newest = len(self._input_history) - 1
    if self._history_index < newest:
        self._history_index += 1
        chat_input.value = self._input_history[self._history_index]
        chat_input.action_end()
    else:
        # Walked past the newest entry: leave history mode with an empty input.
        self._history_index = -1
        chat_input.value = ""
@on(Input.Changed, "#chat-input")
def _on_chat_input_changed(self, event: Input.Changed) -> None:
    """Hide completion overlay when input changes manually."""
    # _completing marks programmatic edits made while applying a completion;
    # those must not dismiss the overlay mid-cycle.
    if not self._completing:
        overlay = self.query_one("#completion-overlay", CompletionOverlay)
        if overlay.is_visible:
            overlay.hide()
def refresh_model_bar(self) -> None:
    """Re-scan installed models and refresh the dropdowns."""
    bar = self.query_one("#model-bar", ModelBar)
    bar.refresh_models()
def action_vim_scroll_down(self) -> None:
    """Vim j: scroll down in normal mode."""
    if self._insert_mode:
        raise SkipAction()
    chat_log = self.query_one("#chat-log", VerticalScroll)
    chat_log.scroll_down()
def action_vim_scroll_up(self) -> None:
    """Vim k: scroll up in normal mode."""
    if self._insert_mode:
        raise SkipAction()
    chat_log = self.query_one("#chat-log", VerticalScroll)
    chat_log.scroll_up()
def action_vim_scroll_home(self) -> None:
    """Vim g: scroll to top in normal mode."""
    if self._insert_mode:
        raise SkipAction()
    chat_log = self.query_one("#chat-log", VerticalScroll)
    chat_log.scroll_home()
def action_vim_scroll_end(self) -> None:
    """Vim G: scroll to bottom in normal mode."""
    if self._insert_mode:
        raise SkipAction()
    chat_log = self.query_one("#chat-log", VerticalScroll)
    chat_log.scroll_end()
def action_half_page_down(self) -> None:
    """Ctrl-D: half-page down (vim style)."""
    chat_log = self.query_one("#chat-log", VerticalScroll)
    # At least one line, even for tiny viewports.
    step = max(1, chat_log.size.height // 2)
    chat_log.scroll_relative(y=step)
def action_half_page_up(self) -> None:
    """Ctrl-U: half-page up (vim style)."""
    chat_log = self.query_one("#chat-log", VerticalScroll)
    # At least one line, even for tiny viewports.
    step = max(1, chat_log.size.height // 2)
    chat_log.scroll_relative(y=-step)