Coverage for src / lilbee / server / routes / search.py: 100%
43 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-04-29 19:16 +0000
1"""Search, ask, ask_stream, chat, and chat_stream route handlers."""
3from __future__ import annotations
5from litestar import get, post
6from litestar.exceptions import HTTPException, ValidationException
7from litestar.params import Parameter
8from litestar.response import Stream
10from lilbee.query import ChatMessage as ChatMessageDict
11from lilbee.results import DocumentResult
12from lilbee.server import handlers
13from lilbee.server.auth import read_only
14from lilbee.server.models import (
15 AskRequest,
16 AskResponse,
17 ChatRequest,
18)
19from lilbee.store import scope_to_chunk_type
@get("/api/search")
@read_only
async def search_route(
    q: str = Parameter(query="q"),
    top_k: int = Parameter(query="top_k", default=5, le=100),
    chunk_type: str | None = Parameter(query="chunk_type", default=None),
) -> list[DocumentResult]:
    """Semantic-similarity search over indexed documents (no LLM involved).

    Raises:
        ValidationException: for an unknown ``chunk_type`` scope or bad
            search arguments.
        HTTPException: 503 when the search backend fails.
    """
    # Resolve the user-facing scope string to an internal chunk type first;
    # only a ValueError here is a client error — anything else propagates.
    try:
        resolved = scope_to_chunk_type(chunk_type)
    except ValueError as err:
        raise ValidationException(str(err)) from err

    try:
        results = await handlers.search(q, top_k=top_k, chunk_type=resolved)
    except ValueError as err:
        raise ValidationException(str(err)) from err
    except Exception as err:
        raise HTTPException(status_code=503, detail=str(err)) from err
    return results
@post("/api/ask")
async def ask_route(data: AskRequest) -> AskResponse:
    """One-shot RAG question: retrieve supporting chunks and answer once.

    Raises:
        ValidationException: when the handler rejects the inputs.
        HTTPException: 503 when the RAG backend fails.
    """
    try:
        response = await handlers.ask(
            question=data.question,
            top_k=data.top_k,
            options=data.options,
            chunk_type=data.chunk_type,
        )
    except ValueError as err:
        # Bad input (e.g. invalid top_k) -> validation error, not a 500.
        raise ValidationException(str(err)) from err
    except Exception as err:
        # Any other failure means the backend is unavailable.
        raise HTTPException(status_code=503, detail=str(err)) from err
    return response
@post("/api/ask/stream")
async def ask_stream_route(data: AskRequest) -> Stream:
    """Streaming SSE variant of ask: answer tokens are emitted as events.

    NOTE: the generator is lazy, so handler errors surface during
    iteration of the stream, not at route-call time.
    """
    event_source = handlers.ask_stream(
        question=data.question,
        top_k=data.top_k,
        options=data.options,
        chunk_type=data.chunk_type,
    )
    return Stream(event_source, media_type="text/event-stream")
@post("/api/chat")
async def chat_route(data: ChatRequest) -> AskResponse:
    """RAG chat with conversation history, returning an answer with sources.

    Raises:
        ValidationException: when the handler rejects the inputs
            (``ValueError``), matching ``ask_route``'s behavior.
        HTTPException: 503 when the RAG backend fails.
    """
    history: list[ChatMessageDict] = [
        ChatMessageDict(role=m.role, content=m.content) for m in data.history
    ]
    try:
        return await handlers.chat(
            question=data.question,
            history=history,
            top_k=data.top_k,
            options=data.options,
            chunk_type=data.chunk_type,
        )
    except ValueError as exc:
        # Consistency fix: ask_route maps ValueError to a validation error,
        # but this route previously let it escape as an unhandled 500.
        raise ValidationException(str(exc)) from exc
    except Exception as exc:
        # Backend failure (store/LLM unreachable) -> 503, same as ask_route.
        raise HTTPException(status_code=503, detail=str(exc)) from exc
@post("/api/chat/stream")
async def chat_stream_route(data: ChatRequest) -> Stream:
    """Streaming SSE variant of chat with conversation history.

    NOTE: the generator is lazy, so handler errors surface while the
    stream is consumed, not when this route returns.
    """
    # Convert the request's message models into the plain dicts the
    # query layer expects.
    history: list[ChatMessageDict] = []
    for message in data.history:
        history.append(ChatMessageDict(role=message.role, content=message.content))

    event_source = handlers.chat_stream(
        question=data.question,
        history=history,
        top_k=data.top_k,
        options=data.options,
        chunk_type=data.chunk_type,
    )
    return Stream(event_source, media_type="text/event-stream")