# llm_options.py — helpers for building OpenAI-style chat completion kwargs.
from __future__ import annotations

from typing import Any


def chat_extra_body(config: dict[str, Any]) -> dict[str, Any]:
    """Build the ``extra_body`` payload for a chat request.

    Reads ``config["enable_thinking"]`` (default ``False``) and coerces it
    to a bool, accepting the string spellings "1"/"true"/"yes"/"on"
    (case-insensitive, whitespace-trimmed) as truthy. When thinking is not
    enabled, returns a payload that explicitly disables it via
    ``chat_template_kwargs``; otherwise returns an empty dict.
    """
    raw = config.get("enable_thinking", False)
    if isinstance(raw, bool):
        enabled = raw
    elif isinstance(raw, str):
        enabled = raw.strip().lower() in {"1", "true", "yes", "on"}
    else:
        enabled = bool(raw)
    if enabled:
        return {}
    return {"chat_template_kwargs": {"enable_thinking": False}}


def chat_openai_kwargs(config: dict[str, Any]) -> dict[str, Any]:
    """Return keyword arguments for an OpenAI chat completion call.

    Wraps :func:`chat_extra_body`: when that helper produces a non-empty
    payload it is passed under the ``extra_body`` key; otherwise no kwargs
    are emitted at all, keeping the call site clean.
    """
    body = chat_extra_body(config)
    if not body:
        return {}
    return {"extra_body": body}


def _as_bool(value: Any) -> bool:
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.strip().lower() in {"1", "true", "yes", "on"}
    return bool(value)