From e20bc4ac8db67ce719b700f854a1822894e95457 Mon Sep 17 00:00:00 2001
From: sangchengmeng
Date: Wed, 9 Jul 2025 20:39:10 +0800
Subject: [PATCH 1/2] [fix] pydantic to dict when building prompt

---
 lightllm/server/build_prompt.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lightllm/server/build_prompt.py b/lightllm/server/build_prompt.py
index 9e3031301..dce14df12 100644
--- a/lightllm/server/build_prompt.py
+++ b/lightllm/server/build_prompt.py
@@ -10,7 +10,8 @@ def init_tokenizer(args):
 
 async def build_prompt(request, tools) -> str:
     global tokenizer
-    messages = request.messages
+    # Convert the pydantic message objects to dicts; otherwise the Jinja conditionals cannot recognize them when the template is assembled
+    messages = [m.model_dump(by_alias=True, exclude_none=True) for m in request.messages]
     kwargs = {"conversation": messages}
     if request.character_settings:
         kwargs["character_settings"] = request.character_settings

From c84d8ffc8c0601b3a49e52f79ae37b929c1dce6c Mon Sep 17 00:00:00 2001
From: sangchengmeng
Date: Wed, 9 Jul 2025 20:39:55 +0800
Subject: [PATCH 2/2] [fix] pydantic to dict when building prompt

---
 lightllm/server/api_openai.py   | 2 --
 lightllm/server/build_prompt.py | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/lightllm/server/api_openai.py b/lightllm/server/api_openai.py
index 04d72c512..b0fcbbea2 100644
--- a/lightllm/server/api_openai.py
+++ b/lightllm/server/api_openai.py
@@ -101,8 +101,6 @@ async def chat_completions_impl(request: ChatCompletionRequest, raw_request: Req
                         "Unrecognized image input. Supports local path, http url, base64, and PIL.Image."
                     )
 
-            message.content = "\n".join(texts)
-
     tools = None
     if request.tools and request.tool_choice != "none":
         # request.skip_special_tokens = False

diff --git a/lightllm/server/build_prompt.py b/lightllm/server/build_prompt.py
index dce14df12..a49a1b064 100644
--- a/lightllm/server/build_prompt.py
+++ b/lightllm/server/build_prompt.py
@@ -10,7 +10,7 @@ def init_tokenizer(args):
 
 async def build_prompt(request, tools) -> str:
     global tokenizer
-    # Convert the pydantic message objects to dicts; otherwise the Jinja conditionals cannot recognize them when the template is assembled
+    # Convert the pydantic message objects to dicts; otherwise the Jinja conditionals cannot recognize them when the template is assembled from tokenizer_config.json
     messages = [m.model_dump(by_alias=True, exclude_none=True) for m in request.messages]
     kwargs = {"conversation": messages}
     if request.character_settings:
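
For reference, below is a minimal, self-contained sketch of the failure mode these patches address. It assumes pydantic v2 and jinja2 are installed; the Message model and the inline template are illustrative stand-ins, not the project's real ChatCompletionRequest schema or any actual tokenizer_config.json chat template.

# Sketch only: shows why raw pydantic objects confuse a dict-oriented Jinja chat
# template and how model_dump() (as used in the patch) restores the expected output.
from typing import Optional

from jinja2 import Template
from pydantic import BaseModel


class Message(BaseModel):  # hypothetical stand-in for the server's message schema
    role: str
    content: Optional[str] = None


# Chat templates shipped in tokenizer_config.json treat each message as a plain
# dict and commonly use dict-style membership tests such as `'content' in m`.
template = Template(
    "{% for m in conversation %}"
    "{% if 'content' in m and m['content'] %}{{ m['role'] }}: {{ m['content'] }}\n{% endif %}"
    "{% endfor %}"
)

messages = [Message(role="user", content="hello")]

# Passing the pydantic objects directly: `'content' in m` is not a dict membership
# test on a BaseModel (it iterates (name, value) pairs instead), so the condition
# is falsy and the message silently drops out of the rendered prompt.
print(repr(template.render(conversation=messages)))  # ''

# Converting to plain dicts first, as the patch does, gives the template what it
# expects; exclude_none drops unset optional fields so `in` checks stay meaningful.
as_dicts = [m.model_dump(by_alias=True, exclude_none=True) for m in messages]
print(repr(template.render(conversation=as_dicts)))  # 'user: hello\n'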