diff --git a/lightllm/server/api_openai.py b/lightllm/server/api_openai.py index 04d72c512..b0fcbbea2 100644 --- a/lightllm/server/api_openai.py +++ b/lightllm/server/api_openai.py @@ -101,8 +101,6 @@ async def chat_completions_impl(request: ChatCompletionRequest, raw_request: Req "Unrecognized image input. Supports local path, http url, base64, and PIL.Image." ) - message.content = "\n".join(texts) - tools = None if request.tools and request.tool_choice != "none": # request.skip_special_tokens = False diff --git a/lightllm/server/build_prompt.py b/lightllm/server/build_prompt.py index 9e3031301..a49a1b064 100644 --- a/lightllm/server/build_prompt.py +++ b/lightllm/server/build_prompt.py @@ -10,7 +10,8 @@ def init_tokenizer(args): async def build_prompt(request, tools) -> str: global tokenizer - messages = request.messages + # Convert pydantic objects to plain dicts; otherwise, when the chat template from tokenizer_config.json is rendered, Jinja's conditionals cannot recognize them + messages = [m.model_dump(by_alias=True, exclude_none=True) for m in request.messages] kwargs = {"conversation": messages} if request.character_settings: kwargs["character_settings"] = request.character_settings