From 60b1abdae633381405d80b0c5b11d962d2509a72 Mon Sep 17 00:00:00 2001
From: sid tuladhar <111209496+sidtuladhar@users.noreply.github.com>
Date: Mon, 13 Jan 2025 23:21:51 +0800
Subject: [PATCH] Fixed agents.md (#164)

Fix docs: custom_model should return an object that has a .content
attribute

---
 docs/source/en/reference/agents.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/source/en/reference/agents.md b/docs/source/en/reference/agents.md
index 9cdca7d..2149c0d 100644
--- a/docs/source/en/reference/agents.md
+++ b/docs/source/en/reference/agents.md
@@ -65,7 +65,7 @@ You could use any `model` callable for your agent, as long as:
 1. It follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`.
 2. It stops generating outputs *before* the sequences passed in the argument `stop_sequences`
 
-For defining your LLM, you can make a `custom_model` method which accepts a list of [messages](./chat_templating) and returns text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.
+For defining your LLM, you can make a `custom_model` method which accepts a list of [messages](./chat_templating) and returns an object with a .content attribute containing the text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.
 
 ```python
 from huggingface_hub import login, InferenceClient
@@ -76,9 +76,9 @@ model_id = "meta-llama/Llama-3.3-70B-Instruct"
 
 client = InferenceClient(model=model_id)
 
-def custom_model(messages, stop_sequences=["Task"]) -> str:
+def custom_model(messages, stop_sequences=["Task"]):
     response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
-    answer = response.choices[0].message.content
+    answer = response.choices[0].message
     return answer
 ```
 
@@ -140,4 +140,4 @@ model = LiteLLMModel("anthropic/claude-3-5-sonnet-latest", temperature=0.2)
 print(model(messages, max_tokens=10))
 ```
 
-[[autodoc]] LiteLLMModel
\ No newline at end of file
+[[autodoc]] LiteLLMModel
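
To make the corrected contract concrete, here is a minimal, self-contained sketch of a callable that satisfies it without needing Hub access or an API token. `SimpleMessage` and `echo_model` are hypothetical names used only for illustration; they are not part of smolagents or huggingface_hub.

```python
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class SimpleMessage:
    content: str  # the .content attribute the agent reads the generated text from


def echo_model(messages: List[Dict[str, str]], stop_sequences=["Task"]) -> SimpleMessage:
    # Stand-in "generation": echo the last message's content back.
    text = messages[-1]["content"]
    # Stop *before* the first stop sequence found, per the docs' contract.
    for stop in stop_sequences:
        idx = text.find(stop)
        if idx != -1:
            text = text[:idx]
    return SimpleMessage(content=text)


result = echo_model([{"role": "user", "content": "Say hi. Task: stop here"}])
print(result.content)  # -> "Say hi. "
```

Returning `response.choices[0].message`, as the patch does, satisfies the same contract, since the chat-completion message objects returned by `huggingface_hub` expose a `.content` attribute.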