minor example fix (#423)

This commit is contained in:
RolandJAAI 2025-01-30 01:04:30 +01:00 committed by GitHub
parent d3912c70cf
commit 3b8e519f77
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 2 additions and 1 deletion

View File

@@ -22,6 +22,7 @@ elif chosen_inference == "ollama":
model_id="ollama_chat/llama3.2",
api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary
api_key="your-api-key", # replace with API key if necessary
num_ctx=8192 # ollama default is 2048 which will often fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
)
elif chosen_inference == "litellm":
@@ -48,4 +49,4 @@ print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))
agent = CodeAgent(tools=[get_weather], model=model)
-print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))
+print("CodeAgent:", agent.run("What's the weather like in Paris?"))