diff --git a/docs/source/en/guided_tour.md b/docs/source/en/guided_tour.md
index 9db8ecd..cee4aba 100644
--- a/docs/source/en/guided_tour.md
+++ b/docs/source/en/guided_tour.md
@@ -89,8 +89,9 @@ from smolagents import CodeAgent, LiteLLMModel
 
 model = LiteLLMModel(
     model_id="ollama_chat/llama3.2", # This model is a bit weak for agentic behaviours though
-    api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary
-    api_key="YOUR_API_KEY" # replace with API key if necessary
+    api_base="http://localhost:11434", # replace with 127.0.0.1:11434 or remote open-ai compatible server if necessary
+    api_key="YOUR_API_KEY", # replace with API key if necessary
+    num_ctx=8192, # ollama default is 2048, which will fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
 )
 
 agent = CodeAgent(tools=[], model=model, add_base_tools=True)
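
For reference, here is a minimal usage sketch of the snippet as it reads after this patch, assuming a local Ollama server with `llama3.2` pulled; the example prompt mirrors the one used elsewhere in the guided tour:

```python
from smolagents import CodeAgent, LiteLLMModel

# num_ctx raises Ollama's context window above its 2048-token default,
# which is too small for agentic prompts.
model = LiteLLMModel(
    model_id="ollama_chat/llama3.2",
    api_base="http://localhost:11434",  # or 127.0.0.1:11434 / a remote OpenAI-compatible server
    api_key="YOUR_API_KEY",             # only needed if your server requires one
    num_ctx=8192,
)

agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run("Could you give me the 118th number in the Fibonacci sequence?")
```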