llama-gpt/docker-compose-70b.yml

version: '3.6'
services:
  # API service: builds the 70B image locally (the prebuilt GHCR image is commented out)
  llama-gpt-api:
    # image: 'ghcr.io/getumbrel/llama-gpt-api-llama-2-70b-chat:latest'
    build:
      context: ./api
      dockerfile: 70B.Dockerfile
    environment:
      MODEL: '/models/llama-2-70b-chat.bin'

  # Web UI: talks to the API through its OpenAI-compatible endpoint on port 8000
  llama-gpt-ui:
    image: 'ghcr.io/getumbrel/llama-gpt-ui:latest'
    ports:
      - 3000:3000
    environment:
      # Placeholder key; the local API does not validate it
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_HOST=http://llama-gpt-api:8000'
      - 'DEFAULT_MODEL=/models/llama-2-70b-chat.bin'
      # Block UI startup until the API answers on port 8000, waiting up to 600 s
      - 'WAIT_HOSTS=llama-gpt-api:8000'
      - 'WAIT_TIMEOUT=600'
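
As a usage sketch (assuming Docker Compose v2 and that this file sits at the repository root; the repo may also ship its own launch script), the stack can be brought up with:

    docker compose -f docker-compose-70b.yml up --build

The llama-gpt-ui container then waits (via WAIT_HOSTS/WAIT_TIMEOUT) for llama-gpt-api to accept connections on port 8000 before serving the web UI on http://localhost:3000.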