version: '3.6'

services:
  llama-gpt-api:
    build:
      context: ./api
      dockerfile: Dockerfile
    environment:
      MODEL: '/models/llama-2-7b-chat.bin'

  llama-gpt-ui:
    build:
      context: ./ui
      dockerfile: Dockerfile
    ports:
      - 3000:3000
    environment:
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_HOST=http://llama-gpt-api:8000'
      - 'DEFAULT_MODEL=/models/llama-2-7b-chat.bin'
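Both services join Compose's default network, so the UI reaches the API by its service name (`http://llama-gpt-api:8000`); the `OPENAI_API_KEY` value is only a placeholder, since the local OpenAI-compatible API does not validate it. A minimal usage sketch, assuming the file above is saved as `docker-compose.yml` in the repository root:

```bash
# Build both images and start the stack in the background
docker compose up --build -d

# Watch the API container while the model loads
docker compose logs -f llama-gpt-api

# The chat UI is then available on the host at http://localhost:3000
```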