diff --git a/examples/docker_compose/.env.example b/examples/docker_compose/.env.example
new file mode 100644
index 0000000..3f3c44b
--- /dev/null
+++ b/examples/docker_compose/.env.example
@@ -0,0 +1,6 @@
+WHISPER_NAME="openai/whisper-tiny.en"
+LOCAL_STORAGE_FOLDER_PATH="/data"
+HUEY_BUS_REDIS_HOST="host.docker.internal"
+MODEL="speech_recognition_api.extra.whisper_model.WhisperModel"
+STORAGE="speech_recognition_api.extra.local_storage.LocalStorage"
+MESSAGE_BUS="speech_recognition_api.extra.huey_bus.HueyMessageBus"
diff --git a/examples/docker_compose/Dockerfile b/examples/docker_compose/Dockerfile
new file mode 100644
index 0000000..16f258c
--- /dev/null
+++ b/examples/docker_compose/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.11-slim as base
+
+RUN apt-get update && apt-get install -y ffmpeg
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
+
+FROM base as api
+CMD [ \
+  "gunicorn", \
+  "speech_recognition_api:create_app()", \
+  "-k", \
+  "uvicorn.workers.UvicornWorker", \
+  "-w", \
+  "1", \
+  "-b", \
+  "0.0.0.0:8888" \
+]
+
+FROM base as worker
+CMD [ \
+  "huey_consumer", \
+  "speech_recognition_api.extra.huey_bus.huey" \
+]
diff --git a/examples/docker_compose/README.md b/examples/docker_compose/README.md
new file mode 100644
index 0000000..888d955
--- /dev/null
+++ b/examples/docker_compose/README.md
@@ -0,0 +1,17 @@
+## Docker Compose example
+
+This example demonstrates one way to build and run the containers:
+an API server and an async worker.
+
+The two containers communicate through Huey and Redis and store their
+data in a local folder.
+
+### Running the example
+
+```bash
+docker compose up
+```
+
+The service will be available at http://localhost:8888
+
+You can visit http://localhost:8888/docs to try it right in your browser.
diff --git a/examples/docker_compose/docker-compose.yaml b/examples/docker_compose/docker-compose.yaml
new file mode 100644
index 0000000..3619d2d
--- /dev/null
+++ b/examples/docker_compose/docker-compose.yaml
@@ -0,0 +1,34 @@
+version: "3.8"
+services:
+  api:
+    env_file:
+      - .env.example
+    build:
+      context: .
+      dockerfile: Dockerfile
+      target: api
+    volumes:
+      - "./data:/data"
+    ports:
+      - "8888:8888"
+    depends_on:
+      redis:
+        condition: service_started
+  worker:
+    env_file:
+      - .env.example
+    build:
+      context: .
+      dockerfile: Dockerfile
+      target: worker
+    volumes:
+      - "./data:/data"
+    depends_on:
+      api:
+        condition: service_started
+      redis:
+        condition: service_started
+  redis:
+    image: redis
+    ports:
+      - "6379:6379"
diff --git a/examples/docker_compose/requirements.txt b/examples/docker_compose/requirements.txt
new file mode 100644
index 0000000..4cc1291
--- /dev/null
+++ b/examples/docker_compose/requirements.txt
@@ -0,0 +1,3 @@
+speech-recognition-api[whisper,huey]
+redis
+gunicorn
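A quick smoke test of the running stack might look like the sketch below. It only relies on the commands already used in the example plus `curl` on the host; the `/docs` path is the interactive documentation page mentioned in the README above.

```bash
# Build the images and start the api, worker, and redis services in the background
docker compose up --build -d

# All three containers should be listed as running
docker compose ps

# The interactive API docs should respond on the published port
curl -i http://localhost:8888/docs
```

Both the api and worker containers mount ./data as /data, matching LOCAL_STORAGE_FOLDER_PATH in .env.example, so any files the services write end up in the ./data folder next to the compose file.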