---
# Docker Compose stack for Wyoming voice-assistant services (Home Assistant
# compatible): Whisper speech-to-text and Piper text-to-speech, both using
# GPU-enabled images with an NVIDIA device reservation.
services:
  wyoming-whisper:
    image: slackr31337/wyoming-whisper-gpu:latest
    container_name: wyoming-whisper
    hostname: wyoming-whisper
    ports:
      # Quoted string — unquoted HOST:CONTAINER inside a flow list is parsed
      # as a YAML mapping, not the port-mapping string Compose expects.
      - "10300:10300"
    environment:
      MODEL: base-int8
      LANGUAGE: en
      COMPUTE_TYPE: int8
      # Quoted so the env value stays a string rather than a YAML integer.
      BEAM_SIZE: "5"
    volumes:
      - ./whisper-data:/data
    restart: unless-stopped
    # `runtime: nvidia` (legacy runtime selection) and the `deploy` device
    # reservation below are both kept, matching the original configuration.
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
                - utility
                - compute

  wyoming-piper:
    image: slackr31337/wyoming-piper-gpu:latest
    container_name: wyoming-piper
    hostname: wyoming-piper
    ports:
      - "10200:10200"
    environment:
      PIPER_VOICE: en_US-amy-medium
    volumes:
      - ./piper-data:/data
    restart: unless-stopped
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
                - utility
                - compute

# --- Retained commented-out alternatives (upstream rhasspy images) ---
#  whisper:
#    image: rhasspy/wyoming-whisper:latest
#    ports:
#      - "10300:10300"
#    volumes:
#      - ./whisper-data:/data
#      # - /usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8:/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8:ro
#      # - /usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8:/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8:ro
#      # NOTE(review): the two mounts below map .so.11 host files onto .so.12
#      # container paths — presumably intentional version shimming; verify
#      # before re-enabling.
#      # - /usr/lib/x86_64-linux-gnu/libcublasLt.so.11:/usr/lib/x86_64-linux-gnu/libcublasLt.so.12:ro
#      # - /usr/lib/x86_64-linux-gnu/libcublas.so.11:/usr/lib/x86_64-linux-gnu/libcublas.so.12:ro
#    command: --model medium-int8 --language no --beam-size 5 --device cuda
#    restart: unless-stopped
#    runtime: nvidia
#    deploy:
#      resources:
#        reservations:
#          devices:
#            - driver: nvidia
#              count: 1
#              capabilities: [gpu]

#  piper:
#    image: rhasspy/wyoming-piper
#    command: --voice en_US-amy-medium
#    volumes:
#      - ./piper-data:/data
#    environment:
#      - TZ=Etc/UTC
#    restart: unless-stopped
#    ports:
#      - "10200:10200"