fix: add uwsgi config suitable for production use
czosel committed Nov 4, 2021
1 parent eb0af7b commit 247c5df
Showing 1 changed file with 68 additions and 5 deletions.
73 changes: 68 additions & 5 deletions uwsgi.ini
@@ -1,8 +1,71 @@
# These settings can be overridden by environment variables: https://git.io/JemA2
[uwsgi]
http = 0.0.0.0:8000
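# Hypothetical example of that mechanism: exporting UWSGI_PROCESSES=8 or
# UWSGI_HARAKIRI=120 in the environment before start-up supplies the
# corresponding option from outside this file; see the link above for how the
# configuration sources are merged.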

# Crash if there are any unknown configuration parameters in the ini file.
strict = true
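# For example, a misspelled option such as "proceses = 4" would then abort
# start-up instead of being silently ignored.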

# Path to wsgi.py
wsgi-file = /app/document_merge_service/wsgi.py
max-requests = 2000
harakiri = 5
processes = 4
master = True
static-map = /static/=/var/www/static

# Enable the master process, uWSGI's built-in prefork+threading multi-worker
# management mode; it supervises the workers and allows graceful reloads and
# shutdowns.
master = true

# By default threads are disabled. This can cause issues if your application
# uses background threads. Without setting this parameter to true, your
# threads will not run.
enable-threads = true
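# Illustration (assumption about the application code): a background thread
# started at import time, e.g. threading.Thread(target=refresh, daemon=True),
# would never be scheduled without this flag. Alternatively, "threads = N"
# would enable the GIL as well while giving every worker N request threads.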

# Delete temporary files (sockets, pidfiles, ...) during shutdown
vacuum = true

# Run a single Python interpreter per worker, since certain C extensions cannot
# cope with multiple interpreters. As we don't run multiple applications on the
# same workers, we can safely enable this.
single-interpreter = true

# By default, SIGTERM brutally reloads uWSGI instead of shutting it down. With
# this option enabled, SIGTERM shuts uWSGI down as everybody would expect.
die-on-term = true
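# This is particularly relevant in containerized deployments (an assumption
# about how this service runs), where SIGTERM is what "docker stop" or an
# orchestrator sends during shutdown and rolling restarts.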

# Let uWSGI crash if it is not able to load the application module.
need-app = true

# uWSGI's logging is rather verbose. Instead of logging every request (which
# already happens on the reverse proxy level), logging all 4xx and 5xx errors
# should suffice.
disable-logging = true
log-4xx = true
log-5xx = true
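# Hypothetical tweak: while debugging, full request logs could be brought back
# by overriding or removing "disable-logging"; 4xx/5xx responses are logged
# either way.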

# It is a good idea to recycle workers every now and then to prevent memory
# leaks or unintended state from accumulating.
max-requests = 1000 ; Restart workers after this many requests
max-worker-lifetime = 3600 ; Restart workers after this many seconds
reload-on-rss = 2048 ; Restart workers after exceeding this much resident memory (in MB)
worker-reload-mercy = 60 ; How many seconds to wait before forcefully killing workers

# The busyness algorithm attempts to always have spare workers available,
# which is useful when anticipating unexpected traffic surges.
cheaper-algo = busyness
processes = 500 ; Maximum number of workers allowed

anehx (Member) commented on Nov 4, 2021: Way too much!!!

cheaper = 8 ; Minimum number of workers allowed
cheaper-initial = 16 ; Workers created at startup
cheaper-overload = 1 ; Length of a cycle in seconds
cheaper-step = 16 ; How many workers to spawn at a time

cheaper-busyness-multiplier = 30 ; How many cycles to wait before killing workers
cheaper-busyness-min = 20 ; Below this threshold, kill workers (if stable for multiplier cycles)
cheaper-busyness-max = 70 ; Above this threshold, spawn new workers
cheaper-busyness-backlog-alert = 16 ; Spawn emergency workers if more than this many requests are waiting in the queue
cheaper-busyness-backlog-step = 2 ; How many emergency workers to create if there are too many requests in the queue
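# Worked reading of the values above (interpretation, not extra config):
# busyness is sampled every second (cheaper-overload). If it stays below 20 %
# for 30 consecutive cycles, idle workers are reclaimed, but never below the
# 8 kept by "cheaper". If it rises above 70 %, 16 more workers (cheaper-step)
# are spawned, and a backlog of more than 16 queued requests adds 2 emergency
# workers per cycle, all capped by "processes".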

# SIGKILL workers that get stuck serving a single request for longer than this
# many seconds.
harakiri = 60
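# This limit should comfortably exceed the slowest legitimate request
# (rendering large documents can be slow, an assumption about typical usage);
# a worker that hits it is killed and respawned by the master, and that
# request fails.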

# Auto rename worker processes
auto-procname = true
procname-prefix = "dms " ; note the space
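# With both options set, processes appear in "ps" with names along the lines
# of "dms uWSGI master" and "dms uWSGI worker 1" (exact wording depends on the
# uWSGI version), making them easy to tell apart from other services.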

# Increase the internal buffer for parsing requests (uWSGI's default is 4096
# bytes) so that requests with large headers or cookies are not rejected.
buffer-size = 32768
