diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d4f2e95
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+vm_vars/credentials.yaml
+.vault_pass.txt
+*.retry
+*.swp
+.gandi.config.yaml
diff --git a/delete-machines.yml b/delete-machines.yml
new file mode 100644
index 0000000..78dbffd
--- /dev/null
+++ b/delete-machines.yml
@@ -0,0 +1,31 @@
+- name: delete deployment
+  hosts: localhost
+  connection: local
+  gather_facts: false
+
+  vars_files:
+    - "vm_vars/credentials.yaml"
+    - "vm_vars/settings.yaml"
+
+  tasks:
+  - name: delete machines
+    gandi_vps:
+      gandi_api_key: "{{ gandi_api_key }}"
+      name: "{{ item }}"
+      datacenter: "{{ datacenter }}"
+      state: deleted
+    with_items:
+      - trad1
+    async: 7200
+    poll: 0
+    register: node_deletion
+
+  - name: wait for deletion
+    async_status:
+      jid: "{{ item.ansible_job_id }}"
+    register: jobs
+    until: jobs.finished
+    delay: 10
+    retries: 300
+    with_items:
+      - "{{ node_deletion.results }}"
diff --git a/deploy-machines.yaml b/deploy-machines.yaml
new file mode 100644
index 0000000..edd99c0
--- /dev/null
+++ b/deploy-machines.yaml
@@ -0,0 +1,40 @@
+- name: deploy machines
+  hosts: localhost
+  connection: local
+  gather_facts: false
+
+  vars_files:
+    - "vm_vars/credentials.yaml"
+    - "vm_vars/settings.yaml"
+
+  tasks:
+
+  - name: weblate machine
+    gandi_vps:
+      gandi_api_key: "{{ gandi_api_key }}"
+      name: trad1
+      machine_type: custom
+      image: "{{ os_image }}"
+      datacenter: "{{ datacenter }}"
+      sshkey_ids:
+        - "{{ gandi_ssh_key_id }}"
+      cores: 2
+      memory: 2048
+      disk: 5
+      interfaces: {'publics': [{'ipv4': 'auto'}] }
+      state: running
+      farm: tools
+      tags: weblate
+    async: 7200
+    poll: 0
+    register: weblate_creation
+
+  - name: wait for weblate creation
+    async_status:
+      jid: "{{ item.ansible_job_id }}"
+    register: jobs
+    until: jobs.finished
+    delay: 10
+    retries: 300
+    with_items:
+      - "{{ weblate_creation }}"
diff --git a/deploy-services.yml b/deploy-services.yml
new file mode 100644
index 0000000..28124a8
--- /dev/null
+++ b/deploy-services.yml
@@ -0,0 +1,8 @@
+- hosts: trad1
+  become: yes
+  remote_user: root
+  roles:
+    - common
+    - weblate
+  tags:
+    - tool
diff --git a/group_vars/all.template b/group_vars/all.template
new file mode 100644
index 0000000..ccf4053
--- /dev/null
+++ b/group_vars/all.template
@@ -0,0 +1,26 @@
+# IPS
+gw_private_ip: "{{ hostvars['gw1']['private_ips'][0] }}"
+
+weblate_dir: ""
+
+# EMAIL CONFIGURATION
+EMAIL_HOST: ""
+EMAIL_HOST_PASSWORD: ""
+EMAIL_HOST_USER: ""
+EMAIL_PORT: ""
+# E-mail address that error messages come from.
+SERVER_EMAIL: ""
+# Default email address to use for various automated correspondence from
+# the site managers. Used for registration emails.
+DEFAULT_FROM_EMAIL: ""
+
+# WEBLATE ADMIN, FOR ERRORS AND NOTIFICATIONS
+weblate_admin: ""
+admin_mail: ""
+
+# PASSWORDS
+PSQL_weblate_password: ""
+PSQL_master_password: ""
+
+# password for "weblate" linux user
+weblate_user_pass: ""
diff --git a/roles/common/files/ssh_authorized_keys b/roles/common/files/ssh_authorized_keys
new file mode 100644
index 0000000..3035561
--- /dev/null
+++ b/roles/common/files/ssh_authorized_keys
@@ -0,0 +1,8 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5+2ViaP1ktWlzMCY4IOJOV1K0TH1GstHzoMdeIf9ihiSz7nR7wKcYJMC6KlmOYVQzftENXHQZAtbL4tVPLpLWXN+4fCn+pbQVu47P3QCH9Ez0d23p4byZl5h+qyx0dJv/ltc71X6NIvHH2WXmvvy+Bda4b1NVpJN/voiMoihipsjPPeL+s6B+3dw6PD3h5vvzvJCrfkKGijoT74+BbjYimwmNsaDRQH9tIMaTVeV7ZIe9qfxg5fkg4WsFl9mzikbqYzdBgiC2XeK/L4w3FJONALAEy7FTsUdNaenKxTn4zw/9qdV20TqYEyCbYlANS+2NMLYxeSqdpYB3yvePoucOw== mric@gandi.net
+ssh-dss AAAAB3NzaC1kc3MAAACBAM686CNkUeMiHvr/1tj4zRaJMqAgZAFCuX6WmocNHleTLG2yWcQPAIXKONp++AJ78woEERCTB2otJSsP4Ur8q/K95UiPYmtRJ/wwTI4ojrCk4BmK9KK2hb0OONOL0SvX/sUZlddFtAZ2xnSFD6YC4gtANE1nnojo2/BOrgs9h13tAAAAFQCkqnmRZOK29LK8OPI+095IzI0YMQAAAIAf3BB/TX2mZWGtB9PivKybt+QPMx5YWA43jK6NippTIVq60ihvcnVKpAQDt0llZn4J5qoEgVHwELr+4F6vMz2HP3ZviQ3c/4hlIpfknVsFLgMkJynKZaJLTe+Afwv1r+8DAA2+/SvtwLjFIDcbkTgGdxiyInD8rDyprKQ7nI3sNwAAAIABuUMiFMmpkARmatAJoXjFm2V1JIyycuJdMqJMUoq9m7kjJB4r55+eTLEtIvtBs/LnlAUTl2kCQszEax4VlLGiEEH/hWryaePRuosEv1issiISiluJmIQcJU+vgAHApyGH6uVCWzoc58or5rnQto22MEcH/qHIggTuKIfQvz8Hhg== esion99@gmail.com
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCzWy1ckqww3CP8BPB56tfHwN7Wh+/4dpByhwTpb5lXpfjNvI3KNaz3835oa/cHvxt34alBHdhG65eUQOYOX3frXGEPxFPCjADPBh0+WAa00IpFLCyvsVxOutBU9Fu5eGVVcmIC5gq4M21zUppbiFzppc/7a8W/xz4W17LzuqzRRaerz5WcYNE5uf+arqW2zrmWA5Nhwjo2C1Q0qB8a+6Nvg14OAKyYL8P2eTUmzmK+Dv3oCwgqLZalx1djjmFv3N6SWdOLK92Jq3/b6Xb9RHCALP1/wnVtmzKFaCtspBpDs9eU6f6G4hP67sRobimMWdB/EibLlLDw3Sul6j6CK6Qd8QMJwR5cU2+lmSQXlwlvRmgvhayPUNJEwR0nY3uiHoybJzClF3LOljn7RkqBUY6ud3y1L1OUbJvUYw9ou6gd61HGqXFjuD8hLDRBdCaAzlPm1Wm6eAY01bLOcKcXdqNJOoSh/ZKAdT5VvmPZgbit9c5OitujNGs3wkI2O+DydMa/UBJbdzQdh+QZLVFzQ1qO2BPkQCQ2TZBad0YqtlTxGiB+cEwIx4dkYfJcbWcrVZs04g24otMQOnb+1KcISKcqVR/qOWvFzaBMa8uXu6JoDE2qL6R1q+uls+gMFFIKoy16C3skg7jxydlgXEfiURWNy3RkIEgMSEkGz9dxDv8UAQ== david.epely@gandi.net
+ssh-dss AAAAB3NzaC1kc3MAAACBAMZ50GLnHEvUFvMQsHBRdlUka0ikKOmpTt7etALYXfDSsR6ItF1r3evY74bDGrQjD6SwOHiwfnDsUH6pw8OydHH17803JBhUjDe2/NEhmaAqOmI+sLTknHVJZwDEVLhvXBaFTQcT4qEGgG0XYxM+EZ7vBirodzvGwBpwXnmMlNA3AAAAFQDVlmYkBh8rhBbJwZpmWGaGDPjFBQAAAIEAl4shqD8f64XTqcaJ456j5vghvElyPN1T8JoqlUg7iyzFeRnHlcGZkN3nw+aB1DVHIRIBA5hGk0TOCXgGRNkcgtDn6Wj9R0A2Wow/reFFPjio+NIM5YaNVqUPirFMWOO+cVhCaN3RvIHjWzGM+oQjk2LGxpN/Tzh2UyF7O3FLU6MAAACAcH39fUlEERGhrfx6cyKv7jjoChOsHKc/Wix3QyU7jq7Ta5iVjOGBOR1nYL5JKJWnEhIx/g5N+NAuvvzLIe/x4vlFo8PLk0obCXDeOsucuwWpcCldYE3GqTJZzxnCOIkdjooszQhztmxe1bBP/Aib7KyogaJmKrQv6BOjWQhx/TE= laurent@brasil
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBPQJeSOFDn9N73xOkJvWS97CvGQLarKI6n2kaA4cLzx root@argentina
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCm70qHMJSqaQKJdoNOzrfCo+1pzlhqHIFBozI5VCiMwA6Nr2qEHD+VgSbhDByX0xdgv9cwIcxvVzWMZPn8QM2ZewvNgPHoQxH4ae3pWjTM+W8qqaNjBfWuarinwt7gO8jT8i55AcMa5ctihvXWE3jTM6EHcaKTngFD1NYFj5tS4Zrw9a5nK1ZRsMrPF6Wte9S3e2PWiPYiT8uCauNUB5Xi6r1BxzMtviJddZmv0r4WQL3QD672Gmia6xhIybiIFTOID+N4cAARKZKh7WSlcx4qA1umWLd1nst5HgyK4SfFhSPd+2XJLsPc1cZpVVfjJRGomLi7yxu4P8VMaKwwCiuj stan@BobyLap
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdqFMRQx+OI+3b8st+ho1Ioqp3wiQqqMlMKiNjDC1rUiQ5sGhGT1uxBzxWSA8UjyfJmYaJhllfe5Tjp2D1lUyXX2tX0QCMM1doHON/29wjBXxBgtP7i5focNAv6KP2suSuyFuIRpP3MuEyieQgyH0atL1FxNpQIrvnOrdiw609T4xfLTWfad+jjtIo3qq3Rvb7TpI9h0lBcgJEHPSjsapYenFPNCaRE+3oye37OtYdWaF9ozdHkRBDj8mp23bitJSwltYOhYZlVs8fVyBr30+z4tSwNMizl7DCrr+rJFBCRwoHUOLo82LuJf1ivQwu3mC77JJgWsiycMYnKPOamDwv pablo@pablo
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTHau8idcljL+CRudl/i0HPzKbTxYk+qSJg26ql/K+BJF5uS360jYt9MW+C4qLSfYBtY0Ywcb7ePeO+1I/BjAdf6BtzxnNCYDnKptPMlkq9E0caMcDuuYFNG8BZQ6HcVmIbCns6aa0cXRGZcIBeHFAWfveocPdpcnTOXbFItV4ndFhm+KsCaJ9uQUxmJZMuUYoA/mmIgizddevh+bWMyN2/ntLhCvXucKti79sUoGVo5Ihk4KKXYxmZkWMJeY6y72TMeMx/KOJuoI5bkaLl/Y7oF8g7gzghmF56yI0uev83CyY1Mi+nF+qYqcjA9W9UVzx/xlkt3vhVyQ0C7hywoW1SPultSOr2ovtaEEFG16phOhWCNTwc1fUBwDv+UHHSgDjUQw1XBeMpTyW3d0Kys6bqvdisT9Z8n364BNDs2wC5VpctOLMyWXOt7stql56625fjMTGZYxD/+b4IvyW+qP9JRSjeeqb03PXoT+45V57s6UxJpIihjZSrEqJ1ZXAD0tLv3Fsj1KHNQi8da+P0ph33XeplCGPhRBohiL3obxWSDZ/WX0MaX4tgICqDHAkSGrDJkeF8dgNAtO2+m44oty6U/ztnuSKQGBXACxbpS+M8YfPFTyBkjrOtfqk0R/MgdG2gxvPyYPG3ody20QcYzTbG9LrtOReN58kPt4D/3Mp2w== dev@lanza.fr
\ No newline at end of file
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
new file mode 100644
index 0000000..e05c0f9
--- /dev/null
+++ b/roles/common/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: copy ssh keys
+  copy:
+    src: files/ssh_authorized_keys
+    dest: /root/.ssh/authorized_keys
+    mode: 0600
+
+- name: add gandi apt key
+  apt_key:
+    url: https://mirrors.gandi.net/gandi/pubkey
+    id: D9A579D6BB859933
+    state: present
+
+- name: update and upgrade
+  apt:
+    update_cache: yes
+    upgrade: dist
diff --git a/roles/weblate/tasks/cache.yml b/roles/weblate/tasks/cache.yml
new file mode 100644
index 0000000..238aa8e
--- /dev/null
+++ b/roles/weblate/tasks/cache.yml
@@ -0,0 +1,25 @@
+---
+- name: download redis
+  unarchive:
+    src: http://download.redis.io/releases/redis-5.0.3.tar.gz
+    dest: /tmp
+    remote_src: yes
+
+- name: compile redis
+  command: make
+  args:
+    chdir: /tmp/redis-5.0.3
+
+- name: copy redis binaries to path
+  command: cp /tmp/redis-5.0.3/src/redis-server /tmp/redis-5.0.3/src/redis-cli /usr/bin/
+
+- name: create redis dir
+  file:
+    path: /etc/redis
+    state: directory
+
+- name: copy config file
+  template:
+    src: redis.conf.j2
+    dest: "/etc/redis/redis.conf"
+
diff --git a/roles/weblate/tasks/database.yml b/roles/weblate/tasks/database.yml
new file mode 100644
index 0000000..9d117fb
--- /dev/null
+++ b/roles/weblate/tasks/database.yml
@@ -0,0 +1,29 @@
+---
+- name: install dependencies
+  apt:
+    package: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - postgresql
+    - python-psycopg2  # required by the postgresql_user/postgresql_db modules
+
+- name: set password
+  shell: psql postgres -c "alter user postgres with password '{{ PSQL_master_password }}'"
+  become: yes
+  become_user: postgres
+
+- name: create user weblate
+  postgresql_user:
+    name: weblate
+    password: "{{ PSQL_weblate_password }}"
+    role_attr_flags: "NOCREATEDB,NOCREATEROLE,NOSUPERUSER"
+  become: yes
+  become_user: postgres
+
+- name: create db
+  postgresql_db:
+    name: weblate
+    owner: weblate
+  become: yes
+  become_user: postgres
diff --git a/roles/weblate/tasks/main.yml b/roles/weblate/tasks/main.yml
new file mode 100644
index 0000000..713c1ba
--- /dev/null
+++ b/roles/weblate/tasks/main.yml
@@ -0,0 +1,51 @@
+- name: install dependencies
+  apt:
+    package: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - libxml2-dev
+    - libxslt-dev
+    - libfreetype6-dev
+    - libjpeg-dev
+    - libz-dev
+    - libyaml-dev
+    - python3-dev
+    - python3-pip
+    - python3-virtualenv
+    - gcc
+    - make
+    - git
+    - python3-requests-oauthlib
+    - python3-six
+    - python3-openid
+
+- name: Update virtualenv
+  command: pip3 install --upgrade virtualenv
+
+- name: create virtualenv
+  command: virtualenv -p python3 {{ weblate_dir }}
+
+- include: database.yml
+
+- include: cache.yml
+
+- name: install weblate and optional packages
+  pip:
+    name:
+      - Weblate
+      - pytz
+      - python-bidi
+      - PyYAML
+      - pyuca
+      - psycopg2-binary
+      - django-redis
+      - hiredis
+    virtualenv: "{{ weblate_dir }}"
+
+- name: Copy weblate configuration
+  template:
+    src: settings.py.j2
+    dest: "{{ weblate_dir }}/lib/python3.5/site-packages/weblate/settings.py"  # assumes the venv Python is 3.5 (Debian 9)
+
+- include: webserver.yml
\ No newline at end of file
diff --git a/roles/weblate/tasks/webserver.yml b/roles/weblate/tasks/webserver.yml
new file mode 100644
index 0000000..707b793
--- /dev/null
+++ b/roles/weblate/tasks/webserver.yml
@@ -0,0 +1,40 @@
+- name: install dependencies
+  apt:
+    package: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - nginx
+    - uwsgi-plugin-python3
+
+# - name: install uwsgi with pip
+#   pip:
+#     name:
+#       - uwsgi
+#     virtualenv: ~/weblate-env
+
+- name: Create socket for uwsgi/nginx communication
+  template:
+    src: uwsgi-weblate.socket.j2
+    dest: /etc/systemd/system/uwsgi-weblate.socket
+
+- name: Create service for uwsgi
+  template:
+    src: uwsgi-weblate.service.j2
+    dest: /etc/systemd/system/uwsgi-weblate.service
+
+- name: copy nginx site
+  template:
+    src: weblate.nginx.j2
+    dest: /etc/nginx/sites-available/weblate
+
+- name: create symlink for weblate site
+  file:
+    src: /etc/nginx/sites-available/weblate
+    dest: /etc/nginx/sites-enabled/weblate
+    state: link
+
+- name: reload nginx
+  service:
+    name: nginx
+    state: reloaded
diff --git a/roles/weblate/templates/postgresql.conf b/roles/weblate/templates/postgresql.conf
new file mode 100644
index 0000000..9cd2d6c
--- /dev/null
+++ b/roles/weblate/templates/postgresql.conf
@@ -0,0 +1,643 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.) Whitespace may be used. Comments are introduced with
+# "#" anywhere on a line. The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal. If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, or use "pg_ctl reload". Some
+# parameters, which are marked below, require a server shutdown and restart to
+# take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on". Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+ +data_directory = '/var/lib/postgresql/9.6/main' # use data in another directory + # (change requires restart) +hba_file = '/etc/postgresql/9.6/main/pg_hba.conf' # host-based authentication file + # (change requires restart) +ident_file = '/etc/postgresql/9.6/main/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +external_pid_file = '/var/run/postgresql/9.6-main.pid' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = 'localhost' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - Security and Authentication - + +#authentication_timeout = 1min # 1s-600s +ssl = true # (change requires restart) +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers + # (change requires restart) +#ssl_prefer_server_ciphers = on # (change requires restart) +#ssl_ecdh_curve = 'prime256v1' # (change requires restart) +ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' # (change requires restart) +ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' # (change requires restart) +#ssl_ca_file = '' # (change requires restart) +#ssl_crl_file = '' # (change requires restart) +#password_encryption = on +#db_user_namespace = off +#row_security = on + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#maintenance_work_mem = 64MB # min 1MB +#replacement_sort_tuples = 150000 # limits use of replacement selection sort +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#max_stack_depth = 2MB # min 100kB +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # use none to disable dynamic shared memory + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kB, or -1 for no limit + +# - Kernel Resource Usage - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) +#shared_preload_libraries = '' # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 0 # taken from max_worker_processes +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + +#------------------------------------------------------------------------------ +# WRITE AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = minimal # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +#max_wal_size = 1GB +#min_wal_size = 80MB +#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! 
-f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Server(s) - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 0 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 0 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # number of sync standbys and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#hot_standby = off # "on" allows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#min_parallel_relation_size = 8MB +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses 
+#force_parallel_mode = off + + +#------------------------------------------------------------------------------ +# ERROR REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%m [%p] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'localtime' + + +# - Process Title - + +cluster_name = '9.6/main' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# RUNTIME STATISTICS +#------------------------------------------------------------------------------ + +# - Query/Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +stats_temp_directory = '/var/run/postgresql/9.6-main.pg_stat_tmp' + + +# - Statistics Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM PARAMETERS +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#search_path = '"$user", public' # schema names +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' 
+#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'localtime' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#local_preload_libraries = '' +#session_preload_libraries = '' + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) + + +#------------------------------------------------------------------------------ +# VERSION/PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#sql_inheritance = on +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. + +#include_dir = 'conf.d' # include files ending in '.conf' from + # directory 'conf.d' +#include_if_exists = 'exists.conf' # include file only if it exists +#include = 'special.conf' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/roles/weblate/templates/redis.conf.j2 b/roles/weblate/templates/redis.conf.j2 new file mode 100644 index 0000000..7f6317a --- /dev/null +++ b/roles/weblate/templates/redis.conf.j2 @@ -0,0 +1,1378 @@ +# Redis configuration file example. 
+# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 loopback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. 
+protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 0 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +unixsocket /var/run/redis/redis.sock +unixsocketperm 775 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize yes + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. 
+# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, +# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, +# COMMAND, POST, HOST: and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New replicas and reconnecting replicas that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the replicas. 
+# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new replicas arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple replicas +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Replicas send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_replica_period option. The default value is 10 +# seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a replica +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the replica missed while +# disconnected. 
+# +# The bigger the replication backlog, the longer the time the replica can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a replica connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected replicas for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last replica disconnected, for +# the backlog buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with the replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a replica to promote into a +# master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a replica is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. 
+# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). 
+# +maxmemory 20mb + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> Evict using approximated LRU among the keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key among the ones with an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +maxmemory-policy allkeys-lru + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica to have +# a different memory setting, and you are sure all the writes performed to the +# replica are idempotent, then you may change this default (but be sure to understand +# what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory and so +# forth). So make sure you monitor your replicas and make sure they have enough +# memory to never hit a real out-of-memory condition before the master hits +# the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. 
If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives: + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
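+#
+# Illustrative example (not part of the stock redis.conf): persistence can be
+# checked or switched at runtime, e.g.:
+#
+#   redis-cli INFO persistence           # aof_enabled, rdb_last_save_time, ...
+#   redis-cli CONFIG SET appendonly yes  # turn the AOF on without a restart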
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. 
+# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, and continues loading the AOF +# tail. +aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. 
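+#
+# Illustrative example (not part of the stock redis.conf): two nodes running
+# on the same host could use "nodes-6379.conf" and "nodes-6380.conf",
+# typically named after the port each instance listens on.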
+# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. 
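+#
+# Illustrative example (not part of the stock redis.conf): with three working
+# replicas per master, a "cluster-migration-barrier 2" still allows one of
+# them to migrate to an orphaned master, since the old master keeps two.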
+# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instruct the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usually. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. 
Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. 
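+#
+# Illustrative example (not part of the stock redis.conf): the effect of these
+# limits can be observed with OBJECT ENCODING on an arbitrary test key, e.g.:
+#
+#   redis-cli HSET demohash field value
+#   redis-cli OBJECT ENCODING demohash   # "ziplist" while below both limits,
+#                                        # "hashtable" once either is exceeded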
+hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entires limit by setting max-bytes to 0 and max-entries to the desired +# value. 
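+#
+# Illustrative example (not part of the stock redis.conf): node allocation can
+# be watched while appending to an arbitrary test stream, e.g.:
+#
+#   redis-cli XADD demostream '*' field value
+#   redis-cli XINFO STREAM demostream   # radix-tree-nodes grows as entries
+#                                       # exceed the per-node limits below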
+stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# replica -> replica clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited ot 512 mb. However you can change this limit +# here. 
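+#
+# Illustrative example (not part of the stock redis.conf): the currently
+# active limit can be read at runtime with:
+#
+#   redis-cli CONFIG GET proto-max-bulk-len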
+# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporary raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used as +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested +# even in production and manually tested by multiple engineers for some +# time. +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in an "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. 
If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Enabled active defragmentation +# activedefrag yes + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage +# active-defrag-cycle-min 5 + +# Maximal effort for defrag in CPU percentage +# active-defrag-cycle-max 75 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + diff --git a/roles/weblate/templates/settings.py.j2 b/roles/weblate/templates/settings.py.j2 new file mode 100644 index 0000000..f90cd0a --- /dev/null +++ b/roles/weblate/templates/settings.py.j2 @@ -0,0 +1,811 @@ +# -*- coding: utf-8 -*- +# +# Copyright © 2012 - 2019 Michal Čihař +# +# This file is part of Weblate +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from __future__ import unicode_literals +import platform +import os +from logging.handlers import SysLogHandler + +# +# Django settings for Weblate project. +# + +DEBUG = False + +ADMINS = ( + ('{{ weblate_admin }}', '{{ admin_email }}'), +) + +MANAGERS = ADMINS + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.postgresql', + 'NAME': 'weblate', + 'USER': '{{ PSQL_weblate_user }}', + 'PASSWORD': '{{ PSQL_weblate_password }}', + #Leaving it empty defaults to localhost peer auth + 'HOST': 'localhost', + 'PORT': '', + } +} + +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +# Data directory +DATA_DIR = os.path.join(BASE_DIR, 'data') + +# Local time zone for this installation. Choices can be found here: +# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name +# although not all choices may be available on all operating systems. +# In a Windows environment this must be set to your system time zone. +#TIME_ZONE = 'Europe/Paris' +TIME_ZONE = 'UTC' + +# Language code for this installation. 
All choices can be found here: +# http://www.i18nguy.com/unicode/language-identifiers.html +LANGUAGE_CODE = 'en-us' + +LANGUAGES = ( + ('ar', 'العربية'), + ('az', 'Azərbaycan'), + ('be', 'Беларуская'), + ('be@latin', 'Biełaruskaja'), + ('bg', 'Български'), + ('br', 'Brezhoneg'), + ('ca', 'Català'), + ('cs', 'Čeština'), + ('da', 'Dansk'), + ('de', 'Deutsch'), + ('en', 'English'), + ('en-gb', 'English (United Kingdom)'), + ('el', 'Ελληνικά'), + ('es', 'Español'), + ('fi', 'Suomi'), + ('fr', 'Français'), + ('fy', 'Frysk'), + ('gl', 'Galego'), + ('he', 'עברית'), + ('hu', 'Magyar'), + ('id', 'Indonesia'), + ('it', 'Italiano'), + ('ja', '日本語'), + ('ko', '한국어'), + ('ksh', 'Kölsch'), + ('nb', 'Norsk bokmål'), + ('nl', 'Nederlands'), + ('pl', 'Polski'), + ('pt', 'Português'), + ('pt-br', 'Português brasileiro'), + ('ru', 'Русский'), + ('sk', 'Slovenčina'), + ('sl', 'Slovenščina'), + ('sr', 'Српски'), + ('sv', 'Svenska'), + ('tr', 'Türkçe'), + ('uk', 'Українська'), + ('zh-hans', '简体字'), + ('zh-hant', '正體字'), +) + +SITE_ID = 1 + +# If you set this to False, Django will make some optimizations so as not +# to load the internationalization machinery. +USE_I18N = True + +# If you set this to False, Django will not format dates, numbers and +# calendars according to the current locale. +USE_L10N = True + +# If you set this to False, Django will not use timezone-aware datetimes. +USE_TZ = True + +# URL prefix to use, please see documentation for more details +URL_PREFIX = '' + +# Absolute filesystem path to the directory that will hold user-uploaded files. +# Example: "/home/media/media.lawrence.com/media/" +MEDIA_ROOT = os.path.join(DATA_DIR, 'media') + +# URL that handles the media served from MEDIA_ROOT. Make sure to use a +# trailing slash. +# Examples: "http://media.lawrence.com/media/", "http://example.com/media/" +MEDIA_URL = '{0}/media/'.format(URL_PREFIX) + +# Absolute path to the directory static files should be collected to. +# Don't put anything in this directory yourself; store your static files +# in apps' "static/" subdirectories and in STATICFILES_DIRS. +# Example: "/home/media/media.lawrence.com/static/" +STATIC_ROOT = os.path.join(DATA_DIR, 'static') + +# URL prefix for static files. +# Example: "http://media.lawrence.com/static/" +STATIC_URL = '{0}/static/'.format(URL_PREFIX) + +# Additional locations of static files +STATICFILES_DIRS = ( + # Put strings here, like "/home/html/static" or "C:/www/django/static". + # Always use forward slashes, even on Windows. + # Don't forget to use absolute paths, not relative paths. +) + +# List of finder classes that know how to find static files in +# various locations. +STATICFILES_FINDERS = ( + 'django.contrib.staticfiles.finders.FileSystemFinder', + 'django.contrib.staticfiles.finders.AppDirectoriesFinder', + 'compressor.finders.CompressorFinder', +) + +# Make this unique, and don't share it with anybody. 
+# You can generate it using examples/generate-secret-key +SECRET_KEY = 'jm8fqjlg+5!#xu%e-oh#7!$aa7!6avf7ud*_v=chdrb9qdco6(' # noqa + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [ + os.path.join(BASE_DIR, 'weblate', 'templates'), + ], + 'OPTIONS': { + 'context_processors': [ + 'django.contrib.auth.context_processors.auth', + 'django.template.context_processors.debug', + 'django.template.context_processors.i18n', + 'django.template.context_processors.request', + 'django.template.context_processors.csrf', + 'django.contrib.messages.context_processors.messages', + 'weblate.trans.context_processors.weblate_context', + ], + 'loaders': [ + ('django.template.loaders.cached.Loader', [ + 'django.template.loaders.filesystem.Loader', + 'django.template.loaders.app_directories.Loader', + ]), + ], + }, + }, +] + + +# GitHub username for sending pull requests. +# Please see the documentation for more details. +GITHUB_USERNAME = None + +# Authentication configuration +AUTHENTICATION_BACKENDS = ( + 'social_core.backends.email.EmailAuth', + # 'social_core.backends.google.GoogleOAuth2', + # 'social_core.backends.github.GithubOAuth2', + # 'social_core.backends.bitbucket.BitbucketOAuth', + # 'social_core.backends.suse.OpenSUSEOpenId', + # 'social_core.backends.ubuntu.UbuntuOpenId', + # 'social_core.backends.fedora.FedoraOpenId', + # 'social_core.backends.facebook.FacebookOAuth2', + 'weblate.accounts.auth.WeblateUserBackend', +) + +# Custom user model +AUTH_USER_MODEL = 'weblate_auth.User' + +# Social auth backends setup +SOCIAL_AUTH_GITHUB_KEY = '' +SOCIAL_AUTH_GITHUB_SECRET = '' +SOCIAL_AUTH_GITHUB_SCOPE = ['user:email'] + +SOCIAL_AUTH_BITBUCKET_KEY = '' +SOCIAL_AUTH_BITBUCKET_SECRET = '' +SOCIAL_AUTH_BITBUCKET_VERIFIED_EMAILS_ONLY = True + +SOCIAL_AUTH_FACEBOOK_KEY = '' +SOCIAL_AUTH_FACEBOOK_SECRET = '' +SOCIAL_AUTH_FACEBOOK_SCOPE = ['email', 'public_profile'] +SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {'fields': 'id,name,email'} +SOCIAL_AUTH_FACEBOOK_API_VERSION = '3.1' + +SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' +SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' + +# Social auth settings +SOCIAL_AUTH_PIPELINE = ( + 'social_core.pipeline.social_auth.social_details', + 'social_core.pipeline.social_auth.social_uid', + 'social_core.pipeline.social_auth.auth_allowed', + 'social_core.pipeline.social_auth.social_user', + 'weblate.accounts.pipeline.store_params', + 'weblate.accounts.pipeline.verify_open', + 'social_core.pipeline.user.get_username', + 'weblate.accounts.pipeline.require_email', + 'social_core.pipeline.mail.mail_validation', + 'weblate.accounts.pipeline.revoke_mail_code', + 'weblate.accounts.pipeline.ensure_valid', + 'weblate.accounts.pipeline.remove_account', + 'social_core.pipeline.social_auth.associate_by_email', + 'weblate.accounts.pipeline.reauthenticate', + 'weblate.accounts.pipeline.verify_username', + 'social_core.pipeline.user.create_user', + 'social_core.pipeline.social_auth.associate_user', + 'social_core.pipeline.social_auth.load_extra_data', + 'weblate.accounts.pipeline.cleanup_next', + 'weblate.accounts.pipeline.user_full_name', + 'weblate.accounts.pipeline.store_email', + 'weblate.accounts.pipeline.notify_connect', + 'weblate.accounts.pipeline.password_reset', +) +SOCIAL_AUTH_DISCONNECT_PIPELINE = ( + 'social_core.pipeline.disconnect.allowed_to_disconnect', + 'social_core.pipeline.disconnect.get_entries', + 'social_core.pipeline.disconnect.revoke_tokens', + 'weblate.accounts.pipeline.cycle_session', + 'weblate.accounts.pipeline.adjust_primary_mail', + 
'weblate.accounts.pipeline.notify_disconnect', + 'social_core.pipeline.disconnect.disconnect', + 'weblate.accounts.pipeline.cleanup_next', +) + +# Custom authentication strategy +SOCIAL_AUTH_STRATEGY = 'weblate.accounts.strategy.WeblateStrategy' + +# Raise exceptions so that we can handle them later +SOCIAL_AUTH_RAISE_EXCEPTIONS = True + +SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = \ + 'weblate.accounts.pipeline.send_validation' +SOCIAL_AUTH_EMAIL_VALIDATION_URL = \ + '{0}/accounts/email-sent/'.format(URL_PREFIX) +SOCIAL_AUTH_LOGIN_ERROR_URL = \ + '{0}/accounts/login/'.format(URL_PREFIX) +SOCIAL_AUTH_EMAIL_FORM_URL = \ + '{0}/accounts/email/'.format(URL_PREFIX) +SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = \ + '{0}/accounts/profile/#auth'.format(URL_PREFIX) +SOCIAL_AUTH_PROTECTED_USER_FIELDS = ('email',) +SOCIAL_AUTH_SLUGIFY_USERNAMES = True +SOCIAL_AUTH_SLUGIFY_FUNCTION = 'weblate.accounts.pipeline.slugify_username' + +# Password validation configuration +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + 'OPTIONS': { + 'min_length': 6, + } + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, + { + 'NAME': 'weblate.accounts.password_validation.CharsPasswordValidator', + }, + { + 'NAME': 'weblate.accounts.password_validation.PastPasswordsValidator', + }, + # Optional password strength validation by django-zxcvbn-password + # { + # 'NAME': 'zxcvbn_password.ZXCVBNValidator', + # 'OPTIONS': { + # 'min_score': 3, + # 'user_attributes': ('username', 'email', 'full_name') + # } + # }, +] + +# Allow new user registrations +REGISTRATION_OPEN = True + +# Middleware +MIDDLEWARE = [ + 'weblate.middleware.ProxyMiddleware', + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.locale.LocaleMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'weblate.accounts.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'social_django.middleware.SocialAuthExceptionMiddleware', + 'weblate.accounts.middleware.RequireLoginMiddleware', + 'weblate.middleware.SecurityMiddleware', +] + +ROOT_URLCONF = 'weblate.urls' + +# Django and Weblate apps +INSTALLED_APPS = ( + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.messages', + 'django.contrib.staticfiles', + 'django.contrib.admin.apps.SimpleAdminConfig', + 'django.contrib.admindocs', + 'django.contrib.sitemaps', + 'social_django', + 'crispy_forms', + 'compressor', + 'rest_framework', + 'rest_framework.authtoken', + 'weblate.addons', + 'weblate.auth', + 'weblate.checks', + 'weblate.formats', + 'weblate.machinery', + 'weblate.trans', + 'weblate.lang', + 'weblate.langdata', + 'weblate.memory', + 'weblate.screenshots', + 'weblate.accounts', + 'weblate.utils', + 'weblate.vcs', + 'weblate.wladmin', + 'weblate', + + # Optional: Git exporter + # 'weblate.gitexport', +) + +# Path to locales +LOCALE_PATHS = (os.path.join(BASE_DIR, 'weblate', 'locale'), ) + +# Custom exception reporter to include some details +DEFAULT_EXCEPTION_REPORTER_FILTER = \ + 
'weblate.trans.debug.WeblateExceptionReporterFilter' + +# Default logging of Weblate messages +# - to syslog in production (if available) +# - otherwise to console +# - you can also choose 'logfile' to log into separate file +# after configuring it below + +# Detect if we can connect to syslog +HAVE_SYSLOG = False +if platform.system() != 'Windows': + try: + handler = SysLogHandler( + address='/dev/log', facility=SysLogHandler.LOG_LOCAL2 + ) + handler.close() + HAVE_SYSLOG = True + except IOError: + HAVE_SYSLOG = False + +if DEBUG or not HAVE_SYSLOG: + DEFAULT_LOG = 'console' +else: + DEFAULT_LOG = 'syslog' + +# A sample logging configuration. The only tangible logging +# performed by this configuration is to send an email to +# the site admins on every HTTP 500 error when DEBUG=False. +# See http://docs.djangoproject.com/en/stable/topics/logging for +# more details on how to customize your logging configuration. +LOGGING = { + 'version': 1, + 'disable_existing_loggers': True, + 'filters': { + 'require_debug_false': { + '()': 'django.utils.log.RequireDebugFalse' + } + }, + 'formatters': { + 'syslog': { + 'format': 'weblate[%(process)d]: %(levelname)s %(message)s' + }, + 'simple': { + 'format': '%(levelname)s %(message)s' + }, + 'logfile': { + 'format': '%(asctime)s %(levelname)s %(message)s' + }, + 'django.server': { + '()': 'django.utils.log.ServerFormatter', + 'format': '[%(server_time)s] %(message)s', + } + }, + 'handlers': { + 'mail_admins': { + 'level': 'ERROR', + 'filters': ['require_debug_false'], + 'class': 'django.utils.log.AdminEmailHandler', + 'include_html': True, + }, + 'console': { + 'level': 'DEBUG', + 'class': 'logging.StreamHandler', + 'formatter': 'simple' + }, + 'django.server': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'django.server', + }, + 'syslog': { + 'level': 'DEBUG', + 'class': 'logging.handlers.SysLogHandler', + 'formatter': 'syslog', + 'address': '/dev/log', + 'facility': SysLogHandler.LOG_LOCAL2, + }, + # Logging to a file + # 'logfile': { + # 'level':'DEBUG', + # 'class':'logging.handlers.RotatingFileHandler', + # 'filename': "/var/log/weblate/weblate.log", + # 'maxBytes': 100000, + # 'backupCount': 3, + # 'formatter': 'logfile', + # }, + }, + 'loggers': { + 'django.request': { + 'handlers': ['mail_admins', DEFAULT_LOG], + 'level': 'ERROR', + 'propagate': True, + }, + 'django.server': { + 'handlers': ['django.server'], + 'level': 'INFO', + 'propagate': False, + }, + # Logging database queries + # 'django.db.backends': { + # 'handlers': [DEFAULT_LOG], + # 'level': 'DEBUG', + # }, + 'weblate': { + 'handlers': [DEFAULT_LOG], + 'level': 'DEBUG', + }, + # Logging search operations + 'weblate.search': { + 'handlers': [DEFAULT_LOG], + 'level': 'INFO', + }, + # Logging VCS operations + 'weblate.vcs': { + 'handlers': [DEFAULT_LOG], + 'level': 'WARNING', + }, + # Python Social Auth logging + # 'social': { + # 'handlers': [DEFAULT_LOG], + # 'level': 'DEBUG', + # }, + } +} + +# Logging of management commands to console +if (os.environ.get('DJANGO_IS_MANAGEMENT_COMMAND', False) and + 'console' not in LOGGING['loggers']['weblate']['handlers']): + LOGGING['loggers']['weblate']['handlers'].append('console') + +# Remove syslog setup if it's not present +if not HAVE_SYSLOG: + del LOGGING['handlers']['syslog'] + +# List of machine translations +# MT_SERVICES = ( +# 'weblate.machinery.apertium.ApertiumAPYTranslation', +# 'weblate.machinery.baidu.BaiduTranslation', +# 'weblate.machinery.deepl.DeepLTranslation', +# 
'weblate.machinery.glosbe.GlosbeTranslation', +# 'weblate.machinery.google.GoogleTranslation', +# 'weblate.machinery.microsoft.MicrosoftCognitiveTranslation', +# 'weblate.machinery.microsoftterminology.MicrosoftTerminologyService', +# 'weblate.machinery.mymemory.MyMemoryTranslation', +# 'weblate.machinery.netease.NeteaseSightTranslation', +# 'weblate.machinery.tmserver.AmagamaTranslation', +# 'weblate.machinery.tmserver.TMServerTranslation', +# 'weblate.machinery.yandex.YandexTranslation', +# 'weblate.machinery.weblatetm.WeblateTranslation', +# 'weblate.machinery.saptranslationhub.SAPTranslationHub', +# 'weblate.machinery.youdao.YoudaoTranslation', +# 'weblate.memory.machine.WeblateMemory', +# ) + +# Machine translation API keys + +# URL of the Apertium APy server +MT_APERTIUM_APY = None + +# DeepL API key +MT_DEEPL_KEY = None + +# Microsoft Cognitive Services Translator API, register at +# https://portal.azure.com/ +MT_MICROSOFT_COGNITIVE_KEY = None + +# MyMemory identification email, see +# https://mymemory.translated.net/doc/spec.php +MT_MYMEMORY_EMAIL = None + +# Optional MyMemory credentials to access private translation memory +MT_MYMEMORY_USER = None +MT_MYMEMORY_KEY = None + +# Google API key for Google Translate API +MT_GOOGLE_KEY = None + +# Baidu app key and secret +MT_BAIDU_ID = None +MT_BAIDU_SECRET = None + +# Youdao Zhiyun app key and secret +MT_YOUDAO_ID = None +MT_YOUDAO_SECRET = None + +# Netease Sight (Jianwai) app key and secret +MT_NETEASE_KEY = None +MT_NETEASE_SECRET = None + +# API key for Yandex Translate API +MT_YANDEX_KEY = None + +# tmserver URL +MT_TMSERVER = None + +# SAP Translation Hub +MT_SAP_BASE_URL = None +MT_SAP_SANDBOX_APIKEY = None +MT_SAP_USERNAME = None +MT_SAP_PASSWORD = None +MT_SAP_USE_MT = True + +# Title of site to use +SITE_TITLE = 'Weblate' + +# Whether site uses https +ENABLE_HTTPS = True + +# Use HTTPS when creating redirect URLs for social authentication, see +# documentation for more details: +# https://python-social-auth-docs.readthedocs.io/en/latest/configuration/settings.html#processing-redirects-and-urlopen +SOCIAL_AUTH_REDIRECT_IS_HTTPS = ENABLE_HTTPS + +# Make CSRF cookie HttpOnly, see documentation for more details: +# https://docs.djangoproject.com/en/1.11/ref/settings/#csrf-cookie-httponly +CSRF_COOKIE_HTTPONLY = True +CSRF_COOKIE_SECURE = ENABLE_HTTPS +# Store CSRF token in session (since Django 1.11) +CSRF_USE_SESSIONS = True +SESSION_COOKIE_SECURE = ENABLE_HTTPS +# SSL redirect +SECURE_SSL_REDIRECT = ENABLE_HTTPS +# Session cookie age (in seconds) +SESSION_COOKIE_AGE = 1209600 +SESSION_ENGINE = 'django.contrib.sessions.backends.cache' + +# Some security headers +SECURE_BROWSER_XSS_FILTER = True +X_FRAME_OPTIONS = 'DENY' +SECURE_CONTENT_TYPE_NOSNIFF = True + +# Optionally enable HSTS +SECURE_HSTS_SECONDS = 0 +SECURE_HSTS_PRELOAD = False +SECURE_HSTS_INCLUDE_SUBDOMAINS = False + +# URL of login +LOGIN_URL = '{0}/accounts/login/'.format(URL_PREFIX) + +# URL of logout +LOGOUT_URL = '{0}/accounts/logout/'.format(URL_PREFIX) + +# Default location for login +LOGIN_REDIRECT_URL = '{0}/'.format(URL_PREFIX) + +# Anonymous user name +ANONYMOUS_USER_NAME = 'anonymous' + +# Reverse proxy settings +IP_PROXY_HEADER = 'HTTP_X_FORWARDED_FOR' +IP_BEHIND_REVERSE_PROXY = False +IP_PROXY_OFFSET = 0 + +# Sending HTML in mails +EMAIL_SEND_HTML = True + +# Subject of emails includes site title +EMAIL_SUBJECT_PREFIX = '[{0}] '.format(SITE_TITLE) + +# Enable remote hooks +ENABLE_HOOKS = True + +# Number of nearby messages to show in each direction 
+NEARBY_MESSAGES = 5 + +# Use simple language codes for default language/country combinations +SIMPLIFY_LANGUAGES = True + +# Render forms using bootstrap +CRISPY_TEMPLATE_PACK = 'bootstrap3' + +# List of quality checks +# CHECK_LIST = ( +# 'weblate.checks.same.SameCheck', +# 'weblate.checks.chars.BeginNewlineCheck', +# 'weblate.checks.chars.EndNewlineCheck', +# 'weblate.checks.chars.BeginSpaceCheck', +# 'weblate.checks.chars.EndSpaceCheck', +# 'weblate.checks.chars.EndStopCheck', +# 'weblate.checks.chars.EndColonCheck', +# 'weblate.checks.chars.EndQuestionCheck', +# 'weblate.checks.chars.EndExclamationCheck', +# 'weblate.checks.chars.EndEllipsisCheck', +# 'weblate.checks.chars.EndSemicolonCheck', +# 'weblate.checks.chars.MaxLengthCheck', +# 'weblate.checks.format.PythonFormatCheck', +# 'weblate.checks.format.PythonBraceFormatCheck', +# 'weblate.checks.format.PHPFormatCheck', +# 'weblate.checks.format.CFormatCheck', +# 'weblate.checks.format.PerlFormatCheck', +# 'weblate.checks.format.JavascriptFormatCheck', +# 'weblate.checks.format.CSharpFormatCheck', +# 'weblate.checks.format.JavaFormatCheck', +# 'weblate.checks.format.JavaMessageFormatCheck', +# 'weblate.checks.angularjs.AngularJSInterpolationCheck', +# 'weblate.checks.consistency.PluralsCheck', +# 'weblate.checks.consistency.SamePluralsCheck', +# 'weblate.checks.consistency.ConsistencyCheck', +# 'weblate.checks.consistency.TranslatedCheck', +# 'weblate.checks.chars.NewlineCountingCheck', +# 'weblate.checks.markup.BBCodeCheck', +# 'weblate.checks.chars.ZeroWidthSpaceCheck', +# 'weblate.checks.markup.XMLValidityCheck', +# 'weblate.checks.markup.XMLTagsCheck', +# 'weblate.checks.source.OptionalPluralCheck', +# 'weblate.checks.source.EllipsisCheck', +# 'weblate.checks.source.MultipleFailingCheck', +# ) + +# List of automatic fixups +# AUTOFIX_LIST = ( +# 'weblate.trans.autofixes.whitespace.SameBookendingWhitespace', +# 'weblate.trans.autofixes.chars.ReplaceTrailingDotsWithEllipsis', +# 'weblate.trans.autofixes.chars.RemoveZeroSpace', +# 'weblate.trans.autofixes.chars.RemoveControlChars', +# ) + +# List of enabled addons +# WEBLATE_ADDONS = ( +# 'weblate.addons.gettext.GenerateMoAddon', +# 'weblate.addons.gettext.UpdateLinguasAddon', +# 'weblate.addons.gettext.UpdateConfigureAddon', +# 'weblate.addons.gettext.MsgmergeAddon', +# 'weblate.addons.gettext.GettextCustomizeAddon', +# 'weblate.addons.gettext.GettextAuthorComments', +# 'weblate.addons.cleanup.CleanupAddon', +# 'weblate.addons.consistency.LangaugeConsistencyAddon', +# 'weblate.addons.discovery.DiscoveryAddon', +# 'weblate.addons.flags.SourceEditAddon', +# 'weblate.addons.flags.TargetEditAddon', +# 'weblate.addons.generate.GenerateFileAddon', +# 'weblate.addons.json.JSONCustomizeAddon', +# 'weblate.addons.properties.PropertiesSortAddon', +# 'weblate.addons.git.GitSquashAddon', +# ) + +EMAIL_HOST = '{{ EMAIL_HOST }}' +EMAIL_HOST_PASSWORD = '{{ EMAIL_HOST_PASSWORD }}' +EMAIL_HOST_USER = '{{ EMAIL_HOST_USER }}' +EMAIL_PORT = '{{ EMAIL_PORT }}' + +# E-mail address that error messages come from. +SERVER_EMAIL = 'noreply@weblate.com' + +# Default email address to use for various automated correspondence from +# the site managers. Used for registration emails. 
+DEFAULT_FROM_EMAIL = 'noreply@weblate.com'
+
+# List of URLs your site is supposed to serve
+ALLOWED_HOSTS = ['.caliopen.org']
+
+# Example configuration for caching
+CACHES = {
+    # Recommended: redis + hiredis
+    'default': {
+        'BACKEND': 'django_redis.cache.RedisCache',
+        'LOCATION': 'unix:///var/run/redis/redis.sock?db=0',
+        'OPTIONS': {
+            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
+            'PARSER_CLASS': 'redis.connection.HiredisParser',
+        }
+    },
+
+}
+
+# REST framework settings for API
+REST_FRAMEWORK = {
+    # Use Django's standard `django.contrib.auth` permissions,
+    # or allow read-only access for unauthenticated users.
+    'DEFAULT_PERMISSION_CLASSES': [
+        'rest_framework.permissions.IsAuthenticatedOrReadOnly'
+    ],
+    'DEFAULT_AUTHENTICATION_CLASSES': (
+        'rest_framework.authentication.TokenAuthentication',
+        'weblate.api.authentication.BearerAuthentication',
+        'rest_framework.authentication.SessionAuthentication',
+    ),
+    'DEFAULT_THROTTLE_CLASSES': (
+        'rest_framework.throttling.AnonRateThrottle',
+        'rest_framework.throttling.UserRateThrottle'
+    ),
+    'DEFAULT_THROTTLE_RATES': {
+        'anon': '100/day',
+        'user': '1000/day'
+    },
+    'DEFAULT_PAGINATION_CLASS': (
+        'rest_framework.pagination.PageNumberPagination'
+    ),
+    'PAGE_SIZE': 20,
+    'VIEW_DESCRIPTION_FUNCTION': 'weblate.api.views.get_view_description',
+    'UNAUTHENTICATED_USER': 'weblate.auth.models.get_anonymous',
+}
+
+# Example for restricting access to logged in users
+LOGIN_REQUIRED_URLS = (
+# r'/(.*)$',
+)
+
+# In such case you will want to include some of the exceptions
+LOGIN_REQUIRED_URLS_EXCEPTIONS = (
+# r'/accounts/(.*)$',  # Required for login
+    r'/admin/login/(.*)$',  # Required for admin login
+# r'/static/(.*)$',  # Required for development mode
+# r'/widgets/(.*)$',  # Allowing public access to widgets
+# r'/data/(.*)$',  # Allowing public access to data exports
+# r'/hooks/(.*)$',  # Allowing public access to notification hooks
+# r'/healthz/$',  # Allowing public access to health check
+# r'/api/(.*)$',  # Allowing access to API
+# r'/js/i18n/$',  # Javascript localization
+# r'/contact/$',  # Optional for contact form
+# r'/legal/(.*)$',  # Optional for legal app
+)
+
+# Celery worker configuration for testing
+#CELERY_TASK_ALWAYS_EAGER = True
+#CELERY_BROKER_URL = 'memory://'
+#CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
+# Celery worker configuration for production
+CELERY_TASK_ALWAYS_EAGER = False
+CELERY_BROKER_URL = 'redis+socket:///var/run/redis/redis.sock'
+CELERY_RESULT_BACKEND = CELERY_BROKER_URL
+
+# Celery settings, it is not recommended to change these
+CELERY_WORKER_PREFETCH_MULTIPLIER = 0
+CELERY_BEAT_SCHEDULE_FILENAME = os.path.join(
+    DATA_DIR, 'celery', 'beat-schedule'
+)
+CELERY_TASK_ROUTES = {
+    'weblate.trans.search.*': {'queue': 'search'},
+    'weblate.trans.tasks.optimize_fulltext': {'queue': 'search'},
+    'weblate.trans.tasks.cleanup_fulltext': {'queue': 'search'},
+    'weblate.memory.tasks.*': {'queue': 'memory'},
+}
diff --git a/roles/weblate/templates/uwsgi-weblate.service.j2 b/roles/weblate/templates/uwsgi-weblate.service.j2
new file mode 100644
index 0000000..985975b
--- /dev/null
+++ b/roles/weblate/templates/uwsgi-weblate.service.j2
@@ -0,0 +1,17 @@
+[Unit]
+Description=weblate uWSGI app
+After=syslog.target
+
+[Service]
+ExecStart={{ weblate_dir }}/bin/uwsgi \
+    --ini {{ weblate_dir }}/lib/python3.5/site-packages/weblate/uwsgi.ini \
+    --wsgi-file {{ weblate_dir }}/lib/python3.5/site-packages/weblate/wsgi.py \
+    --enable-threads
+User=weblate
+Group=weblate
+Restart=on-failure
+KillSignal=SIGQUIT
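+# Note: uWSGI treats SIGTERM as a reload request rather than a shutdown, so
+# KillSignal=SIGQUIT above is what makes `systemctl stop` actually stop the
+# worker stack instead of reloading it.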
+StandardError=syslog
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/weblate/templates/uwsgi-weblate.socket.j2 b/roles/weblate/templates/uwsgi-weblate.socket.j2
new file mode 100644
index 0000000..49eacfb
--- /dev/null
+++ b/roles/weblate/templates/uwsgi-weblate.socket.j2
@@ -0,0 +1,11 @@
+[Unit]
+Description=Socket for uWSGI app weblate
+
+[Socket]
+ListenStream=/var/run/uwsgi/weblate.sock
+SocketUser=weblate
+SocketGroup=weblate
+SocketMode=0770
+
+[Install]
+WantedBy=sockets.target
diff --git a/roles/weblate/templates/weblate.nginx.j2 b/roles/weblate/templates/weblate.nginx.j2
new file mode 100644
index 0000000..99f7561
--- /dev/null
+++ b/roles/weblate/templates/weblate.nginx.j2
@@ -0,0 +1,56 @@
+server {
+    listen 80 default_server;
+    server_name _;
+    return 301 https://weblate.caliopen.org;
+}
+
+server {
+
+    listen 443 ssl;
+    listen [::]:443 ssl;
+    server_name weblate.caliopen.org;
+    root /usr/share/weblate;
+
+    ssl_certificate /etc/nginx/certificates/caliopen.org.crt;
+    ssl_certificate_key /etc/nginx/certificates/caliopen.org.key;
+    ssl_prefer_server_ciphers On;
+    ssl_protocols TLSv1.1 TLSv1.2;
+    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS;
+    ssl_session_cache shared:SSL:10m;
+
+    if ($host !~* ^(weblate.caliopen.org)$ ) {
+        return 444;
+    }
+
+    location ~ ^/favicon.ico$ {
+        # DATA_DIR/static/favicon.ico
+        alias /home/weblate/weblate-env/lib/python3.5/site-packages/data/static/favicon.ico;
+        expires 30d;
+    }
+
+    location ~ ^/robots.txt$ {
+        # DATA_DIR/static/robots.txt
+        alias /home/weblate/weblate-env/lib/python3.5/site-packages/data/static/robots.txt;
+        expires 30d;
+    }
+
+    location /static/ {
+        # DATA_DIR/static/
+        alias /home/weblate/weblate-env/lib/python3.5/site-packages/data/static/;
+        expires 30d;
+    }
+
+    location /media/ {
+        # DATA_DIR/media/
+        alias /home/weblate/weblate-env/lib/python3.5/site-packages/data/media/;
+        expires 30d;
+    }
+
+    location / {
+        include uwsgi_params;
+        # Needed for long running operations in admin interface
+        uwsgi_read_timeout 3600;
+        # Adjust based on uwsgi configuration:
+        uwsgi_pass unix:///var/run/uwsgi/weblate.sock;
+    }
+}
diff --git a/roles/weblate/templates/weblate.uwsgi.ini.j2 b/roles/weblate/templates/weblate.uwsgi.ini.j2
new file mode 100644
index 0000000..3860941
--- /dev/null
+++ b/roles/weblate/templates/weblate.uwsgi.ini.j2
@@ -0,0 +1,37 @@
+[uwsgi]
+plugins = python
+master = true
+protocol = uwsgi
+socket = /var/run/uwsgi/weblate.sock
+wsgi-file = {{ weblate_dir }}/lib/python3.5/site-packages/weblate/wsgi.py
+
+virtualenv = {{ weblate_dir }}
+
+# Needed for OAuth/OpenID
+buffer-size = 8192
+
+# Increase number of workers for heavily loaded sites
+# workers = 6
+
+# Child processes do not need file descriptors
+close-on-exec = true
+
+# Avoid default 0000 umask
+umask = 0022
+
+# Run as weblate user
+uid = weblate
+gid = weblate
+
+# Enable harakiri mode (kill requests after some time)
+# harakiri = 3600
+# harakiri-verbose = true
+
+# Enable uWSGI stats server
+# stats = :1717
+# stats-http = true
+
+# Do not log some errors caused by client disconnects
+ignore-sigpipe = true
+ignore-write-errors = true
+disable-write-exception = true
diff --git a/vm_vars/credentials.yaml b/vm_vars/credentials.yaml
new file mode 100644
index 0000000..34212ec
--- /dev/null
+++ b/vm_vars/credentials.yaml
@@ -0,0 +1,3 @@
+# Gandi credentials
+
+gandi_api_key: ""
diff --git a/vm_vars/settings.yaml b/vm_vars/settings.yaml
new file mode 100644
index 0000000..18b0638
--- /dev/null
+++ b/vm_vars/settings.yaml
@@ -0,0 +1,9 @@
+# VM configuration
+
+## Common
+os_image: Debian 9
+datacenter: FR-SD3
+
+# SSH keys
+# Give the id of an SSH key registered in your Gandi account
+gandi_ssh_key_id: 43083
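The Gandi API key in vm_vars/credentials.yaml is a sensitive value; one common way to keep a filled-in copy around without exposing it is to encrypt the file with ansible-vault. A minimal sketch, assuming the vault password itself is stored outside the repository:

    # encrypt the credentials file in place (prompts for a vault password)
    ansible-vault encrypt vm_vars/credentials.yaml

    # edit the encrypted file later; it is re-encrypted on save
    ansible-vault edit vm_vars/credentials.yaml

Playbooks that load the encrypted file then need --ask-vault-pass or --vault-password-file on the ansible-playbook command line.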