run.config:
  engine: ruby
  engine.config:
    runtime: ruby-2.5

  extra_packages:
    # basic servers:
    - nginx
    - nodejs

    # for images:
    - ImageMagick
    - jemalloc

    # for videos:
    - ffmpeg3

    # to prep the .env file:
    - gettext-tools

    # for node-gyp, used in the asset compilation process:
    - python-2

    # i18n:
    - libidn

  cache_dirs:
    - node_modules

  extra_path_dirs:
    - node_modules/.bin

  build_triggers:
    - .ruby-version
    - Gemfile
    - Gemfile.lock
    - package.json
    - yarn.lock

  extra_steps:
    - cp .env.nanobox .env
    - yarn

  fs_watch: true

deploy.config:
  extra_steps:
    - NODE_ENV=production bundle exec rake assets:precompile
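  # Render runtime config at deploy time: environment variables are
  # substituted into the .env template, and the nginx configs for the web
  # and streaming components are generated from their ERB templates below.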
  transform:
    - "envsubst < /app/.env.nanobox > /app/.env.production"
    - |-
      if [ -z "$LOCAL_DOMAIN" ]
      then
        . /app/.env.production
        export LOCAL_DOMAIN
      fi
      erb /app/nanobox/nginx-web.conf.erb > /app/nanobox/nginx-web.conf
      erb /app/nanobox/nginx-stream.conf.erb > /app/nanobox/nginx-stream.conf
    - touch /app/log/production.log
  before_live:
    web.web:
      - bundle exec rake db:migrate:setup
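      # Search indexing is prepared by default. Admins who prefer not to run
      # Elasticsearch can disable the feature by adding ES_ENABLED=false to
      # their environment before deploying; the step below is then skipped.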
      - |-
        if [[ "${ES_ENABLED}" != "false" ]]
        then
          bundle exec rake chewy:deploy
        fi

web.web:
  start:
    nginx: nginx -c /app/nanobox/nginx-web.conf
    rails: bundle exec puma -C /app/config/puma.rb

  routes:
    - '/'

  writable_dirs:
    - tmp

  log_watch:
    rails: 'log/production.log'

  network_dirs:
    data.storage:
      - public/system

web.stream:
  start:
    nginx: nginx -c /app/nanobox/nginx-stream.conf
    node: yarn run start

  routes:
    - '/api/v1/streaming*'
    # Somehow we're getting requests for scheme://domain//api/v1/streaming* - match those, too
    - '//api/v1/streaming*'

  writable_dirs:
    - tmp

worker.sidekiq:
  start:
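    # One Sidekiq process per queue, each with a concurrency of 5 (-c) and
    # logging to a shared file (-L).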
    default: bundle exec sidekiq -c 5 -q default -L /app/log/sidekiq.log
    mailers: bundle exec sidekiq -c 5 -q mailers -L /app/log/sidekiq.log
    pull: bundle exec sidekiq -c 5 -q pull -L /app/log/sidekiq.log
    push: bundle exec sidekiq -c 5 -q push -L /app/log/sidekiq.log

  writable_dirs:
    - tmp

  log_watch:
    rails: 'log/production.log'
    sidekiq: 'log/sidekiq.log'

  network_dirs:
    data.storage:
      - public/system

worker.cron_only:
  start: sleep 365d

  writable_dirs:
    - tmp

  log_watch:
    rake: 'log/production.log'

  network_dirs:
    data.storage:
      - public/system

  cron:
    # 20:00 (8 pm), server time: send out the daily digest emails to everyone
    # who opted to receive one
    - id: send_digest_emails
      schedule: '00 20 * * *'
      command: 'bundle exec rake mastodon:emails:digest'

    # 00:10 (ten past midnight), server time: remove local copies of remote
    # users' media once they are older than a certain age (use the NUM_DAYS
    # environment variable to change this from the default of 7 days)
    - id: clear_remote_media
      schedule: '10 00 * * *'
      command: 'bundle exec rake mastodon:media:remove_remote'
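    # Illustrative only: to keep 14 days of remote media instead, the command
    # above could be prefixed with the variable, e.g.
    # command: 'NUM_DAYS=14 bundle exec rake mastodon:media:remove_remote'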

    # 00:20 (twenty past midnight), server time: remove subscriptions to remote
    # users that nobody follows locally (anymore)
    - id: clear_unfollowed_subs
      schedule: '20 00 * * *'
      command: 'bundle exec rake mastodon:push:clear'

    # 00:30 (half past midnight), server time: update local copies of remote
    # users' avatars to match whatever they currently have set on their profile
    - id: update_remote_avatars
      schedule: '30 00 * * *'
      command: 'bundle exec rake mastodon:media:redownload_avatars'

    ############################################################################
    # This task is one you might want to enable, or might not. It keeps disk
    # usage low, but makes "shadow bans" (scenarios where the user is silenced,
    # but not intended to be made aware that the silencing has occurred) much
    # more difficult to put in place, as users would then notice their media is
    # vanishing on a regular basis. Enable it if you aren't worried about users
    # knowing they've been silenced (on the instance level), and want to save
    # disk space. Leave it disabled otherwise.
    ############################################################################
    # # 00:00 (midnight), server time: remove media posted by silenced users
    # - id: clear_silenced_media
    #   schedule: '00 00 * * *'
    #   command: 'bundle exec rake mastodon:media:remove_silenced'

    ############################################################################
    # The following two tasks can be uncommented to automatically open and close
    # registrations on a schedule. The format of 'schedule' is a standard cron
    # time expression: minute hour day month day-of-week; search for "cron
    # time expressions" for more info on how to set these up. The examples here
    # open registration only from 8 am to 4 pm, server time.
    ############################################################################
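    # (Illustrative only: a schedule of '00 08 * * 1-5' would run at 8 am on
    # weekdays only.)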
    # # 08:00 (8 am), server time: open registrations so new users can join
    # - id: open_registrations
    #   schedule: '00 08 * * *'
    #   command: 'bundle exec rake mastodon:settings:open_registrations'
    #
    # # 16:00 (4 pm), server time: close registrations so new users *can't* join
    # - id: close_registrations
    #   schedule: '00 16 * * *'
    #   command: 'bundle exec rake mastodon:settings:close_registrations'

data.db:
  image: nanobox/postgresql:9.6

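  # Nightly backup, roughly: dump the "gonano" database with pg_dump, gzip it,
  # and POST it to the warehouse component; then list this host's existing
  # backup blobs and delete all but the newest ${BACKUP_COUNT:-1}.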
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        PGPASSWORD=${DATA_DB_PASS} pg_dump -U ${DATA_DB_USER} -w -Fc -O gonano |
          gzip |
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).sql.gz -X POST -T - >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
          sed 's/,/\n/g' |
          grep ${HOSTNAME} |
          sort |
          head -n-${BACKUP_COUNT:-1} |
          sed 's/.*: \?"\(.*\)".*/\1/' |
          while read file
          do
            curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
          done

data.elastic:
  image: nanobox/elasticsearch:5

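  # Nightly backup, roughly: register a filesystem snapshot repository under a
  # random UUID, take a snapshot, tar and upload it to the warehouse, then
  # remove the snapshot repository and prune old backups as above.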
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        id=$(cat /proc/sys/kernel/random/uuid)
        curl -X PUT -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}" -d "{\"type\": \"fs\",\"settings\": {\"location\": \"/var/tmp/${id}\",\"compress\": true}}"
        curl -X PUT -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}/backup?wait_for_completion=true&pretty"
        tar -cz -C "/var/tmp/${id}" . |
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).tgz -X POST -T - >&2
        curl -X DELETE -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}"
        rm -rf "/var/tmp/${id}"
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
          sed 's/,/\n/g' |
          grep ${HOSTNAME} |
          sort |
          head -n-${BACKUP_COUNT:-1} |
          sed 's/.*: \?"\(.*\)".*/\1/' |
          while read file
          do
            curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
          done

data.redis:
  image: nanobox/redis:4.0

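  # Nightly backup, roughly: upload the on-disk RDB dump to the warehouse,
  # then prune old backups as above.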
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).rdb -X POST -T /data/var/db/redis/dump.rdb >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
          sed 's/,/\n/g' |
          grep ${HOSTNAME} |
          sort |
          head -n-${BACKUP_COUNT:-1} |
          sed 's/.*: \?"\(.*\)".*/\1/' |
          while read file
          do
            curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
          done

data.storage:
  image: nanobox/unfs:0.9

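  # Nightly backup, roughly: tar the uploaded-media directory, upload it to
  # the warehouse, then prune old backups as above.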
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        tar cz -C /data/var/db/unfs/ . |
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).tgz -X POST -T - >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
          sed 's/,/\n/g' |
          grep ${HOSTNAME} |
          sort |
          head -n-${BACKUP_COUNT:-1} |
          sed 's/.*: \?"\(.*\)".*/\1/' |
          while read file
          do
            curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
          done