Browse Source

Add nginx sidecar in the Peertube pod to serve static files and preserve optimisation from the official Peertube Nginx configuration. Sidecar is optional and can be disabled.

tags/v1.1.0^2
LecygneNoir 11 months ago
parent
commit
f44353bb00
5 changed files with 234 additions and 1 deletions
  1. +3
    -0
      scripts/peertube-init.sh
  2. +60
    -1
      templates/deployment.yaml
  3. +148
    -0
      templates/nginx-proxyconf.yml
  4. +7
    -0
      templates/service.yaml
  5. +16
    -0
      values.yaml

+ 3
- 0
scripts/peertube-init.sh View File

@ -11,4 +11,7 @@ cp /app/support/docker/production/config/custom-environment-variables.yaml /conf
# Patch user after the cp
find /config ! -user peertube -exec chown peertube:peertube {} \;
# Prepare assets for the Nginx sidecar
cp -r /app/client/dist/* /assets/
exit 0

+ 60
- 1
templates/deployment.yaml View File

@ -31,6 +31,8 @@ spec:
- mountPath: /init
name: peertube-init
readOnly: true
- name: peertubeassets
mountPath: /assets
command:
- sh
- /init/peertube-init.sh
@ -51,12 +53,54 @@ spec:
- /init/peertube-chown.sh
{{- end }}
containers:
{{- if .Values.nginxproxy.enabled }}
- name: nginx-proxy
image: "{{ .Values.nginxproxy.image.repository }}:{{ .Values.nginxproxy.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.nginxproxy.service.port }}
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 60
readinessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 60
volumeMounts:
- name: nginxtemp
mountPath: /nginxtemp
- mountPath: /etc/nginx/conf.d
name: nginx-proxyconf
readOnly: true
- name: peertubeassets
mountPath: /assets
readOnly: true
- name: data
mountPath: /data
{{- end }}
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.nginxproxy.enabled }}
livenessProbe:
httpGet:
path: /
port: 9000
initialDelaySeconds: 60
readinessProbe:
httpGet:
path: /
port: 9000
initialDelaySeconds: 60
{{- else }}
ports:
- name: http
  # Templated container port; the literal `containerPort: 9000` that appeared
  # alongside it was the pre-change line of this diff (the -1 in the stats).
  # Keeping both would be a duplicate mapping key, which is invalid YAML.
  containerPort: {{ .Values.service.port }}
  protocol: TCP
livenessProbe:
httpGet:
@ -68,6 +112,7 @@ spec:
path: /
port: http
initialDelaySeconds: 60
{{- end }}
command:
- gosu
- peertube
@ -146,6 +191,20 @@ spec:
- name: config
emptyDir: {}
{{- end }}
{{- if .Values.nginxproxy.persistence.enabled }}
- name: nginxtemp
persistentVolumeClaim:
claimName: {{ .Values.nginxproxy.persistence.existingClaim }}
{{- else }}
- name: nginxtemp
emptyDir: {}
{{- end }}
- name: peertubeassets
emptyDir: {}
- configMap:
defaultMode: 420
name: nginx-proxyconf
name: nginx-proxyconf
- configMap:
defaultMode: 420
name: peertube-init

+ 148
- 0
templates/nginx-proxyconf.yml View File

@ -0,0 +1,148 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-proxyconf
data:
  # Nginx vhost adapted from the official PeerTube production configuration,
  # mounted into the optional nginx sidecar at /etc/nginx/conf.d.
  # NOTE(review): indentation reconstructed — the scraped diff had lost it,
  # which left the block scalar below empty; content is otherwise unchanged.
  peertubesite.conf: |-
    server {
        listen {{ .Values.nginxproxy.service.port }} default_server;
        listen [::]:{{ .Values.nginxproxy.service.port }} default_server;
        server_name _;

        error_log /var/log/nginx/error.log warn;
        access_log /var/log/nginx/access.log main;

        # Enable compression for JS/CSS/HTML bundle, for improved client load times.
        # It might be nice to compress JSON, but leaving that out to protect against potential
        # compression+encryption information leak attacks like BREACH.
        gzip on;
        gzip_types text/css application/javascript;
        gzip_vary on;

        # If you have a small /var/lib partition, it could be interesting to store temp nginx uploads in a different place
        # See https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_temp_path
        client_body_temp_path /nginxtemp;

        # Bypass PeerTube for performance reasons. Could be removed
        location ~ ^/client/(.*\.(js|css|png|svg|woff2|otf|ttf|woff|eot))$ {
            add_header Cache-Control "public, max-age=31536000, immutable";
            alias /assets/$1;
        }

        # Bypass PeerTube for performance reasons. Could be removed
        location ~ ^/static/(thumbnails|avatars)/ {
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
            add_header 'Access-Control-Allow-Origin' '*';
            add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
            add_header 'Access-Control-Allow-Headers' 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
            # Cache 2 hours
            add_header Cache-Control "public, max-age=7200";
            root /data;
            rewrite ^/static/(thumbnails|avatars)/(.*)$ /$1/$2 break;
            try_files $uri /;
        }

        location / {
            proxy_pass http://127.0.0.1:{{ .Values.service.port }};
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # This is the maximum upload size, which roughly matches the maximum size of a video file
            # you can send via the API or the web interface. By default this is 8GB, but administrators
            # can increase or decrease the limit. Currently there's no way to communicate this limit
            # to users automatically, so you may want to leave a note in your instance 'about' page if
            # you change this.
            #
            # Note that temporary space is needed equal to the total size of all concurrent uploads.
            # This data gets stored in /var/lib/nginx by default, so you may want to put this directory
            # on a dedicated filesystem.
            #
            client_max_body_size {{ .Values.nginxproxy.maxbodysize }};
            # Default timeout to 50m to allow large upload with slow connection
            proxy_connect_timeout 3000;
            proxy_send_timeout 3000;
            proxy_read_timeout 3000;
            send_timeout 3000;
        }

        # Bypass PeerTube for performance reasons. Could be removed
        location ~ ^/static/(webseed|redundancy|streaming-playlists)/ {
            # Clients usually have 4 simultaneous webseed connections, so the real limit is 4MB/s per client
            set $peertube_limit_rate 1000k;
            # Increase rate limit in HLS mode, because we don't have multiple simultaneous connections
            if ($request_uri ~ -fragmented.mp4$) {
                set $peertube_limit_rate 5000k;
            }
            # Use this with nginx >= 1.17.0
            limit_rate $peertube_limit_rate;
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
            if ($request_method = 'GET') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
                # Don't spam access log file with byte range requests
                access_log off;
            }
            root /data;
            rewrite ^/static/webseed/(.*)$ /videos/$1 break;
            rewrite ^/static/redundancy/(.*)$ /redundancy/$1 break;
            rewrite ^/static/streaming-playlists/(.*)$ /streaming-playlists/$1 break;
            try_files $uri /;
        }

        # Websocket tracker
        location /tracker/socket {
            # Peers send a message to the tracker every 15 minutes
            # Don't close the websocket before this time
            proxy_read_timeout 1200s;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_http_version 1.1;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $host;
            proxy_pass http://127.0.0.1:{{ .Values.service.port }};
        }

        location /socket.io {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $host;
            proxy_pass http://127.0.0.1:{{ .Values.service.port }};
            # enable WebSockets
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
        }
    }

+ 7
- 0
templates/service.yaml View File

@ -10,10 +10,17 @@ metadata:
spec:
type: {{ .Values.service.type }}
ports:
{{- if .Values.nginxproxy.enabled }}
- port: {{ .Values.nginxproxy.service.port }}
targetPort: http
protocol: TCP
name: http
{{- else }}
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- end }}
selector:
app: {{ template "peertube.name" . }}
release: {{ .Release.Name }}

+ 16
- 0
values.yaml View File

@ -12,6 +12,7 @@ image:
service:
type: ClusterIP
# Be careful, if nginx-proxy is enabled, the service port for ingress is the nginx service port (see nginxproxy below)
port: 9000
ingress:
@ -86,6 +87,21 @@ environment:
signup: false
transcoding: true
# Optional nginx sidecar serving PeerTube static assets with the upstream
# nginx optimisations (compression, caching, rate limiting).
nginxproxy:
  enabled: true
  service:
    # Be careful if you use the nginx proxy, the port CANNOT BE the same as the default Peertube port (9000)
    port: 9001
  image:
    repository: nginx
    # Quoted so YAML keeps the version as a string, never a number
    tag: "1.17.9"
  # Maximum upload size accepted by the proxy (nginx client_max_body_size)
  maxbodysize: 8G
  # When uploading, temporary space is needed equal to the total size of all concurrent uploads.
  # It could be a good idea to use an outside docker storage (eg: pvc in k8s) for these files
  persistence:
    enabled: false
    existingClaim: pvc-nginx-proxy
redis:
usePassword: false
password: peertube

Loading…
Cancel
Save