Compare commits

..

1 Commits

Author SHA1 Message Date
530bdf191b Tinyproxy and stunnel config 2019-03-22 09:08:41 +01:00
52 changed files with 2217 additions and 924 deletions

1
.gitignore vendored
View File

@ -1 +0,0 @@
*.cred

View File

@ -19,16 +19,16 @@ after_script:
tags:
- docker
html:
extends: .builds
script:
- make html
proxy:
extends: .builds
script:
- make proxy
wiki:
extends: .builds
script:
- make wiki
nextcloud:
extends: .builds
script:

3
.gitmodules vendored
View File

@ -0,0 +1,3 @@
[submodule "html/html-wojciechkozlowski.eu"]
path = html/html-wojciechkozlowski.eu
url = https://github.com/Wojtek242/html-wojciechkozlowski.eu.git

113
Makefile
View File

@ -12,18 +12,11 @@ uninstall:
rm /lib/systemd/system/loki-server.service
systemctl daemon-reload
#------------------------------------------------------------------------------
# The container engine to use. Defaults to docker, but on Fedora must now use
# podman.
# ------------------------------------------------------------------------------
ENGINE = docker
# -----------------------------------------------------------------------------
# The container registry to use.
# -----------------------------------------------------------------------------
REGISTRY = registry.wojciechkozlowski.eu/wojtek/loki
DOCKER_REGISTRY = registry.wojciechkozlowski.eu/wojtek/loki
# -----------------------------------------------------------------------------
# Default target.
@ -31,93 +24,117 @@ REGISTRY = registry.wojciechkozlowski.eu/wojtek/loki
default: all
# -----------------------------------------------------------------------------
# html
# -----------------------------------------------------------------------------
HTML = $(REGISTRY)/html
html-clean:
$(ENGINE) rmi $(HTML) || /bin/true
html-build:
$(ENGINE) build -f html/Dockerfile -t $(HTML) ./html
html-push:
$(ENGINE) push $(HTML)
html-pull:
$(ENGINE) pull $(HTML)
html: html-clean html-build html-push
# -----------------------------------------------------------------------------
# proxy
# -----------------------------------------------------------------------------
PROXY = $(REGISTRY)/proxy
PROXY = $(DOCKER_REGISTRY)/proxy
proxy-clean:
$(ENGINE) rmi $(PROXY) || /bin/true
docker rmi $(PROXY) || /bin/true
proxy-build:
$(ENGINE) build -f proxy/Dockerfile -t $(PROXY) ./proxy
docker build -f proxy/Dockerfile -t $(PROXY) ./proxy
proxy-push:
$(ENGINE) push $(PROXY)
docker push $(PROXY)
proxy-pull:
$(ENGINE) pull $(PROXY)
docker pull $(PROXY)
proxy: proxy-clean proxy-build proxy-push
# -----------------------------------------------------------------------------
# wiki
# -----------------------------------------------------------------------------
WIKI = $(DOCKER_REGISTRY)/wiki
wiki-clean:
docker rmi $(WIKI) || /bin/true
wiki-build:
docker build -f dokuwiki/Dockerfile -t $(WIKI) ./dokuwiki
wiki-push:
docker push $(WIKI)
wiki-pull:
docker pull $(WIKI)
wiki: wiki-clean wiki-build wiki-push
# -----------------------------------------------------------------------------
# nextcloud
# -----------------------------------------------------------------------------
NEXTCLOUD = $(REGISTRY)/nextcloud
NEXTCLOUD = $(DOCKER_REGISTRY)/nextcloud
nextcloud-clean:
$(ENGINE) rmi $(NEXTCLOUD) || /bin/true
docker rmi $(NEXTCLOUD) || /bin/true
nextcloud-build:
$(ENGINE) build -f nextcloud/Dockerfile -t $(NEXTCLOUD) ./nextcloud
docker build -f nextcloud/Dockerfile -t $(NEXTCLOUD) ./nextcloud
nextcloud-push:
$(ENGINE) push $(NEXTCLOUD)
docker push $(NEXTCLOUD)
nextcloud-pull:
$(ENGINE) pull $(NEXTCLOUD)
docker pull $(NEXTCLOUD)
nextcloud: nextcloud-clean nextcloud-build nextcloud-push
#------------------------------------------------------------------------------
# Shadowsocks
#------------------------------------------------------------------------------
SHADOWSOCKS = $(DOCKER_REGISTRY)/shadowsocks
shadowsocks-clean:
docker rmi $(SHADOWSOCKS) || /bin/true
shadowsocks-build:
docker build -f shadowsocks/Dockerfile -t $(SHADOWSOCKS) ./shadowsocks
shadowsocks-push:
docker push $(SHADOWSOCKS)
shadowsocks-pull:
docker pull $(SHADOWSOCKS)
shadowsocks: shadowsocks-clean shadowsocks-build shadowsocks-push
# -----------------------------------------------------------------------------
# Collect targets.
# -----------------------------------------------------------------------------
clean-all:
$(ENGINE) container prune -f
$(ENGINE) image prune -a -f
docker container prune -f
docker image prune -a -f
clean-builds: \
html-clean \
proxy-clean \
nextcloud-clean
wiki-clean \
nextcloud-clean \
shadowsocks-clean
build-all: \
html-build \
proxy-build \
nextcloud-build
wiki-build \
nextcloud-build \
shadowsocks-build
push-all: \
html-push \
proxy-push \
nextcloud-push
wiki-push \
nextcloud-push \
shadowsocks-push
pull-all: \
html-pull \
proxy-pull \
nextcloud-pull
wiki-pull \
nextcloud-pull \
shadowsocks-pull
# -----------------------------------------------------------------------------
# Clean - build - push

View File

@ -43,14 +43,8 @@ To provision the server
ansible-playbook --vault-id @prompt ssh.yml
3. Set up the bare metal machine
3. Run the remaining setup
::
ansible-playbook --vault-id @prompt machine.yml
4. Install and start the dockerised ``loki`` server
::
ansible-playbook --vault-id @prompt loki.yml
ansible-playbook --vault-id @prompt main.yml

View File

@ -1,10 +0,0 @@
#!/bin/sh
# Backblaze B2 configuration variables
B2_ACCOUNT="{{ b2_key_id }}"
B2_KEY="{{ b2_app_key }}"
B2_BUCKET="loki-backup"
# GPG key (last 8 characters)
GPG_KEY="{{ gpg_key_id }}"
GPG_PASSPHRASE="{{ gpg_passphrase }}"

View File

@ -1,2 +0,0 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";

View File

@ -1,164 +0,0 @@
// Unattended-Upgrade::Origins-Pattern controls which packages are
// upgraded.
//
// Lines below have the format "keyword=value,...". A
// package will be upgraded only if the values in its metadata match
// all the supplied keywords in a line. (In other words, omitted
// keywords are wild cards.) The keywords originate from the Release
// file, but several aliases are accepted. The accepted keywords are:
// a,archive,suite (eg, "stable")
// c,component (eg, "main", "contrib", "non-free")
// l,label (eg, "Debian", "Debian-Security")
// o,origin (eg, "Debian", "Unofficial Multimedia Packages")
// n,codename (eg, "jessie", "jessie-updates")
// site (eg, "http.debian.net")
// The available values on the system are printed by the command
// "apt-cache policy", and can be debugged by running
// "unattended-upgrades -d" and looking at the log file.
//
// Within lines unattended-upgrades allows 2 macros whose values are
// derived from /etc/debian_version:
// ${distro_id} Installed origin.
// ${distro_codename} Installed codename (eg, "buster")
Unattended-Upgrade::Origins-Pattern {
// Codename based matching:
// This will follow the migration of a release through different
// archives (e.g. from testing to stable and later oldstable).
// Software will be the latest available for the named release,
// but the Debian release itself will not be automatically upgraded.
// "origin=Debian,codename=${distro_codename}-updates";
// "origin=Debian,codename=${distro_codename}-proposed-updates";
"origin=Debian,codename=${distro_codename},label=Debian";
"origin=Debian,codename=${distro_codename},label=Debian-Security";
"origin=Debian,codename=${distro_codename}-security,label=Debian-Security";
// Archive or Suite based matching:
// Note that this will silently match a different release after
// migration to the specified archive (e.g. testing becomes the
// new stable).
// "o=Debian,a=stable";
// "o=Debian,a=stable-updates";
// "o=Debian,a=proposed-updates";
// "o=Debian Backports,a=${distro_codename}-backports,l=Debian Backports";
};
// Python regular expressions, matching packages to exclude from upgrading
Unattended-Upgrade::Package-Blacklist {
// The following matches all packages starting with linux-
// "linux-";
// Use $ to explicitly define the end of a package name. Without
// the $, "libc6" would match all of them.
// "libc6$";
// "libc6-dev$";
// "libc6-i686$";
// Special characters need escaping
// "libstdc\+\+6$";
// The following matches packages like xen-system-amd64, xen-utils-4.1,
// xenstore-utils and libxenstore3.0
// "(lib)?xen(store)?";
// For more information about Python regular expressions, see
// https://docs.python.org/3/howto/regex.html
};
// This option allows you to control if on a unclean dpkg exit
// unattended-upgrades will automatically run
// dpkg --force-confold --configure -a
// The default is true, to ensure updates keep getting installed
//Unattended-Upgrade::AutoFixInterruptedDpkg "true";
// Split the upgrade into the smallest possible chunks so that
// they can be interrupted with SIGTERM. This makes the upgrade
// a bit slower but it has the benefit that shutdown while a upgrade
// is running is possible (with a small delay)
//Unattended-Upgrade::MinimalSteps "true";
// Install all updates when the machine is shutting down
// instead of doing it in the background while the machine is running.
// This will (obviously) make shutdown slower.
// Unattended-upgrades increases logind's InhibitDelayMaxSec to 30s.
// This allows more time for unattended-upgrades to shut down gracefully
// or even install a few packages in InstallOnShutdown mode, but is still a
// big step back from the 30 minutes allowed for InstallOnShutdown previously.
// Users enabling InstallOnShutdown mode are advised to increase
// InhibitDelayMaxSec even further, possibly to 30 minutes.
//Unattended-Upgrade::InstallOnShutdown "false";
// Send email to this address for problems or packages upgrades
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. A package that provides
// 'mailx' must be installed. E.g. "user@example.com"
Unattended-Upgrade::Mail "root";
// Set this value to one of:
// "always", "only-on-error" or "on-change"
// If this is not set, then any legacy MailOnlyOnError (boolean) value
// is used to choose between "only-on-error" and "on-change"
//Unattended-Upgrade::MailReport "on-change";
// Remove unused automatically installed kernel-related packages
// (kernel images, kernel headers and kernel version locked tools).
//Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
// Do automatic removal of newly unused dependencies after the upgrade
//Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
// Do automatic removal of unused packages after the upgrade
// (equivalent to apt-get autoremove)
//Unattended-Upgrade::Remove-Unused-Dependencies "false";
// Automatically reboot *WITHOUT CONFIRMATION* if
// the file /var/run/reboot-required is found after the upgrade
//Unattended-Upgrade::Automatic-Reboot "false";
// Automatically reboot even if there are users currently logged in
// when Unattended-Upgrade::Automatic-Reboot is set to true
//Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
// If automatic reboot is enabled and needed, reboot at the specific
// time instead of immediately
// Default: "now"
//Unattended-Upgrade::Automatic-Reboot-Time "02:00";
// Use apt bandwidth limit feature, this example limits the download
// speed to 70kb/sec
//Acquire::http::Dl-Limit "70";
// Enable logging to syslog. Default is False
// Unattended-Upgrade::SyslogEnable "false";
// Specify syslog facility. Default is daemon
// Unattended-Upgrade::SyslogFacility "daemon";
// Download and install upgrades only on AC power
// (i.e. skip or gracefully stop updates on battery)
// Unattended-Upgrade::OnlyOnACPower "true";
// Download and install upgrades only on non-metered connection
// (i.e. skip or gracefully stop updates on a metered connection)
// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true";
// Verbose logging
// Unattended-Upgrade::Verbose "false";
// Print debugging information both in unattended-upgrades and
// in unattended-upgrade-shutdown
// Unattended-Upgrade::Debug "false";
// Allow package downgrade if Pin-Priority exceeds 1000
// Unattended-Upgrade::Allow-downgrade "false";
// When APT fails to mark a package to be upgraded or installed try adjusting
// candidates of related packages to help APT's resolver in finding a solution
// where the package can be upgraded or installed.
// This is a workaround until APT's resolver is fixed to always find a
// solution if it exists. (See Debian bug #711128.)
// The fallback is enabled by default, except on Debian's sid release because
// uninstallable packages are frequent there.
// Disabling the fallback speeds up unattended-upgrades when there are
// uninstallable packages at the expense of rarely keeping back packages which
// could be upgraded or installed.
// Unattended-Upgrade::Allow-APT-Mark-Fallback "true";

View File

@ -0,0 +1,3 @@
RUN_DAILY="true"
RUN_DAILY_OPTS="-q"
DIFF_MODE="true"

View File

@ -0,0 +1,34 @@
# Defaults for rkhunter automatic tasks
# sourced by /etc/cron.*/rkhunter and /etc/apt/apt.conf.d/90rkhunter
#
# This is a POSIX shell fragment
#
# Set this to yes to enable rkhunter daily runs
# (default: false)
CRON_DAILY_RUN="yes"
# Set this to yes to enable rkhunter weekly database updates
# (default: false)
CRON_DB_UPDATE=""
# Set this to yes to enable reports of weekly database updates
# (default: false)
DB_UPDATE_EMAIL="false"
# Set this to the email address where reports and run output should be sent
# (default: root)
REPORT_EMAIL="root"
# Set this to yes to enable automatic database updates
# (default: false)
APT_AUTOGEN="false"
# Nicenesses range from -20 (most favorable scheduling) to 19 (least favorable)
# (default: 0)
NICE="0"
# Should daily check be run when running on battery
# powermgmt-base is required to detect if running on battery or on AC power
# (default: false)
RUN_CHECK_ON_BATTERY="false"

View File

@ -1,50 +1,6 @@
^[ :[:alnum:]]{15} {{ hostname }} docker-compose\[[0-9]+\]:
^[ :[:alnum:]]{15} {{ hostname }} kernel: \[[ .[:digit:]]+\] perf: interrupt took too long \([[:digit:]]+ > [[:digit:]]+\), lowering kernel.perf_event_max_sample_rate to [[:digit:]]+
^[ :[:alnum:]]{15} {{ hostname }} kernel: \[[ .[:digit:]]+\] Process accounting resumed
^[ :[:alnum:]]{15} {{ hostname }} rsyslogd: \[origin software=\"rsyslogd\" swVersion=\"[.[:digit:]]+\" x-pid=\"[[:digit:]]+\" x-info=\"https://www.rsyslog.com\"\] rsyslogd was HUPed
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: apt-daily.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: apt-daily-upgrade.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: systemd-tmpfiles-clean.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Listening on GnuPG network certificate management daemon.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Listening on GnuPG cryptographic agent
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Closed GnuPG network certificate management daemon.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Closed GnuPG cryptographic agent
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: run-docker-runtime\\x2drunc-moby
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Starting Daily man-db regeneration...
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: fstrim.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: man-db.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Started Daily man-db regeneration.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: logrotate.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: packagekit.service: Main process exited, code=killed, status=15/TERM
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: packagekit.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: acct.service: Succeeded.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Starting LSB: process and login accounting...
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Started LSB: process and login accounting.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Stopping LSB: process and login accounting...
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Stopped LSB: process and login accounting.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Finished system activity accounting tool.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Finished Generate a daily summary of process accounting.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Finished Cleanup of Temporary Directories.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: Finished Kernel process accounting.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: apt-daily.service: Consumed [[:digit:]]+.[[:digit:]]+s CPU time.
^[ :[:alnum:]]{15} {{ hostname }} systemd\[[0-9]+\]: apt-daily-upgrade.service: Consumed [[:digit:]]+.[[:digit:]]+s CPU time.
^[ :[:alnum:]]{15} {{ hostname }} accton\[[0-9]+\]: Turning on process accounting, file set to '/var/log/account/pacct'.
^[ :[:alnum:]]{15} {{ hostname }} accton\[[0-9]+\]: Turning off process accounting.
^[ :[:alnum:]]{15} {{ hostname }} accton\[[0-9]+\]: Done..
^[ :[:alnum:]]{15} {{ hostname }} auditd\[[0-9]+\]: Audit daemon rotating log files
^[ :[:alnum:]]{15} {{ hostname }} dbus-daemon\[[0-9]+\]: \[system\] Activating via systemd: service name='org.freedesktop.PackageKit' unit='packagekit.service' requested by '[:.[:digit:]]+' \(uid=0 pid=[[:digit:]]+ comm=\"/usr/bin/gdbus call --system --dest org.freedeskto\"\)
^[ :[:alnum:]]{15} {{ hostname }} dbus-daemon\[[0-9]+\]: \[system\] Activating via systemd: service name='org.freedesktop.PolicyKit1' unit='polkit.service' requested by '[:.[:digit:]]+' \(uid=0 pid=[[:digit:]]+ comm=\"/usr/lib/packagekit/packagekitd \"\)
^[ :[:alnum:]]{15} {{ hostname }} dbus-daemon\[[0-9]+\]: \[system\] Successfully activated service 'org.freedesktop.PackageKit'
^[ :[:alnum:]]{15} {{ hostname }} dbus-daemon\[[0-9]+\]: \[system\] Successfully activated service 'org.freedesktop.PolicyKit1'
^[ :[:alnum:]]{15} {{ hostname }} fstrim\[[0-9]+\]: /: [.[:digit:]]+ [KMG]iB \([[:digit:]]+ bytes\) trimmed on /dev/md[[:digit:]]
^[ :[:alnum:]]{15} {{ hostname }} PackageKit: daemon start
^[ :[:alnum:]]{15} {{ hostname }} PackageKit: daemon quit
^[ :[:alnum:]]{15} {{ hostname }} polkitd\[[0-9]+\]: started daemon version [.[:digit:]]+ using authority implementation `local' version `[.[:digit:]]+'
^[ :[:alnum:]]{15} {{ hostname }} runuser: pam_unix\(runuser:session\): session opened for user nobody by \(uid=0\)
^[ :[:alnum:]]{15} {{ hostname }} runuser: pam_unix\(runuser:session\): session closed for user nobody
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Invalid user [._'[:alnum:]-]+ from [.[:digit:]]+ port [[:digit:]]+
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Received disconnect from [.[:digit:]]+ port [:[:digit:]]+ Bye Bye \[preauth\]
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Disconnected from invalid user [._'[:alnum:]-]+ [.[:digit:]]+ port [[:digit:]]+ \[preauth\]
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Connection closed by [.[:digit:]]+ port [[:digit:]]+ \[preauth\]
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Connection reset by [.[:digit:]]+ port [[:digit:]]+ \[preauth\]
^[ :[:alnum:]]{15} {{ hostname }} sshd\[[0-9]+\]: Did not receive identification string from [.[:digit:]]+ port [[:digit:]]+
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} docker-compose\[[0-9]+\]:
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} systemd\[[0-9]+\]: Listening on GnuPG network certificate management daemon.
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} systemd\[[0-9]+\]: Listening on GnuPG cryptographic agent
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} systemd\[[0-9]+\]: Closed GnuPG network certificate management daemon.
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} systemd\[[0-9]+\]: Closed GnuPG cryptographic agent
^[[:alpha:]]{3} [ :[:digit:]]{11} {{ hostname }} auditd\[[0-9]+\]: Audit daemon rotating log files

View File

@ -18,7 +18,7 @@ table inet filter {
ip protocol icmp icmp type { echo-request, destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
# activate the following line to accept common local services
tcp dport { 80, 443, {{ ssh_port }}, {{ gitlab_ssh_port }} } ct state new accept
tcp dport { 80, 443, {{ ssh_port }}, {{ gitlab_ssh_port }}, {{ shadowsocks_port }} } ct state new accept
# count and drop any other traffic
counter drop

1251
ansible/etc/rkhunter.conf.j2 Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
# $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
# $OpenBSD: sshd_config,v 1.100 2016/08/15 12:32:04 naddy Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
@ -95,6 +95,8 @@ X11Forwarding no
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#UseLogin no
#UsePrivilegeSeparation sandbox
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0

View File

@ -1,9 +0,0 @@
[Unit]
Description=Backup Loki server
[Service]
WorkingDirectory={{ loki_dir }}
Type=oneshot
ExecStartPre={{ loki_dir }}/registry-cleaner.sh
ExecStart={{ loki_dir }}/backup.sh -b
ExecStartPost={{ loki_dir }}/b2-backup.sh

View File

@ -1,10 +0,0 @@
[Unit]
Description=Back up Loki once a week
[Timer]
OnCalendar=weekly
AccuracySec=1h
Persistent=true
[Install]
WantedBy=timers.target

View File

@ -1,2 +0,0 @@
{{ gitlab_username }}
{{ gitlab_access_token }}

View File

@ -9,7 +9,4 @@
# appropriate entries to /etc/hosts.
[server]
loki
[server:vars]
ansible_python_interpreter=/usr/bin/python3
loki

View File

@ -1,130 +0,0 @@
---
- hosts: server
vars_files:
- secrets.yml
vars:
- debian_release: bullseye
tasks:
# -------------------------------------------------------------------------
# Docker CE.
# -------------------------------------------------------------------------
- name: Install packages to enable HTTPS repository
apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg2
- software-properties-common
- name: Add Docker GPG key
apt_key:
id: 0EBFCD88
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add Docker repository
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/debian "{{ debian_release }}" stable
state: present
register: docker_repo
- name: Update apt cache
apt:
update_cache: yes
force_apt_get: yes
when: docker_repo is changed
- name: Install docker-ce and docker-compose
apt:
name:
- docker-ce
- docker-compose
# -------------------------------------------------------------------------
# Loki server.
# -------------------------------------------------------------------------
- name: Install git
apt:
name: git
- name: Clone Loki repo
git:
repo: https://github.com/Wojtek242/loki.git
dest: "{{ loki_dir }}"
update: no
register: loki_git
- name: Build containers locally
command: make build-all
args:
chdir: "{{ loki_dir }}"
when: loki_git is changed
- name: Add hosts file
template:
src: ./etc/hosts.j2
dest: /etc/hosts
mode: 0644
# -------------------------------------------------------------------------
# Loki server service.
# -------------------------------------------------------------------------
- name: Install Loki server
template:
src: ./etc/systemd/system/loki-server.service.j2
dest: /etc/systemd/system/loki-server.service
mode: 0644
- name: Enable and start Loki server
service:
name: loki-server
state: started
enabled: yes
# -------------------------------------------------------------------------
# Loki backup service.
# -------------------------------------------------------------------------
- name: Install duplicity
apt:
name:
- duplicity
- python3-b2sdk
- name: Create GitLab credentials file
template:
src: ./gitlab.cred.j2
dest: "{{ loki_dir }}/gitlab.cred"
mode: 0644
- name: Create B2 credentials file
template:
src: ./b2.cred.j2
dest: "{{ loki_dir }}/b2.cred"
mode: 0644
- name: Install Loki backup service
template:
src: ./etc/systemd/system/loki-backup.service.j2
dest: /etc/systemd/system/loki-backup.service
mode: 0644
- name: Install Loki backup timer
template:
src: ./etc/systemd/system/loki-backup.timer.j2
dest: /etc/systemd/system/loki-backup.timer
mode: 0644
- name: Enable and start Loki backup
service:
name: loki-backup.timer
state: started
enabled: yes

View File

@ -4,6 +4,10 @@
vars_files:
- secrets.yml
vars:
- debian_release: stretch
- loki_dir: /srv/loki
tasks:
# -------------------------------------------------------------------------
@ -18,43 +22,35 @@
force_apt_get: yes
register: apt_update
- name: Reboot the machine
reboot:
# Once ansible 2.7 is available will be able to just use reboot module.
- block:
- name: Reboot
shell: "sleep 1 && reboot"
async: 1
poll: 0
- name: Wait for host to come back up
wait_for_connection:
connect_timeout: 20
sleep: 5
delay: 5
timeout: 300
when: apt_update is changed
# -------------------------------------------------------------------------
# Ensure unattended upgrades is installed.
# -------------------------------------------------------------------------
- name: Install unattended-upgrades
apt:
name: unattended-upgrades
- name: Configure unattended-upgrades
template:
src: ./etc/apt/apt.conf.d/50unattended-upgrades.j2
dest: /etc/apt/apt.conf.d/50unattended-upgrades
mode: 0644
- name: Enable unattended-upgrades
template:
src: ./etc/apt/apt.conf.d/20auto-upgrades.j2
dest: /etc/apt/apt.conf.d/20auto-upgrades
mode: 0644
# -------------------------------------------------------------------------
# Loki uses SSDs so use fstrim on a timer.
# -------------------------------------------------------------------------
- name: Copy fstrim service file
template:
src: ./etc/systemd/system/fstrim.service.j2
copy:
src: ./etc/systemd/system/fstrim.service
dest: /etc/systemd/system/fstrim.service
mode: 0644
- name: Copy fstrim timer file
template:
src: ./etc/systemd/system/fstrim.timer.j2
copy:
src: ./etc/systemd/system/fstrim.timer
dest: /etc/systemd/system/fstrim.timer
mode: 0644
@ -64,31 +60,18 @@
state: started
enabled: yes
# -------------------------------------------------------------------------
# Set up the USB flash drive.
# -------------------------------------------------------------------------
- name: Create USB mount directory
file:
path: /media/usb0
state: directory
- name: Ensure USB drive is auto-mounted
lineinfile:
line: "LABEL=Muninn /media/usb0 ext4 defaults 0 0"
dest: "/etc/fstab"
# -------------------------------------------------------------------------
# Apparmor.
# -------------------------------------------------------------------------
- name: Install apparmor, utilities, and profiles
apt:
name:
- apparmor
- apparmor-utils
- apparmor-profiles
- apparmor-profiles-extra
name: "{{ item }}"
with_items:
- apparmor
- apparmor-utils
- apparmor-profiles
- apparmor-profiles-extra
register: apparmor
- name: Ensure /etc/default/grub.d exists
@ -104,12 +87,22 @@
mode: 0644
register: apparmor_cfg
# Once ansible 2.7 is available will be able to just use reboot module.
- block:
- name: Update grub
command: update-grub
- name: Reboot the machine
reboot:
- name: Reboot
shell: "sleep 1 && reboot"
async: 1
poll: 0
- name: Wait for host to come back up
wait_for_connection:
connect_timeout: 20
sleep: 5
delay: 5
timeout: 300
when:
apparmor is changed or
@ -146,10 +139,11 @@
- name: Install postfix
apt:
name:
- postfix
- ca-certificates
- libsasl2-modules
name: "{{ item }}"
with_items:
- postfix
- ca-certificates
- libsasl2-modules
register: postfix
- name: Configure credentials
@ -238,9 +232,10 @@
- name: Install logcheck and logrotate
apt:
name:
- logcheck
- logrotate
name: "{{ item }}"
with_items:
- logcheck
- logrotate
- name: Configure logcheck
template:
@ -313,6 +308,35 @@
auditd is changed or
auditd_cfg is changed
# -------------------------------------------------------------------------
# Chkrootkit and Rkhunter.
# -------------------------------------------------------------------------
- name: Install rkhunter and chkrootkit
apt:
name: "{{ item }}"
with_items:
- rkhunter
- chkrootkit
- name: Configure rkhunter
template:
src: ./etc/rkhunter.conf.j2
dest: /etc/rkhunter.conf
mode: 0644
- name: Configure rkhunter
template:
src: ./etc/default/rkhunter.j2
dest: /etc/default/rkhunter
mode: 0644
- name: Configure chkrootkit
template:
src: ./etc/chkrootkit.conf.j2
dest: /etc/chkrootkit.conf
mode: 0644
# -------------------------------------------------------------------------
# Install sudo and user to group.
# -------------------------------------------------------------------------
@ -327,6 +351,104 @@
groups: sudo
append: yes
# -------------------------------------------------------------------------
# Docker CE.
# -------------------------------------------------------------------------
- name: Install packages to enable HTTPS repository
apt:
name: "{{ item }}"
with_items:
- apt-transport-https
- ca-certificates
- curl
- gnupg2
- software-properties-common
- name: Add Docker GPG key
apt_key:
id: 0EBFCD88
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add Docker repository
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/debian "{{ debian_release }}" stable
state: present
register: docker_repo
- name: Update apt cache
apt:
update_cache: yes
force_apt_get: yes
when: docker_repo is changed
- name: Install docker-ce and docker-compose
apt:
name: "{{ item }}"
with_items:
- docker-ce
- docker-compose
# -------------------------------------------------------------------------
# Loki server.
# -------------------------------------------------------------------------
- name: Install git
apt:
name: git
- name: Clone Loki repo
git:
repo: https://github.com/Wojtek242/loki.git
dest: "{{ loki_dir }}"
register: loki_git
- block:
- name: Install Loki service
command: cp "{{ loki_dir }}"/loki-server.service /lib/systemd/system/
- name: Update service file
lineinfile:
path: /lib/systemd/system/loki-server.service
regexp: '^WorkingDirectory='
line: 'WorkingDirectory={{ loki_dir }}'
- name: Reload systemd daemon
systemd:
daemon_reload: yes
- block:
- name: Update
command: ./update.sh
args:
chdir: "{{ loki_dir }}"
rescue:
- debug:
msg: "Failed to pull containers from registry - will build locally"
- name: Build locally
command: make build-all
args:
chdir: "{{ loki_dir }}"
when: loki_git is changed
# Hosts file must be added after the first update as otherwise the initial
# container pull will always fail
- name: Add hosts file
template:
src: ./etc/hosts.j2
dest: /etc/hosts
mode: 0644
- name: Ensure service is started
service:
name: loki-server
state: started
enabled: yes
# -------------------------------------------------------------------------
# Set MotD.
# -------------------------------------------------------------------------
@ -346,3 +468,16 @@
src: ./root.bashrc
dest: /root/.bashrc
mode: 0644
# -------------------------------------------------------------------------
# Update rkhunter and chkrootkit databases.
# -------------------------------------------------------------------------
- name: Update rkhunter database
command: rkhunter --propupd
- name: Run chkrootkit
command: /etc/cron.daily/chkrootkit
- name: Update chkrootkit logs
command: cp -a /var/log/chkrootkit/log.today /var/log/chkrootkit/log.expected

View File

@ -12,5 +12,5 @@
ansible_port: "{{ default_ssh_port }}"
tasks:
- name: Install python
raw: apt-get -y install python3 python3-pip python3-setuptools
- name: Install python2
raw: apt-get -y install python

View File

@ -20,6 +20,9 @@ default_ssh_port:
# GitLab
gitlab_ssh_port:
# Shadowsocks
shadowsocks_port:
# Postfix
postfix_smtp_server:
postfix_smtp_port:
@ -33,14 +36,3 @@ fail2ban_sender:
# For /etc/hosts
domains:
# Loki setup
loki_dir:
gitlab_username:
gitlab_access_token:
# Backblaze setup
b2_key_id:
b2_app_key:
gpg_key_id:
gpg_passphrase:

View File

@ -1,82 +0,0 @@
#!/bin/bash
set -ue

CYAN='\033[01;36m'
NC='\033[00m'

# Disable colours when stdout is not a terminal (e.g. when run from cron).
if [ ! -t 1 ]; then
    CYAN=''
    NC=''
fi

SCRIPT=$(readlink -f "$0")
DIRNAME=$(dirname "$SCRIPT")

# -----------------------------------------------------------------------------
# Run only if it's the first week of the month.
# -----------------------------------------------------------------------------

# Force base-10 arithmetic so a leading zero (e.g. "08") is not parsed as a
# (broken) octal literal; this replaces the previous `date | bc` pipeline.
day_of_month=$((10#$(date '+%d')))

if (( day_of_month > 7 ))
then
    echo -e "${CYAN}[${SCRIPT}] No B2 backup this week ${NC}"
    exit 0
fi

echo -e "${CYAN}[${SCRIPT}] Perform B2 backup ${NC}"

# -----------------------------------------------------------------------------
# Import all account and GPG variables (B2_ACCOUNT, B2_KEY, B2_BUCKET,
# GPG_KEY, GPG_PASSPHRASE).  Resolve the credentials file relative to this
# script rather than the current working directory so the backup also works
# when invoked from cron or another directory.
# -----------------------------------------------------------------------------

source "${DIRNAME}/b2.cred"
export PASSPHRASE=${GPG_PASSPHRASE}

# -----------------------------------------------------------------------------
# Local directory to backup.
# -----------------------------------------------------------------------------

LOCAL_DIR="/media/usb0/backup"

# -----------------------------------------------------------------------------
# Remove files older than 32 days.  (Comment previously said 15 days while
# the command used 32D - the code's 32-day retention is kept.)
# -----------------------------------------------------------------------------

echo -e "${CYAN}[${SCRIPT}] Remove files older than 32 days ${NC}"

duplicity remove-older-than 32D --force \
          --encrypt-sign-key $GPG_KEY \
          b2://${B2_ACCOUNT}:${B2_KEY}@${B2_BUCKET}

# -----------------------------------------------------------------------------
# Perform a full backup.
# -----------------------------------------------------------------------------

echo -e "${CYAN}[${SCRIPT}] Perform a full backup ${NC}"

duplicity full \
          --encrypt-sign-key $GPG_KEY \
          ${LOCAL_DIR} b2://${B2_ACCOUNT}:${B2_KEY}@${B2_BUCKET}

# -----------------------------------------------------------------------------
# Clean up failures.
# -----------------------------------------------------------------------------

echo -e "${CYAN}[${SCRIPT}] Clean up failures ${NC}"

duplicity cleanup --force \
          --encrypt-sign-key $GPG_KEY \
          b2://${B2_ACCOUNT}:${B2_KEY}@${B2_BUCKET}

# -----------------------------------------------------------------------------
# Show collection status.
# -----------------------------------------------------------------------------

echo -e "${CYAN}[${SCRIPT}] Show collection status ${NC}"

duplicity collection-status \
          --encrypt-sign-key $GPG_KEY \
          b2://${B2_ACCOUNT}:${B2_KEY}@${B2_BUCKET}

# -----------------------------------------------------------------------------
# Unset the GPG passphrase.
# -----------------------------------------------------------------------------

unset PASSPHRASE

188
backup.sh
View File

@ -1,188 +0,0 @@
#!/bin/bash
set -e

RED='\033[01;31m'
GREEN='\033[01;32m'
YELLOW='\033[01;33m'
CYAN='\033[01;36m'
NC='\033[00m'

# Disable colours when stdout is not a terminal (e.g. when run from cron).
if [ ! -t 1 ]; then
    RED=''
    GREEN=''
    YELLOW=''
    CYAN=''
    NC=''
fi

SCRIPT=$(readlink -f "$0")
DIRNAME=$(dirname "$SCRIPT")

BACKUP_DIR="/media/usb0/backup"

# -----------------------------------------------------------------------------
# Get the list of volumes declared in docker-compose.yml.  Prints the volume
# names, space separated, on stdout; exits non-zero on unexpected input.
# -----------------------------------------------------------------------------
function get_volumes {
    # Find the line where "services:" start
    services_line=$(grep -n services docker-compose.yml | \
                        awk '{split($0, a, ":"); print a[1]}')

    # The volumes are listed between "volumes:" and "services:"
    volume_list=$(head -n $services_line docker-compose.yml | \
                      awk '/volumes:/,/services:/')

    # Split into array
    IFS=':'; volumes=($volume_list); unset IFS;

    # Trim whitespace
    for ((i = 0; i < ${#volumes[@]}; i++)); do
        volumes[$i]=$(echo -e "${volumes[$i]}" | tr -d '[:space:]')
    done

    # Verify that the first entry is "volumes" and the last "services".
    # Quote the expansions so an empty element fails the test instead of
    # producing a [ syntax error.
    if [ "${volumes[0]}" != "volumes" ] || [ "${volumes[-1]}" != "services" ]
    then
        echo -e "${RED}Unexpected input${NC}" >&2
        exit 1
    fi

    # Remove first and last entry - they will be "volumes" and "services"
    let len=${#volumes[@]}-2
    volumes=("${volumes[@]:1:$len}")

    echo ${volumes[*]}
}

# -----------------------------------------------------------------------------
# Start the server.
# -----------------------------------------------------------------------------
function server_start {
    echo -e "${CYAN}[${SCRIPT}] Restart loki-server ${NC}"
    systemctl start loki-server
}

# -----------------------------------------------------------------------------
# Stop the server.
# -----------------------------------------------------------------------------
function server_stop {
    echo -e "${CYAN}[${SCRIPT}] Stop loki-server ${NC}"
    systemctl stop loki-server
}

# -----------------------------------------------------------------------------
# Back up volumes.  Takes the volume names as individual arguments; each
# volume is tarred into ${BACKUP_DIR}/<vol>.tar via a throwaway container.
# -----------------------------------------------------------------------------
function backup {
    # Bug fix: take ALL arguments as the volume list ("$@"), not just $1 -
    # the previous `volumes=$1` meant only the first volume was backed up.
    local volumes=("$@")

    # Remove old backup directory
    if [ -d ${BACKUP_DIR} ]; then
        rm -f ${BACKUP_DIR}/*.tar
        rmdir ${BACKUP_DIR}
    fi

    # Make sure directory exists
    mkdir ${BACKUP_DIR}

    for vol in "${volumes[@]}"
    do
        echo -e "${CYAN}[${SCRIPT}] Back up ${YELLOW}${vol}${CYAN} volume${NC}"
        set -o xtrace
        docker run --rm \
               -v loki_${vol}:/opt/${vol} \
               -v ${BACKUP_DIR}:/opt/backup \
               debian:stable-slim \
               bash -c "cd /opt/${vol} && tar cf /opt/backup/${vol}.tar ."
        set +o xtrace
    done
}

# -----------------------------------------------------------------------------
# Restore volumes.  Takes the volume names as individual arguments; each
# volume is repopulated from ${BACKUP_DIR}/<vol>.tar.
# -----------------------------------------------------------------------------
function restore {
    # Bug fix: take ALL arguments as the volume list ("$@"), not just $1.
    local volumes=("$@")

    for vol in "${volumes[@]}"
    do
        echo -e "${CYAN}[${SCRIPT}] Restore ${YELLOW}${vol}${CYAN} volume${NC}"
        set -o xtrace
        docker run --rm \
               -v loki_${vol}:/opt/${vol} \
               -v ${BACKUP_DIR}:/opt/backup \
               debian:stable-slim \
               bash -c "cd /opt/${vol} && tar xf /opt/backup/${vol}.tar"
        set +o xtrace
    done
}

# -----------------------------------------------------------------------------
# Main.
# -----------------------------------------------------------------------------
while getopts "br" option
do
    case ${option} in
        b )
            echo -e "${CYAN}[${SCRIPT}] Extract list of volumes ${NC}"
            volumes=($(get_volumes))
            echo -e "${YELLOW}Volumes${NC}:"
            for vol in "${volumes[@]}"
            do
                echo -e "  - ${YELLOW}${vol} ${NC}"
            done
            server_stop
            # Pass every volume name; a bare ${volumes} expands to only the
            # first array element.
            backup "${volumes[@]}"
            server_start
            exit 0
            ;;
        r )
            echo -e "${CYAN}[${SCRIPT}] Extract list of volumes ${NC}"
            volumes=($(get_volumes))
            echo -e "${YELLOW}Volumes${NC}:"
            for vol in "${volumes[@]}"
            do
                echo -e "  - ${YELLOW}${vol} ${NC}"
            done
            server_stop
            restore "${volumes[@]}"
            server_start
            exit 0
            ;;
        \? )
            echo -e "${GREEN} Usage: backup.sh [-b|-r]${NC}"
            exit 1
            ;;
    esac
done

# If we get here then no options were triggered
echo -e "${GREEN} Usage: backup.sh [-b|-r]${NC}"

View File

@ -1,6 +1,7 @@
version: '2'
volumes:
wiki:
nextcloud:
nextcloud_db:
gitlab_config:
@ -9,8 +10,7 @@ volumes:
letsencrypt:
runner_0_etc:
runner_0_home:
runner_tud_etc:
runner_tud_home:
shadowsocks:
services:
@ -20,19 +20,30 @@ services:
html:
container_name: html
image: registry.wojciechkozlowski.eu/wojtek/loki/html
image: nginx
volumes:
- ./html/html-wojciechkozlowski.eu/:/usr/share/nginx/html/
restart: always
# ---------------------------------------------------------------------------
# DokuWiki installation available at wiki.wojciechkozlowski.eu
# ---------------------------------------------------------------------------
wiki:
container_name: wiki
build: dokuwiki
image: registry.wojciechkozlowski.eu/wojtek/loki/wiki
volumes:
- wiki:/var/dokuwiki-storage
restart: always
# ---------------------------------------------------------------------------
# NextCloud installation available at cloud.wojciechkozlowski.eu
#
# Note about version lock:
# https://dba.stackexchange.com/questions/256427/unable-to-create-tables-with-row-format-compressed
# ---------------------------------------------------------------------------
nextcloud-db:
container_name: nextcloud-db
image: mariadb:10.5
image: mariadb
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
volumes:
- nextcloud_db:/var/lib/mysql
@ -45,6 +56,7 @@ services:
nextcloud-app:
container_name: nextcloud-app
build: nextcloud
image: registry.wojciechkozlowski.eu/wojtek/loki/nextcloud
links:
- nextcloud-db
@ -69,7 +81,7 @@ services:
gitlab:
container_name: gitlab
image: gitlab/gitlab-ce:15.2.0-ce.0
image: gitlab/gitlab-ce
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'https://gitlab.wojciechkozlowski.eu'
@ -85,17 +97,12 @@ services:
- gitlab_config:/etc/gitlab
- gitlab_logs:/var/log/gitlab
- gitlab_data:/var/opt/gitlab
- ./gitlab/ssh_config:/etc/ssh/ssh_config
restart: always
# See https://docs.gitlab.com/ee/ci/docker/using_docker_build.html for how to
# register the runner for running docker-in-docker builds.
#
# Note the need to set docker-privileged.
#
# https://gitlab.com/gitlab-org/gitlab-runner/issues/4501
# Since docker 19.03 also need to set the following in config.toml:
# [[runners]]
# environment = ["DOCKER_TLS_CERTDIR="]
# register the runner for running docker-in-docker builds. Note the need to
# set docker-privileged.
runner-0:
container_name: runner-0
image: gitlab/gitlab-runner
@ -105,14 +112,6 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
restart: always
runner-tud:
container_name: runner-tud
image: gitlab/gitlab-runner
volumes:
- runner_tud_etc:/etc/gitlab-runner
- runner_tud_home:/home/gitlab-runner
- /var/run/docker.sock:/var/run/docker.sock
restart: always
# ---------------------------------------------------------------------------
# Reverse proxy served by Nginx.
@ -120,12 +119,14 @@ services:
proxy:
container_name: proxy
build: proxy
image: registry.wojciechkozlowski.eu/wojtek/loki/proxy
ports:
- 80:80
- 443:443
links:
- html
- wiki
- nextcloud
- gitlab
volumes:
@ -133,3 +134,17 @@ services:
- ./proxy/nginx-conf.d:/etc/nginx/conf.d:ro
- letsencrypt:/etc/letsencrypt
restart: always
# ---------------------------------------------------------------------------
# Shadowsocks server.
# ---------------------------------------------------------------------------
shadowsocks:
container_name: shadowsocks
build: shadowsocks
image: registry.wojciechkozlowski.eu/wojtek/loki/shadowsocks
ports:
- 7698:7698
volumes:
- shadowsocks:/var/shadowsocks
restart: always

52
dokuwiki/Dockerfile Normal file
View File

@ -0,0 +1,52 @@
# Single-container DokuWiki: nginx + php-fpm, both supervised by supervisord.
FROM debian:stable-slim

# Non-interactive apt for unattended build-time installs.
ENV DEBIAN_FRONTEND noninteractive

RUN apt-get update && apt-get -y upgrade

# HTML_PATH: nginx web root; DOKU_VOL: persistent storage for all wiki state.
ENV HTML_PATH /usr/share/nginx/html
ENV DOKU_VOL /var/dokuwiki-storage

RUN apt-get -y install \
wget \
php7.0-fpm \
php7.0-xml \
nginx \
supervisor

# cgi.fix_pathinfo=0 stops PHP from guessing the script path from PATH_INFO
# (hardening against executing uploaded files); /run/php is needed for the
# php-fpm unix socket.
RUN sed -i -e "s|cgi.fix_pathinfo=1|cgi.fix_pathinfo=0|g" /etc/php/7.0/fpm/php.ini && \
mkdir /run/php

# Drop the stock nginx vhosts; our own conf.d is COPY'd in below.
RUN rm -rf /etc/nginx/sites-enabled/* && \
rm -rf /etc/nginx/conf.d

# Download the stable DokuWiki tarball into the web root, then relocate every
# mutable data directory (pages, meta, media, attics, conf) into the volume
# and leave symlinks behind so wiki content survives image rebuilds.
RUN rm -rf $HTML_PATH && \
mkdir $HTML_PATH && \
mkdir $DOKU_VOL && \
mkdir $DOKU_VOL/data && \
cd $HTML_PATH && \
wget https://download.dokuwiki.org/src/dokuwiki/dokuwiki-stable.tgz && \
tar xf dokuwiki-stable.tgz --strip 1 && \
rm dokuwiki-stable.tgz && \
chown -R www-data:www-data ./ && \
mv $HTML_PATH/data/pages $DOKU_VOL/data/pages && \
ln -s $DOKU_VOL/data/pages $HTML_PATH/data/pages && \
mv $HTML_PATH/data/meta $DOKU_VOL/data/meta && \
ln -s $DOKU_VOL/data/meta $HTML_PATH/data/meta && \
mv $HTML_PATH/data/media $DOKU_VOL/data/media && \
ln -s $DOKU_VOL/data/media $HTML_PATH/data/media && \
mv $HTML_PATH/data/media_attic $DOKU_VOL/data/media_attic && \
ln -s $DOKU_VOL/data/media_attic $HTML_PATH/data/media_attic && \
mv $HTML_PATH/data/media_meta $DOKU_VOL/data/media_meta && \
ln -s $DOKU_VOL/data/media_meta $HTML_PATH/data/media_meta && \
mv $HTML_PATH/data/attic $DOKU_VOL/data/attic && \
ln -s $DOKU_VOL/data/attic $HTML_PATH/data/attic && \
mv $HTML_PATH/conf $DOKU_VOL/conf && \
ln -s $DOKU_VOL/conf $HTML_PATH/conf

COPY nginx-conf.d /etc/nginx/conf.d
COPY supervisord.conf /etc/supervisord.conf

EXPOSE 80

VOLUME ["$DOKU_VOL"]

# supervisord is PID 1 and keeps nginx and php-fpm running in the foreground.
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]

View File

@ -0,0 +1,31 @@
# DokuWiki vhost: plain HTTP inside the container; TLS is terminated by the
# reverse proxy in front of it.
server {
listen 80;
server_name wiki.wojciechkozlowski.eu;
root /usr/share/nginx/html;
index index.php index.html index.htm;
# Serve files directly when they exist; otherwise hand the URI to the
# DokuWiki rewrite rules below.
location / {
index doku.php;
try_files $uri $uri/ @dokuwiki;
}
# DokuWiki "nice URL" rewrites: media fetch/detail, exports, and finally
# everything else maps to a page id on doku.php.
location @dokuwiki {
rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
rewrite ^/(.*) /doku.php?id=$1 last;
}
# Execute PHP via the php-fpm unix socket; =404 stops passing non-existent
# scripts to the interpreter.
location ~ \.php$ {
try_files $uri =404;
fastcgi_pass unix:/run/php/php7.0-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
# Never serve DokuWiki internals (raw pages, config, code) over HTTP.
location ~ /(data|conf|bin|inc)/ {
deny all;
}
}

23
dokuwiki/supervisord.conf Normal file
View File

@ -0,0 +1,23 @@
# Run supervisord in the foreground so it can act as the container's PID 1.
[supervisord]
nodaemon=true

[supervisorctl]
serverurl=unix:///var/run/supervisor.sock

[unix_http_server]
file=/var/run/supervisor.sock

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

# php-fpm in non-daemon mode; all output goes to the container's stdout.
[program:php]
command=/usr/sbin/php-fpm7.0 --nodaemonize
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true

# nginx in the foreground; all output goes to the container's stdout.
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true

60
gitlab/ssh_config Normal file
View File

@ -0,0 +1,60 @@
# This is the ssh client system-wide configuration file. See
# ssh_config(5) for more information. This file provides defaults for
# users, and the values can be changed in per-user configuration files
# or on the command line.
# Configuration data is parsed as follows:
# 1. command line options
# 2. user-specific file
# 3. system-wide file
# Any configuration value is only changed the first time it is set.
# Thus, host-specific definitions should be at the beginning of the
# configuration file, and defaults at the end.
# Site-wide defaults for some commonly used options. For a comprehensive
# list of available options, their meanings and defaults, please see the
# ssh_config(5) man page.
# NOTE(review): StrictHostKeyChecking no disables host-key verification for
# github.com (avoids interactive prompts in automation but permits MITM);
# consider shipping a pinned known_hosts entry instead - TODO confirm.
Host github.com
StrictHostKeyChecking no
IdentityFile /etc/gitlab/github-rsa
Host *
# ForwardAgent no
# ForwardX11 no
# ForwardX11Trusted yes
# RhostsRSAAuthentication no
# RSAAuthentication yes
# PasswordAuthentication yes
# HostbasedAuthentication no
# GSSAPIAuthentication no
# GSSAPIDelegateCredentials no
# GSSAPIKeyExchange no
# GSSAPITrustDNS no
# BatchMode no
# CheckHostIP yes
# AddressFamily any
# ConnectTimeout 0
# StrictHostKeyChecking ask
# IdentityFile ~/.ssh/identity
# IdentityFile ~/.ssh/id_rsa
# IdentityFile ~/.ssh/id_dsa
# IdentityFile ~/.ssh/id_ecdsa
# IdentityFile ~/.ssh/id_ed25519
# Port 22
# Protocol 2
# Cipher 3des
# Ciphers aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc
# MACs hmac-md5,hmac-sha1,umac-64@openssh.com,hmac-ripemd160
# EscapeChar ~
# Tunnel no
# TunnelDevice any:any
# PermitLocalCommand no
# VisualHostKey no
# ProxyCommand ssh -q -W %h:%p gateway.example.com
# RekeyLimit 1G 1h
SendEnv LANG LC_*
HashKnownHosts yes
GSSAPIAuthentication yes
GSSAPIDelegateCredentials no

View File

@ -1,24 +0,0 @@
# Build the static site with Hugo at image build time and serve the result
# with the stock nginx image.
FROM nginx

# Update
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get -y upgrade

# Everything happens in a single RUN layer so the purge/autoremove at the
# end actually keeps the build tools (hugo, git, wget) out of the image.
RUN \
# Install pre-requisites
apt-get -y install wget git && \
wget https://github.com/gohugoio/hugo/releases/download/v0.56.0/hugo_extended_0.56.0_Linux-64bit.deb && \
dpkg -i hugo_extended_0.56.0_Linux-64bit.deb && \
rm hugo_extended_0.56.0_Linux-64bit.deb && \
# Fetch the website code, build and deploy
git clone --recursive https://gitlab.wojciechkozlowski.eu/wojtek/wojciechkozlowski.eu.git && \
cd wojciechkozlowski.eu && \
hugo && \
cp -r public/* /usr/share/nginx/html && \
# Clean up
cd .. && \
rm -rf wojciechkozlowski.eu && \
apt-get -y purge hugo git wget && \
apt-get -y autoremove

@ -0,0 +1 @@
Subproject commit 5cc100c9f65b002bbc43ffe7f75a6ea7ee246423

View File

@ -3,7 +3,7 @@ Description=Dockerised web server
After=docker.service
[Service]
WorkingDirectory={{ loki_dir }}
WorkingDirectory=/root/Loki
Environment="COMPOSE_HTTP_TIMEOUT=300"
ExecStart=/usr/bin/docker-compose up
ExecStop=/usr/bin/docker-compose down

View File

@ -1,4 +1,4 @@
FROM nextcloud:24-fpm
FROM nextcloud:fpm
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get -y upgrade

View File

@ -44,7 +44,6 @@ http {
add_header X-Robots-Tag none;
add_header X-Download-Options noopen;
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options "SAMEORIGIN";
add_header Referrer-Policy no-referrer;
root /var/www/html;
@ -85,7 +84,7 @@ http {
#pagespeed off;
location / {
rewrite ^ /index.php;
rewrite ^ /index.php$request_uri;
}
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
@ -97,14 +96,12 @@ http {
location ~ ^/(?:index|remote|public|cron|core/ajax/update|status|ocs/v[12]|updater/.+|ocs-provider/.+)\.php(?:$|/) {
fastcgi_split_path_info ^(.+\.php)(/.*)$;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param HTTPS on;
#Avoid sending the security headers twice
fastcgi_param modHeadersAvailable true;
# Enable pretty urls
fastcgi_param front_controller_active true;
fastcgi_pass php-handler;
fastcgi_intercept_errors on;

View File

@ -15,8 +15,6 @@ RUN mkdir /etc/nginx/cert && openssl dhparam -out /etc/nginx/cert/dhparam.pem 20
COPY certbot.cron /etc/cron.d/certbot
COPY supervisord.conf /etc/supervisord.conf
RUN chmod go-wx /etc/cron.d/certbot
VOLUME ["/etc/letsencrypt"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]

View File

@ -8,4 +8,4 @@
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
0 */12 * * * root perl -e 'sleep int(rand(3600))' && certbot -q renew --webroot-path /var/www/html
0 */12 * * * root perl -e 'sleep int(rand(3600))' && certbot -q renew

View File

@ -16,10 +16,6 @@ server {
listen 443 ssl;
server_name cloud.wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/cloud.wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/cloud.wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/cloud.wojciechkozlowski.eu/chain.pem;
client_max_body_size 10G; # 0=unlimited - set max upload size
location / {

View File

@ -16,10 +16,6 @@ server {
listen 443 ssl;
server_name gitlab.wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/gitlab.wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitlab.wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/gitlab.wojciechkozlowski.eu/chain.pem;
client_max_body_size 10G; # 0=unlimited - set max upload size
location / {

View File

@ -16,10 +16,6 @@ server {
listen 443 ssl;
server_name registry.wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/registry.wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/registry.wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/registry.wojciechkozlowski.eu/chain.pem;
client_max_body_size 10G; # 0=unlimited - set max upload size
location / {

View File

@ -1,6 +1,6 @@
server {
listen 80;
server_name pi.wojciechkozlowski.eu;
server_name wiki.wojciechkozlowski.eu;
location ^~ /.well-known {
allow all;
@ -14,17 +14,13 @@ server {
server {
listen 443 ssl;
server_name pi.wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/pi.wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/pi.wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/pi.wojciechkozlowski.eu/chain.pem;
server_name wiki.wojciechkozlowski.eu;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
proxy_pass http://192.168.2.210:3000;
proxy_pass http://wiki;
}
error_page 500 502 503 504 /50x.html;

View File

@ -14,33 +14,7 @@ server {
server {
listen 443 ssl;
server_name wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/wojciechkozlowski.eu/chain.pem;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
proxy_pass http://html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
server {
listen 443 ssl;
server_name www.wojciechkozlowski.eu;
ssl_certificate /etc/letsencrypt/live/www.wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/www.wojciechkozlowski.eu/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/www.wojciechkozlowski.eu/chain.pem;
server_name wojciechkozlowski.eu www.wojciechkozlowski.eu;
location / {
proxy_set_header X-Real-IP $remote_addr;

View File

@ -25,18 +25,26 @@ http {
#gzip on;
ssl_certificate /etc/letsencrypt/live/wojciechkozlowski.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/wojciechkozlowski.eu/privkey.pem;
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 60m;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DHE+AES128:!ADH:!AECDH:!MD5;
ssl_dhparam /etc/nginx/cert/dhparam.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/letsencrypt/live/wojciechkozlowski.eu/chain.pem;
resolver 208.67.222.222 208.67.220.220;
add_header Strict-Transport-Security "max-age=31536000" always;
include /etc/nginx/conf.d/*.conf;
}
}

View File

@ -1,35 +0,0 @@
#!/bin/bash
# Clean up untagged images in the GitLab container registry.  Requires a
# running "gitlab" container and a gitlab.cred file next to this script.
set -e

CYAN='\033[01;36m'
NC='\033[00m'

# Disable colours when stdout is not a terminal (e.g. when run from cron).
if [ ! -t 1 ]; then
CYAN=''
NC=''
fi

SCRIPT=$(readlink -f $0)
DIRNAME=$(dirname $SCRIPT)
# -----------------------------------------------------------------------------
# Soft delete untagged images.
# -----------------------------------------------------------------------------
echo -e "${CYAN}[${SCRIPT}] Soft delete untagged images ${NC}"
install="pip3 install gitlab-registry-cleanup"
cleanup="gitlab-registry-cleanup -g https://gitlab.wojciechkozlowski.eu -r https://registry.wojciechkozlowski.eu -c /gitlab.cred"
# Run the cleanup tool in a throwaway python container that shares the
# gitlab container's volumes so it can reach the registry storage.
docker run --rm --volumes-from gitlab \
-v ${DIRNAME}/gitlab.cred:/gitlab.cred \
python bash -c "${install} && ${cleanup}"
# -----------------------------------------------------------------------------
# Garbage collect and hard delete untagged images.
# -----------------------------------------------------------------------------
echo -e "${CYAN}[${SCRIPT}] Garbage collect untagged images ${NC}"
docker exec gitlab bash -c "gitlab-ctl registry-garbage-collect"

15
shadowsocks/Dockerfile Normal file
View File

@ -0,0 +1,15 @@
# Shadowsocks server container; the configuration is baked into a volume path.
FROM debian:stable-slim

ENV DEBIAN_FRONTEND noninteractive

RUN apt-get update && apt-get -y upgrade

RUN apt-get -y install shadowsocks

# SS_VOL holds the runtime configuration (config.json).
ENV SS_VOL /var/shadowsocks
RUN mkdir $SS_VOL
ADD config.json $SS_VOL

EXPOSE 7698

VOLUME ["$SS_VOL"]

# sh -c form so $SS_VOL is expanded at container start time.
CMD ["sh", "-c", "/usr/bin/ssserver -c $SS_VOL/config.json"]

18
shadowsocks/config.json Normal file
View File

@ -0,0 +1,18 @@
{
"server":"0.0.0.0",
"server_ipv6":"[::]",
"server_port":7698,
"local_address":"127.0.0.1",
"local_port":1080,
"password":"z",
"timeout":120,
"method":"aes-256-cfb",
"protocol":"$auth_sha1_v4_compatible",
"protocol_param":"",
"obfs":"http_simple_compatible",
"obfs_param":"",
"redirect":"",
"dns_ipv6":false,
"fast_open":false,
"workers":1
}

30
tinyproxy/Dockerfile Normal file
View File

@ -0,0 +1,30 @@
# Stage 1: build tinyproxy from source in a throwaway image.
FROM debian:stable-slim as intermediate
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install \
build-essential \
wget
# Release version of tinyproxy to download and build.
ENV VER "1.10.0"
RUN wget https://github.com/tinyproxy/tinyproxy/releases/download/$VER/tinyproxy-$VER.tar.xz
RUN tar xf tinyproxy-$VER.tar.xz && mv tinyproxy-$VER tinyproxy
RUN cd tinyproxy && \
./configure && \
make -j 9
# Stage 2: runtime image containing only the built tree plus stunnel/certs.
FROM debian:stable-slim
COPY --from=intermediate /tinyproxy /tinyproxy
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get -y upgrade
RUN apt-get install -y stunnel4
ADD ./letsencrypt /etc/letsencrypt
ADD ./stunnel.conf /etc/stunnel/stunnel.conf
ADD ./tinyproxy.conf /tinyproxy/etc/tinyproxy.conf
EXPOSE 7700
# NOTE(review): stunnel4 is installed and configured above, but CMD starts
# only tinyproxy - nothing in this Dockerfile launches stunnel.  Confirm
# whether stunnel is meant to run in this container (e.g. via a supervisor
# or entrypoint script) or externally.
CMD ["/tinyproxy/src/tinyproxy", "-d", "-c", "/tinyproxy/etc/tinyproxy.conf"]

4
tinyproxy/proxy.pac Normal file
View File

@ -0,0 +1,4 @@
function FindProxyForURL(url, host) {
    // Proxy auto-config entry point: every request, regardless of url or
    // host, is routed through the single TLS-wrapped proxy endpoint.
    var proxyDirective = "HTTPS wojciechkozlowski.eu:443";
    return proxyDirective;
}

7
tinyproxy/stunnel.conf Normal file
View File

@ -0,0 +1,7 @@
# stunnel terminates TLS on port 443 and forwards the decrypted proxy
# traffic to tinyproxy listening on localhost:7700.
# Empty pid value: do not write a pid file.
pid =

cert = /etc/letsencrypt/live/wojciechkozlowski.eu/fullchain.pem
key = /etc/letsencrypt/live/wojciechkozlowski.eu/privkey.pem

# Service "p": accept TLS on 443, connect plaintext to tinyproxy.
[p]
accept = 443
connect = localhost:7700

349
tinyproxy/tinyproxy.conf Normal file
View File

@ -0,0 +1,349 @@
##
## tinyproxy.conf -- tinyproxy daemon configuration file
##
## This example tinyproxy.conf file contains example settings
## with explanations in comments. For descriptions of all
## parameters, see the tinyproxy.conf(5) manual page.
##
#
# User/Group: This allows you to set the user and group that will be
# used for tinyproxy after the initial binding to the port has been done
# as the root user. Either the user or group name or the UID or GID
# number may be used.
#
User root
Group root
#
# Port: Specify the port which tinyproxy will listen on. Please note
# that should you choose to run on a port lower than 1024 you will need
# to start tinyproxy using root.
#
Port 7700
#
# Listen: If you have multiple interfaces this allows you to bind to
# only one. If this is commented out, tinyproxy will bind to all
# interfaces present.
#
#Listen 192.168.0.1
#
# Bind: This allows you to specify which interface will be used for
# outgoing connections. This is useful for multi-home'd machines where
# you want all traffic to appear outgoing from one particular interface.
#
#Bind 192.168.0.1
#
# BindSame: If enabled, tinyproxy will bind the outgoing connection to the
# ip address of the incoming connection.
#
#BindSame yes
#
# Timeout: The maximum number of seconds of inactivity a connection is
# allowed to have before it is closed by tinyproxy.
#
Timeout 600
#
# ErrorFile: Defines the HTML file to send when a given HTTP error
# occurs. You will probably need to customize the location to your
# particular install. The usual locations to check are:
# /usr/local/share/tinyproxy
# /usr/share/tinyproxy
# /etc/tinyproxy
#
#ErrorFile 404 "/usr/local/share/tinyproxy/404.html"
#ErrorFile 400 "/usr/local/share/tinyproxy/400.html"
#ErrorFile 503 "/usr/local/share/tinyproxy/503.html"
#ErrorFile 403 "/usr/local/share/tinyproxy/403.html"
#ErrorFile 408 "/usr/local/share/tinyproxy/408.html"
#
# DefaultErrorFile: The HTML file that gets sent if there is no
# HTML file defined with an ErrorFile keyword for the HTTP error
# that has occurred.
#
DefaultErrorFile "/usr/local/share/tinyproxy/default.html"
#
# StatHost: This configures the host name or IP address that is treated
# as the stat host: Whenever a request for this host is received,
# Tinyproxy will return an internal statistics page instead of
# forwarding the request to that host. The default value of StatHost is
# tinyproxy.stats.
#
#StatHost "tinyproxy.stats"
#
#
# StatFile: The HTML file that gets sent when a request is made
# for the stathost. If this file doesn't exist a basic page is
# hardcoded in tinyproxy.
#
StatFile "/usr/local/share/tinyproxy/stats.html"
#
# LogFile: Allows you to specify the location where information should
# be logged to. If you would prefer to log to syslog, then disable this
# and enable the Syslog directive. These directives are mutually
# exclusive. If neither Syslog nor LogFile are specified, output goes
# to stdout.
#
#LogFile "/usr/local/var/log/tinyproxy/tinyproxy.log"
#
# Syslog: Tell tinyproxy to use syslog instead of a logfile. This
# option must not be enabled if the Logfile directive is being used.
# These two directives are mutually exclusive.
#
#Syslog On
#
# LogLevel: Warning
#
# Set the logging level. Allowed settings are:
# Critical (least verbose)
# Error
# Warning
# Notice
# Connect (to log connections without Info's noise)
# Info (most verbose)
#
# The LogLevel logs from the set level and above. For example, if the
# LogLevel was set to Warning, then all log messages from Warning to
# Critical would be output, but Notice and below would be suppressed.
#
LogLevel Info
#
# PidFile: Write the PID of the main tinyproxy thread to this file so it
# can be used for signalling purposes.
# If not specified, no pidfile will be written.
#
#PidFile "/usr/local/var/run/tinyproxy/tinyproxy.pid"
#
# XTinyproxy: Tell Tinyproxy to include the X-Tinyproxy header, which
# contains the client's IP address.
#
#XTinyproxy Yes
#
# Upstream:
#
# Turns on upstream proxy support.
#
# The upstream rules allow you to selectively route upstream connections
# based on the host/domain of the site being accessed.
#
# Syntax: upstream type (user:pass@)ip:port ("domain")
# Or: upstream none "domain"
# The parts in parens are optional.
# Possible types are http, socks4, socks5, none
#
# For example:
# # connection to test domain goes through testproxy
# upstream http testproxy:8008 ".test.domain.invalid"
# upstream http testproxy:8008 ".our_testbed.example.com"
# upstream http testproxy:8008 "192.168.128.0/255.255.254.0"
#
# # upstream proxy using basic authentication
# upstream http user:pass@testproxy:8008 ".test.domain.invalid"
#
# # no upstream proxy for internal websites and unqualified hosts
# upstream none ".internal.example.com"
# upstream none "www.example.com"
# upstream none "10.0.0.0/8"
# upstream none "192.168.0.0/255.255.254.0"
# upstream none "."
#
# # connection to these boxes go through their DMZ firewalls
# upstream http cust1_firewall:8008 "testbed_for_cust1"
# upstream http cust2_firewall:8008 "testbed_for_cust2"
#
# # default upstream is internet firewall
# upstream http firewall.internal.example.com:80
#
# You may also use SOCKS4/SOCKS5 upstream proxies:
# upstream socks4 127.0.0.1:9050
# upstream socks5 socksproxy:1080
#
# The LAST matching rule wins the route decision. As you can see, you
# can use a host, or a domain:
# name matches host exactly
# .name matches any host in domain "name"
# . matches any host with no domain (in 'empty' domain)
# IP/bits matches network/mask
# IP/mask matches network/mask
#
#Upstream http some.remote.proxy:port
#
# MaxClients: This is the absolute highest number of threads which will
# be created. In other words, only MaxClients number of clients can be
# connected at the same time.
#
MaxClients 100
#
# MinSpareServers/MaxSpareServers: These settings set the upper and
# lower limit for the number of spare servers which should be available.
#
# If the number of spare servers falls below MinSpareServers then new
# server processes will be spawned. If the number of servers exceeds
# MaxSpareServers then the extras will be killed off.
#
MinSpareServers 5
MaxSpareServers 20
#
# StartServers: The number of servers to start initially.
#
StartServers 10
#
# MaxRequestsPerChild: The number of connections a thread will handle
# before it is killed. In practice this should be set to 0, which
# disables thread reaping. If you do notice problems with memory
# leakage, then set this to something like 10000.
#
MaxRequestsPerChild 0
#
# Allow: Customization of authorization controls. If there are any
# access control keywords then the default action is to DENY. Otherwise,
# the default action is ALLOW.
#
# The order of the controls are important. All incoming connections are
# tested against the controls based on order.
#
Allow 127.0.0.1
# BasicAuth: HTTP "Basic Authentication" for accessing the proxy.
# If there are any entries specified, access is only granted for authenticated
# users.
BasicAuth user password
#
# AddHeader: Adds the specified headers to outgoing HTTP requests that
# Tinyproxy makes. Note that this option will not work for HTTPS
# traffic, as Tinyproxy has no control over what headers are exchanged.
#
#AddHeader "X-My-Header" "Powered by Tinyproxy"
#
# ViaProxyName: The "Via" header is required by the HTTP RFC, but using
# the real host name is a security concern. If the following directive
# is enabled, the string supplied will be used as the host name in the
# Via header; otherwise, the server's host name will be used.
#
ViaProxyName "tinyproxy"
#
# DisableViaHeader: When this is set to yes, Tinyproxy does NOT add
# the Via header to the requests. This virtually puts Tinyproxy into
# stealth mode. Note that RFC 2616 requires proxies to set the Via
# header, so by enabling this option, you break compliance.
# Don't disable the Via header unless you know what you are doing...
#
#DisableViaHeader Yes
#
# Filter: This allows you to specify the location of the filter file.
#
#Filter "/usr/local/etc/tinyproxy/filter"
#
# FilterURLs: Filter based on URLs rather than domains.
#
#FilterURLs On
#
# FilterExtended: Use POSIX Extended regular expressions rather than
# basic.
#
#FilterExtended On
#
# FilterCaseSensitive: Use case sensitive regular expressions.
#
#FilterCaseSensitive On
#
# FilterDefaultDeny: Change the default policy of the filtering system.
# If this directive is commented out, or is set to "No" then the default
# policy is to allow everything which is not specifically denied by the
# filter file.
#
# However, by setting this directive to "Yes" the default policy becomes
# to deny everything which is _not_ specifically allowed by the filter
# file.
#
#FilterDefaultDeny Yes
#
# Anonymous: If an Anonymous keyword is present, then anonymous proxying
# is enabled. The headers listed are allowed through, while all others
# are denied. If no Anonymous keyword is present, then all headers are
# allowed through. You must include quotes around the headers.
#
# Most sites require cookies to be enabled for them to work correctly, so
# you will need to allow Cookies through if you access those sites.
#
#Anonymous "Host"
#Anonymous "Authorization"
#Anonymous "Cookie"
#
# ConnectPort: This is a list of ports allowed by tinyproxy when the
# CONNECT method is used. To disable the CONNECT method altogether, set
# the value to 0. If no ConnectPort line is found, all ports are
# allowed.
#
# The following two ports are used by SSL.
#
ConnectPort 443
#ConnectPort 563
#
# Configure one or more ReversePath directives to enable reverse proxy
# support. With reverse proxying it's possible to make a number of
# sites appear as if they were part of a single site.
#
# If you uncomment the following two directives and run tinyproxy
# on your own computer at port 8888, you can access Google using
# http://localhost:8888/google/ and Wired News using
# http://localhost:8888/wired/news/. Neither will actually work
# until you uncomment ReverseMagic as they use absolute linking.
#
#ReversePath "/google/" "http://www.google.com/"
#ReversePath "/wired/" "http://www.wired.com/"
#
# When using tinyproxy as a reverse proxy, it is STRONGLY recommended
# that the normal proxy is turned off by uncommenting the next directive.
#
#ReverseOnly Yes
#
# Use a cookie to track reverse proxy mappings. If you need to reverse
# proxy sites which have absolute links you must uncomment this.
#
#ReverseMagic Yes
#
# The URL that's used to access this reverse proxy. The URL is used to
# rewrite HTTP redirects so that they won't escape the proxy. If you
# have a chain of reverse proxies, you'll need to put the outermost
# URL here (the address which the end user types into his/her browser).
#
# If not set then no rewriting occurs.
#
#ReverseBaseURL "http://localhost:8888/"

View File

@ -23,7 +23,7 @@ docker-compose -f $DIRNAME/docker-compose.yml pull
echo -e "${CYAN}[${SCRIPT}] Stop the containers${NC}"
systemctl stop loki-server
service loki-server stop
# -----------------------------------------------------------------------------
# Start the containers.
@ -31,7 +31,7 @@ systemctl stop loki-server
echo -e "${CYAN}[${SCRIPT}] Start the containers${NC}"
systemctl start loki-server
service loki-server start
# -----------------------------------------------------------------------------
# Remove untagged images.