Compare commits


108 commits

Author SHA1 Message Date
redbeardymcgee
0569221088 feat(grocy): add Grocy 2025-04-23 09:16:41 -05:00
redbeardymcgee
8a20cf4a3b fix(penpot): correctly name mailcatcher 2025-04-22 10:43:22 -05:00
redbeardymcgee
c17d605334 fix(penpot): move env var to secret 2025-04-21 19:06:50 -05:00
redbeardymcgee
58f428285e feat(penpot): init Penpot 2025-04-21 19:03:59 -05:00
redbeardymcgee
b9f9b8d106 feat(speedtest-tracker): init speedtest-tracker 2025-04-21 10:22:56 -05:00
redbeardymcgee
500ffb5f8c fix(quadlets): normalize networks, volumes, update 2025-04-21 08:34:11 -05:00
redbeardymcgee
72f703b5f9 fix(leantime): correct volume reference 2025-04-19 06:47:06 -05:00
redbeardymcgee
93e97f11ae fix(leantime): add env file 2025-04-18 08:40:19 -05:00
redbeardymcgee
cb8ce2a2fb fix(caddy): add reverse-proxy network 2025-04-16 08:49:49 -05:00
redbeardymcgee
27c9ba29cc fix(leantime): correct image ref & env 2025-04-15 08:51:46 -05:00
redbeardymcgee
12eb66d913 fix(leantime): add env file and network 2025-04-15 08:51:27 -05:00
redbeardymcgee
793f61fb60 fix(leantime): add env vars and secrets 2025-04-15 08:18:34 -05:00
redbeardymcgee
a067a6c857 feat(leantime): add Leantime task management 2025-04-13 09:04:36 -05:00
redbeardymcgee
8e2fce82bb feat(maxun): add Maxun 2025-04-12 19:45:52 -05:00
redbeardymcgee
ddb6721ca7 feat(checkmate): add Checkmate monitoring 2025-04-12 19:15:50 -05:00
redbeardymcgee
67148f146f feat(apprise): add Apprise 2025-04-12 18:59:46 -05:00
redbeardymcgee
808c26324e feat(betanin): add betanin 2025-04-12 18:48:11 -05:00
redbeardymcgee
3cc8919e6d feat(graphite): add Graphite 2025-04-12 15:49:46 -05:00
redbeardymcgee
6d9e428987 feat(healthchecks): add healthchecks 2025-04-12 15:38:02 -05:00
redbeardymcgee
c48014cb23 feat(homepage): add Homepage 2025-04-12 15:22:12 -05:00
redbeardymcgee
6c66dff470 feat(komga): add Komga comic reader 2025-04-12 15:09:36 -05:00
redbeardymcgee
2af05d816b feat(miniflux): add miniflux rss reader 2025-04-12 15:02:03 -05:00
redbeardymcgee
2f11e411f2 feat(n8n): add n8n workflow automation 2025-04-12 14:54:00 -05:00
redbeardymcgee
d53c42a7f7 feat(openobserve): add OpenObserve 2025-04-12 14:43:48 -05:00
redbeardymcgee
38ca9db96a fix(pihole): add missing network 2025-04-12 12:18:21 -05:00
redbeardymcgee
f2f7a03e8d feat(pihole): add Pi-hole 2025-04-12 12:16:28 -05:00
redbeardymcgee
f3b4b61137 feat(pocket-id): add Pocket ID 2025-04-12 11:37:48 -05:00
redbeardymcgee
7079952150 feat(vikunja): add Vikunja 2025-04-12 11:22:55 -05:00
redbeardymcgee
f2628b947f chore(readme): mark glueforward complete 2025-04-12 10:48:19 -05:00
redbeardymcgee
42bc453386 chore(readme): update finished list 2025-04-12 10:40:37 -05:00
redbeardymcgee
2e77a9b6d0 fix(romm): correct Volume key 2025-04-12 10:17:34 -05:00
redbeardymcgee
ee171d2d46 fix(romm): typo in secret name 2025-04-12 09:56:42 -05:00
redbeardymcgee
b759dc832b feat(romm): add ROMM 2025-04-12 09:42:33 -05:00
redbeardymcgee
284310f9cb fix(vaultwarden): remove blank network 2025-04-10 19:10:49 -05:00
redbeardymcgee
1f507d9889 feat(vaultwarden): add vaultwarden 2025-04-10 08:32:17 -05:00
redbeardymcgee
29e77039ac fix(gaseous): link to correct mariadb 2025-04-07 19:55:04 -05:00
redbeardymcgee
8c98a4e63d fix(gaseous): add network 2025-04-07 19:53:24 -05:00
redbeardymcgee
740508e177 feat(gaseous): add gaseous rom manager 2025-04-07 19:23:15 -05:00
redbeardymcgee
1fdb890ee2 chore(readme): mark newly added quadlets 2025-04-02 19:14:49 -05:00
redbeardymcgee
53af271df2 feat(syslog-ng): add syslog-ng from LSIO 2025-04-02 12:14:48 -05:00
redbeardymcgee
295999f40d fix(graylog): remove unnecessary port 2025-04-02 12:10:43 -05:00
redbeardymcgee
5c3a58c1e8 fix(graylog): correct network ref & entrypoint 2025-04-02 10:32:15 -05:00
redbeardymcgee
659de22e42 feat(graylog): add graylog 2025-04-02 07:01:40 -05:00
redbeardymcgee
e751cb6580 fix(templates): better network template 2025-04-02 07:01:23 -05:00
redbeardymcgee
49221c4b58 fix(postiz): remove extra env secret 2025-04-02 07:00:49 -05:00
redbeardymcgee
ec33e116a1 fix(postiz): correct password 2025-04-01 21:46:07 -05:00
redbeardymcgee
ce3d7b2438 feat(postiz): add Postiz social media manager 2025-04-01 19:25:15 -05:00
redbeardymcgee
8d3d21476e fix(termix): use secret for SALT 2025-04-01 11:34:56 -05:00
redbeardymcgee
2230e91693 feat(termix): add termix ssh terminal manager 2025-04-01 08:23:40 -05:00
redbeardymcgee
1d84da078c feat(forgejo): add forgejo git forge 2025-03-31 20:17:24 -05:00
redbeardymcgee
0e5e90214f fix(filestash): editor launch command 2025-03-31 19:40:50 -05:00
redbeardymcgee
17071fe229 feat(filestash): add filestash 2025-03-31 18:34:53 -05:00
redbeardymcgee
7225857391 fix(librenms): fix dependency resolution 2025-03-30 08:18:00 -05:00
redbeardymcgee
96ac7bbfac chore(readme): link to ubuntu doc 2025-03-29 10:04:19 -05:00
redbeardymcgee
d57b085d52 chore(readme): check LibreNMS 2025-03-29 09:46:40 -05:00
redbeardymcgee
0448ff83a1 chore(librenms): update readme with known issues 2025-03-29 09:45:00 -05:00
redbeardymcgee
07794e42fa fix(librenms): correct typo in mariadb env 2025-03-29 09:44:41 -05:00
redbeardymcgee
7a69dc54d8 fix(librenms): correct host and snmp community 2025-03-29 09:44:16 -05:00
redbeardymcgee
4d5815e34d fix(librenms): correct dependency names 2025-03-29 09:43:53 -05:00
redbeardymcgee
a94ef488a6 fix(librenms): add deps to start all containers 2025-03-28 18:24:38 -05:00
redbeardymcgee
c5945259e0 feat(librenms): add LibreNMS 2025-03-28 18:17:31 -05:00
EphemeralDev
c34bb5d942 fix(ubuntu): workaround missing network-online.target 2025-03-23 09:38:31 -05:00
redbeardymcgee
ed5aa52966 fix(glance): include recommended dir structure 2025-03-15 11:42:04 -05:00
redbeardymcgee
b75b3c4bb1 feat(glance): add Glance dashboard 2025-03-15 10:20:39 -05:00
redbeardymcgee
8e18a4b949 feat(openspeedtest): add openspeedtest server 2025-03-12 14:40:22 -05:00
redbeardymcgee
30e11da692 chore(mamstack): move mamstack to branch 2025-02-16 17:52:53 -06:00
redbeardymcgee
836052b84e feat(dashdot): add dashdot and dashdot-nvidia 2025-02-12 21:21:42 -06:00
redbeardymcgee
c22de69c3f chore: improve alma details 2025-02-08 16:14:24 -06:00
EphemeralDev
846491a1a3 doc(ubuntu): add ubuntu 24.11 instructions 2025-01-29 07:59:32 -06:00
redbeardymcgee
867d0d6437 fix(doc): tidy up and add suggestions from #25
A few notes were made for #25 that I could also apply for my own
authored docs.
2025-01-26 18:03:12 -06:00
redbeardymcgee
5cce25c9e8 feat(mamstack): remove unnecessary containers 2025-01-25 12:06:32 -06:00
redbeardymcgee
ecb4c46483 doc: add some sane defaults to template 2025-01-25 12:05:50 -06:00
redbeardymcgee
4f18c17e0c doc: properly format acknowledgments 2025-01-25 11:54:49 -06:00
redbeardymcgee
77257b73a5 doc: add acknowledgments for other contributors 2025-01-25 07:33:21 -06:00
redbeardymcgee
453468afbb doc: check off finished quadlets
netdata
snowflake
2025-01-25 07:20:09 -06:00
redbeardymcgee
3020be0d56 feat(netdata): add netdata monitoring 2025-01-25 06:51:42 -06:00
redbeardymcgee
989fade8bb fix(mirotalk): only accepts volume .env 2025-01-04 18:01:11 -06:00
redbeardymcgee
78a202e28d feat(mirotalk): add mirotalk whiteboard 2025-01-04 17:30:48 -06:00
redbeardymcgee
5a0c5bb487 feat(memos): add memos note taking 2024-12-27 08:38:31 -06:00
redbeardymcgee
c201da020c doc: include more upcoming quadlets 2024-12-27 08:36:04 -06:00
redbeardymcgee
dbfa66e26c feat(nebula): add nebula overlay network 2024-12-26 19:54:57 -06:00
redbeardymcgee
5a2b808b03 feat(chartdb): add chartdb 2024-12-26 19:54:37 -06:00
redbeardymcgee
2a2ecd33cc doc: update minio and blinko 2024-12-26 18:46:15 -06:00
redbeardymcgee
27e8ad1279 feat(minio): add minio s3 storage 2024-12-26 18:27:28 -06:00
redbeardymcgee
9d4f1bc7a7 feat(blinko): add blinko with postgres 2024-12-26 16:04:34 -06:00
redbeardymcgee
a03913a639 fix(prometheus): use quay.io and no default config 2024-12-26 15:33:47 -06:00
redbeardymcgee
ba86a4cb2a feat(prometheus): add prometheus monitoring 2024-12-26 09:06:48 -06:00
redbeardymcgee
c072dde506 doc: finished protonmail-bridge 2024-12-26 08:36:16 -06:00
redbeardymcgee
c6b57e5624 Squash merge feat/protonmail-bridge into main 2024-12-25 10:01:42 -06:00
redbeardymcgee
bf76662d4f doc: mark finished containers 2024-12-25 08:23:35 -06:00
redbeardymcgee
5083b65fcb fix(mealie): improve example base url 2024-12-25 08:23:08 -06:00
redbeardymcgee
969c2455c3 fix(joplin): add secret, use upstream container 2024-12-25 08:22:54 -06:00
redbeardymcgee
ffabf88216 fix(hoarder): add http protocol to addrs
This should make hoarder work as advertised now, without failure to
connect to the browser or meilisearch.

Tacked on a corrected image link as well.
2024-12-17 10:01:29 -06:00
redbeardymcgee
fb3d880182 doc: move weechat readme into related pod 2024-12-16 19:27:42 -06:00
rbm
ffea719640 Add Linkwarden (#24) 2024-12-14 19:12:31 -06:00
redbeardymcgee
3a2cccb8e2 doc: more upcoming apps 2024-12-14 13:13:02 -06:00
redbeardymcgee
05d2213d4e fix(tandoor): missing nginx container 2024-12-11 18:51:34 -06:00
redbeardymcgee
dcd80a4e21 fix(readme): mark tandoor 2024-12-11 18:29:46 -06:00
rbm
762d568e28 feat: add Tandoor 2024-12-11 18:24:13 -06:00
redbeardymcgee
990eab4cbe wip: tandoor
fix: corrected volumes and host/container name

fix: remove extra container

fix: correct volumes and dependencies
2024-12-11 18:18:29 -06:00
redbeardymcgee
0f9ada8b50 feat: add snowflake proxy 2024-12-09 17:10:31 -06:00
redbeardymcgee
6d7d626194 tidy: better template and toc 2024-12-09 16:39:11 -06:00
redbeardymcgee
9f2e5b66cd feat: add mealie 2024-12-08 10:17:02 -06:00
redbeardymcgee
72f59303bf fix(template): organize templates into a dir 2024-12-05 21:01:28 -06:00
redbeardymcgee
31950a9d70 move kimai to wip branch 2024-12-05 19:26:33 -06:00
redbeardymcgee
d57ced0147 move netbird to wip branch 2024-12-05 19:25:48 -06:00
redbeardymcgee
523cb2eb1b move linkwarden to wip branch 2024-12-05 19:25:10 -06:00
redbeardymcgee
0795095d5f move wger to wip branch 2024-12-05 19:24:37 -06:00
319 changed files with 3659 additions and 1385 deletions

View file

@@ -23,7 +23,7 @@ them and decide for yourself.
 ## Disks
-## Partitions
+### Partitions
 Repeat the following steps for all disks that you want to join together into
 one single logical volume.
@@ -37,7 +37,7 @@ dd if=/dev/zero of=/dev/sdX bs=512 count=1 conv=notrunc
 dd if=/dev/zero of=/dev/sdY bs=512 count=1 conv=notrunc
 ```
-## LVM
+### LVM
 ```bash
 # Create physical volume
@@ -54,7 +54,7 @@ mke2fs -t ext4 /dev/library/books
 e2fsck -f /dev/library/books
 ```
-## /etc/systemd/system/volumes-books.mount
+### /etc/systemd/system/volumes-books.mount
 ```ini
 [Mount]
@@ -105,15 +105,13 @@ printf '%s\n' \
 ## Cockpit -> https://ip-addr:9090
 > [!WARNING]
-> Disable the firewall if you are lazy Exposing ports for other services can be
-> exhausting and I have not learned how to do this for containers properly.
-> Each container may need a new rule for something, not sure.
+> I run behind an existing firewall, not in a VPS or cloud provider.
 > ```bash
 > systemctl disable --now firewalld
 > ```
 > [!NOTE]
-> Should be able to set up good firewall with only 80/443 open.
+> Should be able to set up good firewall with only 22/80/443 open.
 Enable the socket-activated cockpit service and allow it through the firewall.
@@ -158,7 +156,7 @@ systemctl enable --now podman
 ## Prepare host networking stack
-## slirp4netns
+### slirp4netns
 > [!NOTE]
 > This may not be necessary but my system is currently using it.
@@ -167,7 +165,7 @@ systemctl enable --now podman
 dnf install slirp4netns
 ```
-## Install DNS server for `podman`
+### Install DNS server for `podman`
 > [!NOTE]
 > Not sure how to resolve these correctly yet but the journal logs it
@@ -177,17 +175,17 @@ dnf install slirp4netns
 dnf install aardvark-dns
 ```
-## Allow rootless binding port 80+
+### Allow rootless binding port 80+
 > [!NOTE]
 > This is only necessary if you are setting up the reverse proxy.
 ```bash
 printf '%s\n' 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-unprivileged-port-binding.conf
-sysctl 'net.ipv4.ip_unprivileged_port_start=80'
+sysctl -w net.ipv4.ip_unprivileged_port_start=80
 ```
-## Allow containers to route within multiple networks
+### Allow containers to route within multiple networks
 ```bash
 printf '%s\n' 'net.ipv4.conf.all.rp_filter=2' > /etc/sysctl.d/99-reverse-path-loose.conf
@@ -223,17 +221,17 @@ loginctl enable-linger $ctuser
 > [!TIP]
 > Optionally setup ssh keys to directly login to $ctuser.
+### Setup $ctuser env
 > [!NOTE]
 > The login shell doesn't exist. Launch `bash -l` manually to get a shell or
 > else your `ssh` will exit with a status of 1.
-## Setup $ctuser env
 ```bash
 # Switch to user (`-i` doesn't work without a login shell)
-sudo -u $ctuser bash -l
+machinectl shell $ctuser@ /bin/bash
 # Create dirs
-mkdir -p ~/.config/{containers/systemd,environment.d} ~/containers/storage
+mkdir -p ~/.config/{containers/systemd,environment.d}
 # Prepare `systemd --user` env
 echo 'XDG_RUNTIME_DIR=/run/user/2000' >> ~/.config/environment.d/10-xdg.conf
 # Enable container auto-update
@@ -247,7 +245,7 @@ exit
 > I disabled SELinux to not deal with this for every container.
 > /etc/selinux/config -> `SELINUX=disabled`
-> [!NOTE]
+> [!TIP]
 > Set up the correct policies permanently instead of disabling SELinux
 Temporarily set SELinux policy to allow containers to use devices.
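For reference, the SELinux route mentioned in that last hunk can be taken with the `container_use_devices` boolean instead of disabling SELinux entirely; a minimal sketch, assuming the boolean is available from the container-selinux package:

```bash
# Check the current value of the boolean
getsebool container_use_devices
# Allow containers to use devices; -P makes the change persistent across reboots
sudo setsebool -P container_use_devices 1
```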

107
README.md
View file

@@ -1,5 +1,18 @@
 # podbox
+## Table of Contents
+- [What is this?](#what-is-this)
+- [Table of Contents](#table-of-contents)
+- [Getting started](#getting-started)
+- [Dependencies](#dependencies)
+- [Quickstart](#quickstart)
+- [Hello, world](#hello-world)
+- [Running real apps](#running-real-apps)
+- [Example](#example)
+- [Coming soon](#coming-soon)
+- [Acknowledgments](#acknowledgments)
 ## What is this?
 [Make `systemd` better for Podman with Quadlet](https://www.redhat.com/en/blog/quadlet-podman)
@@ -13,7 +26,9 @@ under the same user permissions as yourself, from within your own `$HOME`.
 > [!NOTE]
 > It is recommended to create another user specifically for running these
 > containers, but it is not strictly required. Details for setting up a system
-> from scratch are located in [AlmaLinux.md](./AlmaLinux.md).
+> from scratch are located in [AlmaLinux.md](./AlmaLinux.md) or
+> [Ubuntu.md](./Ubuntu.md)
 ## Getting started
@@ -123,83 +138,115 @@ Navigate to `http://localhost:9000` in your browser.
 > tcp LISTEN 0 4096 *:9000 *:* users:(("rootlessport",pid=913878,fd=10))
 > ```
-## Upcoming containers
+## Coming soon
 I'm working on new quadlets every day. This is a list of all of the containers
 that I intend to add to this repository. It is still growing, and I welcome
-[pull requests](https://github.com/redbeardymcgee/podbox/pulls).
+[pull requests](https://git.mcgee.red/redbeardymcgee/podbox/pulls).
 - [x] [Actual](https://actualbudget.github.io/docs/)
 - [x] [AdGuard](https://adguard.com)
-- [ ] [Apprise](https://github.com/caronc/apprise)
+- [x] [Apprise](https://github.com/caronc/apprise)
+- [ ] [ArgoCD](https://github.com/argoproj/argo-cd)
 - [x] [Audiobookshelf](https://www.audiobookshelf.org/)
 - [ ] [Authelia](https://www.authelia.com/)
 - [ ] [Authentik](https://goauthentik.io/)
-- [ ] [betanin](https://github.com/sentriz/betanin)
+- [x] [betanin](https://github.com/sentriz/betanin)
+- [x] [Blinko](https://blinko.mintlify.app/introduction)
 - [x] [booktree](https://github.com/myxdvz/booktree)
+- [ ] [Cabot](https://cabotapp.com/)
 - [x] [Caddy](https://caddyserver.com) # Socket activation requires newer `caddy` and `podman`
 - [x] [Calibre](https://github.com/linuxserver/docker-calibre)
 - [x] [Calibre-web](https://github.com/janeczku/calibre-web)
-- [ ] [Code::Stats](https://codestats.net/)
+- [x] [ChartDB](https://chartdb.io/)
-- [ ] [dash.](https://getdashdot.com/)
+- [x] [Checkmate](https://github.com/bluewave-labs/checkmate)
+- [x] [dash.](https://getdashdot.com/)
 - [x] [Dashy](https://dashy.to)
+- [ ] [Dittofeed](https://www.dittofeed.com)
 - [ ] [Duplicacy](https://duplicacy.com/)
 - [ ] [Duplicati](https://duplicati.com/)
+- [ ] [EmulatorJS](https://emulatorjs.org/)
 - [x] [Filebrowser](https://filebrowser.org/)
-- [x] [FreshRSS](https://www.freshrss.org/)
+- [x] [Filestash](https://filestash.app)
 - [x] [FiveFilters](https://www.fivefilters.org/)
+- [x] [Forgejo](https://forgejo.org)
 - [x] [Foundry VTT](https://foundryvtt.com)
-- [ ] [Gaseous](https://github.com/gaseous-project/gaseous-server)
+- [x] [FreshRSS](https://www.freshrss.org/)
+- [x] [Gaseous](https://github.com/gaseous-project/gaseous-server)
+- [x] [Glance](https://github.com/glanceapp/glance)
 - [x] [Glances](https://nicolargo.github.io/glances/)
-- [ ] [glueforward](https://github.com/GeoffreyCoulaud/glueforward)
+- [x] [glueforward](https://github.com/GeoffreyCoulaud/glueforward)
 - [x] [gluetun](https://github.com/qdm12/gluetun)
-- [ ] [Graphite](https://graphiteapp.org/)
+- [x] [Graphite](https://graphiteapp.org/)
+- [x] [Graylog](https://graylog.org)
-- [ ] [Healthchecks](https://healthchecks.io/)
+- [x] [Healthchecks](https://healthchecks.io/)
 - [x] [hoarder](https://hoarder.app/)
 - [x] [Homarr](https://homarr.dev/)
-- [ ] [Homepage](https://gethomepage.dev/)
+- [x] [Homepage](https://gethomepage.dev/)
+- [ ] [Immich](https://immich.app/)
 - [x] [IT-Tools](https://it-tools.tech/)
+- [x] [Joplin](https://joplinapp.org/)
 - [x] [Kavita](https://kavitareader.com)
 - [ ] [Keycloak](https://www.keycloak.org)
 - [x] [Kibitzr](https://kibitzr.github.io/)
-- [ ] [Komga](https://komga.org/)
+- [x] [Komga](https://komga.org/)
-- [ ] [LazyLibrarian](https://lazylibrarian.gitlab.io/)
+- [x] [LazyLibrarian](https://lazylibrarian.gitlab.io/)
+- [x] [Leantime](https://leantime.io)
+- [x] [LibreNMS](https://librenms.org)
 - [x] [librespeed](https://librespeed.org)
-- [ ] [Linkwarden](https://linkwarden.app/)
+- [x] [Linkwarden](https://linkwarden.app/)
 - [x] [Lounge](https://thelounge.chat)
 - [x] [Matrix](https://matrix.org/)
-- [ ] [Miniflux](https://miniflux.app/)
+- [x] [Maxun](https://github.com/getmaxun/maxun)
-- [ ] [n8n](https://n8n.io/)
+- [x] [Mealie](https://mealie.io/)
+- [x] [Memos](https://usememos.com)
+- [x] [Miniflux](https://miniflux.app/)
+- [x] [MinIO](https://min.io)
+- [x] [n8n](https://n8n.io/)
+- [x] [Nebula](https://github.com/slackhq/nebula)
 - [ ] [Netbird](https://netbird.io/)
 - [x] [netboot.xyz](https://netboot.xyz)
-- [ ] [Netdata](https://www.netdata.cloud/)
+- [x] [Netdata](https://www.netdata.cloud/)
+- [ ] [Note Mark](https://github.com/enchant97/note-mark)
 - [ ] [Notesnook](https://github.com/streetwriters/notesnook-sync-server)
-- [ ] [ntop](https://www.ntop.org/)
+- [x] [OpenObserve](https://openobserve.ai)
-- [ ] [OpenNMS](https://www.opennms.org/)
+- [x] [OpenSpeedTest](https://openspeedtest.com)
-- [ ] [PiHole](https://pi-hole.net/)
+- [x] [PiHole](https://pi-hole.net/)
+- [x] [Pocket ID](https://github.com/stonith404/pocket-id)
 - [ ] [Pod Arcade](https://www.pod-arcade.com/)
-- [ ] [protonmail-bridge-docker](https://github.com/shenxn/protonmail-bridge-docker)
+- [x] [Postiz](https://postiz.com/)
-- [ ] [ProtonMailBridgeDocker](https://github.com/VideoCurio/ProtonMailBridgeDocker)
+- [x] [Prometheus](https://prometheus.io)
+- [x] [protonmail-bridge-docker](https://github.com/shenxn/protonmail-bridge-docker)
 - [x] [Prowlarr](https://prowlarr.com)
 - [x] [qbit_manage](https://github.com/StuffAnThings/qbit_manage)
 - [x] [qBittorrent](https://qbittorrent.org)
 - [x] [qbittorrent-port-forward-gluetun-server](https://github.com/mjmeli/qbittorrent-port-forward-gluetun-server)
 - [x] [Radarr](https://radarr.video)
-- [ ] [RomM](https://romm.app/)
+- [x] [RomM](https://romm.app/)
 - [ ] [Seafile](https://www.seafile.com)
 - [ ] [Shiori](https://github.com/go-shiori/shiori)
+- [ ] [SimpleX](https://simplex.chat/)
+- [x] [Snowflake](https://snowflake.torproject.org/)
 - [ ] [solidtime](https://docs.solidtime.io/self-hosting/intro)
 - [x] [Sonarr](https://sonarr.tv)
+- [x] [Speedtest Tracker](https://speedtest-tracker.dev)
 - [x] [Stirling PDF](https://stirlingpdf.io)
-- [ ] [Supervisord](http://supervisord.org/)
+- [x] [syslog-ng](https://syslog-ng.github.io/)
+- [x] [Tandoor](https://github.com/TandoorRecipes/recipes)
 - [x] [traggo](https://traggo.net)
+- [x] [Termix](https://github.com/LukeGus/Termix)
 - [ ] [Ubooquity](https://vaemendis.net/ubooquity/)
+- [ ] [Umami](https://umami.is/)
 - [ ] [UrBackup](https://urbackup.org)
-- [ ] [Vikunja](https://vikunja.io)
+- [x] [Vikunja](https://vikunja.io)
 - [ ] [Wazuh](https://wazuh.com/)
+- [ ] [wiki.js](https://js.wiki)
 - [ ] [wger](https://wger.de/)
 - [ ] [Zenoss](https://www.zenoss.com/)
 - [ ] [Zitadel](https://zitadel.com/)
+## Acknowledgments
+Thanks to these users for their examples and contributions!
+- [@fpatrick](https://github.com/fpatrick)/[podman-quadlet](https://github.com/fpatrick/podman-quadlet)
+- [@dwedia](https://github.com/dwedia)/[podmanQuadlets](https://github.com/dwedia/podmanQuadlets)
+- [@sudo-kraken](https://github.com/sudo-kraken)
+- [@EphemeralDev](https://github.com/EphemeralDev)

178
Ubuntu.md Normal file
View file

@ -0,0 +1,178 @@
# Ubuntu Server
Setting up rootless podman on a fresh Ubuntu 24.10 server.
> [!WARNING]
> Perform `sudo apt update && sudo apt upgrade` immediately. Reboot system.
## SSH
SSH is optional, but highly encouraged. OpenSSH is installed and `sshd` is
running by default.
```bash
## Generate strong key on your laptop or workstation/desktop
## If you already have keys DO NOT overwrite your previous keys
ssh-keygen -t ed25519 -a 32 -f ~/.ssh/$localhost-to-$remotehost
## Optionally set a passphrase
## Copy key to Ubuntu
ssh-copy-id -i ~/.ssh/$localhost-to-$remotehost.pub username@remote_host
```
## Override `sshd` config
We never want to allow anyone to log in as root remotely. You must be a
`sudoer` with public key auth to elevate to root.
SSH into your server and run
```bash
printf '%s\n' 'PermitRootLogin no' | sudo tee /etc/ssh/sshd_config.d/01-root.conf
printf '%s\n' \
'PubkeyAuthentication yes' \
'PasswordAuthentication no' | sudo tee /etc/ssh/sshd_config.d/01-pubkey.conf
```
Save the file and then run `systemctl restart ssh`. Before closing your session,
open a new terminal and confirm that SSH still works.
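One way to sanity-check the new config from your workstation before logging out (the hostnames and key path are placeholders):

```bash
# Key-based login should still succeed
ssh -o PreferredAuthentications=publickey username@remote_host 'echo key auth ok'
# Password auth and remote root login should now both be refused
ssh -o PreferredAuthentications=password username@remote_host
ssh root@remote_host
```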
## Podman
Podman is a daemonless container engine. This document prepares a fully
rootless environment for our containers to run in.
## Install
```bash
sudo apt install podman systemd-container
## Make sure podman is running
systemctl enable --now podman
```
> [!NOTE]
> Read the docs. `man podman-systemd.unit`
## Prepare host networking stack
## Pasta or slirp4netns
> [!NOTE]
> As of Podman 5.0, Pasta is the default rootless networking tool.
>
> Podman 5.0 has been available in the standard Ubuntu repo since 24.10.
>
> Both are installed with podman; see the
> [rootless networking configuration](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#networking-configuration) tutorial.
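If you want to confirm or change which backend rootless Podman uses, something like the following should work; the `default_rootless_network_cmd` key lives in the `[network]` section of `containers.conf`, and the exact `podman info` field name can vary between Podman versions:

```bash
# Show the rootless network backend Podman detected (field name may differ on older versions)
podman info --format '{{.Host.NetworkBackendInfo.Backend}}'

# Explicitly select pasta (or slirp4netns) for rootless containers
# (merge by hand if ~/.config/containers/containers.conf already exists)
mkdir -p ~/.config/containers
printf '%s\n' '[network]' 'default_rootless_network_cmd = "pasta"' >> ~/.config/containers/containers.conf
```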
## Allow rootless binding port 80+
### Modify range of unprivileged ports
> [!NOTE]
> This is only necessary if you are setting up the reverse proxy (or any service
> on ports <1024).
```bash
printf '%s\n' 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee /etc/sysctl.d/99-unprivileged-port-binding.conf
sudo sysctl -w net.ipv4.ip_unprivileged_port_start=80
```
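A quick way to confirm the setting took effect (80 is the expected value after the change above):

```bash
# Should print: net.ipv4.ip_unprivileged_port_start = 80
sysctl net.ipv4.ip_unprivileged_port_start
```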
## Prepare container user
This user will own all containers and has no login shell or root
privileges.
The container user should have a range of subordinate UIDs/GIDs generated
automatically. See the
[subuid and subgid tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#etcsubuid-and-etcsubgid-configuration)
to verify the range, or to create it if it does not exist; a quick check is also sketched after the user-creation block below.
Note: `$ctuser` is a placeholder; replace it with your username.
```bash
# Prepare a group id outside of the normal range
sudo groupadd --gid 2000 $ctuser
# Create user with restrictions
# We need the $HOME to live in
sudo useradd --create-home \
--shell /usr/bin/false \
--password $ctuser_pw \
--no-user-group \
--gid $ctuser \
--groups systemd-journal \
--uid 2000 \
$ctuser
# Lock user from password login
sudo usermod --lock $ctuser
# Start $ctuser session at boot without login
loginctl enable-linger $ctuser
```
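To verify the subordinate ID ranges mentioned above (the range values here are only an example; pick ones that do not overlap with other users):

```bash
# Check that $ctuser has subuid/subgid ranges allocated
grep "^$ctuser:" /etc/subuid /etc/subgid

# If nothing is printed, allocate a range manually (example values)
sudo usermod --add-subuids 200000-265535 --add-subgids 200000-265535 $ctuser
```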
> [!NOTE]
> Consider removing the bash history entry that contains the password entered above.
## Setup $ctuser env
> [!NOTE]
> Use machinectl instead of sudo or su to get a shell that is fully isolated
> from the original session. See the developer's comments on the problem
> [with su](https://github.com/systemd/systemd/issues/825#issuecomment-127917622)
> as well as the purpose of
> [machinectl shell](https://github.com/systemd/systemd/pull/1022#issuecomment-136133244)
```bash
# Switch to $ctuser
# Note do not remove the trailing @
machinectl shell $ctuser@ /bin/bash
# Create dirs
mkdir -p ~/.config/{containers/systemd,environment.d}
# Prepare `systemd --user` env
echo 'XDG_RUNTIME_DIR=/run/user/2000' >> ~/.config/environment.d/10-xdg.conf
# Enable container auto-update
podman system migrate
# WARNING: Set strict versions for all containers or risk catastrophe
systemctl --user enable --now podman-auto-update
exit
```
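As a sketch of what comes next (the unit name and image below are only examples, not part of this repo), dropping a `.container` quadlet into the directory created above and starting it looks roughly like this:

```bash
# Run as $ctuser (e.g. via machinectl shell as shown above)
cat > ~/.config/containers/systemd/hello.container <<'EOF'
[Container]
Image=quay.io/podman/hello:latest
ContainerName=hello

[Install]
WantedBy=default.target
EOF

# Let systemd generate the service from the quadlet, then start it
systemctl --user daemon-reload
systemctl --user start hello.service
journalctl --user -u hello.service --no-pager | tail
```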
## Podman fails autostart
In Podman < 5.3, containers may fail to autostart because user-level units cannot depend on system-level units (in this case `network-online.target`).
Podman >= 5.3 should ship with a workaround user unit, `podman-user-wait-network-online.service`; use that instead of the fix below.
See [this GitHub issue](https://github.com/containers/podman/issues/22197) for workarounds; the one below is what worked for me. The google.com ping can be replaced with your preferred (reachable) IP or host.
To apply the workaround, create the following:
```ini
# ~/.config/systemd/user/network-online.service
[Unit]
Description=User-level proxy to system-level network-online.target
[Service]
Type=oneshot
ExecStart=sh -c 'until ping -c 1 google.com; do sleep 5; done'
[Install]
WantedBy=default.target
```
```ini
# ~/.config/systemd/user/network-online.target
[Unit]
Description=User-level network-online.target
Requires=network-online.service
Wants=network-online.service
After=network-online.service
```
Then enable the service: `systemctl --user enable network-online.service`.
In quadlets add the following:
```ini
[Unit]
After=network-online.target
```
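After a reboot you can check that the proxy unit ran and is wired into the user session; this is a rough check, assuming the unit names used above:

```bash
# The oneshot proxy should have run at login/boot
systemctl --user status network-online.service

# It should appear among the dependencies pulled in by the user's default.target
systemctl --user list-dependencies default.target | grep -i network-online
```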

View file

@ -1,5 +0,0 @@
# Kimai
## Known issues
- 502 error

View file

@ -1,25 +0,0 @@
[Unit]
Description=Kimai database
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/mysql/mysql:8.3
ContainerName=kimai-db
HostName=kimai-db
Network=protonvpn
Volume=/volumes/kimai/var/lib/mysql:/var/lib/mysql
Environment=MYSQL_DATABASE=kimai
Environment=MYSQL_USER=kimaiuser
Secret=mysql-kimai-pw,type=env,target=MYSQL_PASSWORD
Secret=mysql-kimai-root-pw,type=env,target=MYSQL_ROOT_PASSWORD

View file

@ -1,25 +0,0 @@
[Unit]
Description=Time tracking
Wants=kimai-db.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/kimai/kimai2:apache
ContainerName=kimai
HostName=kimai
Network=protonvpn
Volume=/volumes/kimai/var/plugins:/var/plugins
Volume=/volumes/kimai/var/data:/var/data
Environment=ADMINMAIL=admin@kimai.localdomain
Environment=DATABASE_URL="mysql://kimaiuser:kimaipassword@sqldb/kimai?charset=utf8mb4&serverVersion=8.3.0"
Secret=kimai-db-pass,type=env,target=ADMINPASS

View file

@ -1,3 +0,0 @@
[Volume]
VolumeName=linkwarden-data

View file

@ -1,23 +0,0 @@
[Unit]
Description=Linkwarden database
Requires=linkwarden-database.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/postgres:16-alpine
ContainerName=linkwarden
HostName=linkwarden
Volume=linkwarden-database:/var/lib/postgresql/data
Environment=DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres
# FIXME: Secret is not injected into env before Environment parses
Secret=linkwarden-db-pw,type=env,target=POSTGRES_PASSWORD

View file

@ -1,21 +0,0 @@
[Unit]
Description=Overlay VPN
Wants=netbird-signal.service
Wants=netbird-relay.service
[Container]
ContainerName=netbird
Image=docker.io/netbirdio/dashboard:latest
EnvironmentFile=./netbird.env
Volume=netbird-letsencrypt:/etc/letsencrypt
PublishPort=
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target

View file

@ -1,18 +0,0 @@
# Endpoints
NETBIRD_MGMT_API_ENDPOINT=
NETBIRD_MGMT_GRPC_API_ENDPOINT=
# OIDC
AUTH_AUDIENCE=
AUTH_CLIENT_SECRET=
AUTH_AUTHORITY=
USE_AUTH0=
AUTH_SUPPORTED_SCOPES=
AUTH_REDIRECT_URI=
AUTH_SILENT_REDIRECT_URI=
NETBIRD_TOKEN_SOURCE=
# SSL
NGINX_SSL_PORT=443
# Letsencrypt
LETSENCRYPT_DOMAIN=
LETSENCRYPT_EMAIL=

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=wger-beat

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=wger-cache

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=wger-database

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=wger-media

View file

@ -1,167 +0,0 @@
# Django's secret key, change to a 50 character random string if you are running
# this instance publicly. For an online generator, see e.g. https://djecrety.ir/
SECRET_KEY=wger-docker-supersecret-key-1234567890!@#$%^&*(-_)
# Signing key used for JWT, use something different than the secret key
SIGNING_KEY=wger-docker-secret-jwtkey-1234567890!@#$%^&*(-_=+)
# The server's timezone, for a list of possible names:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TIME_ZONE=Europe/Berlin
#
# Consult the deployment section in the readme if you are running this behind a
# reverse proxy with HTTPS enabled
# CSRF_TRUSTED_ORIGINS=https://my.domain.example.com,https://118.999.881.119
# X_FORWARDED_PROTO_HEADER_SET=True
#
# Static files
# If you are running the application behind a reverse proxy or changed the port, the
# links for some images *might* break (specially in the mobile app). Also note that
# the API response is cached and contains the host, if you change this setting, just run
# docker compose exec web python3 manage.py warmup-exercise-api-cache --force
# MEDIA_URL=https://your-domain.example.com/media/
# STATIC_URL=https://your-domain.example.com/static/
#
# These settings usually don't need changing
#
#
# Application
WGER_INSTANCE=https://wger.de # Wger instance from which to sync exercises, images, etc.
ALLOW_REGISTRATION=True
ALLOW_GUEST_USERS=True
ALLOW_UPLOAD_VIDEOS=True
# Users won't be able to contribute to exercises if their account age is
# lower than this amount in days.
MIN_ACCOUNT_AGE_TO_TRUST=21
# Synchronzing exercises
# It is recommended to keep the local database synchronized with the wger
# instance specified in WGER_INSTANCE since there are new added or translations
# improved. For this you have different possibilities:
# - Sync exercises on startup:
# SYNC_EXERCISES_ON_STARTUP=True
# DOWNLOAD_EXERCISE_IMAGES_ON_STARTUP=True
# - Sync them in the background with celery. This will setup a job that will run
# once a week at a random time (this time is selected once when starting the server)
SYNC_EXERCISES_CELERY=True
SYNC_EXERCISE_IMAGES_CELERY=True
SYNC_EXERCISE_VIDEOS_CELERY=True
# - Manually trigger the process as needed:
# docker compose exec web python3 manage.py sync-exercises
# docker compose exec web python3 manage.py download-exercise-images
# docker compose exec web python3 manage.py download-exercise-videos
# Synchronzing ingredients
# You can also syncronize the ingredients from a remote wger instance, and have
# basically the same options as for the ingredients:
# - Sync them in the background with celery. This will setup a job that will run
# once a week at a random time (this time is selected once when starting the server)
SYNC_INGREDIENTS_CELERY=True
# - Manually trigger the process as needed:
# docker compose exec web python3 manage.py sync-ingredients
# When scanning products with the barcode scanner, it is possible to dynamically
# fetch the ingredient if it is not known in the local database. This option controlls
# where to try to download the ingredient and their images.
# Possible values OFF, WGER or None. Note that it is recommended to keep this as WGER
# so that we don't overwhelm the Open Food Facts servers. Needs to have USE_CELERY
# set to true
DOWNLOAD_INGREDIENTS_FROM=WGER
# Whether celery is configured and should be used. Can be left to true with
# this setup but can be deactivated if you are using the app in some other way
USE_CELERY=True
#
# Celery
CELERY_BROKER=redis://cache:6379/2
CELERY_BACKEND=redis://cache:6379/2
CELERY_FLOWER_PASSWORD=adminadmin
#
# Database
DJANGO_DB_ENGINE=django.db.backends.postgresql
DJANGO_DB_DATABASE=wger
DJANGO_DB_USER=wger
DJANGO_DB_PASSWORD=wger
DJANGO_DB_HOST=db
DJANGO_DB_PORT=5432
DJANGO_PERFORM_MIGRATIONS=True # Perform any new database migrations on startup
#
# Cache
DJANGO_CACHE_BACKEND=django_redis.cache.RedisCache
DJANGO_CACHE_LOCATION=redis://cache:6379/1
DJANGO_CACHE_TIMEOUT=1296000 # in seconds - 60*60*24*15, 15 Days
DJANGO_CACHE_CLIENT_CLASS=django_redis.client.DefaultClient
# DJANGO_CACHE_CLIENT_PASSWORD=abcde... # Only if you changed the redis config
# DJANGO_CACHE_CLIENT_SSL_KEYFILE=/path/to/ssl_keyfile # Path to an ssl private key.
# DJANGO_CACHE_CLIENT_SSL_CERTFILE=/path/to/ssl_certfile # Path to an ssl certificate.
# DJANGO_CACHE_CLIENT_SSL_CERT_REQS=<none | optional | required> # The string value for the verify_mode.
# DJANGO_CACHE_CLIENT_SSL_CHECK_HOSTNAME=False # If set, match the hostname during the SSL handshake.
#
# Brute force login attacks
# https://django-axes.readthedocs.io/en/latest/index.html
AXES_ENABLED=True
AXES_FAILURE_LIMIT=10
AXES_COOLOFF_TIME=30 # in minutes
AXES_HANDLER=axes.handlers.cache.AxesCacheHandler
AXES_LOCKOUT_PARAMETERS=ip_address
AXES_IPWARE_PROXY_COUNT=1
AXES_IPWARE_META_PRECEDENCE_ORDER=HTTP_X_FORWARDED_FOR,REMOTE_ADDR
#
# Others
DJANGO_DEBUG=False
WGER_USE_GUNICORN=True
EXERCISE_CACHE_TTL=18000 # in seconds - 5*60*60, 5 hours
SITE_URL=http://localhost
#
# JWT auth
ACCESS_TOKEN_LIFETIME=10 # The lifetime duration of the access token, in minutes
REFRESH_TOKEN_LIFETIME=24 # The lifetime duration of the refresh token, in hours
#
# Other possible settings
# Recaptcha keys. You will need to create an account and register your domain
# https://www.google.com/recaptcha/
# RECAPTCHA_PUBLIC_KEY=abcde...
# RECAPTCHA_PRIVATE_KEY=abcde...
USE_RECAPTCHA=False
# Clears the static files before copying the new ones (i.e. just calls collectstatic
# with the appropriate flag: "manage.py collectstatic --no-input --clear"). Usually
# This can be left like this but if you have problems and new static files are not
# being copied correctly, clearing everything might help
DJANGO_CLEAR_STATIC_FIRST=False
#
# Email
# https://docs.djangoproject.com/en/4.1/topics/email/#smtp-backend
# ENABLE_EMAIL=False
# EMAIL_HOST=email.example.com
# EMAIL_PORT=587
# EMAIL_HOST_USER=username
# EMAIL_HOST_PASSWORD=password
# EMAIL_USE_TLS=True
# EMAIL_USE_SSL=False
FROM_EMAIL='wger Workout Manager <wger@example.com>'
# Set your name and email to be notified if an internal server error occurs.
# Needs a working email configuration
# DJANGO_ADMINS=your name,email@example.com
# Whether to compress css and js files into one (of each)
# COMPRESS_ENABLED=True
#
# Django Rest Framework
# The number of proxies in front of the application. In the default configuration only nginx
# is. Change as approtriate if your setup differs
NUMBER_OF_PROXIES=1

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=wger-static

View file

@ -1,23 +0,0 @@
[Unit]
Description=Wger heartbeat
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/wger/server:latest
ContainerName=wger-beat
HostName=wger-beat
Exec=/start-beat
Network=protonvpn
Volume=wger-beat:/home/wger/beat
Environment=./prod.env

View file

@ -1,17 +0,0 @@
[Unit]
Description=Wger Redis cache
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/redis:latest
ContainerName=wger-cache
HostName=wger-cache
Volume=wger-cache:/data

View file

@ -1,27 +0,0 @@
[Unit]
Description=Wger database
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/postgres:15-alpine
ContainerName=wger-db
HostName=wger-db
Network=
#PublishPort=5432
Volume=wger-database:/var/lib/postgresql/data
#Secret=wger-db-pw,type=env,target=POSTGRES_PASSWORD
Environment=POSTGRES_USER=wger
Environment=POSTGRES_PASSWORD=wger
Environment=POSTGRES_DB=wger

View file

@ -1,22 +0,0 @@
[Unit]
Description=Wger static web server
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/nginx:stable
ContainerName=wger-nginx
HostName=wger-nginx
PublishPort=80
Volume=./nginx.default.conf:/etc/nginx/conf.d/default.conf
Volume=wger-static:/wger/static:ro
Volume=wger-media:/wger/media:ro

View file

@ -1,17 +0,0 @@
[Unit]
Description=Wger worker
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/wger/server:latest
ContainerName=wger-worker
HostName=wger-worker
Environment=prod.env

View file

@ -1,23 +0,0 @@
[Unit]
Description=Fitness tracker
Requires=wger-db.service
Requires=wger-nginx.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/wger/server:latest
ContainerName=wger
HostName=wger
PublishPort=8000
Volume=wger-static:/home/wger/static
EnvironmentFile=prod.env

View file

@ -1,7 +0,0 @@
{
acme_dns $provider $api_key
}
qb.$domain.$tld {
reverse_proxy localhost:8080
}

View file

@ -1,8 +0,0 @@
FROM docker.io/caddy:$version-builder AS builder
RUN xcaddy build \
--with github.com/caddy-dns/$module
FROM docker.io/caddy:$version
COPY --from=builder /usr/bin/caddy /usr/bin/caddy

View file

@ -1,4 +0,0 @@
[Build]
ImageTag=localhost/caddy-njalla
SetWorkingDirectory=unit

View file

@ -1,21 +0,0 @@
[Unit]
Description=Reverse proxy
[Service]
Restart=on-failure
[Install]
WantedBy=default.target
[Container]
Image=caddy.build
ContainerName=caddy
HostName=caddy
Pod=MAMstack.pod
AutoUpdate=registry
Volume=caddy-config:/config
Volume=caddy-data:/data
Volume=./Caddyfile:/etc/caddy/Caddyfile

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=caddy-data

View file

@ -1,13 +0,0 @@
# calibre-web
## Known issues
### The starter metadata.db is required even if you do not use `calibre`
> [!WARNING]
> This should be run as your `$ctuser` or it will have the wrong owner and
> permissions
```bash
curl -fLSs -o /home/$ctuser/.local/share/containers/storage/volumes/calibre-web-database/metadata.db https://github.com/janeczku/calibre-web/raw/master/library/metadata.db
```

View file

@ -1,3 +0,0 @@
[Volume]
VolumeName=calibre-web-config

View file

@ -1,3 +0,0 @@
[Volume]
VolumeName=calibre-web-data

View file

@ -1,24 +0,0 @@
[Unit]
Description=calibre-web
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=lscr.io/linuxserver/calibre-web:latest
ContainerName=calibre-web
HostName=calibre-web
Pod=MAMstack.pod
AutoUpdate=registry
Volume=/volumes/books:/books
Volume=calibre-web-config:/config
Volume=calibre-config:/database
Environment=TZ=Etc/UTC

View file

@ -1,20 +0,0 @@
[Unit]
Description=Ebook manager
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=lscr.io/linuxserver/calibre:latest
ContainerName=calibre
HostName=calibre
Pod=MAMstack.pod
AutoUpdate=registry
Volume=calibre-config:/config
Environment=TZ=Etc/UTC

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=calibre-config

View file

@ -1,33 +0,0 @@
[Unit]
Description=gluetun VPN
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/qmcgaw/gluetun:$gluetun_version
ContainerName=gluetun
HostName=gluetun
AddCapability=NET_ADMIN
AddDevice=/dev/net/tun:/dev/net/tun
Pod=MAMstack.pod
AutoUpdate=registry
Volume=./config.toml:/gluetun/auth/config.toml
Environment=TZ=$timezone
Environment=UPDATER_PERIOD=24h
Environment=UPDATER_VPN_SERVICE_PROVIDERS=protonvpn
Environment=VPN_SERVICE_PROVIDER=protonvpn
# The trailing `+pmp` is for port forwarding
Environment=OPENVPN_USER=${openvpn_user}+pmp
Environment=OPENVPN_PASSWORD=$openvpn_password
Environment=OPENVPN_CIPHERS=aes-256-gcm
Environment=SERVER_COUNTRIES=$countries
Environment=VPN_PORT_FORWARDING=on
Environment=FIREWALL_DEBUG=on

View file

@ -1,23 +0,0 @@
[Unit]
Description=Ebook reader
After=caddy.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=ghcr.io/kareadita/kavita:latest
ContainerName=kavita
HostName=kavita
Pod=MAMstack.pod
AutoUpdate=registry
Volume=kavita-config:/kavita/config
Volume=/volumes/books:/library
Environment=TZ=Etc/UTC

View file

@ -1,21 +0,0 @@
[Unit]
Description=Lazy Librarian
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
ContainerName=lazylibrarian
Image=lscr.io/linuxserver/lazylibrarian:latest
HostName=lazylibrarian
Pod=MAMstack.pod
AutoUpdate=registry
Volume=lazylibrarian-config:/config
Volume=/volumes/books:/books
Environment=TZ=Etc/UTC

View file

@ -1,10 +0,0 @@
[Pod]
PodName=MAMstack
Network=mamstack.network
PublishPort=80
PublishPort=443
PublishPort=443:443/udp
Volume=

View file

@ -1,27 +0,0 @@
[Unit]
Description=Bonus points spender
After=qbittorrent.service
After=gluetun.service
BindsTo=gluetun.service
BindsTo=qbittorrent.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
# TODO: Is `latest` safe for this container?
Image=docker.io/myanonamouse/pointspend:latest
ContainerName=pointspend
HostName=pointspend
Pod=MAMstack.pod
AutoUpdate=registry
Environment=BUFFER=1000
Environment=WEDGEHOURS=0
Environment=VIP=1
Secret=mam_id,type=env,target=MAMID

View file

@ -1,3 +0,0 @@
[Volume]
VolumeName=qbit_manage-config

View file

@ -1,322 +0,0 @@
# This is an example configuration file that documents all the options.
# It will need to be modified for your specific use case.
# Please refer to the link below for more details on how to set up the configuration file
# https://github.com/StuffAnThings/qbit_manage/wiki/Config-Setup
commands:
# The commands defined below will IGNORE any commands used in command line and docker env variables.
dry_run: True
cross_seed: False
recheck: False
cat_update: False
tag_update: False
rem_unregistered: False
tag_tracker_error: False
rem_orphaned: False
tag_nohardlinks: False
share_limits: False
skip_qb_version_check: False
skip_cleanup: False
qbt:
# qBittorrent parameters
host: "localhost:8080"
user: "username"
pass: "password"
settings:
force_auto_tmm: False # Will force qBittorrent to enable Automatic Torrent Management for each torrent.
force_auto_tmm_ignore_tags: #Torrents with these tags will be ignored when force_auto_tmm is enabled.
- cross-seed
- Upload
tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker.
nohardlinks_tag: noHL # Will set the tag of any torrents with no hardlinks.
share_limits_tag: ~share_limit # Will add this tag when applying share limits to provide an easy way to filter torrents by share limit group/priority for each torrent
share_limits_min_seeding_time_tag: MinSeedTimeNotReached # Tag to be added to torrents that have not yet reached the minimum seeding time
share_limits_min_num_seeds_tag: MinSeedsNotMet # Tag to be added to torrents that have not yet reached the minimum number of seeds
share_limits_last_active_tag: LastActiveLimitNotReached # Tag to be added to torrents that have not yet reached the last active limit
cross_seed_tag: cross-seed # Will set the tag of any torrents that are added by cross-seed command
cat_filter_completed: True # Filters for completed torrents only when running cat_update command
share_limits_filter_completed: True # Filters for completed torrents only when running share_limits command
tag_nohardlinks_filter_completed: True # Filters for completed torrents only when running tag_nohardlinks command
cat_update_all: True # Checks and updates all torrent categories if set to True when running cat_update command, otherwise only update torrents that are uncategorized
disable_qbt_default_share_limits: True # Allows QBM to handle share limits by disabling qBittorrents default Share limits. Only active when the share_limits command is set to True
directory:
# Do not remove these
# Cross-seed var: </your/path/here/> # Output directory of cross-seed
# root_dir var: </your/path/here/> # Root downloads directory used to check for orphaned files, noHL, and RecycleBin.
# <OPTIONAL> remote_dir var: </your/path/here/> # Path of docker host mapping of root_dir.
# remote_dir must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker
# remote_dir should not be set if qbit_manage is running in a container
# <OPTIONAL> recycle_bin var: </your/path/here/> # Path of the RecycleBin folder. Default location is set to remote_dir/.RecycleBin
# <OPTIONAL> torrents_dir var: </your/path/here/> # Path of the your qbittorrent torrents directory. Required for `save_torrents` attribute in recyclebin
# <OPTIONAL> orphaned_dir var: </your/path/here/> # Path of the the Orphaned Data folder. This is similar to RecycleBin, but only for orphaned data.
cross_seed: "/your/path/here/"
root_dir: "/data/torrents/"
remote_dir: "/mnt/user/data/torrents/"
recycle_bin: "/mnt/user/data/torrents/.RecycleBin"
torrents_dir: "/qbittorrent/data/BT_backup"
orphaned_dir: "/data/torrents/orphaned_data"
cat:
# Category & Path Parameters
# All save paths in qbittorent must be populated below.
# If you want to leave a save_path as uncategorized you can use the key 'Uncategorized' as the name of the category.
# <Category Name> : <save_path> # Path of your save directory.
movies: "/data/torrents/Movies"
tv: "/data/torrents/TV"
cat_change:
# This moves all the torrents from one category to another category. This executes on --cat-update
# WARNING: if the paths are different and Default Torrent Management Mode is set to automatic the files could be moved !!!
# <Old Category Name> : <New Category>
Radarr-HD.cross-seed: movies-hd
Radarr-UHD.cross-seed: movies-uhd
movies-hd.cross-seed: movies-hd
movies-uhd.cross-seed: movies-uhd
tracker:
# Mandatory
# Tag Parameters
# <Tracker URL Keyword>: # <MANDATORY> This is the keyword in the tracker url. You can define multiple tracker urls by splitting with `|` delimiter
# <MANDATORY> Set tag name. Can be a list of tags or a single tag
# tag: <Tag Name>
# <OPTIONAL> Set the category based on tracker URL. This category option takes priority over the category defined by save directory
# cat: <Category Name>
# <OPTIONAL> Set this to the notifiarr react name. This is used to add indexer reactions to the notifications sent by Notifiarr
# notifiarr: <notifiarr indexer>
animebytes.tv:
tag: AnimeBytes
notifiarr: animebytes
avistaz:
tag:
- Avistaz
- tag2
- tag3
notifiarr: avistaz
beyond-hd:
tag: [Beyond-HD, tag2, tag3]
cat: movies
notifiarr: beyondhd
blutopia:
tag: Blutopia
notifiarr: blutopia
cartoonchaos:
tag: CartoonChaos
digitalcore:
tag: DigitalCore
notifiarr: digitalcore
gazellegames:
tag: GGn
hdts:
tag: HDTorrents
landof.tv:
tag: BroadcasTheNet
notifiarr: broadcasthenet
myanonamouse:
tag: MaM
passthepopcorn:
tag: PassThePopcorn
notifiarr: passthepopcorn
privatehd:
tag: PrivateHD
notifiarr:
torrentdb:
tag: TorrentDB
notifiarr: torrentdb
torrentleech|tleechreload:
tag: TorrentLeech
notifiarr: torrentleech
tv-vault:
tag: TV-Vault
# The "other" key is a special keyword and if defined will tag any other trackers that don't match the above trackers into this tag
other:
tag: other
nohardlinks:
# Tag Movies/Series that are not hard linked outside the root directory
# Mandatory to fill out directory parameter above to use this function (root_dir/remote_dir)
# This variable should be set to your category name of your completed movies/completed series in qbit. Acceptable variable can be any category you would like to tag if there are no hardlinks found
movies-completed-4k:
series-completed-4k:
movies-completed:
# <OPTIONAL> exclude_tags var: Will exclude torrents with any of the following tags when searching through the category.
exclude_tags:
- Beyond-HD
- AnimeBytes
- MaM
# <OPTIONAL> ignore_root_dir var: Will ignore any hardlinks detected in the same root_dir (Default True).
ignore_root_dir: true
# Can have additional categories set with separate ratio/seeding times defined.
series-completed:
# <OPTIONAL> exclude_tags var: Will exclude torrents with any of the following tags when searching through the category.
exclude_tags:
- Beyond-HD
- BroadcasTheNet
# <OPTIONAL> ignore_root_dir var: Will ignore any hardlinks detected in the same root_dir (Default True).
ignore_root_dir: true
share_limits:
# Control how torrent share limits are set depending on the priority of your grouping
# Each torrent will be matched with the share limit group with the highest priority that meets the group filter criteria.
# Each torrent can only be matched with one share limit group
# This variable is mandatory and is a text defining the name of your grouping. This can be any string you want
noHL:
# <MANDATORY> priority: <int/float> # This is the priority of your grouping. The lower the number the higher the priority
priority: 1
# <OPTIONAL> include_all_tags: <list> # Filter the group based on one or more tags. Multiple include_all_tags are checked with an AND condition
# All tags defined here must be present in the torrent for it to be included in this group
include_all_tags:
- noHL
# <OPTIONAL> include_any_tags: <list> # Filter the group based on one or more tags. Multiple include_any_tags are checked with an OR condition
# Any tags defined here must be present in the torrent for it to be included in this group
include_any_tags:
- noHL
# <OPTIONAL> exclude_all_tags: <list> # Filter by excluding one or more tags. Multiple exclude_all_tags are checked with an AND condition
# This is useful to combine with the category filter to exclude one or more tags from an entire category
# All tags defined here must be present in the torrent for it to be excluded in this group
exclude_all_tags:
- Beyond-HD
# <OPTIONAL> exclude_any_tags: <list> # Filter by excluding one or more tags. Multiple exclude_any_tags are checked with an OR condition
# This is useful to combine with the category filter to exclude one or more tags from an entire category
# Any tags defined here must be present in the torrent for it to be excluded in this group
exclude_any_tags:
- Beyond-HD
# <OPTIONAL> categories: <list> # Filter by including one or more categories. Multiple categories are checked with an OR condition
# Since one torrent can only be associated with a single category, multiple categories are checked with an OR condition
categories:
- RadarrComplete
- SonarrComplete
# <OPTIONAL> max_ratio <float>: Will set the torrent Maximum share ratio until torrent is stopped from seeding/uploading and may be cleaned up / removed if the minimums have been met.
# Will default to -1 (no limit) if not specified for the group.
max_ratio: 5.0
# <OPTIONAL> max_seeding_time <str>: Will set the torrent Maximum seeding time until torrent is stopped from seeding/uploading and may be cleaned up / removed if the minimums have been met.
# See Some examples of valid time expressions (https://github.com/onegreyonewhite/pytimeparse2)
# 32m, 2h32m, 3d2h32m, 1w3d2h32m
# Will default to -1 (no limit) if not specified for the group. (Max value of 1 year (525600 minutes))
max_seeding_time: 90d
# <OPTIONAL> min_seeding_time <str>: Will prevent torrent deletion by cleanup variable if torrent has not yet minimum seeding time (minutes).
# This should only be set if you are using this in conjunction with max_seeding_time and max_ratio. If you are not setting a max_ratio, then use max_seeding_time instead.
# If the torrent has not yet reached this minimum seeding time, it will change the share limits back to no limits and resume the torrent to continue seeding.
# See Some examples of valid time expressions (https://github.com/onegreyonewhite/pytimeparse2)
# 32m, 2h32m, 3d2h32m, 1w3d2h32m
# Will default to 0 if not specified for the group.
min_seeding_time: 30d
# <OPTIONAL> last_active <str>: Prevents torrent deletion by the cleanup variable if the torrent has been active within this period.
# If the torrent has been active within this period, the share limits are reset to no limits and the torrent is resumed to continue seeding.
# See pytimeparse2 (https://github.com/onegreyonewhite/pytimeparse2) for valid time expressions, e.g.
# 32m, 2h32m, 3d2h32m, 1w3d2h32m
# Will default to 0 if not specified for the group.
last_active: 30d
# <OPTIONAL> limit_upload_speed <int>: Limits the upload speed in KiB/s (kibibytes/second) (`-1`: no limit)
limit_upload_speed: 0
# <OPTIONAL> enable_group_upload_speed <bool>: Applies the upload speed limit at the group level: limit_upload_speed is divided equally among the torrents in the group (e.g. a 1000 KiB/s limit across 4 torrents gives each torrent 250 KiB/s).
enable_group_upload_speed: false
# <OPTIONAL> cleanup <bool>: WARNING!! Setting this to true will remove and delete the contents of any torrent that satisfies the share limits (max seeding time OR max ratio)
cleanup: false
# <OPTIONAL> resume_torrent_after_change <bool>: This variable will resume your torrent after changing share limits. Default is true
resume_torrent_after_change: true
# <OPTIONAL> add_group_to_tag <bool>: This adds your grouping as a tag with the prefix defined in settings. Default is true
# Example: A grouping defined as noHL will have a tag set to ~share_limit.noHL (if using the default prefix)
add_group_to_tag: true
# <OPTIONAL> min_num_seeds <int>: Prevents torrent deletion by the cleanup variable if the number of seeds is less than the value set here.
# If the torrent has fewer seeds than min_num_seeds, the share limits are reset to no limits and the torrent is resumed to continue seeding.
# Will default to 0 if not specified for the group.
min_num_seeds: 0
# <OPTIONAL> custom_tag <str>: Apply a custom tag name for this particular group. **WARNING (This tag MUST be unique as it will be used to determine share limits. Please ensure it does not overlap with any other tags in qbt)**
custom_tag: sharelimits_noHL
cross-seed:
priority: 2
include_all_tags:
- cross-seed
max_seeding_time: 7d
cleanup: false
PTP:
priority: 3
include_all_tags:
- PassThePopcorn
max_ratio: 2.0
max_seeding_time: 90d
cleanup: false
default:
priority: 999
max_ratio: -1
max_seeding_time: -1
cleanup: false
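# Worked example (illustrative): a torrent tagged both noHL and cross-seed in the RadarrComplete category
# matches the noHL group (priority 1) even though it also satisfies cross-seed (priority 2), because the
# highest-priority (lowest-numbered) matching group wins. A torrent tagged only PassThePopcorn matches PTP
# (priority 3), and a torrent that matches no group's filters falls through to default (priority 999).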
recyclebin:
# The Recycle Bin method of deletion moves files into the recycle bin (located in /root_dir/.RecycleBin) instead of deleting them directly in qBittorrent
# By default the Recycle Bin will be emptied on every run of the qbit_manage script if empty_after_x_days is defined.
enabled: true
# <OPTIONAL> empty_after_x_days var:
# Will automatically remove all files and folders in recycle bin after x days. (Checks every script run)
# If this variable is not defined, the Recycle Bin will never be emptied.
# WARNING: Setting this variable to 0 will delete all files immediately upon script run!
empty_after_x_days: 60
# <OPTIONAL> save_torrents var:
# If this option is set to true, you MUST fill out torrents_dir in the directory attribute.
# This saves a copy of your .torrent and .fastresume files in the recycle bin before deleting them from qBittorrent
save_torrents: true
# <OPTIONAL> split_by_category var:
# This splits the recycle bin folder by the save paths defined in the `cat` attribute,
# using the base folder name of the recycle bin defined in the `recycle_bin` sub-attribute under directory.
split_by_category: false
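# Illustrative example: when cleanup removes a torrent from a share limit group above, its files are moved
# to /root_dir/.RecycleBin instead of being deleted outright, and are purged 60 days later by empty_after_x_days.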
orphaned:
# Orphaned files are those in the root_dir download directory that are not referenced by any active torrents.
# Will automatically remove all files and folders in orphaned data after x days. (Checks every script run)
# If this variable is not defined, the orphaned data will never be emptied.
# WARNING: Setting this variable to 0 will delete all files immediately upon script run!
empty_after_x_days: 60
# File patterns that will not be considered orphaned files. Handy for generated files that aren't part of the torrent but belong with the torrent's files
exclude_patterns:
- "**/.DS_Store"
- "**/Thumbs.db"
- "**/@eaDir"
- "/data/torrents/temp/**"
- "**/*.!qB"
- "**/*_unpackerred"
# Set your desired threshold for the maximum number of orphaned files qbm will delete in a single run. (-1 to disable safeguards)
# This helps prevent accidentally deleting a large number of orphaned files in a single run
# WARNING: Setting this variable to -1 disables this safeguard entirely
max_orphaned_files_to_delete: 50
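# Illustrative example: a stray /data/torrents/movies/sample.mkv that no torrent in qBittorrent references
# is treated as orphaned and cleaned up after 60 days, while paths matching exclude_patterns
# (e.g. **/.DS_Store) are never considered orphaned.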
apprise:
# Apprise integration with webhooks
# Leave Empty/Blank to disable
# Mandatory: the URL of your Apprise API endpoint
api_url: http://apprise-api:8000
# Mandatory: one or more notification URLs for the notification services supported by Apprise. See https://github.com/caronc/apprise/wiki
notify_url:
notifiarr:
# Notifiarr integration with webhooks
# Leave Empty/Blank to disable
# Mandatory to fill out API Key
apikey: ####################################
# <OPTIONAL> Set to a unique value (could be your username on notifiarr for example)
instance:
webhooks:
# Webhook notifications:
# Possible values:
# Set value to notifiarr if using notifiarr integration
# Set value to apprise if using apprise integration
# Set value to a valid webhook URL
# Set value to nothing (leave Empty/Blank) to disable
error: https://mywebhookurl.com/qbt_manage
run_start: notifiarr
run_end: apprise
function:
cross_seed: https://mywebhookurl.com/qbt_manage
recheck: notifiarr
cat_update: apprise
tag_update: notifiarr
rem_unregistered: notifiarr
tag_tracker_error: notifiarr
rem_orphaned: notifiarr
tag_nohardlinks: notifiarr
share_limits: notifiarr
cleanup_dirs: notifiarr
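# Illustrative summary of the routing above: an error posts to the raw webhook URL, run_start goes through
# Notifiarr (using the apikey in the notifiarr section), run_end goes through the Apprise API at
# apprise.api_url, and each per-function webhook accepts the same three kinds of values.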

View file

@ -1,24 +0,0 @@
[Unit]
Description=qBittorrent manager
Wants=qbittorrent.service
After=qbittorrent.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=ghcr.io/stuffanthings/qbit_manage:latest
ContainerName=qbit_manage
HostName=qbit_manage
Pod=MAMstack.pod
AutoUpdate=registry
Volume=qbit_manage-config:/config
Volume=/volumes/books/qbittorrent/downloads:/data/torrents
Volume=qbittorrent-config:/qbittorrent
EnvironmentFile=qbit_manage.env

View file

@ -1,19 +0,0 @@
QBT_RUN=false
QBT_SCHEDULE=1440
QBT_CONFIG=config.yml
QBT_LOGFILE=activity.log
QBT_CROSS_SEED=false
QBT_RECHECK=false
QBT_CAT_UPDATE=false
QBT_TAG_UPDATE=false
QBT_REM_UNREGISTERED=false
QBT_REM_ORPHANED=false
QBT_TAG_TRACKER_ERROR=false
QBT_TAG_NOHARDLINKS=false
QBT_SHARE_LIMITS=false
QBT_SKIP_CLEANUP=false
QBT_DRY_RUN=false
QBT_LOG_LEVEL=INFO
QBT_DIVIDER==
QBT_WIDTH=100

View file

@ -1,26 +0,0 @@
[Unit]
Description=Port forward updater for qbittorrent over gluetun
After=gluetun.service
After=qbittorrent.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
# TODO: Replace this with one that has tags
# Probably have to repack my own
Image=docker.io/mjmeli/qbittorrent-port-forward-gluetun-server:latest
ContainerName=qbittorrent-port-forward-gluetun-server
HostName=qbittorrent-port-forward-gluetun-server
Pod=MAMstack
AutoUpdate=registry
Environment=QBT_USERNAME=$qbt_user
Environment=QBT_ADDR=http://localhost:8080
Environment=GTN_ADDR=http://localhost:8000
Secret=qbt_pw,type=env,target=QBT_PASSWORD

View file

@ -1,27 +0,0 @@
[Unit]
Description=qbittorrent client
After=gluetun.service
BindsTo=gluetun.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/qbittorrentofficial/qbittorrent-nox:$qbt_version
ContainerName=qbittorrent
HostName=qbittorrent
AutoUpdate=registry
Network=
Volume=/volumes/books/qbittorrent/config:/config
Volume=/volumes/books/qbittorrent/downloads:/downloads
Environment=QBT_LEGAL_NOTICE=confirm
Environment=QBT_VERSION=$qbt_version
Environment=TZ=$timezone

View file

@ -1,28 +0,0 @@
[Unit]
Description=Update qbittorrent session IP for tracker
After=qbittorrent.service
After=gluetun.service
BindsTo=gluetun.service
BindsTo=qbittorrent.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
# TODO: Is `latest` safe for this container?
Image=docker.io/myanonamouse/seedboxapi:latest
ContainerName=seedboxapi
HostName=seedboxapi
Pod=MAMstack.pod
AutoUpdate=registry
Volume=/volumes/books/seedboxapi/config:/config
Environment=DEBUG=1
Environment=interval=1
Secret=mam_id,type=env,target=mam_id

View file

@ -1,19 +0,0 @@
[Unit]
Description=IRC client
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=ghcr.io/thelounge/thelounge:latest
ContainerName=thelounge
HostName=thelounge
Pod=MAMstack.pod
AutoUpdate=registry
Volume=thelounge-data:/var/opt/thelounge

View file

@ -1,3 +1,3 @@
-# Quadlets
+## Quadlets
Quadlets go in `~/.config/containers/systemd`.

View file

@ -1,5 +1,5 @@
[Unit]
-Description=Budget management
+Description=Actual budget management
[Service]
@ -12,6 +12,8 @@ WantedBy=default.target
[Container]
Image=docker.io/actualbudget/actual-server:latest
ContainerName=actual
+Network=actual.network
HostName=actual
Volume=actual-data:/data

View file

@ -0,0 +1 @@
[Network]

View file

@ -11,8 +11,9 @@ WantedBy=default.target
[Container]
Image=docker.io/adguard/adguardhome:latest
ContainerName=adguard
-HostName=adguard
+Network=adguard.network
+HostName=adguard
PublishPort=53:53/tcp
PublishPort=53:53/udp
PublishPort=784:784/udp
@ -21,6 +22,6 @@ PublishPort=3000:3000/tcp
PublishPort=8844:80/tcp
PublishPort=8443:443/tcp
-Volume=adguard-config:/opt/adguardhome/work:z
-Volume=adguard-work:/opt/adguardhome/conf:z
-Volume=/var/log/AdGuardHome.log:/var/log/AdGuardHome.log:z
+Volume=adguard-config:/opt/adguardhome/work
+Volume=adguard-work:/opt/adguardhome/conf
+Volume=/var/log/AdGuardHome.log:/var/log/AdGuardHome.log

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,25 @@
[Unit]
Description=Apprise API
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/caronc/apprise:latest
ContainerName=apprise
AutoUpdate=registry
Network=apprise.network
HostName=apprise
PublishPort=8000:8000
Volume=apprise-config:/config
Volume=apprise-plugin:/plugin
Volume=apprise-attach:/attach
Environment=APPRISE_STATEFUL_MODE=simple
Environment=APPRISE_WORKER_COUNT=1

View file

@ -0,0 +1,6 @@
[Unit]
Description=Apprise network
[Network]
NetworkName=apprise

View file

@ -0,0 +1,5 @@
[Volume]
VolumeName=apprise-config
VolumeName=apprise-plugin
VolumeName=apprise-attach

View file

@ -11,13 +11,11 @@ WantedBy=default.target
[Container]
Image=docker.io/advplyr/audiobookshelf:latest
ContainerName=audiobookshelf
-HostName=audiobookshelf
+Network=audiobookshelf.network
+HostName=audiobookshelf
PublishPort=13378:80
-Volume=audiobookshelf-config:/metadata:z
-Volume=audiobookshelf-metadata:/config:z
-Volume=audiobookshelf-audiobooks:/audiobooks:z
+Volume=audiobookshelf-config:/metadata
+Volume=audiobookshelf-metadata:/config
+Volume=audiobookshelf-audiobooks:/audiobooks
-Environment=AUDIOBOOKSHELF_UID=USER_UID_HERE
-Environment=AUDIOBOOKSHELF_GID=USER_GID_HERE

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,24 @@
[Unit]
Description=betanin
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/sentriz/betanin
ContainerName=betanin
AutoUpdate=registry
Network=betanin.network
HostName=betanin
PublishPort=9393:9393
Volume=betanin-data:/b/.local/share/betanin
Volume=betanin-config:/b/.local/share/config
Volume=betanin-beets:/b/.local/share/beets
Volume=/path/to/music:/music
Volume=/path/to/downloads:/downloads

View file

@ -0,0 +1,6 @@
[Unit]
Description=betanin network
[Network]
NetworkName=betanin

View file

@ -0,0 +1,5 @@
[Volume]
VolumeName=betanin-data
VolumeName=betanin-config
VolumeName=betanin-beets

View file

@ -0,0 +1,26 @@
[Unit]
Description=Postgres for Blinko
Wants=blinko.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/postgres
ContainerName=blinko-db
Network=blinko.network
HostName=blinko-db
PublishPort=5435:5432
Volume=blinko-db:/var/lib/postgresql/data
Environment=POSTGRES_DB=postgres
Environment=POSTGRES_USER=postgres
Environment=TZ=Etc/UTC
Secret=blinko-db-pw,type=env,target=POSTGRES_PASSWORD

View file

@ -0,0 +1,28 @@
[Unit]
Description=Blinko
Requires=blinko-db.service
After=blinko-db.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/blinkospace/blinko
ContainerName=blinko
Network=blinko.network
HostName=blinko
PublishPort=1111:1111
Volume=blinko-data:/app/.blinko
Environment=NODE_ENV=production
#Environment=NEXTAUTH_URL=http://localhost:1111
#Environment=NEXT_PUBLIC_BASE_URL=http://localhost:1111
Environment=DATABASE_URL=postgresql://postgres:$mysecretpassword@blinko-db:5432/postgres
Secret=blinko-nextauth-secret,type=env,target=NEXTAUTH_SECRET

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,4 @@
[Volume]
VolumeName=blinko-data
VolumeName=blinko-db

View file

@ -1,7 +1,6 @@
[Unit]
Description=Reverse proxy
[Service]
Restart=on-failure
@ -11,8 +10,9 @@ WantedBy=default.target
[Container]
Image=caddy.build
ContainerName=caddy
-HostName=caddy
+Network=reverse-proxy.network
+HostName=caddy
PublishPort=80:80
PublishPort=443:443
PublishPort=443:443/udp

View file

@ -1,2 +1,3 @@
[Volume]
VolumeName=caddy-config
+VolumeName=caddy-data

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=caddy-config

View file

@ -1,2 +0,0 @@
[Volume]
VolumeName=caddy-data

View file

@ -0,0 +1 @@
[Network]

View file

@ -11,8 +11,9 @@ WantedBy=default.target
[Container]
Image=lscr.io/linuxserver/calibre:latest
ContainerName=calibre
-HostName=calibre
+Network=calibre.network
+HostName=calibre
PublishPort=8080
Volume=calibre-config:/config

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,19 @@
[Unit]
Description=ChartDB diagramming editor
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=ghcr.io/chartdb/chartdb
ContainerName=chartdb
Network=chartdb.network
HostName=chartdb
PublishPort=8080:80
Secret=openai-api-key,type=env,target=OPENAI_API_KEY

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,22 @@
[Unit]
Description=Checkmate mongodb
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/bluewaveuptime/uptime_database_mongo:latest
ContainerName=checkmate-mongodb
AutoUpdate=registry
Network=checkmate.network
HostName=checkmate-mongodb
PublishPort=27017:27017
Volume=checkmate-mongodb:/data/db
Exec=mongod --quiet

View file

@ -0,0 +1,20 @@
[Unit]
Description=Checkmate Redis
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/bluewaveuptime/uptime_redis:latest
ContainerName=checkmate-redis
AutoUpdate=registry
Network=checkmate.network
HostName=checkmate-redis
PublishPort=6379:6379
Volume=checkmate-redis:/data

View file

@ -0,0 +1,27 @@
[Unit]
Description=Checkmate server
Requires=checkmate-mongodb.service
Requires=checkmate-redis.service
After=checkmate-mongodb.service
After=checkmate-redis.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/bluewaveuptime/uptime_server:latest
ContainerName=checkmate-server
AutoUpdate=registry
Network=checkmate.network
HostName=checkmate-server
PublishPort=5000:5000
Volume=%t/podman/podman.sock:/run/user/1000/podman/podman.sock:ro
Environment=REDIS_HOST=checkmate-redis
Environment=DB_CONNECTION_STRING=mongodb://checkmate-mongodb:27017/uptime_db

View file

@ -0,0 +1,23 @@
[Unit]
Description=Checkmate
Requires=checkmate-server.service
After=checkmate-server.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/bluewaveuptime/uptime_client:latest
ContainerName=checkmate
AutoUpdate=registry
Network=checkmate.network
HostName=checkmate
PublishPort=80:80
PublishPort=443:443
Environment=UPTIME_APP_API_BASE_URL=http://localhost:5000/api/v1

View file

@ -0,0 +1,6 @@
[Unit]
Description=Checkmate network
[Network]
NetworkName=checkmate

View file

@ -0,0 +1,4 @@
[Volume]
VolumeName=checkmate-mongodb
VolumeName=checkmate-redis

View file

@ -0,0 +1,26 @@
[Unit]
Description=dashdot-nvidia
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/mauricenino/dashdot:nvidia
ContainerName=dashdot-nvidia
AutoUpdate=registry
Network=dashdot.network
HostName=dashdot
PublishPort=3001:3001
## FIXME: compose uses super weird syntax to find the gpu instead of mapping it directly
# AddDevice=/dev/dri/renderD129:/dev/dri/renderD129
Volume=/:/mnt/host:ro
EnvironmentFile=dashdot.env

View file

@ -0,0 +1,23 @@
[Unit]
Description=dashdot
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/mauricenino/dashdot
ContainerName=dashdot
AutoUpdate=registry
Network=dashdot.network
HostName=dashdot
PublishPort=3001:3001
Volume=/:/mnt/host:ro
EnvironmentFile=dashdot.env

View file

@ -0,0 +1,109 @@
# basic
DASHDOT_WIDGET_LIST=os,cpu,storage,ram,network
DASHDOT_PORT=
DASHDOT_PAGE_TITLE=
DASHDOT_DISABLE_INTEGRATIONS=
DASHDOT_SHOW_DASH_VERSION=
DASHDOT_USE_IMPERIAL=
DASHDOT_ALWAYS_SHOW_PERCENTAGES=
# server
DASHDOT_OS_LABEL_LIST=
DASHDOT_SHOW_HOST=
DASHDOT_CUSTOM_HOST=
## styles
DASHDOT_OS_WIDGET_GROW=
DASHDOT_OS_WIDGET_MIN_WIDTH=
## overrides
DASHDOT_OVERRIDE_OS=
DASHDOT_OVERRIDE_ARCH=
# cpu
DASHDOT_CPU_LABEL_LIST=
DASHDOT_ENABLE_CPU_TEMPS=
DASHDOT_CPU_TEMPS_MODE=
DASHDOT_CPU_CORES_TOGGLE_MODE=
## styles
DASHDOT_CPU_WIDGET_GROW=
DASHDOT_CPU_WIDGET_MIN_WIDTH=
DASHDOT_CPU_SHOWN_DATAPOINTS=
DASHDOT_CPU_POLL_INTERVAL=
## overrides
DASHDOT_OVERRIDE_CPU_BRAND=
DASHDOT_OVERRIDE_CPU_MODEL=
DASHDOT_OVERRIDE_CPU_CORES=
DASHDOT_OVERRIDE_CPU_THREADS=
DASHDOT_OVERRIDE_CPU_FREQUENCY=
# storage
DASHDOT_STORAGE_LABEL_LIST=
DASHDOT_FS_DEVICE_FILTER=
DASHDOT_FS_TYPE_FILTER=
DASHDOT_FS_VIRTUAL_MOUNTS=
## styles
DASHDOT_STORAGE_WIDGET_ITEMS_PER_PAGE=
DASHDOT_STORAGE_WIDGET_GROW=
DASHDOT_STORAGE_WIDGET_MIN_WIDTH=
DASHDOT_STORAGE_POLL_INTERVAL=
## overrides
DASHDOT_OVERRIDE_STORAGE_BRANDS=
DASHDOT_OVERRIDE_STORAGE_SIZES=
DASHDOT_OVERRIDE_STORAGE_TYPES=
# ram
DASHDOT_RAM_LABEL_LIST=
## styles
DASHDOT_RAM_WIDGET_GROW=
DASHDOT_RAM_WIDGET_MIN_WIDTH=
DASHDOT_RAM_SHOWN_DATAPOINTS=
DASHDOT_RAM_POLL_INTERVAL=
## overrides
DASHDOT_OVERRIDE_RAM_BRAND=
DASHDOT_OVERRIDE_RAM_SIZE=
DASHDOT_OVERRIDE_RAM_TYPE=
DASHDOT_OVERRIDE_RAM_FREQUENCY=
# network
DASHDOT_NETWORK_LABEL_LIST=
DASHDOT_ACCEPT_OOKLA_EULA=
DASHDOT_USE_NETWORK_INTERFACE=
DASHDOT_SPEED_TEST_FROM_PATH=
DASHDOT_NETWORK_SPEED_AS_BYTES=
## styles
DASHDOT_SPEED_TEST_INTERVAL=
DASHDOT_SPEED_TEST_INTERVAL_CRON=
DASHDOT_NETWORK_WIDGET_GROW=
DASHDOT_NETWORK_WIDGET_MIN_WIDTH=
DASHDOT_NETWORK_POLL_INTERVAL=
## overrides
DASHDOT_OVERRIDE_NETWORK_TYPE=
DASHDOT_OVERRIDE_NETWORK_SPEED_UP=
DASHDOT_OVERRIDE_NETWORK_SPEED_DOWN=
DASHDOT_OVERRIDE_NETWORK_INTERFACE_SPEED=
DASHDOT_OVERRIDE_NETWORK_PUBLIC_IP=
# gpu
DASHDOT_GPU_LABEL_LIST=
## styles
DASHDOT_GPU_WIDGET_GROW=
DASHDOT_GPU_WIDGET_MIN_WIDTH=
DASHDOT_GPU_SHOWN_DATAPOINTS=
DASHDOT_GPU_POLL_INTERVAL=
## overrides
DASHDOT_OVERRIDE_GPU_BRANDS=
DASHDOT_OVERRIDE_GPU_MODELS=
DASHDOT_OVERRIDE_GPU_MEMORIES=

View file

@ -0,0 +1 @@
[Network]

View file

@ -12,10 +12,10 @@ WantedBy=default.target
[Container]
Image=docker.io/lissy93/dashy:$dashy_version
ContainerName=dashy
-HostName=dashy
AutoUpdate=registry
-Network=
+Network=dashy.network
+HostName=dashy
Volume=./user-data:/app/user-data

View file

@ -0,0 +1 @@
[Network]

View file

@ -12,8 +12,11 @@ WantedBy=multi-user.target default.target
Image=docker.io/hurlenko/filebrowser:latest
ContainerName=filebrowser
+Network=filebrowser.network
+Hostname=filebrowser
Volume=/path/to/what/you/want/to/share:/data:z
Volume=fb-config:/config:z
Volume=fb-branding:/branding:z
PublishPort=8008:8080

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,23 @@
[Unit]
Description=Filestash wopi
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/collabora/code:24.04.10.2.1
ContainerName=filestash-wopi
AutoUpdate=registry
Network=filestash.network
HostName=filestash-wopi
PublishPort=9980:9980
Environment=extra_params=--o:ssl.enable=false
Environment=aliasgroup1="https://.*:443"
Exec=bash -c '/start-collabora-online.sh cool'

View file

@ -0,0 +1,28 @@
[Unit]
Description=Filestash
Wants=filestash-wopi.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/machines/filestash:latest
ContainerName=filestash
AutoUpdate=registry
Network=filestash.network
HostName=filestash
PublishPort=8334:8334
Volume=filestash:/app/data/state
Environment=APPLICATION_URL=https://filestash.example.com
Environment=CANARY=true
Environment=OFFICE_URL=http://filestash-wopi:9980
Environment=OFFICE_FILESTASH_URL=http://filestash:8334
Environment=OFFICE_REWRITE_URL=http://127.0.0.1:9980

View file

@ -0,0 +1 @@
[Network]

View file

@ -0,0 +1,2 @@
[Volume]
VolumeName=forgejo-data

View file

@ -0,0 +1,22 @@
[Unit]
Description=Forgejo
After=
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=codeberg.org/forgejo/forgejo:10
ContainerName=forgejo
AutoUpdate=registry
Network=forgejo.network
HostName=forgejo
PublishPort=222:22
PublishPort=3000:3000
Volume=forgejo-data:/data

View file

@ -0,0 +1 @@
[Network]

View file

@ -11,19 +11,19 @@ WantedBy=default.target
[Container]
Image=docker.io/felddy/foundryvtt:release
ContainerName=foundryvtt
-HostName=foundryvtt
+AutoUpdate=registry
+Network=foundryvtt.network
+HostName=foundryvtt
Volume=foundryvtt-data:/data
PublishPort=30000:30000
-Environment=TIMEZONE=
-Environment=FOUNDRY_UID=
-Environment=FOUNDRY_GID=
-Environment=FOUNDRY_PASSWORD=
+Environment=TIMEZONE=Etc/UTC
+Secret=foundry-password,type=env,target=FOUNDRY_PASSWORD
Environment=FOUNDRY_USERNAME=
-Environment=FOUNDRY_ADMIN_KEY=
-Environment=FOUNDRY_LICENSE_KEY=XXXX-XXXX-XXXX-XXXX-XXXX-XXXX
-Environment=FOUNDRY_HOT_RELOAD=true
+Secret=foundry-admin-key,type=env,target=FOUNDRY_ADMIN_KEY
+Secret=foundry-license-key,type=env,target=FOUNDRY_LICENSE_KEY=XXXX-XXXX-XXXX-XXXX-XXXX-XXXX
+Environment=FOUNDRY_HOT_RELOAD=false
Environment=CONTAINER_PRESERVE_CONFIG=true
Environment=CONTAINER_CACHE="/data/container_cache"

View file

@ -0,0 +1 @@
[Network]

View file

@ -11,9 +11,9 @@ WantedBy=default.target
[Container]
Image=docker.io/heussd/fivefilters-full-text-rss:latest
ContainerName=fivefilters
-HostName=fivefilters
Network=freshrss.network
+HostName=fivefilters
PublishPort=5000:80
-Environment=FTR_ADMIN_PASSWORD=XXXXXXXX
+Secret=ftr-admin-password,type=env,target=FTR_ADMIN_PASSWORD

View file

@ -1,5 +1,6 @@
[Unit]
Description=FreshRSS Quadlet
+Requires=fivefilters.service
After=fivefilters.service
[Service]
@ -12,13 +13,11 @@ WantedBy=default.target
[Container]
Image=docker.io/linuxserver/freshrss:latest
ContainerName=freshrss
-HostName=freshrss
Network=freshrss.network
+HostName=freshrss
PublishPort=4422:80
-Volume=freshrss-config:/config:z
-Environment=PUID=1001
-Environment=PGID=1001
-Environment=TZ=Europe/London
+Volume=freshrss-config:/config
+Environment=TZ=Etc/UTC

View file

@ -1,9 +1 @@
[Network]
-Subnet=10.10.10.0/24
-Gateway=10.10.10.1
-Label=app=freshrss
-Driver=pasta

View file

@ -0,0 +1,23 @@
[Unit]
Description=Gaseous MariaDB
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/mariadb:latest
ContainerName=gaseous-mariadb
AutoUpdate=registry
Network=gaseous.network
HostName=gaseous-mariadb
Volume=gaseous-mariadb:/var/lib/mysql
Environment=MARIADB_ROOT_PASSWORD=gaseous
Environment=MARIADB_USER=gaseous
Environment=MARIADB_PASSWORD=gaseous

View file

@ -0,0 +1,29 @@
[Unit]
Description=Gaseous ROM manager
Requires=gaseous-mariadb.service
After=gaseous-mariadb.service
[Service]
Restart=on-failure
TimeoutStartSec=900
[Install]
WantedBy=default.target
[Container]
Image=docker.io/gaseousgames/gaseousserver:latest
ContainerName=gaseous
AutoUpdate=registry
Network=gaseous.network
HostName=gaseous
PublishPort=5198:80
Volume=gaseous:/root/.gaseous-server
Environment=TZ=Etc/UTC
Environment=dbhost=gsdb
Environment=dbuser=root
Environment=dbpass=gaseous
Environment=igdbclientid=
Environment=igdbclientsecret=

Some files were not shown because too many files have changed in this diff.