From 052a6334cbeb18f8b5f0356ae9548e775cb55bfc Mon Sep 17 00:00:00 2001 From: d3vyce Date: Tue, 20 Feb 2024 20:12:33 +0100 Subject: [PATCH] convert pgn into webp + start hugo migration article --- .gitea/workflows/build_blog_image.yml | 2 +- Dockerfile | 6 +- README.md | 2 + assets/img/author.png | 3 - assets/img/author.webp | 3 + assets/img/author_transparent.png | 3 - assets/img/author_transparent.webp | 3 + config/_default/languages.en.toml | 2 +- config/_default/params.toml | 2 +- .../posts/authelia-selfhosted-sso/index.md | 10 +- .../index.md | 16 +- .../how-to-index-your-blog-on-google/index.md | 14 +- .../index.md | 8 +- .../img/image-1.png | 3 + .../img/image-1.webp | 3 + .../img/image-2.png | 3 + .../img/image-2.webp | 3 + .../posts/migrate-from-ghost-to-hugo/index.md | 144 +++++++++++++++++- content/posts/my-current-homelab/index.md | 28 ++-- nginx/{ => conf.d}/default.conf | 0 20 files changed, 207 insertions(+), 51 deletions(-) delete mode 100644 assets/img/author.png create mode 100644 assets/img/author.webp delete mode 100644 assets/img/author_transparent.png create mode 100644 assets/img/author_transparent.webp create mode 100644 content/posts/migrate-from-ghost-to-hugo/img/image-1.png create mode 100644 content/posts/migrate-from-ghost-to-hugo/img/image-1.webp create mode 100644 content/posts/migrate-from-ghost-to-hugo/img/image-2.png create mode 100644 content/posts/migrate-from-ghost-to-hugo/img/image-2.webp rename nginx/{ => conf.d}/default.conf (100%) diff --git a/.gitea/workflows/build_blog_image.yml b/.gitea/workflows/build_blog_image.yml index d73b20b..b50c4fe 100644 --- a/.gitea/workflows/build_blog_image.yml +++ b/.gitea/workflows/build_blog_image.yml @@ -9,7 +9,7 @@ jobs: build docker: runs-on: linux_amd steps: - - name: checkout code + - name: Checkout code uses: actions/checkout@v3 # with: # lfs: 'true' diff --git a/Dockerfile b/Dockerfile index 1f2cab1..91c12ed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,4 @@ +# Build Stage FROM 
git.d3vyce.fr/d3vyce/hugo:latest AS build WORKDIR /opt/blog @@ -6,12 +7,11 @@ COPY . /opt/blog/ RUN git submodule update --init --recursive RUN hugo - +# Publish Stage FROM nginx:1.25-alpine WORKDIR /usr/share/nginx/html COPY --from=build /opt/blog/public /usr/share/nginx/html/ -COPY nginx/nginx.conf /etc/nginx/nginx.conf -COPY nginx/default.conf /etc/nginx/conf.d/default.conf +COPY nginx/ /etc/nginx/ EXPOSE 80/tcp diff --git a/README.md b/README.md index beb29de..e845695 100644 --- a/README.md +++ b/README.md @@ -6,4 +6,6 @@ export PATH=$PATH:/usr/local/go/bin CGO_ENABLED=1 go install -tags extended github.com/gohugoio/hugo@latest git submodule update --recursive git lfs pull + +hugo server --buildDrafts ``` \ No newline at end of file diff --git a/assets/img/author.png b/assets/img/author.png deleted file mode 100644 index 76e50af..0000000 --- a/assets/img/author.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a89f4ba16439af6309f7fbe406b4f1486b0c38b824e5635121df5a410a9dfb38 -size 43125 diff --git a/assets/img/author.webp b/assets/img/author.webp new file mode 100644 index 0000000..9cb81ab --- /dev/null +++ b/assets/img/author.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60fb4999ab2345d2c97535f51f2dde60235f54d29c737b1597e46656391bc82d +size 20568 diff --git a/assets/img/author_transparent.png b/assets/img/author_transparent.png deleted file mode 100644 index f6c7f92..0000000 --- a/assets/img/author_transparent.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fa20195ff8ba78f47eac9fb20d88851ad97fcb62e602962c3ac931c5307da692 -size 39187 diff --git a/assets/img/author_transparent.webp b/assets/img/author_transparent.webp new file mode 100644 index 0000000..4fda074 --- /dev/null +++ b/assets/img/author_transparent.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cd5dcc4727b3fe28eb86fc5429ea59ddeafc5bf4f5219caa508ceb45427e306 
+size 15844 diff --git a/config/_default/languages.en.toml b/config/_default/languages.en.toml index b6a8d81..afa71f7 100644 --- a/config/_default/languages.en.toml +++ b/config/_default/languages.en.toml @@ -8,7 +8,7 @@ title = "d3vyce Blog" isoCode = "en" rtl = false dateFormat = "2 January 2006" - logo = "img/author_transparent.png" + logo = "img/author_transparent.webp" # secondaryLogo = "img/secondary-logo.png" description = "Hi πŸ‘‹, Welcome to my Blog!" copyright = "d3vyce 2024 Β© All rights reserved." diff --git a/config/_default/params.toml b/config/_default/params.toml index 805aa68..a06b1f2 100644 --- a/config/_default/params.toml +++ b/config/_default/params.toml @@ -37,7 +37,7 @@ smartTOCHideUnfocusedChildren = false [homepage] layout = "custom" # valid options: page, profile, hero, card, background, custom - homepageImage = "img/ocean.jpg" # used in: hero, and card + homepageImage = "img/ocean.webp" # used in: hero, and card showRecent = true showRecentItems = 9 showMoreLink = true diff --git a/content/posts/authelia-selfhosted-sso/index.md b/content/posts/authelia-selfhosted-sso/index.md index 64505a0..e541b9e 100644 --- a/content/posts/authelia-selfhosted-sso/index.md +++ b/content/posts/authelia-selfhosted-sso/index.md @@ -28,7 +28,7 @@ First of all, let's start by installing and configuring the docker. For that you In my case I use Unraid and a template is directly available. I just have to set the port to use for the web interface. -![Unraid docker setup](img/image-1.png) +![Unraid docker setup](img/image-1.webp) Before launching the docker we will have to make several changes to the configuration file. 
@@ -104,7 +104,7 @@ docker run --rm authelia/authelia:latest authelia hash-password 'yourpassword' If this does not work you can manually create the hash using this [site](https://argon2.online) and these parameters: -![Argon2 hash generator](img/image-2.png) +![Argon2 hash generator](img/image-2.webp) ### Access control For the access policy we will do something simple with a single role. But nothing prevents you from creating several roles with different rights and access. @@ -284,17 +284,17 @@ real_ip_recursive on; Then you can add it in the `Advanced` tab of the desired subdomain: -![NginxProxyManager new proxy host advanced options](img/image-3.png) +![NginxProxyManager new proxy host advanced options](img/image-3.webp) After saving, go to the address of your subdomain to verify that it works. You should arrive on the following page: -![Authelia login page](img/image-4.png) +![Authelia login page](img/image-4.webp) You can connect with one of the credencials you created in the `users_database.yml` file. Once the connection is done, you should be redirected to the application hosted on the subdomain! To configure/modify the double authentication method, go to the subdomain you have configured for Authelia (`ex. auth.youdomain.com`). 
Then select `Methods`: -![Authelia 2FA modal](img/image-5.png) +![Authelia 2FA modal](img/image-5.webp) ## Conclusion diff --git a/content/posts/how-to-host-multiple-services-on-one-public-ip/index.md b/content/posts/how-to-host-multiple-services-on-one-public-ip/index.md index 8ccf858..b5ed0eb 100644 --- a/content/posts/how-to-host-multiple-services-on-one-public-ip/index.md +++ b/content/posts/how-to-host-multiple-services-on-one-public-ip/index.md @@ -18,7 +18,7 @@ As described in the intro one of the big problems of self hosted on a non profes > Dynamic DNS (DDNS) is a method of automatically updating a name server in the Domain Name System (DNS), often in real time, with the active DDNS configuration of its configured hostnames, addresses or other information. > β€” [Wikipedia](https://en.wikipedia.org/wiki/Dynamic_DNS) -![DDNS schematic](img/image-1.png) +![DDNS schematic](img/image-1.webp) The operation of DDNS is separated into 3 parts: @@ -36,11 +36,11 @@ If you have a fixed IP with your internet provider, you do not need to do the fo In my case I decided to use [DuckDNS](https://www.duckdns.org), it's a free service and easily configurable. First you will have to create an account with the service of your choice. Then you have to get your token, it's your unique identifier that will allow DuckDNS to identify you. -![DuckDNS token page](img/image-2.png) +![DuckDNS token page](img/image-2.webp) You will now have to create a sub domain to the duckdns.org domain. To do this, simply fill in the "sub domain" field and click on "add domain". -![DuckDNS add domain](img/image-3.png) +![DuckDNS add domain](img/image-3.webp) Then go to your docker manager to install the [linuxserver/duckdns](https://hub.docker.com/r/linuxserver/duckdns) docker. 
The docker compose is quite simple, you just have to indicate the two following elements: @@ -51,7 +51,7 @@ TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx You can then launch the docker, if all is well configured you can return to DuckDNS and verify that it has received your IP: -![DuckDNS current IP](img/image-4.png) +![DuckDNS current IP](img/image-4.webp) ## Sub-domain creation Now that we have a domain at DuckDNS, we will have to link our personal domain/sub-domain to the DuckDNS sub-domain. @@ -65,7 +65,7 @@ To do the redirection you have to create DNS entries of type CNAME. To create a CNAME entry, all you need is a sub-domain and a target : -![OVH DNS zone setup](img/image-5.png) +![OVH DNS zone setup](img/image-5.webp) In this example I create a sub-domain "www.d3vyce.fr" which redirects to the DuckDNS domain "xxxx.duckdns.org". Once the information is propagated on the different DNS servers, I should be able to access the IP of my box via this sub-domain. @@ -96,7 +96,7 @@ To do this we will set up a Reserve Proxy. > A reverse proxy server is a type of proxy server that typically sits behind the firewall in a private network and directs client requests to the appropriate backend server. > β€” [Nginx.com](https://www.nginx.com/resources/glossary/reverse-proxy-server) -![Reverse Proxy schematic](img/image-6.png) +![Reverse Proxy schematic](img/image-6.webp) Globally the reverse proxy will inspect the source domain to determine which local IP/Port to redirect the request to. @@ -144,7 +144,7 @@ Password: changeme After creating a user, we can add our first service! To do this go to the Hosts -> Proxy Hosts tab. Now click on "Add Proxy Host". -![NginxProxyManager new proxy host](img/image-7.png) +![NginxProxyManager new proxy host](img/image-7.webp) This is where we will have to fill in our sub-domain, local IP of the service and its port. 
In the example above, I configure the sub-domain "test.d3vyce.fr" with the local web server which is at the address 192.168.1.10:80. @@ -156,7 +156,7 @@ The "Websockets Support" option can be the cause of problems for some applicatio Now let's configure our SSL certificate. -![NginxProxyManager new proxy host SSL options](img/image-8.png) +![NginxProxyManager new proxy host SSL options](img/image-8.webp) Select the option "Request a new SSL Certificate" then the options "Force SSL", "HTTP/2 Support" and "HSTS Enabled". Then fill in our email and accept the terms of service. You can now save. After a few seconds you should see the status "Online" for your subdomain. If you have no errors you can now access your service with this subdomain! Using the same principle, you can setup other services. diff --git a/content/posts/how-to-index-your-blog-on-google/index.md b/content/posts/how-to-index-your-blog-on-google/index.md index 4d591f5..b525c08 100644 --- a/content/posts/how-to-index-your-blog-on-google/index.md +++ b/content/posts/how-to-index-your-blog-on-google/index.md @@ -24,7 +24,7 @@ https://search.google.com/u/0/search-console/welcome You should land on the following page which gives us the choice between two options. -![Google Search Console](img/image-1.png) +![Google Search Console](img/image-1.webp) Domain : this option will be used only if you use your domain name only for your website and you don't have any subdomain. For example in my case I have several subdomains (ex. status.d3vyce.fr) but I don't want them to be indexed. @@ -32,7 +32,7 @@ URL prefix : this second option allows to declare a precise URL and not an entir You can then enter your domain. In my case it is https://www.d3vyce.fr. If all goes well the ownership verification of your domain is automatic. But if it's not the case, don't panic, an error message will tell you how to solve the problem. 
Globally, Google will provide you with a file that you should host on your site, this will verify that you have control of the site and therefore the domain. -![Ownership auto verification](img/image-2.png) +![Ownership auto verification](img/image-2.webp) From this moment, the google robot will have to visit your site soon to do a scan. We could stop here but we will provide additional information to help the robot ! @@ -42,18 +42,18 @@ The majority of CMS (Content management system) have this functionality integrat In my case the sitemaps files are located at the following address: https://www.d3vyce.fr/sitemap.xml -![Sitemap list generated by Ghost](img/image-3.png) +![Sitemap list generated by Ghost](img/image-3.webp) This link leads to a sitemap index which is itself composed of several sitemap files. We have the choice to add file by file or directly the sitemap index. To add our sitemap, we must go to Index > Sitemaps. Then we add the link to our index file. -![Add sitemap modal](img/image-4.png) +![Add sitemap modal](img/image-4.webp) After a few minutes we notice that our sitemap index has been detected and that our 5 sitemaps have been imported! -![Submitted sitemaps](img/image-5.png) -![Sitemaps list](img/image-6.png) +![Submitted sitemaps](img/image-5.webp) +![Sitemaps list](img/image-6.webp) After this step, there is nothing left to do but wait. This can take from a few hours to several weeks in some cases.--- @@ -61,7 +61,7 @@ After this step, there is nothing left to do but wait. This can take from a few ## Update After about 36 hours, my blog has been indexed on google and is now accessible with a simple search! 
-![Google result for blog](img/image-7.png) +![Google result for blog](img/image-7.webp) I then went to see the access logs of the site and we can observe the passages of the Googlebot which scans the site: ``` diff --git a/content/posts/how-to-make-daily-backups-of-your-homelab/index.md b/content/posts/how-to-make-daily-backups-of-your-homelab/index.md index 09453b6..086c386 100644 --- a/content/posts/how-to-make-daily-backups-of-your-homelab/index.md +++ b/content/posts/how-to-make-daily-backups-of-your-homelab/index.md @@ -16,7 +16,7 @@ You have your local homelab in which you store all your data. You have set up se > - 1: The final β€œone” referred to the rule that one copy of the two backups should be taken off-site, so that anything that affected the first copy would not (hopefully) affect it. > β€” [computerweekly.com](computerweekly.com) -![321 backup rule](img/image-1.png) +![321 backup rule](img/image-1.webp) 3-2-1 Backup Rule allows to store data with almost no risk of loss. Today many people agree that this rule does not really make sense with the arrival of the cloud. Indeed, providers such as Google, Amazon, ... have replication systems on several disks, but especially in several locations. All this remains invisible for the user, but these securities are well present. @@ -89,17 +89,17 @@ In the script plugin of Unraid I add the different scripts with the following ex This should result in the following: -![Script list in Unraid](img/image-2.png) +![Script list in Unraid](img/image-2.webp) After waiting one day I check on the drive that the backup has been done: -![Save zip in Google Drive](img/image-3.png) +![Save zip in Google Drive](img/image-3.webp) And indeed there is an archive of about 1Gb that has been uploaded in the save_mark1 folder. The system works ! I then let the script run for several days to see if the history system works well. As you can see I have a history of the file for about 30 days. 
An interesting thing to know is that only the archive consumes space on my drive and not all the versions in the history. This makes it consume ~1Gb with 30 versions of the archive accessible. -![Versions list in Google Drive](img/image-4.png) +![Versions list in Google Drive](img/image-4.webp) ## Conclusion diff --git a/content/posts/migrate-from-ghost-to-hugo/img/image-1.png b/content/posts/migrate-from-ghost-to-hugo/img/image-1.png new file mode 100644 index 0000000..6da41c2 --- /dev/null +++ b/content/posts/migrate-from-ghost-to-hugo/img/image-1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f95546b5bce5aa0d006d140022f6a8a61bf792fa7f9487f15c7617aa5f698789 +size 12863 diff --git a/content/posts/migrate-from-ghost-to-hugo/img/image-1.webp b/content/posts/migrate-from-ghost-to-hugo/img/image-1.webp new file mode 100644 index 0000000..72b02ba --- /dev/null +++ b/content/posts/migrate-from-ghost-to-hugo/img/image-1.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaa0fcf06ba055ed7ad3c989a3eccba7c4b4e00d118273f14dcc736bef4c8b23 +size 8750 diff --git a/content/posts/migrate-from-ghost-to-hugo/img/image-2.png b/content/posts/migrate-from-ghost-to-hugo/img/image-2.png new file mode 100644 index 0000000..9c7ffbc --- /dev/null +++ b/content/posts/migrate-from-ghost-to-hugo/img/image-2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee74442892227fa52585be1ba435dab2447e047a670ea1be6d0090896e4d5f16 +size 10120 diff --git a/content/posts/migrate-from-ghost-to-hugo/img/image-2.webp b/content/posts/migrate-from-ghost-to-hugo/img/image-2.webp new file mode 100644 index 0000000..0f19a0c --- /dev/null +++ b/content/posts/migrate-from-ghost-to-hugo/img/image-2.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb268ac007d2b58d033aa7971fa899e01583142a93860d9f5013d23408ae76a2 +size 8522 diff --git a/content/posts/migrate-from-ghost-to-hugo/index.md 
b/content/posts/migrate-from-ghost-to-hugo/index.md index 49059d5..619ce93 100644 --- a/content/posts/migrate-from-ghost-to-hugo/index.md +++ b/content/posts/migrate-from-ghost-to-hugo/index.md @@ -3,7 +3,149 @@ title: "Migrate from Ghost to Hugo" date: 2024-02-17 draft: true slug: "migrate-from-ghost-to-hugo" -tags: ["ci/cd", "docker", "hugo"] +tags: ["ci/cd", "docker", "git", "hugo"] type: "programming" --- +## Current solution +I've had a blog since early 2022. Historically, I chose Ghost to make this site. At the time, several factors led me to choose this CMS rather than another: +- Ease of deployment and use +- Online administration page and article editor +- Simple, modern theme +- Regular updates + +But after using it for quite some time, a number of problems have arisen that can't really be corrected: +- Limited customization +- Complex theme modification if you want to be able to update it afterwards +- Significant resource requirements for Ghost+Mysql (see Conclusion) +- No options for advanced content organization +- Too many unused options to justify this choice (subscription, user account, etc.) + +All these problems, combined with the fact that my needs had evolved, led me to change the technical solution for my blog. + +## Choosing a new solution +Today, there are many options for blogging. Third-party hosted options like Medium, CMS like Wordpress and Ghost, but also static website generators. For this new version of my blog, I've opted for a static website generator. + +Here again, there are several solutions, but I've settled on [Hugo](https://gohugo.io/). + +Hugo is a Go-based open-source framework created in 2013. It's known for being very fast, highly customizable and with a very active community. + +After choosing Hugo I had to choose a theme, I had several requirements in terms of features. I ended up choosing [Blowfish](https://blowfish.page/).
+ +It's a highly flexible and customizable theme, regularly updated, with a minimalist, modern design. + +## Migration +### Settings + + +### Contents and optimization + +```bash +find ./content/ -type f -name '*.png' -exec sh -c 'cwebp -q 90 $1 -o "${1%.png}.webp"' _ {} \; +``` +https://pawelgrzybek.com/webp-and-avif-images-on-a-hugo-website/ + +## Storage and automatic deployment +### Git-LFS + +```bash +apt install git-lfs +git lfs install +git lfs migrate \ + import \ + --include="*.jpg,*.svg,*.ttf,*.woff*,*.min.*,*.webp,*.ico,*.png,*.jpeg" \ + --include-ref=refs/heads/main +``` + +### CI/CD + +hugo.Dockerfile: +```dockerfile +FROM golang:1.22-alpine AS build + +ARG CGO=1 +ENV CGO_ENABLED=${CGO} +ENV GOOS=linux +ENV GO111MODULE=on + +RUN apk update && \ + apk add --no-cache gcc musl-dev g++ git +RUN go install -tags extended github.com/gohugoio/hugo@v0.122.0 +``` +I then use the following commands to build the Hugo image: +``` +docker build -t git.d3vyce.fr/d3vyce/hugo:latest -f hugo.Dockerfile . +docker push git.d3vyce.fr/d3vyce/hugo:latest +``` + +Dockerfile: +```dockerfile +# Build Stage +FROM git.d3vyce.fr/d3vyce/hugo:latest AS build + +WORKDIR /opt/blog +COPY . 
/opt/blog/ + +RUN git submodule update --init --recursive +RUN hugo + +# Publish Stage +FROM nginx:1.25-alpine + +WORKDIR /usr/share/nginx/html +COPY --from=build /opt/blog/public /usr/share/nginx/html/ +COPY nginx/ /etc/nginx/ + +EXPOSE 80/tcp +``` + +```yaml +name: Build Blog Docker Image + +on: + push: + branches: + - main + +jobs: + build docker: + runs-on: linux_amd + steps: + - name: checkout code + uses: actions/checkout@v3 + # with: + # lfs: 'true' + - name: Checkout LFS + run: | + function EscapeForwardSlash() { echo "$1" | sed 's/\//\\\//g'; } + readonly ReplaceStr="EscapeForwardSlash ${{ gitea.repository }}.git/info/lfs/objects/batch"; sed -i "s/\(\[http\)\( \".*\)\"\]/\1\2`$ReplaceStr`\"]/" .git/config + git config --local lfs.transfer.maxretries 1 + /usr/bin/git lfs fetch origin refs/remotes/origin/${{ gitea.ref_name }} + /usr/bin/git lfs checkout + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Login to Docker registry + uses: docker/login-action@v2 + with: + registry: git.d3vyce.fr + username: ${{ github.actor }} + password: ${{ secrets.GIT_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./Dockerfile + platforms: linux/amd64 + push: true + tags: git.d3vyce.fr/${{ github.repository }}:latest +``` + +https://gitea.com/gitea/act_runner/issues/164 + +## Conclusion: before/after comparison + +![Ghost based blog lighthouse result](img/image-1.webp) + +![Hugo based blog lighthouse result](img/image-2.webp) diff --git a/content/posts/my-current-homelab/index.md b/content/posts/my-current-homelab/index.md index 32ca065..3a90c45 100644 --- a/content/posts/my-current-homelab/index.md +++ b/content/posts/my-current-homelab/index.md @@ -44,7 +44,7 @@ Currently my homelab is composed of the following elements: --- ## Topology -![Homelab topology](img/image-1.png) +![Homelab topology](img/image-1.webp) In terms of network architecture it is quite simple, there is only one subnet, the 10.0.0.0/24, that I have subdivided for the different equipment: - 10.0.0.1 : Unifi Dream Machine @@ -58,7 +58,7 @@ The vast majority of services/VM/storage are on the Mark1 server. This server is [Unraid](https://unraid.net) is a paid OS that is offered in 3 versions: -![Unraid license options](img/image-2.png) +![Unraid license options](img/image-2.webp) The only difference is the number of storage devives we can install in our server. In my case I am on the "Plus" version. It's a one time payment that allows you to unlock all the features. @@ -74,17 +74,17 @@ Mark2 is a server under Ubuntu Server, it is notably used for game servers (Mine As you can see on the diagram there are many services running in my homelab. Most of them are on the "Mark1" server and are Dockers. 
| | Name | Description | | ----------- | ----------- | ----------- | -| radarr | Radarr | Movie collection manager | -| sonarr | Sonarr | Series collection manager | -| bazzar | Bazzar | Subtittle finder for movie and series | -| jackett | Jackett | Proxy server for indexer | -| adguardhome | AdGuardHome | DNS for blocking ads and tracking | -| bitwarden | Bitwarden | Password manager | -| deluge | Deluge | Torrent downloader | -| gitea | Gitea | Local github | -| homeassistant | Home Assistant | IOT manager (Zigbee) | -| nginxproxymanager | Nginx Proxy Manager | Reverse Proxy | -| plex | Plex | Movie and series remote access | +| radarr | Radarr | Movie collection manager | +| sonarr | Sonarr | Series collection manager | +| bazzar | Bazzar | Subtittle finder for movie and series | +| jackett | Jackett | Proxy server for indexer | +| adguardhome | AdGuardHome | DNS for blocking ads and tracking | +| bitwarden | Bitwarden | Password manager | +| deluge | Deluge | Torrent downloader | +| gitea | Gitea | Local github | +| homeassistant | Home Assistant | IOT manager (Zigbee) | +| nginxproxymanager | Nginx Proxy Manager | Reverse Proxy | +| plex | Plex | Movie and series remote access | @@ -92,7 +92,7 @@ In addition to these services, I have two database managers: MariaDB and Redis. In terms of VMs on Mark1, I have 2 Ubuntu VMs for web hosting. A GNS3 VM for network tests. A VM containing Home Assistant. A Debian VM for a Docker project in progress and a Kali VM to do Pentesting and have access to cyber tools in remote. -![Virtual Machines tab in Unraid](img/image-3.png) +![Virtual Machines tab in Unraid](img/image-3.webp) --- diff --git a/nginx/default.conf b/nginx/conf.d/default.conf similarity index 100% rename from nginx/default.conf rename to nginx/conf.d/default.conf