diff --git a/.github/paths-filter.yml b/.github/paths-filter.yml
index 127eb3c42..f37c979e4 100644
--- a/.github/paths-filter.yml
+++ b/.github/paths-filter.yml
@@ -5,6 +5,7 @@ addons_updater: addons_updater/config.* # Image : yes
 arpspoof: arpspoof/config.* # Image : yes
 autobrr: autobrr/config.* # Image : yes
 baikal: baikal/config.* # Image : yes
+battybirdnet-pi: battybirdnet-pi/config.* # Image : yes
 bazarr: bazarr/config.* # Image : yes
 binance-trading-bot: binance-trading-bot/config.* # Image : yes
 birdnet-go: birdnet-go/config.* # Image : yes
diff --git a/.github/stars.svg b/.github/stars.svg
index 7e510ec69..9acec9721 100644
--- a/.github/stars.svg
+++ b/.github/stars.svg
@@ -116,179 +116,179 @@
[regenerated stargazers image: the SVG avatar/label markup is reshuffled, with new stargazers such as ddcash and igorsantos07 appearing and the existing names reordered; raw SVG text diff omitted]
diff --git a/.github/starsevol.svg b/.github/starsevol.svg
index 8e336af50..6839a43a8 100644
--- a/.github/starsevol.svg
+++ b/.github/starsevol.svg
@@ -1 +1 @@
-star-history.com202220232024 20040060080010001200alexbelgium/hassio-addonsStar HistoryDateGitHub Stars
\ No newline at end of file
+star-history.com202220232024 200400600800100012001400alexbelgium/hassio-addonsStar HistoryDateGitHub Stars
\ No newline at end of file
diff --git a/.github/stats.png b/.github/stats.png
index 7d93869e7..48b33e24d 100644
Binary files a/.github/stats.png and b/.github/stats.png differ
diff --git a/.github/stats_addons.png b/.github/stats_addons.png
index e795a1e5f..a07b20b86 100644
Binary files a/.github/stats_addons.png and b/.github/stats_addons.png differ
diff --git a/.github/workflows/daily_README.yaml b/.github/workflows/daily_README.yaml
index 703077b2c..cf42aa7ca 100644
--- a/.github/workflows/daily_README.yaml
+++ b/.github/workflows/daily_README.yaml
@@ -7,6 +7,7 @@ on:
   workflow_dispatch: null
 jobs:
   README_updater:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout Repo
diff --git a/.github/workflows/helper_stats_graphs.yaml b/.github/workflows/helper_stats_graphs.yaml
index c999f765d..b26cc545e 100644
--- a/.github/workflows/helper_stats_graphs.yaml
+++ b/.github/workflows/helper_stats_graphs.yaml
@@ -7,6 +7,7 @@ on:
 jobs:
   stats_graphs:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
diff --git a/.github/workflows/onpush_builder.yaml b/.github/workflows/onpush_builder.yaml
index 9a615090b..a58b68736 100644
--- a/.github/workflows/onpush_builder.yaml
+++ b/.github/workflows/onpush_builder.yaml
@@ -16,6 +16,7 @@ on:
 jobs:
   correct_path_filters:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest
     steps:
       - name: ↩️ Checkout
@@ -169,7 +170,7 @@ jobs:
       - name: Build ${{ matrix.addon }} add-on
         id: builderstep
         if: steps.check.outputs.build_arch == 'true'
-        uses: home-assistant/builder@master
+        uses: home-assistant/builder@2024.03.5
         env:
           CAS_API_KEY: ${{ secrets.CAS_API_KEY }}
         with:
diff --git a/.github/workflows/weekly-supelinter.yaml b/.github/workflows/weekly-supelinter.yaml
index 350975d8d..b30b2e18c 100644
--- a/.github/workflows/weekly-supelinter.yaml
+++ b/.github/workflows/weekly-supelinter.yaml
@@ -14,6 +14,7 @@ on:
 jobs:
   make-executable:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
@@ -63,6 +64,7 @@ jobs:
   check_crlf:
     name: Check CRLF action
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest
     steps:
diff --git a/.github/workflows/weekly_crlftolf.yaml b/.github/workflows/weekly_crlftolf.yaml
index 11d737a10..525433a0f 100644
--- a/.github/workflows/weekly_crlftolf.yaml
+++ b/.github/workflows/weekly_crlftolf.yaml
@@ -8,6 +8,7 @@ on:
 jobs:
   crlf-to-lf:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository contents
@@ -24,6 +25,7 @@ jobs:
   fix-crlf:
     name: Fix CRLF Endings
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-latest # Use a Linux runner
     steps:
       - name: Checkout repository contents
diff --git a/.github/workflows/weekly_reduceimagesize.yml b/.github/workflows/weekly_reduceimagesize.yml
index 5234def7b..667ee4333 100644
--- a/.github/workflows/weekly_reduceimagesize.yml
+++ b/.github/workflows/weekly_reduceimagesize.yml
@@ -9,6 +9,7 @@ on:
 jobs:
   calibre:
+    if: github.repository_owner == 'alexbelgium'
     name: calibreapp/image-actions
     runs-on: ubuntu-latest
     steps:
diff --git a/.github/workflows/weekly_sortjson.yaml b/.github/workflows/weekly_sortjson.yaml
index f64a8456c..655dd38fc 100644
--- a/.github/workflows/weekly_sortjson.yaml
+++ b/.github/workflows/weekly_sortjson.yaml
@@ -9,6 +9,7 @@ on:
 jobs:
   sort_json:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout Repo
diff --git a/.github/workflows/weekly_stats.yaml b/.github/workflows/weekly_stats.yaml
index 160368b16..a3734c62a 100644
--- a/.github/workflows/weekly_stats.yaml
+++ b/.github/workflows/weekly_stats.yaml
@@ -8,6 +8,7 @@ on:
 jobs:
   stats_updater:
+    if: github.repository_owner == 'alexbelgium'
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout Repo
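The recurring change in the workflow files above is a job-level guard that keeps scheduled maintenance jobs from firing on forks: each job is skipped unless `github.repository_owner` matches the upstream owner. A minimal sketch of the pattern, assuming a standard GitHub Actions setup (the workflow and job names below are illustrative, not taken from the repository; only the `if:` expression and runner labels mirror the diff):

```yaml
# Illustrative workflow showing the owner guard added throughout this PR.
name: example-maintenance
on:
  schedule:
    - cron: "0 4 * * 1"   # hypothetical weekly trigger
  workflow_dispatch:
jobs:
  maintenance:
    # Forks inherit scheduled workflows; this condition turns the job into a no-op there.
    if: github.repository_owner == 'alexbelgium'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
```

The same commit also pins the add-on build step from `home-assistant/builder@master` to the tagged release `home-assistant/builder@2024.03.5`, trading automatic updates for a reproducible builder version.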
diff --git a/README.md b/README.md
index 1158b8e8c..32fb92661 100644
--- a/README.md
+++ b/README.md
@@ -44,20 +44,20 @@ If you want to do add the repository manually, please follow the procedure highl
 ### Number of addons
-- In the repository : 92
-- Installed : 80926
+- In the repository : 93
+- Installed : 86596
 ### Top 3
-1. Portainer (11142x)
-2. Filebrowser (9880x)
-3. Arpspoof (2505x)
+1. Portainer (16729x)
+2. Filebrowser (11150x)
+3. Sabnzbd (5295x)
 ### Architectures used
-- amd64: 58%
-- aarch64: 34%
-- armv7: 8%
+- amd64: 61%
+- aarch64: 31%
+- armv7: 7%
 ### Total downloads evolution
@@ -75,8 +75,7 @@ If you want to do add the repository manually, please follow the procedure highl
 ## Add-ons provided by this repository
-
-✓ [Arpspoof (2505x)](arpspoof/) : block internet connection for local network devices
+✓ [Arpspoof](arpspoof/) : block internet connection for local network devices
   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json)
 ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fupdater.json)
@@ -95,7 +94,7 @@ If you want to do add the repository manually, please follow the procedure highl
 ![smb][smb-badge]
 ![localdisks][localdisks-badge]
-✓ [Baikal](baikal/) : Calendar+Contacts server
+✓ [Baikal](baikal/) : Calendar+Contacts server
   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbaikal%2Fconfig.json)
 ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbaikal%2Fupdater.json)
@@ -103,7 +102,19 @@ If you want to do add the repository manually, please follow the procedure highl
 ![amd64][amd64-badge]
 ![armv7][armv7-badge]
-✓ [Bazarr NAS](bazarr/) : Companion application to Sonarr and Radarr to download subtitles
+✓ ![image](https://api.iconify.design/mdi/bird.svg) [BattyBirdNET-Pi](battybirdnet-pi/) : A realtime acoustic bat & bird classification system for the Raspberry Pi 4/5 built on BattyBirdNET-Analyzer
+
+  ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbattybirdnet-pi%2Fconfig.json)
+![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbattybirdnet-pi%2Fupdater.json)
+![aarch64][aarch64-badge]
+![amd64no][amd64no-badge]
+![armv7no][armv7no-badge]
+![ingress][ingress-badge]
+![mqtt][mqtt-badge]
+![smb][smb-badge]
+![localdisks][localdisks-badge]
+
+✓ [Bazarr NAS](bazarr/) : Companion application to Sonarr and Radarr to download subtitles
   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbazarr%2Fconfig.json)
 ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbazarr%2Fupdater.json)
@@ -147,7 +158,7 @@ If you want to do
add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Booksonic-air](booksonic_air/) : platform for accessing the audiobooks you own wherever you are +✓ [Booksonic-air](booksonic_air/) : platform for accessing the audiobooks you own wherever you are   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbooksonic_air%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbooksonic_air%2Fupdater.json) @@ -157,7 +168,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Browserless Chromium](browserless_chrome/) : Chromium as a service container +✓ [Browserless Chromium](browserless_chrome/) : Chromium as a service container   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbrowserless_chrome%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbrowserless_chrome%2Fupdater.json) @@ -187,7 +198,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Castsponsorskip](sponsorblockcast/) : Skip YouTube ads and sponsorships on all local Google Cast devices +✓ [Castsponsorskip](sponsorblockcast/) : Skip YouTube ads and sponsorships on all local Google Cast devices   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsponsorblockcast%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsponsorblockcast%2Fupdater.json) @@ -195,7 +206,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Changedetection.io](changedetection.io/) : web page monitoring, notification and change detection +✓ [Changedetection.io](changedetection.io/) : web page monitoring, notification and change detection   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fupdater.json) @@ -235,7 +246,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Elasticsearch server](elasticsearch/) : Free and Open, Distributed, RESTful Search Engine +✓ [Elasticsearch server](elasticsearch/) : Free and Open, Distributed, RESTful Search Engine   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Felasticsearch%2Fconfig.json) ![aarch64][aarch64-badge] @@ 
-253,7 +264,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Epic Games Free](epicgamesfree/) : Automatically login and redeem promotional free games from Epic Games Store +✓ [Epic Games Free](epicgamesfree/) : Automatically login and redeem promotional free games from Epic Games Store   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fepicgamesfree%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fepicgamesfree%2Fupdater.json) @@ -261,7 +272,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ ![image](https://api.iconify.design/mdi/file-search.svg) [Filebrowser (9880x)](filebrowser/) : filebrowser provides a file managing interface within a specified directory and it can be used to upload, delete, preview, rename and edit your files +✓ ![image](https://api.iconify.design/mdi/file-search.svg) [Filebrowser (11150x)](filebrowser/) : filebrowser provides a file managing interface within a specified directory and it can be used to upload, delete, preview, rename and edit your files   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffilebrowser%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffilebrowser%2Fupdater.json) @@ -272,7 +283,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Firefly iii](fireflyiii/) : A free and open source personal finance manager +✓ [Firefly iii](fireflyiii/) : A free and open source personal finance manager   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffireflyiii%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffireflyiii%2Fupdater.json) @@ -281,7 +292,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![MariaDB][mariadb-badge] -✓ [Firefly iii Data Importer](fireflyiii_data_importer/) : Data importer for Firefly III (separate addon) +✓ [Firefly iii Data Importer](fireflyiii_data_importer/) : Data importer for Firefly III (separate addon)   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffireflyiii_data_importer%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffireflyiii_data_importer%2Fupdater.json) @@ -289,14 +300,14 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Firefly iii FinTS Importer](fireflyiii_fints_importer/) : Import financial 
transactions from your FinTS enabled bank into Firefly III +✓ [Firefly iii FinTS Importer](fireflyiii_fints_importer/) : Import financial transactions from your FinTS enabled bank into Firefly III   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffireflyiii_fints_importer%2Fconfig.json) ![aarch64][aarch64-badge] ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [FlareSolverr](flaresolverr/) : Proxy server to bypass Cloudflare protection +✓ [FlareSolverr](flaresolverr/) : Proxy server to bypass Cloudflare protection   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fflaresolverr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fflaresolverr%2Fupdater.json) @@ -304,7 +315,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Flexget](flexget/) : FlexGet is a multipurpose automation tool for all of your media +✓ [Flexget](flexget/) : FlexGet is a multipurpose automation tool for all of your media   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fflexget%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fflexget%2Fupdater.json) @@ -312,7 +323,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Free Games Claimer (NoVNC not working)](free_games_claimer/) : automatically claims free games on the Epic Games Store, Amazon Prime Gaming and GOG +✓ [Free Games Claimer (NoVNC not working)](free_games_claimer/) : automatically claims free games on the Epic Games Store, Amazon Prime Gaming and GOG   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffree_games_claimer%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Ffree_games_claimer%2Fupdater.json) @@ -320,7 +331,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Gazpar2mqtt](gazpar2mqtt/) : fetch GRDF data and publish data to a mqtt broker +✓ [Gazpar2mqtt](gazpar2mqtt/) : fetch GRDF data and publish data to a mqtt broker   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgazpar2mqtt%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgazpar2mqtt%2Fupdater.json) @@ -329,7 +340,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![mqtt][mqtt-badge] -✓ [Gitea](gitea/) : Gitea for Home Assistant +✓ [Gitea](gitea/) : 
Gitea for Home Assistant   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fupdater.json) @@ -337,7 +348,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Grav web server](grav/) : Fast, Simple, and Flexible, file-based Web-platform +✓ [Grav web server](grav/) : Fast, Simple, and Flexible, file-based Web-platform   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgrav%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgrav%2Fupdater.json) @@ -365,7 +376,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Inadyn](inadyn/) : Inadyn is a small and simple Dynamic DNS, DDNS, client with HTTPS support. A large number of dynamic dns providers are supported (https://github.com/troglobit/inadyn#supported-providers). For those that are not, you can use a custom provider as per this project's documentation (https://github.com/nalipaz/hassio-addons/blob/master/README.md) +✓ [Inadyn](inadyn/) : Inadyn is a small and simple Dynamic DNS, DDNS, client with HTTPS support. A large number of dynamic dns providers are supported (https://github.com/troglobit/inadyn#supported-providers). 
For those that are not, you can use a custom provider as per this project's documentation (https://github.com/nalipaz/hassio-addons/blob/master/README.md)   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Finadyn%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Finadyn%2Fupdater.json) @@ -373,7 +384,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Jackett NAS](jackett/) : Translates queries from apps (Sonarr, Sickrage, CouchPotato, Mylar, etc) into tracker-site-specific http queries, parses the html response, then sends results back to the requesting software +✓ [Jackett NAS](jackett/) : Translates queries from apps (Sonarr, Sickrage, CouchPotato, Mylar, etc) into tracker-site-specific http queries, parses the html response, then sends results back to the requesting software   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjackett%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjackett%2Fupdater.json) @@ -394,7 +405,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Jellyseerr](jellyseerr/) : fork of overseerr for jellyfin support +✓ [Jellyseerr](jellyseerr/) : fork of overseerr for jellyfin support   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjellyseerr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjellyseerr%2Fupdater.json) @@ -411,7 +422,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![ingress][ingress-badge] -✓ [Joplin Server](joplin/) : Self-hosted open source note-taking application +✓ [Joplin Server](joplin/) : Self-hosted open source note-taking application   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjoplin%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fjoplin%2Fupdater.json) @@ -419,7 +430,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Kometa](kometa/) : Python script to update metadata information for movies, shows, and collections as well as automatically build collections +✓ [Kometa](kometa/) : Python script to update metadata information for movies, shows, and collections as well as automatically build collections   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2kometa%2Fconfig.json) 
![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fkometa%2Fupdater.json) @@ -427,7 +438,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Lidarr NAS](lidarr/) : Music collection manager for Usenet and BitTorrent users +✓ [Lidarr NAS](lidarr/) : Music collection manager for Usenet and BitTorrent users   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Flidarr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Flidarr%2Fupdater.json) @@ -437,7 +448,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Linkwarden](linkwarden/) : collaborative bookmark manager to collect, organize, and preserve webpages and articles +✓ [Linkwarden](linkwarden/) : collaborative bookmark manager to collect, organize, and preserve webpages and articles   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Flinkwarden%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Flinkwarden%2Fupdater.json) @@ -473,7 +484,7 @@ If you want to do add the repository manually, please follow the procedure highl ![ingress][ingress-badge] ![mqtt][mqtt-badge] -✓ [Mylar3](mylar3/) : Automated comic book downloader for use with NZB and torrents +✓ [Mylar3](mylar3/) : Automated comic book downloader for use with NZB and torrents   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fmylar3%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fmylar3%2Fupdater.json) @@ -483,7 +494,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Navidrome](navidrome/) : Navidrome for Home Assistant +✓ [Navidrome](navidrome/) : Navidrome for Home Assistant   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fnavidrome%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fnavidrome%2Fupdater.json) @@ -513,7 +524,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Omada](omada/) : TP-Link Omada Controller +✓ [Omada](omada/) : TP-Link Omada Controller   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fomada%2Fconfig.json) 
![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fomada%2Fupdater.json) @@ -521,7 +532,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Omada v3.2](omada_v3/) : TP-Link Omada Controller +✓ [Omada v3.2](omada_v3/) : TP-Link Omada Controller   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fomada_v3%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fomada_v3%2Fupdater.json) @@ -529,7 +540,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Ombi](ombi/) : Self-hosted Plex Request and user management system +✓ [Ombi](ombi/) : Self-hosted Plex Request and user management system   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fombi%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fombi%2Fupdater.json) @@ -537,7 +548,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Openproject](openproject/) : Openproject for Home Assistant +✓ [Openproject](openproject/) : Openproject for Home Assistant   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fopenproject%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fopenproject%2Fupdater.json) @@ -545,7 +556,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Organizr](organizr/) : An HTPC/Homelab services organizer that is written in PHP +✓ [Organizr](organizr/) : An HTPC/Homelab services organizer that is written in PHP   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Forganizr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Forganizr%2Fupdater.json) @@ -553,7 +564,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Overseerr](overseerr/) : Request management and media discovery tool built to work with your existing Plex ecosystem +✓ [Overseerr](overseerr/) : Request management and media discovery tool built to work with your existing Plex ecosystem   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Foverseerr%2Fconfig.json) 
![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Foverseerr%2Fupdater.json) @@ -561,7 +572,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Papermerge](papermerge/) : Open source document management system (DMS) +✓ [Papermerge](papermerge/) : Open source document management system (DMS)   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpapermerge%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpapermerge%2Fupdater.json) @@ -582,7 +593,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Piwigo](piwigo/) : Piwigo is a photo gallery software for the web +✓ [Piwigo](piwigo/) : Piwigo is a photo gallery software for the web   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpiwigo%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpiwigo%2Fupdater.json) @@ -592,7 +603,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Plex NAS](plex/) : Plex organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices +✓ [Plex NAS](plex/) : Plex organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fplex%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fplex%2Fupdater.json) @@ -602,7 +613,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ ![image](https://api.iconify.design/mdi/docker.svg) [Portainer (11142x)](portainer/) : Manage your Docker environment with ease +✓ ![image](https://api.iconify.design/mdi/docker.svg) [Portainer (16729x)](portainer/) : Manage your Docker environment with ease   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fportainer%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fportainer%2Fupdater.json) @@ -611,7 +622,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![ingress][ingress-badge] -✓ [Portainer (11142x) Agent](portainer_agent/) : An agent used to manage all the resources in a Swarm cluster +✓ [Portainer (16729x) Agent](portainer_agent/) : An 
agent used to manage all the resources in a Swarm cluster   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fportainer_agent%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fportainer_agent%2Fupdater.json) @@ -620,7 +631,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![full_access][full_access-badge] -✓ [Postgres 15](postgres/) : Postgres 15 with pgvecto.rs support +✓ [Postgres 15](postgres/) : Postgres 15 with pgvecto.rs support   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpostgres%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fpostgres%2Fupdater.json) @@ -628,7 +639,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Prowlarr NAS](prowlarr/) : Torrent Trackers and Usenet indexers offering complete management ofSonarr, Radarr, Lidarr, and Readarr indexers with no per app setup required +✓ [Prowlarr NAS](prowlarr/) : Torrent Trackers and Usenet indexers offering complete management ofSonarr, Radarr, Lidarr, and Readarr indexers with no per app setup required   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fprowlarr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fprowlarr%2Fupdater.json) @@ -660,14 +671,14 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Repository Updater](addons_updater/) : Automatic addons update by aligning version tag with upstream releases +✓ [Repository Updater](addons_updater/) : Automatic addons update by aligning version tag with upstream releases   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Faddons_updater%2Fconfig.json) ![aarch64][aarch64-badge] ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Requestrr](requestrr/) : Chatbot used to simplify using services like Sonarr/Radarr/Ombi via the use of chat +✓ [Requestrr](requestrr/) : Chatbot used to simplify using services like Sonarr/Radarr/Ombi via the use of chat   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Frequestrr%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Frequestrr%2Fupdater.json) @@ -686,7 +697,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ ![image](https://api.iconify.design/mdi/arrow-down-bold-circle-outline.svg) 
[Sabnzbd](sabnzbd/) : Makes Usenet as simple and streamlined as possible by automating everything we can +✓ ![image](https://api.iconify.design/mdi/arrow-down-bold-circle-outline.svg) [Sabnzbd (5295x)](sabnzbd/) : Makes Usenet as simple and streamlined as possible by automating everything we can   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsabnzbd%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsabnzbd%2Fupdater.json) @@ -726,13 +737,14 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Signalk Server](signalk/) : An implementation of a Signal K central server for boats +✓ [Signalk Server](signalk/) : An implementation of a Signal K central server for boats   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsignalk%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fsignalk%2Fupdater.json) ![aarch64][aarch64-badge] ![amd64][amd64-badge] ![armv7][armv7-badge] +![full_access][full_access-badge] ✓ ![image](https://api.iconify.design/mdi/television-classic.svg) [Sonarr](sonarr/) : Can monitor multiple RSS feeds for new episodes of your favorite shows and will grab, sort and rename them @@ -774,7 +786,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Teamspeak server](teamspeak/) : voice communication for online gaming, education and training +✓ [Teamspeak server](teamspeak/) : voice communication for online gaming, education and training   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fteamspeak%2Fconfig.json) ![aarch64][aarch64-badge] @@ -803,7 +815,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Ubooquity](ubooquity/) : Free, lightweight, and easy-to-use home server for your comics and ebooks +✓ [Ubooquity](ubooquity/) : Free, lightweight, and easy-to-use home server for your comics and ebooks   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fubooquity%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fubooquity%2Fupdater.json) @@ -813,7 +825,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Unpackerr](unpackerr/) : Unpacks RARd files for Sonarr, Lidarr and Radarr +✓ [Unpackerr](unpackerr/) : Unpacks RARd files for Sonarr, Lidarr and Radarr   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Funpackerr%2Fconfig.json) 
![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Funpackerr%2Fupdater.json) @@ -834,7 +846,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Webtrees](webtrees/) : web's leading on-line collaborative genealogy application +✓ [Webtrees](webtrees/) : web's leading on-line collaborative genealogy application   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwebtrees%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwebtrees%2Fupdater.json) @@ -845,7 +857,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [Wger](wger/) : manage your personal workouts, weight and diet plans +✓ [Wger](wger/) : manage your personal workouts, weight and diet plans   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwger%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwger%2Fupdater.json) @@ -853,7 +865,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7no][armv7no-badge] -✓ [Whatsapper](whatsapper/) : Whatsapper for Home Assistant +✓ [Whatsapper](whatsapper/) : Whatsapper for Home Assistant   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwhatsapper%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fwhatsapper%2Fupdater.json) @@ -870,7 +882,7 @@ If you want to do add the repository manually, please follow the procedure highl ![armv7][armv7-badge] ![ingress][ingress-badge] -✓ [Xteve](xteve/) : M3U Proxy for Plex DVR and Emby Live TV +✓ [Xteve](xteve/) : M3U Proxy for Plex DVR and Emby Live TV   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fxteve%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fxteve%2Fupdater.json) @@ -878,7 +890,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [Zoneminder](zoneminder/) : A full-featured, open source, state-of-the-art video surveillance software system +✓ [Zoneminder](zoneminder/) : A full-featured, open source, state-of-the-art video surveillance software system   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzoneminder%2Fconfig.json) 
![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzoneminder%2Fupdater.json) @@ -898,7 +910,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [zzz_archived - Paperless NGX](zzz_archived_paperless_ngx/) : Scan, index and archive all your physical documents +✓ [zzz_archived - Paperless NGX](zzz_archived_paperless_ngx/) : Scan, index and archive all your physical documents   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_paperless_ngx%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_paperless_ngx%2Fupdater.json) @@ -909,7 +921,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [zzz_archived - Vaultwarden](zzz_archived_bitwarden/) : Deprecated - please use community version +✓ [zzz_archived - Vaultwarden](zzz_archived_bitwarden/) : Deprecated - please use community version   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_bitwarden%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_bitwarden%2Fupdater.json) @@ -917,7 +929,7 @@ If you want to do add the repository manually, please follow the procedure highl ![amd64][amd64-badge] ![armv7][armv7-badge] -✓ [zzz_archived : Code-server (VScode)](zzz_archived_code-server/) : Deprecated : Code-server is VS Code running on a remote server, accessible through the browser +✓ [zzz_archived : Code-server (VScode)](zzz_archived_code-server/) : Deprecated : Code-server is VS Code running on a remote server, accessible through the browser   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_code-server%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fzzz_archived_code-server%2Fupdater.json) @@ -927,7 +939,7 @@ If you want to do add the repository manually, please follow the procedure highl ![smb][smb-badge] ![localdisks][localdisks-badge] -✓ [zzz_archived - Plex meta manager](zzz_archived_plex_meta_manager/) : Deprecated : renamed to Kometa +✓ [zzz_archived - Plex meta manager](zzz_archived_plex_meta_manager/) : Deprecated : renamed to Kometa   ![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fplex_meta_manager%2Fconfig.json) ![Update](https://img.shields.io/badge/dynamic/json?label=Updated&query=%24.last_update&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fplex_meta_manager%2Fupdater.json) diff --git a/Stats b/Stats index 1a0e7c556..e910b6ed6 100644 --- a/Stats +++ b/Stats @@ 
-1,93 +1,94 @@ -2024-07-05 2024-06-28 2024-06-21 2024-06-14 2024-06-07 2024-05-31 2024-05-24 2024-05-24 2024-05-17 2024-05-10 2024-05-03 2024-04-26 2024-04-19 2024-04-12 2024-04-05 2024-03-29 2024-03-22 2024-03-15 2024-03-08 2024-03-01 2024-02-16 2024-02-09 2024-02-02 2024-01-26 2024-01-19 2024-01-12 2024-01-05 2023-12-22 2023-12-15 2023-12-08 2023-11-24 2023-11-17 2023-11-10 2023-11-03 2023-10-27 2023-10-20 2023-10-13 2023-10-06 2023-09-29 2023-09-22 2023-09-15 2023-09-08 2023-09-01 2023-08-25 2023-08-18 2023-08-11 2023-08-04 2023-07-28 2023-07-21 2023-07-14 2023-07-07 2023-06-30 2023-06-23 2023-06-16 2023-06-09 2023-06-02 2023-05-26 2023-05-19 2023-05-12 2023-05-05 2023-04-28 2023-04-21 2023-04-14 2023-04-07 2023-03-31 2023-03-24 2023-03-17 2023-03-10 2023-03-03 2023-02-24 2023-02-17 2023-02-10 2023-02-03 2023-01-27 2023-01-21 2023-01-13 2023-01-06 2022-12-30 2022-12-24 2022-12-16 2022-12-09 2022-12-08 Date -80926 72882 71830 70159 69833 67653 69012 69005 70922 69934 66029 66168 66686 64308 67070 65017 73318 76478 73677 74019 70426 62241 55701 53744 52326 49500 48175 42473 42336 39905 34679 46033 42786 42795 40675 39766 38360 36480 37779 40026 39455 41790 43334 50337 49386 48913 46173 43611 43648 42982 41225 38304 38828 30434 30232 30051 32193 31615 29210 26192 19917 20377 19409 19253 18223 18044 16127 15701 14436 13970 13610 14133 12886 12065 11500 10919 9777 7647 7966 4937 3908 3691 TOTAL -11142 8709 7725 7470 7186 6864 6583 6582 6246 5910 5512 5013 4412 5496 5045 4389 12880 12601 12221 11881 10434 7402 5947 5571 5139 4655 4160 3131 2395 2557 2129 5838 5509 5176 4858 4541 4136 3752 3228 3684 3192 5490 5246 4979 4713 4452 4159 3836 3472 3016 4488 4238 4010 3585 3255 2851 3665 3335 2880 3319 2766 4137 3852 3527 3207 2848 2476 2542 2400 2836 2366 3080 2786 2538 2238 1987 1630 1327 1176 847 588 555 portainer -9880 9545 9204 8880 8509 8102 7701 7701 7265 6739 6139 5257 6152 5411 9957 9569 9181 8879 8506 8131 7318 6854 6384 5921 5304 4283 4406 3468 5309 4756 3599 6078 5712 5363 5001 4643 4144 3597 4034 3492 4252 3817 3241 8250 8000 7767 7521 7293 7034 6803 6575 6325 6136 5808 5613 5426 5247 5029 4775 4482 4212 3978 3759 3498 3263 3035 2810 2532 2229 1960 1569 2036 1730 1356 1359 1359 1070 888 727 477 285 264 filebrowser -2505 2487 2464 2450 2440 2423 2407 2407 2384 2349 2303 2264 2222 2190 2150 2095 2068 2031 1999 1974 1910 1883 1845 1813 1780 1736 1693 1612 1574 1546 1469 1429 1378 1343 1321 1301 1277 1260 1239 1223 1199 1191 1178 1167 1122 1079 1036 1001 959 915 841 787 778 647 635 622 618 589 527 451 353 326 313 300 289 277 262 252 240 227 218 193 183 168 142 126 98 88 75 39 30 29 arpspoof -2357 2342 2327 2310 2299 2289 2268 2268 2249 2215 2165 2131 2088 2053 2010 1967 1936 1903 1867 1837 1767 1733 1693 1656 1626 1578 1540 1468 1429 1388 1314 1285 1253 1208 1183 1176 1163 1152 1141 1134 1125 1113 1105 1084 1038 995 949 907 867 827 759 707 700 577 568 557 544 519 461 389 283 261 249 238 231 219 210 195 189 179 166 152 133 123 108 98 80 66 51 23 15 14 xteve -2290 2283 2275 2262 2252 2241 2234 2234 2205 2180 2142 2105 2076 2030 1989 1939 1910 1884 1855 1830 1780 1737 1692 1669 1642 1605 1564 1494 1452 1414 1345 1318 1279 1245 1213 1208 1196 1185 1176 1168 1153 1141 1137 1129 1084 1045 1000 960 919 877 805 749 741 608 595 582 577 541 481 414 313 288 275 263 249 240 231 214 200 190 171 157 148 134 110 100 85 78 70 37 25 22 organizr -2213 814 726 627 609 775 652 651 2399 2319 2230 2155 2068 1983 1896 1811 1727 1643 1565 1462 1259 1167 1071 971 860 768 660 460 351 344 358 373 330 692 630 570 487 
426 1712 1662 1590 1542 1500 1457 1373 1302 1234 1154 1075 995 896 818 788 619 561 523 461 375 261 205 495 448 409 353 291 331 285 274 254 173 262 268 268 251 193 193 225 172 181 156 201 196 jellyfin -2080 2063 2044 2032 2009 1985 1968 1968 1946 1910 1853 1801 1755 1702 1655 1599 1562 1514 1484 1460 1380 1342 1298 1260 1219 1173 1125 1032 995 957 869 825 766 723 687 659 636 619 608 588 576 552 540 529 477 415 364 306 255 200 119 864 836 693 659 637 621 586 516 434 302 271 235 212 178 144 121 76 188 170 121 148 113 223 174 119 117 123 104 45 19 14 zoneminder -2007 2001 1990 1986 1979 1967 1961 1961 1941 1909 1872 1828 1796 1762 1724 1676 1650 1626 1607 1583 1534 1508 1483 1463 1436 1401 1378 1297 1259 1218 1156 1117 1083 1054 1037 1023 1017 1007 1003 1000 994 988 982 977 946 909 867 829 785 747 679 628 617 489 480 472 462 432 376 310 215 189 177 170 166 158 153 138 126 114 104 89 87 82 64 52 46 43 37 19 13 13 teamspeak -1822 130 118 105 97 150 145 145 113 103 273 250 224 200 167 142 107 212 192 175 130 104 196 171 149 122 356 299 277 256 213 188 166 135 114 105 93 77 105 99 94 85 78 104 81 83 84 77 72 94 85 58 103 50 49 47 61 82 94 120 49 44 31 33 28 31 27 23 20 16 11 7 2 - - - - - - - - - sabnzbd -1808 1806 1804 1799 1796 1793 1785 1785 1768 1739 1693 1658 1620 1596 1555 1514 1486 1467 1441 1419 1377 1350 1328 1304 1279 1236 1209 1151 1128 1098 1035 1005 968 931 912 908 905 900 895 893 889 888 879 874 843 810 777 742 708 676 615 563 560 438 436 431 427 396 344 279 188 169 150 143 139 133 123 105 83 125 117 108 92 89 68 53 47 38 29 13 7 7 gazpar2mqtt -1754 1750 1739 1730 1726 1723 1721 1721 1711 1685 1649 1621 1589 1559 1524 1485 1448 1430 1407 1382 1342 1313 1288 1264 1237 1209 1176 1106 1079 1048 994 970 929 903 889 885 883 878 869 866 858 856 853 850 817 784 749 711 675 645 580 519 509 385 381 372 366 335 287 227 136 119 110 106 98 89 82 71 67 62 56 49 44 37 68 68 57 41 37 19 10 9 ubooquity -1466 1430 1381 1346 1303 1266 1233 1232 1151 1091 1008 940 880 806 732 628 530 5291 5234 5190 4304 1538 380 500 412 312 700 568 509 427 424 348 333 387 322 281 232 254 219 228 196 161 198 208 770 723 674 621 571 521 431 363 343 205 187 172 156 104 338 263 152 113 92 74 54 35 14 - - - - - - - - - - - - - - - sponsorblockcast -1455 1450 1442 1439 1434 1432 1425 1425 1407 1381 1347 1316 1284 1250 1215 1176 1142 1118 1094 1069 1020 992 966 940 905 877 842 775 750 723 667 634 594 564 547 540 536 529 521 516 514 512 508 501 464 427 391 356 314 277 209 159 153 29 287 280 277 248 197 130 34 39 26 55 46 42 37 29 22 26 55 51 43 37 44 44 40 31 30 12 8 7 tdarr -1455 1445 1443 1430 1423 1417 1409 1409 1396 1375 1355 1326 1298 1269 1248 1219 1197 1180 1151 1132 1077 1056 1025 1010 991 968 935 879 858 832 788 764 741 720 706 702 698 687 681 676 670 662 659 649 620 590 559 529 503 478 422 385 376 292 279 269 260 232 193 146 76 47 61 49 35 39 31 17 9 - - - - - - - - - - - - - binance_trading_bot -1417 1321 1214 1112 981 1255 1165 1165 1059 933 801 647 1201 1063 887 1843 1734 1654 1540 1439 1180 1009 832 615 803 787 557 181 428 826 472 819 643 582 819 712 590 485 894 828 739 638 1466 1404 1300 1216 1123 1019 916 817 672 526 610 384 352 1101 1049 963 847 694 481 709 627 556 461 359 363 504 363 293 364 340 340 325 323 323 337 312 407 266 240 225 qbittorrent -1357 1314 1258 1179 1093 548 2125 2125 2052 1970 1870 1794 1714 1654 1561 1466 1383 1282 1092 910 927 1710 1628 1527 1419 1289 1169 916 951 831 477 1416 1310 1145 1001 874 811 755 653 860 797 726 640 511 860 753 639 533 1392 1321 1240 1163 1124 965 939 910 887 836 
761 656 515 492 834 803 755 709 672 607 450 490 416 365 279 254 504 397 403 272 289 35 59 39 myelectricaldata -1346 1346 1346 1346 1346 1346 1346 1346 1334 1314 1280 1256 1228 1194 1169 1143 1123 1101 1085 1071 1036 1020 1004 988 962 947 923 875 853 831 783 759 728 704 691 691 691 691 691 691 691 691 691 691 659 629 599 569 539 508 446 398 398 278 278 275 275 251 203 143 53 38 31 31 31 31 31 31 30 30 120 110 99 91 68 65 53 42 35 16 11 9 papermerge -1345 1310 1255 1202 1163 1115 1071 1071 999 950 886 831 764 708 633 537 467 399 322 243 341 263 170 690 629 561 493 376 313 252 128 164 254 194 158 130 95 69 51 37 72 29 289 274 226 183 141 102 62 16 223 166 163 31 18 5 - - - - - - - - - - - - - - - - - - - - - - - - - - postgres -1338 1327 1321 1315 1309 1303 1292 1292 1282 1256 1231 1210 1188 1166 1142 1111 1092 1072 1043 1026 976 956 933 912 896 877 853 810 793 772 730 707 680 663 647 641 637 635 628 621 615 610 607 597 571 545 523 498 474 449 404 369 363 276 269 263 251 225 189 146 83 70 62 59 54 49 41 26 90 85 86 78 72 69 52 47 45 31 31 13 7 7 elasticsearch -1297 1271 1246 1234 1222 1207 1194 1194 1182 1154 1128 1100 1076 1056 1026 991 963 948 924 898 859 832 807 779 758 723 695 627 599 566 516 486 450 418 402 386 377 357 340 328 318 302 296 283 245 207 169 131 308 280 226 183 166 562 550 541 522 477 417 348 242 221 209 196 186 180 171 145 144 133 119 102 103 150 132 119 99 73 64 33 27 27 overseerr -1227 1205 1178 1146 1126 1097 1076 1076 1044 1001 951 907 866 824 788 736 696 664 617 574 498 466 410 364 311 262 202 369 317 245 129 224 169 715 685 653 621 597 573 553 519 490 465 440 399 358 301 255 196 146 77 22 124 40 26 37 6 - - - - - - - - - - - - - - - - - - - - - - - - - free_games_claimer -1155 1136 1099 1064 1037 993 962 961 905 860 809 756 705 652 582 506 412 1111 1054 1013 923 877 835 789 732 687 631 531 490 453 322 357 547 492 453 420 350 302 247 242 199 169 137 415 352 660 606 554 506 456 367 281 404 279 291 215 394 345 272 137 235 373 337 306 282 232 284 228 298 268 243 201 170 122 188 144 176 150 131 91 59 57 guacamole -1045 382 370 378 394 388 483 483 417 416 460 379 428 23 433 430 436 426 352 356 403 457 460 457 474 485 468 454 518 439 377 430 422 336 384 391 65 65 65 65 65 65 65 65 35 1424 1360 1294 1246 1191 1107 1033 1008 865 829 803 773 722 639 546 425 380 346 320 283 247 246 288 248 191 243 192 192 217 146 146 121 90 88 55 37 35 sonarr_nas -1038 1034 1019 1006 998 988 983 983 966 938 903 873 833 791 748 698 661 629 603 573 520 478 440 412 382 346 305 237 201 164 87 184 138 196 168 156 147 133 124 117 109 96 91 767 725 685 650 613 579 539 471 416 406 278 270 249 242 215 154 75 103 76 54 259 251 240 232 218 216 204 199 180 168 148 135 119 97 82 72 41 32 29 whoogle-search -1034 1028 1024 1014 1008 999 993 993 978 952 909 877 844 809 774 724 693 673 641 614 554 518 482 454 424 384 341 276 236 206 146 106 73 35 11 8 6 1 546 535 527 519 512 504 467 436 404 365 326 285 217 163 157 30 59 48 44 291 240 174 77 51 40 34 32 54 46 38 29 22 38 34 19 25 61 61 50 49 43 21 17 17 seafile -1028 1021 1015 1015 1008 1004 1002 1002 975 959 924 889 859 827 789 750 714 694 674 648 607 577 554 528 511 476 442 377 348 317 248 216 180 149 128 119 110 104 98 92 85 75 64 608 570 529 482 448 410 376 307 258 250 118 109 91 82 341 287 220 121 103 90 88 84 80 73 59 93 92 87 81 73 65 50 40 39 30 24 9 4 4 ombi -992 977 956 947 938 918 901 901 883 844 798 763 715 683 638 586 546 520 485 456 386 347 311 272 244 198 133 218 170 135 372 325 281 228 200 182 165 157 144 135 111 83 1008 994 954 904 863 824 772 
735 657 594 577 444 422 414 397 360 297 222 109 95 256 245 233 216 204 183 162 148 140 117 90 72 145 145 126 80 97 67 54 54 transmission_openvpn -887 883 882 875 868 860 856 856 834 806 766 728 695 659 622 585 560 534 511 489 436 405 371 339 300 276 242 169 132 99 33 98 59 531 500 488 476 476 475 473 467 462 462 461 431 401 366 336 304 271 202 153 152 30 28 19 334 306 256 194 101 88 82 76 74 71 67 63 61 55 55 51 50 48 34 33 31 32 32 20 18 18 joal -886 873 860 841 828 813 791 791 770 738 697 656 621 587 555 518 485 458 422 400 329 288 249 217 182 385 335 256 219 193 116 155 113 143 274 252 238 215 201 185 172 156 129 137 149 139 46 139 144 154 152 108 146 111 101 61 103 46 111 99 102 84 21 107 94 84 101 105 88 51 23 66 10 86 22 22 73 72 81 69 73 69 calibre_web -822 803 780 765 743 725 712 712 686 665 634 607 579 541 504 467 440 410 382 354 285 250 203 159 266 227 185 247 218 187 86 213 157 166 129 173 156 140 117 450 433 421 409 386 344 306 257 215 164 272 199 129 331 194 184 171 153 247 177 97 203 180 151 138 120 104 108 150 133 116 96 159 149 130 105 85 69 74 81 48 24 23 joplin -811 804 793 783 780 776 768 768 753 728 686 654 625 587 552 507 477 453 426 399 344 322 292 261 239 215 189 115 79 43 194 166 130 93 78 75 63 58 49 831 824 821 819 806 767 728 695 664 632 599 534 485 474 348 344 335 328 297 248 188 88 71 62 59 54 51 41 31 27 79 70 63 56 46 29 25 55 49 45 27 23 22 spotweb -799 654 690 629 718 632 730 730 641 477 327 627 716 622 809 705 595 554 648 583 619 519 286 6 972 914 853 732 689 641 533 485 421 347 289 233 808 771 732 710 680 660 631 607 567 520 468 415 372 330 258 197 168 175 142 230 203 153 169 321 231 185 129 606 588 562 542 501 465 434 404 373 368 328 276 192 142 7 90 53 38 35 mealie -778 764 745 721 700 676 655 655 609 571 512 451 399 344 289 208 645 601 560 524 450 404 346 289 228 147 714 624 564 521 419 380 323 270 231 200 173 144 218 200 177 158 140 394 350 301 250 205 139 466 376 315 298 149 133 531 520 478 417 328 216 186 170 155 138 121 88 303 282 266 241 221 198 168 148 125 102 87 80 45 21 19 addons_updater -761 752 740 737 730 712 698 698 679 656 614 578 547 514 477 433 401 377 339 309 264 230 206 175 142 106 47 386 351 321 249 212 171 134 107 97 83 77 71 568 563 556 551 548 505 469 431 393 358 324 260 206 199 69 63 55 407 373 323 255 157 137 126 120 114 109 104 98 91 86 79 78 74 71 59 46 40 34 29 12 7 7 inadyn -749 733 720 707 694 683 678 678 665 642 612 585 564 538 510 473 451 431 414 393 346 318 295 267 231 207 188 126 100 138 74 98 53 267 248 234 214 206 195 189 186 178 171 162 129 94 50 548 510 485 435 399 395 307 304 293 290 274 236 184 115 94 81 76 66 52 43 169 153 143 138 131 119 111 89 75 65 55 46 29 17 15 wger -740 488 538 352 732 65 65 65 45 31 9 1160 1016 883 730 1132 1001 867 625 1101 836 665 798 653 479 339 619 457 546 315 302 760 579 750 620 443 453 545 403 455 342 211 353 203 429 700 576 419 500 653 653 545 456 558 499 404 758 649 497 310 481 367 209 298 299 359 394 249 365 269 316 429 325 238 468 468 405 328 283 193 124 94 nextcloud_ocr -738 648 1184 1137 1102 1061 1015 1015 941 850 772 679 593 499 716 602 499 390 82 1556 1399 1331 1255 1164 1085 995 885 673 525 412 1073 989 910 836 771 719 668 628 572 513 452 380 354 285 267 265 291 272 271 280 283 234 316 313 246 225 233 263 271 321 319 216 211 213 221 274 203 206 277 145 202 333 260 277 254 254 294 218 282 205 204 195 transmission_ls -707 679 654 630 605 577 554 554 516 473 423 377 325 274 125 1223 1190 1165 1111 1075 994 953 910 868 824 778 728 640 600 553 483 438 378 330 281 252 224 200 169 
152 118 96 65 115 85 55 109 80 984 928 841 778 758 610 579 562 544 496 427 342 226 183 149 140 143 130 137 156 125 97 113 118 152 127 122 122 120 86 122 79 67 65 webtop_kde -678 590 1025 970 921 863 814 814 718 619 84 839 764 687 562 586 584 595 501 501 511 773 675 579 696 583 782 539 522 543 1171 1097 1007 918 855 785 726 673 615 573 503 447 463 659 583 485 623 537 452 622 531 441 492 465 567 496 392 596 490 620 472 711 651 561 472 692 642 565 495 397 374 437 353 493 407 393 377 98 359 91 321 313 epicgamesfree -651 634 620 603 579 560 539 539 482 438 384 335 282 31 352 279 329 271 362 305 334 268 162 250 468 419 374 272 215 216 336 289 234 187 151 116 251 233 218 208 189 167 223 193 452 411 365 320 266 209 196 150 240 99 176 145 179 407 339 251 259 232 211 194 169 139 200 167 150 123 143 132 184 157 93 114 94 73 52 44 34 32 tandoor_recipes -565 560 558 553 551 548 544 544 533 513 487 470 449 430 409 385 365 352 335 318 281 259 240 215 199 178 160 109 91 71 26 85 54 68 53 49 39 36 27 584 580 579 575 572 549 525 503 477 454 429 381 342 339 255 254 250 246 225 192 150 88 76 68 63 60 58 55 50 47 45 41 39 33 29 20 16 8 24 18 5 7 7 fireflyiii_fints_importer -532 491 433 489 410 503 436 436 450 449 532 449 461 554 457 530 443 443 398 475 384 439 446 549 461 535 441 495 425 504 337 376 385 374 343 346 385 324 367 312 232 316 413 358 352 353 348 339 437 364 400 303 364 260 310 306 404 326 354 432 288 279 250 254 307 243 252 236 226 196 225 281 231 243 288 288 237 181 298 226 195 195 plex_nas -521 488 461 424 393 344 476 476 414 347 512 456 403 355 401 334 133 119 576 541 447 410 358 295 264 448 389 299 251 302 211 414 353 302 262 247 217 187 157 192 166 135 498 476 419 364 309 262 199 143 296 229 207 53 47 28 41 - - - - - - - - - - - - - - - - - - - - - - - - - portainer_agent -473 439 402 375 347 312 273 273 202 148 158 878 828 762 702 625 552 493 419 359 224 128 46 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - whatsapper -425 357 335 456 420 382 597 597 530 477 392 553 489 405 471 388 469 383 556 496 347 437 359 443 367 374 343 448 388 337 179 395 316 336 271 357 302 257 238 270 232 244 329 281 412 348 283 272 442 382 286 222 293 262 224 340 313 251 386 287 220 217 217 249 224 215 226 214 395 371 339 300 287 251 207 169 163 144 110 68 47 47 fireflyiii -411 369 544 519 497 454 409 409 559 510 449 370 804 758 706 644 594 543 440 357 863 797 756 703 643 582 497 590 518 447 387 513 443 343 442 386 414 362 299 519 481 451 420 376 704 640 567 497 436 354 599 529 491 341 286 291 291 267 503 411 280 382 347 322 281 250 86 290 257 222 235 191 186 208 200 200 128 95 91 47 31 29 radarr_nas -409 393 366 350 331 315 287 287 241 339 293 239 290 246 227 232 231 453 408 373 307 250 230 416 383 352 307 231 221 217 146 216 171 125 213 191 229 206 161 201 173 283 262 235 175 334 278 228 176 484 413 357 342 215 204 187 164 467 406 331 227 198 174 147 114 147 130 184 179 157 135 114 107 114 102 102 87 72 61 33 22 20 cloudcommander -340 335 321 304 290 279 265 265 213 182 133 618 576 545 508 446 410 382 351 324 260 221 163 127 83 13 32 267 223 186 112 78 36 1065 1044 1031 1023 1011 1006 998 989 982 979 973 937 897 864 827 789 750 685 633 623 498 489 483 471 438 383 317 223 203 193 190 183 171 165 154 149 143 132 122 118 104 85 76 67 56 49 24 20 18 baikal -336 251 270 459 397 343 279 278 323 255 250 398 329 263 252 221 205 207 188 203 315 250 552 495 442 380 314 208 145 138 62 160 147 149 105 137 90 92 69 35 105 98 82 111 103 87 97 92 94 92 35 205 
194 100 80 54 74 74 174 124 42 34 46 58 53 70 60 48 21 23 29 22 29 28 2 2 0 - - - - - immich -335 331 327 326 324 322 315 315 276 248 213 181 150 120 82 39 1349 1322 1284 1267 1224 1197 1172 1144 1119 1091 1069 1019 994 973 921 887 856 829 815 810 804 802 801 797 795 790 787 785 751 715 679 647 613 579 511 461 457 335 331 324 322 298 247 184 91 75 67 64 63 60 60 58 55 52 51 49 45 41 28 24 25 22 18 4 1 1 requestrr -329 326 324 316 307 297 290 290 260 236 207 177 148 119 89 57 30 10 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - comixed -319 329 391 371 347 309 545 545 497 462 405 354 296 526 480 424 382 330 245 238 396 346 299 223 157 343 297 200 167 221 124 183 265 228 194 181 163 150 136 131 125 118 251 241 206 152 191 146 356 314 243 184 171 129 114 99 384 352 293 226 130 111 95 88 100 94 82 146 138 128 117 112 100 90 70 56 44 39 34 11 7 6 flaresolverr -315 292 268 236 474 439 416 416 377 340 297 256 202 345 307 259 205 274 229 645 568 541 501 457 433 392 345 256 218 180 100 274 230 196 166 484 470 453 438 426 419 404 385 371 332 300 271 242 208 179 124 79 503 411 391 371 363 335 289 233 159 131 131 266 256 237 225 203 184 171 150 150 136 113 91 89 71 43 60 38 22 20 scrutiny-fa -304 285 249 210 245 226 251 251 1141 1107 1059 1018 970 922 874 815 776 738 695 669 599 560 527 494 466 426 371 284 235 168 163 186 254 213 180 137 423 406 384 371 359 339 332 318 288 255 197 135 398 350 288 246 235 126 269 247 228 191 133 207 108 121 278 261 246 225 214 112 170 152 126 96 91 87 143 143 121 112 103 78 81 81 photoprism -301 299 295 295 293 293 293 293 258 244 209 185 152 128 94 64 48 184 162 146 103 79 54 40 97 70 41 121 98 76 28 105 75 48 35 34 29 27 25 22 21 20 17 52 120 87 50 390 359 327 263 214 211 90 87 84 81 56 189 129 38 26 20 17 17 46 43 40 34 28 27 23 21 20 25 25 24 22 19 6 3 3 mylar3 -297 294 281 270 262 253 238 238 196 168 130 204 172 133 456 408 376 360 323 302 249 218 188 156 120 469 432 360 332 296 227 188 156 121 101 94 88 77 66 87 79 72 57 70 84 88 88 83 83 107 104 47 184 54 60 60 79 94 198 131 68 52 46 44 41 55 53 47 51 44 53 44 37 56 59 45 42 33 63 33 32 31 plex-media-manager -288 282 279 276 274 266 256 256 235 206 172 134 92 179 136 95 682 655 626 589 513 481 443 405 343 302 269 198 160 125 46 187 128 81 57 112 109 99 90 87 75 71 67 66 29 187 150 115 74 644 571 518 510 386 384 376 374 352 301 240 145 113 107 106 101 97 90 84 74 64 67 61 56 51 29 29 20 8 44 28 26 26 webtrees -281 276 269 258 253 247 241 241 222 204 179 154 131 116 92 68 50 24 138 125 95 74 52 52 80 58 35 197 179 156 118 94 72 53 41 39 31 25 18 22 16 103 100 92 67 41 60 38 213 191 145 112 108 21 213 209 205 188 153 111 47 37 30 27 36 35 28 21 29 26 22 23 17 25 32 32 21 22 20 11 9 9 unpackerr -278 382 343 317 275 257 374 374 333 283 360 293 325 262 366 314 261 24 215 324 212 332 286 237 300 249 343 234 307 262 129 193 127 252 196 189 125 181 171 151 172 976 968 953 927 881 841 800 757 718 666 616 598 501 477 453 436 396 350 291 219 187 148 161 186 150 145 86 127 83 155 124 112 127 115 115 125 98 112 86 107 69 calibre -275 267 263 266 268 269 303 303 310 296 305 298 304 306 309 296 300 302 250 250 284 311 311 331 344 340 352 323 299 308 270 300 296 290 272 257 264 257 252 244 244 245 235 268 254 254 253 230 251 261 253 214 256 226 212 201 237 241 244 273 183 186 177 170 168 173 168 157 146 123 148 151 120 138 129 129 120 114 142 100 114 113 prowlarr -261 258 251 250 246 236 230 230 189 163 117 73 256 216 183 147 122 100 58 157 98 64 
186 155 125 102 72 70 594 563 494 459 429 395 377 371 362 354 351 347 341 333 331 325 286 248 202 160 120 82 300 250 244 115 113 107 99 67 267 201 100 84 67 59 47 39 35 25 17 15 38 32 23 26 32 32 22 19 53 33 27 26 piwigo -260 195 230 204 217 185 129 128 264 200 9 723 668 606 559 501 456 409 340 279 152 171 104 367 326 275 212 270 230 185 86 219 164 288 244 203 164 112 128 171 134 676 654 633 590 546 509 461 418 380 301 253 230 133 113 87 413 377 330 266 188 158 126 109 95 79 119 92 66 42 277 270 243 225 193 164 139 109 97 61 43 40 browserless_chrome -246 221 264 249 225 201 117 117 279 227 215 211 257 212 298 250 222 186 261 229 156 244 195 154 186 143 168 177 138 109 80 102 91 38 99 65 50 48 31 36 17 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - changedetection.io -243 228 206 192 168 149 135 135 102 73 254 220 185 143 111 53 21 3 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - linkwarden -242 234 235 245 253 239 283 282 275 275 287 267 268 279 286 253 274 272 234 222 2600 1318 270 257 258 211 268 250 236 241 215 229 237 221 205 203 187 195 195 194 195 195 192 237 231 219 224 219 233 244 241 189 247 188 193 187 220 184 239 253 183 177 181 171 169 161 159 154 148 110 134 140 111 129 118 118 94 106 136 104 95 95 jackett_nas -218 209 197 182 159 318 305 305 276 253 221 180 252 218 166 142 140 171 127 32 170 141 167 129 209 183 148 134 56 133 65 83 160 130 109 97 84 85 76 91 77 133 113 94 129 92 132 101 85 117 61 310 299 215 209 203 189 163 114 134 102 68 20 69 62 94 87 74 59 45 79 72 63 53 39 57 38 45 32 21 14 12 gitea -187 167 150 126 353 337 326 326 283 244 205 167 125 237 209 174 128 220 164 606 540 497 460 420 386 345 308 237 200 150 70 220 177 144 114 797 792 778 770 757 751 740 732 717 684 658 631 603 575 545 497 459 442 348 335 320 313 290 248 188 112 91 104 158 144 132 119 103 81 130 127 112 97 74 81 81 67 48 84 58 58 58 scrutiny -184 154 131 148 129 362 340 340 284 246 205 159 1035 987 951 900 859 829 801 754 665 629 599 568 529 493 443 376 342 313 239 204 166 130 107 99 88 79 66 52 57 44 37 610 561 521 480 440 399 357 281 228 223 88 78 65 55 102 45 26 - - - - - - - - - - - - - - - - - - - - - - jellyseerr -179 172 163 147 614 608 597 597 561 521 484 447 413 385 350 308 270 241 187 153 173 473 435 404 374 338 302 217 176 128 120 259 214 172 143 183 160 151 144 128 359 352 343 331 293 251 201 145 154 253 174 445 441 311 299 287 268 227 169 206 101 158 140 132 122 117 106 98 127 120 117 107 93 80 84 84 49 103 98 75 69 68 bazarr -179 165 151 144 157 142 188 188 179 191 185 177 170 240 181 182 153 211 147 114 109 197 177 173 170 164 158 164 200 157 115 135 142 134 128 120 124 82 107 115 126 102 100 129 136 555 509 463 420 380 309 250 239 105 131 108 127 228 159 181 112 108 90 105 286 266 248 221 200 179 161 123 107 85 150 150 145 127 99 60 72 66 emby -156 149 143 138 131 122 110 110 215 188 147 110 365 330 292 248 216 185 148 120 385 355 318 296 262 234 199 131 334 295 228 191 150 120 94 81 105 97 94 91 84 79 77 110 116 111 104 101 110 139 133 77 205 76 55 71 141 109 224 150 82 76 66 64 75 69 60 60 62 50 67 58 43 65 58 58 52 42 68 51 45 44 lidarr_nas -141 150 177 162 225 215 195 195 191 318 292 257 218 318 287 238 339 282 381 465 478 403 378 402 375 445 370 350 352 319 150 1274 1214 1155 1121 1094 1070 1043 1025 1003 979 956 942 922 868 814 764 709 657 605 511 433 400 238 311 284 252 227 137 301 244 187 351 322 301 262 236 202 158 111 176 148 152 108 134 134 123 84 104 57 
86 80 paperless_ng -140 247 237 227 211 194 173 173 128 317 274 219 267 215 150 2 314 281 237 181 190 241 197 206 327 287 225 307 267 228 147 243 197 136 225 212 198 184 162 151 135 127 122 328 285 239 194 141 222 180 114 253 243 110 427 419 414 382 327 256 156 134 120 99 86 95 177 156 131 132 118 103 84 83 77 73 62 51 47 23 16 15 fireflyiii_data_importer -133 133 133 129 128 128 128 128 110 86 344 320 293 262 227 201 175 155 133 119 92 69 43 24 1 319 287 213 184 145 75 95 181 145 122 116 105 96 77 71 60 50 218 212 175 138 88 130 87 628 556 499 491 367 362 353 343 314 260 193 90 63 50 51 207 193 186 167 160 149 140 133 119 103 80 64 50 39 26 7 4 4 vaultwarden -126 111 96 75 143 128 113 113 66 1929 1888 1853 1821 1790 1754 1705 1684 1660 1631 1603 1532 1495 1457 1431 1393 1352 1310 1240 1202 1168 1107 1067 1033 994 976 971 962 952 939 933 928 923 917 906 870 837 797 757 719 680 615 566 556 435 426 412 406 377 323 259 162 142 128 117 108 94 83 76 61 57 55 45 39 31 37 37 19 11 28 12 10 8 resiliosync -119 113 107 101 92 87 75 75 240 225 190 167 151 131 101 72 282 266 246 226 182 158 132 114 97 72 301 254 227 207 154 125 100 72 53 44 39 266 262 256 247 239 230 219 182 146 108 67 265 229 163 109 103 58 45 126 114 84 236 170 75 56 42 35 30 13 9 42 27 98 87 73 72 65 45 38 55 48 43 21 10 9 grav -102 86 95 155 152 146 134 134 123 220 179 137 133 265 223 192 159 178 158 292 171 233 167 389 342 290 229 265 209 154 102 290 227 169 120 186 155 125 110 326 306 290 267 244 195 144 202 143 305 255 166 208 176 195 181 152 128 371 305 232 118 194 161 140 119 88 98 146 119 104 139 121 98 75 155 155 134 110 95 82 17 17 code_server -84 82 73 67 70 66 446 446 413 391 350 322 293 268 237 184 131 113 77 55 50 86 81 165 136 112 85 83 161 119 53 129 91 132 105 89 86 73 64 63 57 58 52 93 116 70 74 73 73 331 269 216 203 80 77 72 61 263 211 144 49 39 99 87 78 72 69 63 54 28 17 20 17 22 69 62 41 28 45 18 7 5 myelectricaldata_dev -78 73 79 72 78 81 97 97 89 99 92 96 90 92 95 86 80 85 57 76 87 96 90 86 91 109 143 92 105 69 103 148 153 149 165 134 160 124 332 332 332 332 332 332 312 292 272 252 232 212 170 138 138 58 58 111 119 128 142 166 110 109 112 105 103 97 108 95 115 76 82 75 79 84 127 105 93 88 78 53 51 51 omada -66 61 62 61 60 61 92 92 1885 1858 1824 1786 1759 1714 1677 1638 1618 1595 1565 1544 1494 1456 1420 1392 1360 1322 1283 1219 1187 1156 1111 1078 1048 1018 990 984 977 968 957 952 950 944 941 936 894 856 819 781 747 711 645 594 584 458 449 443 440 405 352 292 194 174 160 154 150 146 138 129 115 112 109 100 94 72 62 62 76 61 84 57 50 50 nzbget -65 36 37 47 3 28 33 33 34 82 19 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - birdnet-pi -60 77 67 67 75 68 124 124 96 140 105 85 114 20 120 103 106 98 107 87 129 102 99 115 114 116 115 118 107 134 67 146 115 82 82 83 89 90 79 90 82 79 76 106 99 97 95 88 87 110 111 72 111 79 71 64 84 105 116 143 79 70 57 58 56 71 65 60 59 49 60 48 54 62 52 52 43 39 65 49 42 41 readarr_nas -58 49 34 27 19 10 20 20 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - openproject -52 42 149 140 130 115 108 108 72 157 117 83 161 117 80 149 128 106 59 98 99 62 61 54 218 186 145 70 89 55 30 92 52 35 30 24 40 40 27 40 186 173 163 154 113 73 337 299 262 222 151 99 87 68 64 52 47 132 76 133 36 29 21 14 22 13 1 - - - - - - - - - - - - - - - autobrr -48 37 35 37 37 39 80 80 54 73 62 69 117 96 64 57 58 57 61 41 
55 64 64 52 54 60 45 48 43 50 35 626 592 559 544 540 536 531 529 525 520 517 511 511 474 437 400 359 325 283 217 167 161 35 35 31 106 73 86 111 52 41 29 45 32 38 32 33 32 30 31 36 26 37 30 23 20 24 38 22 26 26 flexget -31 24 17 3 4 - 17 17 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - navidrome -28 34 35 46 2 49 151 151 111 70 68 15 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - birdnet-go -25 28 25 20 69 64 58 58 35 80 53 36 34 37 64 39 37 56 30 48 31 50 33 31 34 32 79 32 36 31 29 40 38 31 31 17 21 15 29 13 14 18 22 323 299 277 251 229 206 184 139 103 94 8 5 14 33 42 52 70 28 20 13 11 17 13 15 17 17 10 19 17 14 27 43 38 34 30 24 7 13 11 omada_v3 -19 19 27 41 28 18 122 122 93 61 36 46 27 28 28 21 42 21 6 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - codex -2 50 31 13 12 8 38 38 109 87 54 34 32 105 76 45 19 25 178 160 117 92 75 51 22 172 146 94 68 46 2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - signalk -- - - - - - - - - - - - - - 43 35 23 17 8 22 11 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - overleaf +2024-08-02 2024-07-26 2024-07-19 2024-07-12 2024-07-05 2024-06-28 2024-06-21 2024-06-14 2024-06-07 2024-05-31 2024-05-24 2024-05-24 2024-05-17 2024-05-10 2024-05-03 2024-04-26 2024-04-19 2024-04-12 2024-04-05 2024-03-29 2024-03-22 2024-03-15 2024-03-08 2024-03-01 2024-02-16 2024-02-09 2024-02-02 2024-01-26 2024-01-19 2024-01-12 2024-01-05 2023-12-22 2023-12-15 2023-12-08 2023-11-24 2023-11-17 2023-11-10 2023-11-03 2023-10-27 2023-10-20 2023-10-13 2023-10-06 2023-09-29 2023-09-22 2023-09-15 2023-09-08 2023-09-01 2023-08-25 2023-08-18 2023-08-11 2023-08-04 2023-07-28 2023-07-21 2023-07-14 2023-07-07 2023-06-30 2023-06-23 2023-06-16 2023-06-09 2023-06-02 2023-05-26 2023-05-19 2023-05-12 2023-05-05 2023-04-28 2023-04-21 2023-04-14 2023-04-07 2023-03-31 2023-03-24 2023-03-17 2023-03-10 2023-03-03 2023-02-24 2023-02-17 2023-02-10 2023-02-03 2023-01-27 2023-01-21 2023-01-13 2023-01-06 2022-12-30 2022-12-24 2022-12-16 2022-12-09 2022-12-08 Date +86596 92746 94295 90039 80926 72882 71830 70159 69833 67653 69012 69005 70922 69934 66029 66168 66686 64308 67070 65017 73318 76478 73677 74019 70426 62241 55701 53744 52326 49500 48175 42473 42336 39905 34679 46033 42786 42795 40675 39766 38360 36480 37779 40026 39455 41790 43334 50337 49386 48913 46173 43611 43648 42982 41225 38304 38828 30434 30232 30051 32193 31615 29210 26192 19917 20377 19409 19253 18223 18044 16127 15701 14436 13970 13610 14133 12886 12065 11500 10919 9777 7647 7966 4937 3908 3691 TOTAL +16729 16499 16274 14119 11142 8709 7725 7470 7186 6864 6583 6582 6246 5910 5512 5013 4412 5496 5045 4389 12880 12601 12221 11881 10434 7402 5947 5571 5139 4655 4160 3131 2395 2557 2129 5838 5509 5176 4858 4541 4136 3752 3228 3684 3192 5490 5246 4979 4713 4452 4159 3836 3472 3016 4488 4238 4010 3585 3255 2851 3665 3335 2880 3319 2766 4137 3852 3527 3207 2848 2476 2542 2400 2836 2366 3080 2786 2538 2238 1987 1630 1327 1176 847 588 555 portainer +11150 10830 10519 10206 9880 9545 9204 8880 8509 8102 7701 7701 7265 6739 6139 5257 6152 5411 9957 9569 9181 8879 8506 8131 7318 6854 6384 5921 5304 4283 4406 3468 5309 4756 3599 6078 5712 5363 5001 4643 
4144 3597 4034 3492 4252 3817 3241 8250 8000 7767 7521 7293 7034 6803 6575 6325 6136 5808 5613 5426 5247 5029 4775 4482 4212 3978 3759 3498 3263 3035 2810 2532 2229 1960 1569 2036 1730 1356 1359 1359 1070 888 727 477 285 264 filebrowser +5295 5289 5283 3973 1822 130 118 105 97 150 145 145 113 103 273 250 224 200 167 142 107 212 192 175 130 104 196 171 149 122 356 299 277 256 213 188 166 135 114 105 93 77 105 99 94 85 78 104 81 83 84 77 72 94 85 58 103 50 49 47 61 82 94 120 49 44 31 33 28 31 27 23 20 16 11 7 2 - - - - - - - - - sabnzbd +2561 2548 2530 2517 2505 2487 2464 2450 2440 2423 2407 2407 2384 2349 2303 2264 2222 2190 2150 2095 2068 2031 1999 1974 1910 1883 1845 1813 1780 1736 1693 1612 1574 1546 1469 1429 1378 1343 1321 1301 1277 1260 1239 1223 1199 1191 1178 1167 1122 1079 1036 1001 959 915 841 787 778 647 635 622 618 589 527 451 353 326 313 300 289 277 262 252 240 227 218 193 183 168 142 126 98 88 75 39 30 29 arpspoof +2404 2391 2381 2367 2357 2342 2327 2310 2299 2289 2268 2268 2249 2215 2165 2131 2088 2053 2010 1967 1936 1903 1867 1837 1767 1733 1693 1656 1626 1578 1540 1468 1429 1388 1314 1285 1253 1208 1183 1176 1163 1152 1141 1134 1125 1113 1105 1084 1038 995 949 907 867 827 759 707 700 577 568 557 544 519 461 389 283 261 249 238 231 219 210 195 189 179 166 152 133 123 108 98 80 66 51 23 15 14 xteve +2336 2324 2315 2303 2290 2283 2275 2262 2252 2241 2234 2234 2205 2180 2142 2105 2076 2030 1989 1939 1910 1884 1855 1830 1780 1737 1692 1669 1642 1605 1564 1494 1452 1414 1345 1318 1279 1245 1213 1208 1196 1185 1176 1168 1153 1141 1137 1129 1084 1045 1000 960 919 877 805 749 741 608 595 582 577 541 481 414 313 288 275 263 249 240 231 214 200 190 171 157 148 134 110 100 85 78 70 37 25 22 organizr +2174 2158 2129 2107 2080 2063 2044 2032 2009 1985 1968 1968 1946 1910 1853 1801 1755 1702 1655 1599 1562 1514 1484 1460 1380 1342 1298 1260 1219 1173 1125 1032 995 957 869 825 766 723 687 659 636 619 608 588 576 552 540 529 477 415 364 306 255 200 119 864 836 693 659 637 621 586 516 434 302 271 235 212 178 144 121 76 188 170 121 148 113 223 174 119 117 123 104 45 19 14 zoneminder +2049 2038 2030 2021 2007 2001 1990 1986 1979 1967 1961 1961 1941 1909 1872 1828 1796 1762 1724 1676 1650 1626 1607 1583 1534 1508 1483 1463 1436 1401 1378 1297 1259 1218 1156 1117 1083 1054 1037 1023 1017 1007 1003 1000 994 988 982 977 946 909 867 829 785 747 679 628 617 489 480 472 462 432 376 310 215 189 177 170 166 158 153 138 126 114 104 89 87 82 64 52 46 43 37 19 13 13 teamspeak +1824 1822 1818 1812 1808 1806 1804 1799 1796 1793 1785 1785 1768 1739 1693 1658 1620 1596 1555 1514 1486 1467 1441 1419 1377 1350 1328 1304 1279 1236 1209 1151 1128 1098 1035 1005 968 931 912 908 905 900 895 893 889 888 879 874 843 810 777 742 708 676 615 563 560 438 436 431 427 396 344 279 188 169 150 143 139 133 123 105 83 125 117 108 92 89 68 53 47 38 29 13 7 7 gazpar2mqtt +1779 1774 1768 1764 1754 1750 1739 1730 1726 1723 1721 1721 1711 1685 1649 1621 1589 1559 1524 1485 1448 1430 1407 1382 1342 1313 1288 1264 1237 1209 1176 1106 1079 1048 994 970 929 903 889 885 883 878 869 866 858 856 853 850 817 784 749 711 675 645 580 519 509 385 381 372 366 335 287 227 136 119 110 106 98 89 82 71 67 62 56 49 44 37 68 68 57 41 37 19 10 9 ubooquity +1711 1640 1561 1508 1417 1321 1214 1112 981 1255 1165 1165 1059 933 801 647 1201 1063 887 1843 1734 1654 1540 1439 1180 1009 832 615 803 787 557 181 428 826 472 819 643 582 819 712 590 485 894 828 739 638 1466 1404 1300 1216 1123 1019 916 817 672 526 610 384 352 1101 1049 963 847 694 481 709 627 
556 461 359 363 504 363 293 364 340 340 325 323 323 337 312 407 266 240 225 qbittorrent +1626 1581 1534 1510 1466 1430 1381 1346 1303 1266 1233 1232 1151 1091 1008 940 880 806 732 628 530 5291 5234 5190 4304 1538 380 500 412 312 700 568 509 427 424 348 333 387 322 281 232 254 219 228 196 161 198 208 770 723 674 621 571 521 431 363 343 205 187 172 156 104 338 263 152 113 92 74 54 35 14 - - - - - - - - - - - - - - - sponsorblockcast +1611 1513 1435 1394 1345 1310 1255 1202 1163 1115 1071 1071 999 950 886 831 764 708 633 537 467 399 322 243 341 263 170 690 629 561 493 376 313 252 128 164 254 194 158 130 95 69 51 37 72 29 289 274 226 183 141 102 62 16 223 166 163 31 18 5 - - - - - - - - - - - - - - - - - - - - - - - - - - postgres +1525 1477 1447 1412 1357 1314 1258 1179 1093 548 2125 2125 2052 1970 1870 1794 1714 1654 1561 1466 1383 1282 1092 910 927 1710 1628 1527 1419 1289 1169 916 951 831 477 1416 1310 1145 1001 874 811 755 653 860 797 726 640 511 860 753 639 533 1392 1321 1240 1163 1124 965 939 910 887 836 761 656 515 492 834 803 755 709 672 607 450 490 416 365 279 254 504 397 403 272 289 35 59 39 myelectricaldata +1491 1481 1476 1470 1455 1445 1443 1430 1423 1417 1409 1409 1396 1375 1355 1326 1298 1269 1248 1219 1197 1180 1151 1132 1077 1056 1025 1010 991 968 935 879 858 832 788 764 741 720 706 702 698 687 681 676 670 662 659 649 620 590 559 529 503 478 422 385 376 292 279 269 260 232 193 146 76 47 61 49 35 39 31 17 9 - - - - - - - - - - - - - binance_trading_bot +1482 1474 1471 1463 1455 1450 1442 1439 1434 1432 1425 1425 1407 1381 1347 1316 1284 1250 1215 1176 1142 1118 1094 1069 1020 992 966 940 905 877 842 775 750 723 667 634 594 564 547 540 536 529 521 516 514 512 508 501 464 427 391 356 314 277 209 159 153 29 287 280 277 248 197 130 34 39 26 55 46 42 37 29 22 26 55 51 43 37 44 44 40 31 30 12 8 7 tdarr +1407 1382 1357 1322 1297 1271 1246 1234 1222 1207 1194 1194 1182 1154 1128 1100 1076 1056 1026 991 963 948 924 898 859 832 807 779 758 723 695 627 599 566 516 486 450 418 402 386 377 357 340 328 318 302 296 283 245 207 169 131 308 280 226 183 166 562 550 541 522 477 417 348 242 221 209 196 186 180 171 145 144 133 119 102 103 150 132 119 99 73 64 33 27 27 overseerr +1346 1346 1346 1346 1346 1346 1346 1346 1346 1346 1346 1346 1334 1314 1280 1256 1228 1194 1169 1143 1123 1101 1085 1071 1036 1020 1004 988 962 947 923 875 853 831 783 759 728 704 691 691 691 691 691 691 691 691 691 691 659 629 599 569 539 508 446 398 398 278 278 275 275 251 203 143 53 38 31 31 31 31 31 31 30 30 120 110 99 91 68 65 53 42 35 16 11 9 papermerge +1319 1296 1275 1249 1227 1205 1178 1146 1126 1097 1076 1076 1044 1001 951 907 866 824 788 736 696 664 617 574 498 466 410 364 311 262 202 369 317 245 129 224 169 715 685 653 621 597 573 553 519 490 465 440 399 358 301 255 196 146 77 22 124 40 26 37 6 - - - - - - - - - - - - - - - - - - - - - - - - - free_games_claimer +1273 1233 1208 1185 1155 1136 1099 1064 1037 993 962 961 905 860 809 756 705 652 582 506 412 1111 1054 1013 923 877 835 789 732 687 631 531 490 453 322 357 547 492 453 420 350 302 247 242 199 169 137 415 352 660 606 554 506 456 367 281 404 279 291 215 394 345 272 137 235 373 337 306 282 232 284 228 298 268 243 201 170 122 188 144 176 150 131 91 59 57 guacamole +1082 1069 1061 1057 1034 1028 1024 1014 1008 999 993 993 978 952 909 877 844 809 774 724 693 673 641 614 554 518 482 454 424 384 341 276 236 206 146 106 73 35 11 8 6 1 546 535 527 519 512 504 467 436 404 365 326 285 217 163 157 30 59 48 44 291 240 174 77 51 40 34 32 54 46 38 29 22 38 34 19 25 61 
61 50 49 43 21 17 17 seafile +1080 1056 1050 1043 1038 1034 1019 1006 998 988 983 983 966 938 903 873 833 791 748 698 661 629 603 573 520 478 440 412 382 346 305 237 201 164 87 184 138 196 168 156 147 133 124 117 109 96 91 767 725 685 650 613 579 539 471 416 406 278 270 249 242 215 154 75 103 76 54 259 251 240 232 218 216 204 199 180 168 148 135 119 97 82 72 41 32 29 whoogle-search +1064 978 889 765 799 654 690 629 718 632 730 730 641 477 327 627 716 622 809 705 595 554 648 583 619 519 286 6 972 914 853 732 689 641 533 485 421 347 289 233 808 771 732 710 680 660 631 607 567 520 468 415 372 330 258 197 168 175 142 230 203 153 169 321 231 185 129 606 588 562 542 501 465 434 404 373 368 328 276 192 142 7 90 53 38 35 mealie +1054 1038 1029 1013 992 977 956 947 938 918 901 901 883 844 798 763 715 683 638 586 546 520 485 456 386 347 311 272 244 198 133 218 170 135 372 325 281 228 200 182 165 157 144 135 111 83 1008 994 954 904 863 824 772 735 657 594 577 444 422 414 397 360 297 222 109 95 256 245 233 216 204 183 162 148 140 117 90 72 145 145 126 80 97 67 54 54 transmission_openvpn +948 903 849 800 738 648 1184 1137 1102 1061 1015 1015 941 850 772 679 593 499 716 602 499 390 82 1556 1399 1331 1255 1164 1085 995 885 673 525 412 1073 989 910 836 771 719 668 628 572 513 452 380 354 285 267 265 291 272 271 280 283 234 316 313 246 225 233 263 271 321 319 216 211 213 221 274 203 206 277 145 202 333 260 277 254 254 294 218 282 205 204 195 transmission_ls +900 897 895 892 887 883 882 875 868 860 856 856 834 806 766 728 695 659 622 585 560 534 511 489 436 405 371 339 300 276 242 169 132 99 33 98 59 531 500 488 476 476 475 473 467 462 462 461 431 401 366 336 304 271 202 153 152 30 28 19 334 306 256 194 101 88 82 76 74 71 67 63 61 55 55 51 50 48 34 33 31 32 32 20 18 18 joal +840 835 830 819 811 804 793 783 780 776 768 768 753 728 686 654 625 587 552 507 477 453 426 399 344 322 292 261 239 215 189 115 79 43 194 166 130 93 78 75 63 58 49 831 824 821 819 806 767 728 695 664 632 599 534 485 474 348 344 335 328 297 248 188 88 71 62 59 54 51 41 31 27 79 70 63 56 46 29 25 55 49 45 27 23 22 spotweb +837 830 809 797 778 764 745 721 700 676 655 655 609 571 512 451 399 344 289 208 645 601 560 524 450 404 346 289 228 147 714 624 564 521 419 380 323 270 231 200 173 144 218 200 177 158 140 394 350 301 250 205 139 466 376 315 298 149 133 531 520 478 417 328 216 186 170 155 138 121 88 303 282 266 241 221 198 168 148 125 102 87 80 45 21 19 addons_updater +806 788 774 767 749 733 720 707 694 683 678 678 665 642 612 585 564 538 510 473 451 431 414 393 346 318 295 267 231 207 188 126 100 138 74 98 53 267 248 234 214 206 195 189 186 178 171 162 129 94 50 548 510 485 435 399 395 307 304 293 290 274 236 184 115 94 81 76 66 52 43 169 153 143 138 131 119 111 89 75 65 55 46 29 17 15 wger +805 771 744 723 707 679 654 630 605 577 554 554 516 473 423 377 325 274 125 1223 1190 1165 1111 1075 994 953 910 868 824 778 728 640 600 553 483 438 378 330 281 252 224 200 169 152 118 96 65 115 85 55 109 80 984 928 841 778 758 610 579 562 544 496 427 342 226 183 149 140 143 130 137 156 125 97 113 118 152 127 122 122 120 86 122 79 67 65 webtop_kde +788 781 776 769 761 752 740 737 730 712 698 698 679 656 614 578 547 514 477 433 401 377 339 309 264 230 206 175 142 106 47 386 351 321 249 212 171 134 107 97 83 77 71 568 563 556 551 548 505 469 431 393 358 324 260 206 199 69 63 55 407 373 323 255 157 137 126 120 114 109 104 98 91 86 79 78 74 71 59 46 40 34 29 12 7 7 inadyn +752 1073 986 897 740 488 538 352 732 65 65 65 45 31 9 1160 1016 883 730 1132 
1001 867 625 1101 836 665 798 653 479 339 619 457 546 315 302 760 579 750 620 443 453 545 403 455 342 211 353 203 429 700 576 419 500 653 653 545 456 558 499 404 758 649 497 310 481 367 209 298 299 359 394 249 365 269 316 429 325 238 468 468 405 328 283 193 124 94 nextcloud_ocr +671 579 781 718 678 590 1025 970 921 863 814 814 718 619 84 839 764 687 562 586 584 595 501 501 511 773 675 579 696 583 782 539 522 543 1171 1097 1007 918 855 785 726 673 615 573 503 447 463 659 583 485 623 537 452 622 531 441 492 465 567 496 392 596 490 620 472 711 651 561 472 692 642 565 495 397 374 437 353 493 407 393 377 98 359 91 321 313 epicgamesfree +652 5900 5837 4452 2213 814 726 627 609 775 652 651 2399 2319 2230 2155 2068 1983 1896 1811 1727 1643 1565 1462 1259 1167 1071 971 860 768 660 460 351 344 358 373 330 692 630 570 487 426 1712 1662 1590 1542 1500 1457 1373 1302 1234 1154 1075 995 896 818 788 619 561 523 461 375 261 205 495 448 409 353 291 331 285 274 254 173 262 268 268 251 193 193 225 172 181 156 201 196 jellyfin +612 594 558 535 521 488 461 424 393 344 476 476 414 347 512 456 403 355 401 334 133 119 576 541 447 410 358 295 264 448 389 299 251 302 211 414 353 302 262 247 217 187 157 192 166 135 498 476 419 364 309 262 199 143 296 229 207 53 47 28 41 - - - - - - - - - - - - - - - - - - - - - - - - - portainer_agent +583 580 577 574 565 560 558 553 551 548 544 544 533 513 487 470 449 430 409 385 365 352 335 318 281 259 240 215 199 178 160 109 91 71 26 85 54 68 53 49 39 36 27 584 580 579 575 572 549 525 503 477 454 429 381 342 339 255 254 250 246 225 192 150 88 76 68 63 60 58 55 50 47 45 41 39 33 29 20 16 8 24 18 5 7 7 fireflyiii_fints_importer +450 412 377 348 319 329 391 371 347 309 545 545 497 462 405 354 296 526 480 424 382 330 245 238 396 346 299 223 157 343 297 200 167 221 124 183 265 228 194 181 163 150 136 131 125 118 251 241 206 152 191 146 356 314 243 184 171 129 114 99 384 352 293 226 130 111 95 88 100 94 82 146 138 128 117 112 100 90 70 56 44 39 34 11 7 6 flaresolverr +411 417 415 415 532 491 433 489 410 503 436 436 450 449 532 449 461 554 457 530 443 443 398 475 384 439 446 549 461 535 441 495 425 504 337 376 385 374 343 346 385 324 367 312 232 316 413 358 352 353 348 339 437 364 400 303 364 260 310 306 404 326 354 432 288 279 250 254 307 243 252 236 226 196 225 281 231 243 288 288 237 181 298 226 195 195 plex_nas +376 368 362 354 340 335 321 304 290 279 265 265 213 182 133 618 576 545 508 446 410 382 351 324 260 221 163 127 83 13 32 267 223 186 112 78 36 1065 1044 1031 1023 1011 1006 998 989 982 979 973 937 897 864 827 789 750 685 633 623 498 489 483 471 438 383 317 223 203 193 190 183 171 165 154 149 143 132 122 118 104 85 76 67 56 49 24 20 18 baikal +364 385 1510 2162 1045 382 370 378 394 388 483 483 417 416 460 379 428 23 433 430 436 426 352 356 403 457 460 457 474 485 468 454 518 439 377 430 422 336 384 391 65 65 65 65 65 65 65 65 35 1424 1360 1294 1246 1191 1107 1033 1008 865 829 803 773 722 639 546 425 380 346 320 283 247 246 288 248 191 243 192 192 217 146 146 121 90 88 55 37 35 sonarr_nas +349 345 339 332 329 326 324 316 307 297 290 290 260 236 207 177 148 119 89 57 30 10 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - comixed +344 342 340 337 335 331 327 326 324 322 315 315 276 248 213 181 150 120 82 39 1349 1322 1284 1267 1224 1197 1172 1144 1119 1091 1069 1019 994 973 921 887 856 829 815 810 804 802 801 797 795 790 787 785 751 715 679 647 613 579 511 461 457 335 331 324 322 298 247 184 91 75 67 
64 63 60 60 58 55 52 51 49 45 41 28 24 25 22 18 4 1 1 requestrr +341 496 473 449 411 369 544 519 497 454 409 409 559 510 449 370 804 758 706 644 594 543 440 357 863 797 756 703 643 582 497 590 518 447 387 513 443 343 442 386 414 362 299 519 481 451 420 376 704 640 567 497 436 354 599 529 491 341 286 291 291 267 503 411 280 382 347 322 281 250 86 290 257 222 235 191 186 208 200 200 128 95 91 47 31 29 radarr_nas +341 310 270 684 651 634 620 603 579 560 539 539 482 438 384 335 282 31 352 279 329 271 362 305 334 268 162 250 468 419 374 272 215 216 336 289 234 187 151 116 251 233 218 208 189 167 223 193 452 411 365 320 266 209 196 150 240 99 176 145 179 407 339 251 259 232 211 194 169 139 200 167 150 123 143 132 184 157 93 114 94 73 52 44 34 32 tandoor_recipes +335 292 261 315 278 382 343 317 275 257 374 374 333 283 360 293 325 262 366 314 261 24 215 324 212 332 286 237 300 249 343 234 307 262 129 193 127 252 196 189 125 181 171 151 172 976 968 953 927 881 841 800 757 718 666 616 598 501 477 453 436 396 350 291 219 187 148 161 186 150 145 86 127 83 155 124 112 127 115 115 125 98 112 86 107 69 calibre +333 326 312 307 297 294 281 270 262 253 238 238 196 168 130 204 172 133 456 408 376 360 323 302 249 218 188 156 120 469 432 360 332 296 227 188 156 121 101 94 88 77 66 87 79 72 57 70 84 88 88 83 83 107 104 47 184 54 60 60 79 94 198 131 68 52 46 44 41 55 53 47 51 44 53 44 37 56 59 45 42 33 63 33 32 31 plex-media-manager +327 323 313 303 288 282 279 276 274 266 256 256 235 206 172 134 92 179 136 95 682 655 626 589 513 481 443 405 343 302 269 198 160 125 46 187 128 81 57 112 109 99 90 87 75 71 67 66 29 187 150 115 74 644 571 518 510 386 384 376 374 352 301 240 145 113 107 106 101 97 90 84 74 64 67 61 56 51 29 29 20 8 44 28 26 26 webtrees +315 312 308 307 301 299 295 295 293 293 293 293 258 244 209 185 152 128 94 64 48 184 162 146 103 79 54 40 97 70 41 121 98 76 28 105 75 48 35 34 29 27 25 22 21 20 17 52 120 87 50 390 359 327 263 214 211 90 87 84 81 56 189 129 38 26 20 17 17 46 43 40 34 28 27 23 21 20 25 25 24 22 19 6 3 3 mylar3 +305 285 260 225 409 393 366 350 331 315 287 287 241 339 293 239 290 246 227 232 231 453 408 373 307 250 230 416 383 352 307 231 221 217 146 216 171 125 213 191 229 206 161 201 173 283 262 235 175 334 278 228 176 484 413 357 342 215 204 187 164 467 406 331 227 198 174 147 114 147 130 184 179 157 135 114 107 114 102 102 87 72 61 33 22 20 cloudcommander +296 268 230 320 304 285 249 210 245 226 251 251 1141 1107 1059 1018 970 922 874 815 776 738 695 669 599 560 527 494 466 426 371 284 235 168 163 186 254 213 180 137 423 406 384 371 359 339 332 318 288 255 197 135 398 350 288 246 235 126 269 247 228 191 133 207 108 121 278 261 246 225 214 112 170 152 126 96 91 87 143 143 121 112 103 78 81 81 photoprism +290 288 301 304 336 251 270 459 397 343 279 278 323 255 250 398 329 263 252 221 205 207 188 203 315 250 552 495 442 380 314 208 145 138 62 160 147 149 105 137 90 92 69 35 105 98 82 111 103 87 97 92 94 92 35 205 194 100 80 54 74 74 174 124 42 34 46 58 53 70 60 48 21 23 29 22 29 28 2 2 0 - - - - - immich +279 260 221 902 886 873 860 841 828 813 791 791 770 738 697 656 621 587 555 518 485 458 422 400 329 288 249 217 182 385 335 256 219 193 116 155 113 143 274 252 238 215 201 185 172 156 129 137 149 139 46 139 144 154 152 108 146 111 101 61 103 46 111 99 102 84 21 107 94 84 101 105 88 51 23 66 10 86 22 22 73 72 81 69 73 69 calibre_web +259 264 256 255 275 267 263 266 268 269 303 303 310 296 305 298 304 306 309 296 300 302 250 250 284 311 311 331 344 340 352 323 299 308 270 300 296 290 
272 257 264 257 252 244 244 245 235 268 254 254 253 230 251 261 253 214 256 226 212 201 237 241 244 273 183 186 177 170 168 173 168 157 146 123 148 151 120 138 129 129 120 114 142 100 114 113 prowlarr +252 193 254 217 260 195 230 204 217 185 129 128 264 200 9 723 668 606 559 501 456 409 340 279 152 171 104 367 326 275 212 270 230 185 86 219 164 288 244 203 164 112 128 171 134 676 654 633 590 546 509 461 418 380 301 253 230 133 113 87 413 377 330 266 188 158 126 109 95 79 119 92 66 42 277 270 243 225 193 164 139 109 97 61 43 40 browserless_chrome +245 235 220 205 184 154 131 148 129 362 340 340 284 246 205 159 1035 987 951 900 859 829 801 754 665 629 599 568 529 493 443 376 342 313 239 204 166 130 107 99 88 79 66 52 57 44 37 610 561 521 480 440 399 357 281 228 223 88 78 65 55 102 45 26 - - - - - - - - - - - - - - - - - - - - - - jellyseerr +240 374 361 339 315 292 268 236 474 439 416 416 377 340 297 256 202 345 307 259 205 274 229 645 568 541 501 457 433 392 345 256 218 180 100 274 230 196 166 484 470 453 438 426 419 404 385 371 332 300 271 242 208 179 124 79 503 411 391 371 363 335 289 233 159 131 131 266 256 237 225 203 184 171 150 150 136 113 91 89 71 43 60 38 22 20 scrutiny-fa +228 238 233 230 242 234 235 245 253 239 283 282 275 275 287 267 268 279 286 253 274 272 234 222 2600 1318 270 257 258 211 268 250 236 241 215 229 237 221 205 203 187 195 195 194 195 195 192 237 231 219 224 219 233 244 241 189 247 188 193 187 220 184 239 253 183 177 181 171 169 161 159 154 148 110 134 140 111 129 118 118 94 106 136 104 95 95 jackett_nas +208 202 195 190 179 172 163 147 614 608 597 597 561 521 484 447 413 385 350 308 270 241 187 153 173 473 435 404 374 338 302 217 176 128 120 259 214 172 143 183 160 151 144 128 359 352 343 331 293 251 201 145 154 253 174 445 441 311 299 287 268 227 169 206 101 158 140 132 122 117 106 98 127 120 117 107 93 80 84 84 49 103 98 75 69 68 bazarr +202 192 175 157 218 209 197 182 159 318 305 305 276 253 221 180 252 218 166 142 140 171 127 32 170 141 167 129 209 183 148 134 56 133 65 83 160 130 109 97 84 85 76 91 77 133 113 94 129 92 132 101 85 117 61 310 299 215 209 203 189 163 114 134 102 68 20 69 62 94 87 74 59 45 79 72 63 53 39 57 38 45 32 21 14 12 gitea +200 190 174 160 140 247 237 227 211 194 173 173 128 317 274 219 267 215 150 2 314 281 237 181 190 241 197 206 327 287 225 307 267 228 147 243 197 136 225 212 198 184 162 151 135 127 122 328 285 239 194 141 222 180 114 253 243 110 427 419 414 382 327 256 156 134 120 99 86 95 177 156 131 132 118 103 84 83 77 73 62 51 47 23 16 15 fireflyiii_data_importer +186 875 860 841 822 803 780 765 743 725 712 712 686 665 634 607 579 541 504 467 440 410 382 354 285 250 203 159 266 227 185 247 218 187 86 213 157 166 129 173 156 140 117 450 433 421 409 386 344 306 257 215 164 272 199 129 331 194 184 171 153 247 177 97 203 180 151 138 120 104 108 150 133 116 96 159 149 130 105 85 69 74 81 48 24 23 joplin +175 128 216 126 246 221 264 249 225 201 117 117 279 227 215 211 257 212 298 250 222 186 261 229 156 244 195 154 186 143 168 177 138 109 80 102 91 38 99 65 50 48 31 36 17 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - changedetection.io +166 165 269 215 179 165 151 144 157 142 188 188 179 191 185 177 170 240 181 182 153 211 147 114 109 197 177 173 170 164 158 164 200 157 115 135 142 134 128 120 124 82 107 115 126 102 100 129 136 555 509 463 420 380 309 250 239 105 131 108 127 228 159 181 112 108 90 105 286 266 248 221 200 179 161 123 107 85 150 150 145 127 99 60 72 66 emby +160 156 151 142 126 111 96 
75 143 128 113 113 66 1929 1888 1853 1821 1790 1754 1705 1684 1660 1631 1603 1532 1495 1457 1431 1393 1352 1310 1240 1202 1168 1107 1067 1033 994 976 971 962 952 939 933 928 923 917 906 870 837 797 757 719 680 615 566 556 435 426 412 406 377 323 259 162 142 128 117 108 94 83 76 61 57 55 45 39 31 37 37 19 11 28 12 10 8 resiliosync +142 58 86 24 473 439 402 375 347 312 273 273 202 148 158 878 828 762 702 625 552 493 419 359 224 128 46 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - whatsapper +136 230 210 201 187 167 150 126 353 337 326 326 283 244 205 167 125 237 209 174 128 220 164 606 540 497 460 420 386 345 308 237 200 150 70 220 177 144 114 797 792 778 770 757 751 740 732 717 684 658 631 603 575 545 497 459 442 348 335 320 313 290 248 188 112 91 104 158 144 132 119 103 81 130 127 112 97 74 81 81 67 48 84 58 58 58 scrutiny +136 134 129 123 119 113 107 101 92 87 75 75 240 225 190 167 151 131 101 72 282 266 246 226 182 158 132 114 97 72 301 254 227 207 154 125 100 72 53 44 39 266 262 256 247 239 230 219 182 146 108 67 265 229 163 109 103 58 45 126 114 84 236 170 75 56 42 35 30 13 9 42 27 98 87 73 72 65 45 38 55 48 43 21 10 9 grav +124 153 136 152 141 150 177 162 225 215 195 195 191 318 292 257 218 318 287 238 339 282 381 465 478 403 378 402 375 445 370 350 352 319 150 1274 1214 1155 1121 1094 1070 1043 1025 1003 979 956 942 922 868 814 764 709 657 605 511 433 400 238 311 284 252 227 137 301 244 187 351 322 301 262 236 202 158 111 176 148 152 108 134 134 123 84 104 57 86 80 paperless_ng +116 86 289 258 243 228 206 192 168 149 135 135 102 73 254 220 185 143 111 53 21 3 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - linkwarden +109 94 89 108 102 86 95 155 152 146 134 134 123 220 179 137 133 265 223 192 159 178 158 292 171 233 167 389 342 290 229 265 209 154 102 290 227 169 120 186 155 125 110 326 306 290 267 244 195 144 202 143 305 255 166 208 176 195 181 152 128 371 305 232 118 194 161 140 119 88 98 146 119 104 139 121 98 75 155 155 134 110 95 82 17 17 code_server +102 95 91 90 84 82 73 67 70 66 446 446 413 391 350 322 293 268 237 184 131 113 77 55 50 86 81 165 136 112 85 83 161 119 53 129 91 132 105 89 86 73 64 63 57 58 52 93 116 70 74 73 73 331 269 216 203 80 77 72 61 263 211 144 49 39 99 87 78 72 69 63 54 28 17 20 17 22 69 62 41 28 45 18 7 5 myelectricaldata_dev +99 93 80 72 58 49 34 27 19 10 20 20 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - openproject +77 74 174 163 156 149 143 138 131 122 110 110 215 188 147 110 365 330 292 248 216 185 148 120 385 355 318 296 262 234 199 131 334 295 228 191 150 120 94 81 105 97 94 91 84 79 77 110 116 111 104 101 110 139 133 77 205 76 55 71 141 109 224 150 82 76 66 64 75 69 60 60 62 50 67 58 43 65 58 58 52 42 68 51 45 44 lidarr_nas +77 68 58 55 65 36 37 47 3 28 33 33 34 82 19 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - birdnet-pi +77 64 55 45 28 34 35 46 2 49 151 151 111 70 68 15 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - birdnet-go +77 532 498 469 425 357 335 456 420 382 597 597 530 477 392 553 489 405 471 388 469 383 556 496 347 437 359 443 367 374 343 448 388 337 179 395 316 336 271 357 302 257 238 270 232 
244 329 281 412 348 283 272 442 382 286 222 293 262 224 340 313 251 386 287 220 217 217 249 224 215 226 214 395 371 339 300 287 251 207 169 163 144 110 68 47 47 fireflyiii +77 1046 1036 1032 1028 1021 1015 1015 1008 1004 1002 1002 975 959 924 889 859 827 789 750 714 694 674 648 607 577 554 528 511 476 442 377 348 317 248 216 180 149 128 119 110 104 98 92 85 75 64 608 570 529 482 448 410 376 307 258 250 118 109 91 82 341 287 220 121 103 90 88 84 80 73 59 93 92 87 81 73 65 50 40 39 30 24 9 4 4 ombi +77 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - battybirdnet-pi +69 60 50 44 31 24 17 3 4 - 17 17 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - navidrome +66 63 60 58 66 61 62 61 60 61 92 92 1885 1858 1824 1786 1759 1714 1677 1638 1618 1595 1565 1544 1494 1456 1420 1392 1360 1322 1283 1219 1187 1156 1111 1078 1048 1018 990 984 977 968 957 952 950 944 941 936 894 856 819 781 747 711 645 594 584 458 449 443 440 405 352 292 194 174 160 154 150 146 138 129 115 112 109 100 94 72 62 62 76 61 84 57 50 50 nzbget +66 36 1348 1341 1338 1327 1321 1315 1309 1303 1292 1292 1282 1256 1231 1210 1188 1166 1142 1111 1092 1072 1043 1026 976 956 933 912 896 877 853 810 793 772 730 707 680 663 647 641 637 635 628 621 615 610 607 597 571 545 523 498 474 449 404 369 363 276 269 263 251 225 189 146 83 70 62 59 54 49 41 26 90 85 86 78 72 69 52 47 45 31 31 13 7 7 elasticsearch +63 77 73 75 78 73 79 72 78 81 97 97 89 99 92 96 90 92 95 86 80 85 57 76 87 96 90 86 91 109 143 92 105 69 103 148 153 149 165 134 160 124 332 332 332 332 332 332 312 292 272 252 232 212 170 138 138 58 58 111 119 128 142 166 110 109 112 105 103 97 108 95 115 76 82 75 79 84 127 105 93 88 78 53 51 51 omada +61 65 71 59 60 77 67 67 75 68 124 124 96 140 105 85 114 20 120 103 106 98 107 87 129 102 99 115 114 116 115 118 107 134 67 146 115 82 82 83 89 90 79 90 82 79 76 106 99 97 95 88 87 110 111 72 111 79 71 64 84 105 116 143 79 70 57 58 56 71 65 60 59 49 60 48 54 62 52 52 43 39 65 49 42 41 readarr_nas +55 49 36 281 281 276 269 258 253 247 241 241 222 204 179 154 131 116 92 68 50 24 138 125 95 74 52 52 80 58 35 197 179 156 118 94 72 53 41 39 31 25 18 22 16 103 100 92 67 41 60 38 213 191 145 112 108 21 213 209 205 188 153 111 47 37 30 27 36 35 28 21 29 26 22 23 17 25 32 32 21 22 20 11 9 9 unpackerr +44 35 266 264 261 258 251 250 246 236 230 230 189 163 117 73 256 216 183 147 122 100 58 157 98 64 186 155 125 102 72 70 594 563 494 459 429 395 377 371 362 354 351 347 341 333 331 325 286 248 202 160 120 82 300 250 244 115 113 107 99 67 267 201 100 84 67 59 47 39 35 25 17 15 38 32 23 26 32 32 22 19 53 33 27 26 piwigo +42 82 71 67 52 42 149 140 130 115 108 108 72 157 117 83 161 117 80 149 128 106 59 98 99 62 61 54 218 186 145 70 89 55 30 92 52 35 30 24 40 40 27 40 186 173 163 154 113 73 337 299 262 222 151 99 87 68 64 52 47 132 76 133 36 29 21 14 22 13 1 - - - - - - - - - - - - - - - autobrr +42 36 37 55 48 37 35 37 37 39 80 80 54 73 62 69 117 96 64 57 58 57 61 41 55 64 64 52 54 60 45 48 43 50 35 626 592 559 544 540 536 531 529 525 520 517 511 511 474 437 400 359 325 283 217 167 161 35 35 31 106 73 86 111 52 41 29 45 32 38 32 33 32 30 31 36 26 37 30 23 20 24 38 22 26 26 flexget +39 36 32 30 25 28 25 20 69 64 58 58 35 80 53 36 34 37 64 39 37 56 30 48 31 50 33 31 34 32 79 32 36 31 29 40 38 31 31 17 21 15 29 13 14 18 22 323 299 277 251 229 
206 184 139 103 94 8 5 14 33 42 52 70 28 20 13 11 17 13 15 17 17 10 19 17 14 27 43 38 34 30 24 7 13 11 omada_v3 +34 33 31 134 133 133 133 129 128 128 128 128 110 86 344 320 293 262 227 201 175 155 133 119 92 69 43 24 1 319 287 213 184 145 75 95 181 145 122 116 105 96 77 71 60 50 218 212 175 138 88 130 87 628 556 499 491 367 362 353 343 314 260 193 90 63 50 51 207 193 186 167 160 149 140 133 119 103 80 64 50 39 26 7 4 4 vaultwarden +24 20 13 7 2 50 31 13 12 8 38 38 109 87 54 34 32 105 76 45 19 25 178 160 117 92 75 51 22 172 146 94 68 46 2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - signalk +22 18 18 22 19 19 27 41 28 18 122 122 93 61 36 46 27 28 28 21 42 21 6 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - codex +- - - - - - - - - - - - - - - - - - 43 35 23 17 8 22 11 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - overleaf diff --git a/Stats2 b/Stats2 index fc36aba66..d5b330f30 100644 --- a/Stats2 +++ b/Stats2 @@ -1,93 +1,94 @@ Name Total armv7 amd64 aarch64 ------------------------------ -paperless_ng 141 - 78 63 -code_server 102 7 61 34 -vaultwarden 133 28 60 45 -zoneminder 2080 343 1045 692 -xteve 2357 457 1107 793 -whoogle-search 1038 200 484 354 -whatsapper 473 - 289 184 -wger 749 - 455 294 -webtrees 288 53 141 94 -webtop_kde 707 - 474 233 -unpackerr 281 - 162 119 -ubooquity 1754 450 713 591 -transmission_openvpn 992 147 486 359 -transmission_ls 738 8 483 247 -teamspeak 2007 466 845 696 -tdarr 1455 350 626 479 -tandoor_recipes 651 56 386 209 -spotweb 811 191 374 246 -sponsorblockcast 1466 113 811 542 -sonarr_nas 1045 1 925 119 -signalk 2 1 1 0 -seafile 1034 230 468 336 -scrutiny-fa 315 - 208 107 -scrutiny 187 - 130 57 -sabnzbd 1822 - 1781 41 -resiliosync 126 - 71 55 -requestrr 335 87 133 115 -readarr_nas 60 - 49 11 -radarr_nas 411 4 281 126 -qbittorrent 1417 - 882 535 -prowlarr 275 - 188 87 -postgres 1345 116 743 486 -portainer_agent 521 4 342 175 -portainer 11142 206 7756 3180 -plex-media-manager 297 32 179 86 -plex_nas 532 2 337 193 -piwigo 261 47 130 84 -photoprism 304 - 193 111 +paperless_ng 124 - 72 52 +code_server 109 6 62 41 +vaultwarden 34 0 24 10 +zoneminder 2174 343 1100 731 +xteve 2404 457 1137 810 +whoogle-search 1080 200 514 366 +whatsapper 142 - 91 51 +wger 806 - 498 308 +webtrees 327 53 161 113 +webtop_kde 805 - 531 274 +unpackerr 55 - 37 18 +ubooquity 1779 450 731 598 +transmission_openvpn 1054 147 524 383 +transmission_ls 948 10 607 331 +teamspeak 2049 466 871 712 +tdarr 1482 350 646 486 +tandoor_recipes 341 4 235 102 +spotweb 840 191 392 257 +sponsorblockcast 1626 113 906 607 +sonarr_nas 364 1 254 109 +signalk 24 3 6 15 +seafile 1082 230 500 352 +scrutiny-fa 240 - 160 80 +scrutiny 136 - 97 39 +sabnzbd 5295 - 5240 55 +resiliosync 160 - 90 70 +requestrr 344 87 139 118 +readarr_nas 61 - 45 16 +radarr_nas 341 0 239 102 +qbittorrent 1711 - 1057 654 +prowlarr 259 - 176 83 +postgres 1611 116 926 569 +portainer_agent 612 5 398 209 +portainer 16729 219 12918 3592 +plex-media-manager 333 33 200 100 +plex_nas 411 2 258 151 +piwigo 44 1 28 15 +photoprism 296 - 181 115 papermerge 1346 446 460 440 -overseerr 1297 - 800 497 -organizr 2290 463 1074 753 -openproject 58 - 39 19 -ombi 1028 221 467 340 -omada_v3 25 - 15 10 -omada 78 - 49 29 -nzbget 66 0 44 22 -nextcloud_ocr 740 - 430 310 -navidrome 31 - 19 12 -mylar3 301 85 120 96 -mealie 799 - 539 260 -linkwarden 243 - 142 101 
-lidarr_nas 156 11 104 41 -joplin 822 - 493 329 -joal 887 201 238 448 -jellyseerr 184 0 129 55 -jellyfin 2213 3 1975 235 -jackett_nas 242 2 168 72 -inadyn 761 144 365 252 -immich 336 - 222 114 -guacamole 1155 - 787 368 -grav 119 - 73 46 -gitea 218 - 130 88 -gazpar2mqtt 1808 467 676 665 -free_games_claimer 1227 - 689 538 -flexget 48 - 29 19 -flaresolverr 319 3 202 114 -fireflyiii_fints_importer 565 - 310 255 -fireflyiii_data_importer 140 1 88 51 -fireflyiii 425 4 269 152 -filebrowser 9880 225 5820 3835 -epicgamesfree 678 - 439 239 -myelectricaldata_dev 84 2 41 41 -myelectricaldata 1357 36 734 587 -emby 179 2 115 62 -elasticsearch 1338 - 755 583 -comixed 329 - 118 211 -codex 19 - 10 9 -cloudcommander 409 - 271 138 -changedetection.io 246 - 151 95 -calibre_web 886 - 497 389 -calibre 278 - 169 109 -browserless_chrome 260 - 173 87 -calibre_web 886 - 497 389 -birdnet-pi 65 - 41 24 -birdnet-go 28 - 21 7 -binance_trading_bot 1455 - 803 652 -bazarr 179 0 130 49 -baikal 340 42 152 146 -autobrr 52 0 33 19 -arpspoof 2505 478 1204 823 -addons_updater 778 88 383 307 +overseerr 1407 - 868 539 +organizr 2336 463 1114 759 +openproject 99 - 68 31 +ombi 77 0 61 16 +omada_v3 39 - 25 14 +omada 63 - 44 19 +nzbget 66 0 45 21 +nextcloud_ocr 752 - 443 309 +navidrome 69 - 44 25 +mylar3 315 85 131 99 +mealie 1064 - 721 343 +linkwarden 116 - 68 48 +lidarr_nas 77 0 57 20 +joplin 186 - 124 62 +joal 900 201 248 451 +jellyseerr 245 0 176 69 +jellyfin 652 3 429 220 +jackett_nas 228 1 166 61 +inadyn 788 144 384 260 +immich 290 - 206 84 +guacamole 1273 - 863 410 +grav 136 - 83 53 +gitea 202 - 113 89 +gazpar2mqtt 1824 467 682 675 +free_games_claimer 1319 - 741 578 +flexget 42 - 26 16 +flaresolverr 450 3 283 164 +fireflyiii_fints_importer 583 - 318 265 +fireflyiii_data_importer 200 1 126 73 +fireflyiii 77 0 45 32 +filebrowser 11150 259 6593 4298 +epicgamesfree 671 - 434 237 +myelectricaldata_dev 102 3 50 49 +myelectricaldata 1525 41 824 660 +emby 166 2 99 65 +elasticsearch 66 - 49 17 +comixed 349 - 135 214 +codex 22 - 15 7 +cloudcommander 305 - 214 91 +changedetection.io 175 - 105 70 +calibre_web 279 - 172 107 +calibre 335 - 203 132 +browserless_chrome 252 - 170 82 +calibre_web 279 - 172 107 +birdnet-pi 77 - 45 32 +birdnet-go 77 - 58 19 +binance_trading_bot 1491 - 825 666 +bazarr 208 0 150 58 +battybirdnet-pi 6 - - 6 +baikal 376 43 174 159 +autobrr 42 0 28 14 +arpspoof 2561 478 1240 843 +addons_updater 837 90 413 334 diff --git a/addons_updater/stats.png b/addons_updater/stats.png index 935eb7681..ad2b60167 100644 Binary files a/addons_updater/stats.png and b/addons_updater/stats.png differ diff --git a/arpspoof/stats.png b/arpspoof/stats.png index da086e1ec..b3ab2974b 100644 Binary files a/arpspoof/stats.png and b/arpspoof/stats.png differ diff --git a/autobrr/CHANGELOG.md b/autobrr/CHANGELOG.md index d69ff97c6..7d552739b 100644 --- a/autobrr/CHANGELOG.md +++ b/autobrr/CHANGELOG.md @@ -1,4 +1,7 @@ +## 1.44.0 (27-07-2024) +- Update to latest version from autobrr/autobrr (changelog : https://github.com/autobrr/autobrr/releases) + ## 1.43.0 (22-06-2024) - Update to latest version from autobrr/autobrr (changelog : https://github.com/autobrr/autobrr/releases) diff --git a/autobrr/Dockerfile b/autobrr/Dockerfile index e00bdaedb..f59bcb16a 100644 --- a/autobrr/Dockerfile +++ b/autobrr/Dockerfile @@ -16,7 +16,7 @@ ARG BUILD_FROM ARG BUILD_VERSION -ARG BUILD_UPSTREAM="1.43.0" +ARG BUILD_UPSTREAM="1.44.0" FROM ${BUILD_FROM} ################## diff --git a/autobrr/config.json b/autobrr/config.json index 4cf6d9c2c..f550710f9 100644 --- 
a/autobrr/config.json +++ b/autobrr/config.json @@ -112,5 +112,5 @@ "slug": "autobrr", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/autobrr", - "version": "1.43.0" + "version": "1.44.0" } diff --git a/autobrr/stats.png b/autobrr/stats.png index 4596ded76..978e2f1e3 100644 Binary files a/autobrr/stats.png and b/autobrr/stats.png differ diff --git a/autobrr/updater.json b/autobrr/updater.json index 28f1ab670..e534229d3 100644 --- a/autobrr/updater.json +++ b/autobrr/updater.json @@ -1,8 +1,8 @@ { - "last_update": "22-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "autborr", "source": "github", "upstream_repo": "autobrr/autobrr", - "upstream_version": "1.43.0" + "upstream_version": "1.44.0" } diff --git a/baikal/CHANGELOG.md b/baikal/CHANGELOG.md index decbc71d6..5805541b5 100644 --- a/baikal/CHANGELOG.md +++ b/baikal/CHANGELOG.md @@ -1,3 +1,5 @@ +## 0.9.5_updated (06-08-2024) +- Minor bugs fixed ## 0.9.5 (27-04-2024) - Update to latest version from ckulka/baikal-docker (changelog : https://github.com/ckulka/baikal-docker/releases) diff --git a/baikal/build.json b/baikal/build.json index cf6c9a5b7..28711b6e8 100644 --- a/baikal/build.json +++ b/baikal/build.json @@ -1,8 +1,8 @@ { "build_from": { - "aarch64": "ckulka/baikal:nginx", - "amd64": "ckulka/baikal:nginx", - "armv7": "ckulka/baikal:nginx" + "aarch64": "ghcr.io/mralucarddante/baikal-docker-hass:latest", + "amd64": "ghcr.io/mralucarddante/baikal-docker-hass:latest", + "armv7": "ghcr.io/mralucarddante/baikal-docker-hass:latest" }, "codenotary": { "signer": "alexandrep.github@gmail.com" diff --git a/baikal/config.json b/baikal/config.json index 5b5c70d33..94bbf9969 100644 --- a/baikal/config.json +++ b/baikal/config.json @@ -82,6 +82,6 @@ "slug": "baikal", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "0.9.5", + "version": "0.9.5_updated", "webui": "[PROTO:ssl]://[HOST]:[PORT:80]" } diff --git a/baikal/stats.png b/baikal/stats.png index e72012c36..8b455570b 100644 Binary files a/baikal/stats.png and b/baikal/stats.png differ diff --git a/battybirdnet-pi/CHANGELOG.md b/battybirdnet-pi/CHANGELOG.md new file mode 100644 index 000000000..7ea1db5b6 --- /dev/null +++ b/battybirdnet-pi/CHANGELOG.md @@ -0,0 +1,3 @@ + +## 0.1 (27-07-2024) +- Initial build diff --git a/battybirdnet-pi/Dockerfile b/battybirdnet-pi/Dockerfile new file mode 100644 index 000000000..6b5ae0733 --- /dev/null +++ b/battybirdnet-pi/Dockerfile @@ -0,0 +1,205 @@ +#============================# +# ALEXBELGIUM'S DOCKERFILE # +#============================# +# _.------. +# _.-` ('>.-`"""-. +# '.--'` _'` _ .--.) +# -' '-.-';` ` +# ' - _.' ``'--. 
+# '---` .-'""` +# /` +#=== Home Assistant Addon ===# + +################# +# 1 Build Image # +################# + +ARG BUILD_VERSION +ARG BUILD_FROM +FROM ${BUILD_FROM} + +ENV DEBIAN_FRONTEND="noninteractive" \ + BIRDNET_USER="pi" \ + USER="pi" \ + PUID=1000 \ + PGID=1000 \ + HOME="/home/pi" \ + XDG_RUNTIME_DIR="/run/user/1000" \ + PYTHON_VIRTUAL_ENV="/home/pi/BirdNET-Pi/birdnet/bin/python3" \ + my_dir=/home/pi/BirdNET-Pi/scripts + +# Global LSIO modifications +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_lsio.sh" "/ha_lsio.sh" +ARG CONFIGLOCATION="/config" +RUN chmod 744 /ha_lsio.sh && if grep -qr "lsio" /etc; then /ha_lsio.sh "$CONFIGLOCATION"; fi && rm /ha_lsio.sh + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# hadolint ignore=DL3015,SC2016 +RUN \ + # Install dependencies + echo "Install dependencies" && \ + apt-get update -y && apt-get install curl gcc python3-dev git jq sudo php-mbstring procps -y && \ + \ + # Correct for systemctl + curl -f -L -s -S https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl3.py -o /bin/systemctl && \ + chmod a+x /bin/systemctl && \ + \ + # Change user to pi and create /home/pi + echo "setting users" && \ + if id abc >/dev/null 2>&1; then groupmod -o -g 101 abc && usermod -o -u 101 abc; fi && \ + groupadd --non-unique -g 1000 "$USER" && \ + useradd --non-unique --uid 1000 --gid 1000 -m "$USER" && \ + \ + # Ensure permissions + echo "setting permissions" && \ + echo "$USER ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers && \ + mkdir -p /home/"$USER"/.config/matplotlib && \ + chown -R "$USER":"$USER" /home/"$USER" && \ + git config --global --add safe.directory '*' && \ + \ + # Download installer + curl -f -L -s -S "https://raw.githubusercontent.com/rdz-oss/BattyBirdNET-Pi/main/newinstaller.sh" -o /newinstaller.sh && \ + chmod 777 /newinstaller.sh && \ + \ + # Use installer to modify other scripts + ####################################### + # Define file + sed -i "1a /./newinstallermod.sh" /newinstaller.sh && \ + echo '#!/bin/bash' >> /newinstallermod.sh && \ + # Remove all instances of sudo from all other scripts + echo 'for file in $(grep -srl "sudo" $HOME/BirdNET-Pi/scripts); do sed -i "s|sudo ||" "$file"; done' >> /newinstallermod.sh && \ + echo 'for file in $(grep -srl "my_dir" $HOME/BirdNET-Pi/scripts); do sed -i "s|\$my_dir|/config|" "$file"; done' >> /newinstallermod.sh && \ + # Disable pulseaudio + echo 'for file in $(grep -srl "pulseaudio --start" $HOME/BirdNET-Pi/scripts); do sed -i "/pulseaudio --start/d" "$file"; done' >> /newinstallermod.sh && \ + # Set permission + chmod +x /newinstallermod.sh && \ + \ + # Modify installer + ################## + # Avoid rebooting at end of installation + sed -i "/reboot/d" /newinstaller.sh && \ + # Use apt-get as without user action + sed -i "s|apt |apt-get |g" /newinstaller.sh && \ + # Ensure chmod + sed -i "/git clone/a chown -R 1000:1000 $HOME" /newinstaller.sh && \ + # Remove all instances of sudo from the newinstaller + sed -i -e "s|== 0|== 7|g" -e "s|sudo -n true|true|g" -e "s|sudo -K|true|g" /newinstaller.sh && \ + \ + # Execute installer + /./newinstaller.sh && \ + \ + # Install dateparser + $PYTHON_VIRTUAL_ENV /usr/bin/pip3 install dateparser && \ + \ + # Adapt for lsio usage of /app + if [ -d /app ]; then rm -r /app; fi && \ + ln -s /home/"$USER" /app && \ + chown -R "$USER":"$USER" /home/"$USER" /app && \ + \ + # Give access to caddy for files owned by the user, to allow files modification + groupmod -o -g 1000 caddy 
&& usermod -o -u 1000 caddy && \ + \ + # Cleanup + apt-get clean all && \ + rm -rf /var/lib/apt/lists/* + +################## +# 2 Modify Image # +################## + +# Set S6 wait time +ENV S6_CMD_WAIT_FOR_SERVICES=1 \ + S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 \ + S6_SERVICES_GRACETIME=0 + +################## +# 3 Install apps # +################## + +# Add rootfs +COPY rootfs/ / + +# Uses /bin for compatibility purposes +# hadolint ignore=DL4005 +RUN if [ ! -f /bin/sh ] && [ -f /usr/bin/sh ]; then ln -s /usr/bin/sh /bin/sh; fi && \ + if [ ! -f /bin/bash ] && [ -f /usr/bin/bash ]; then ln -s /usr/bin/bash /bin/bash; fi + +# Modules +ARG MODULES="00-local_mounts.sh 00-smb_mounts.sh" + +# Automatic modules download +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_automodules.sh" "/ha_automodules.sh" +RUN chmod 744 /ha_automodules.sh && /ha_automodules.sh "$MODULES" && rm /ha_automodules.sh + +# Manual apps +ENV PACKAGES="alsa-utils libasound2-plugins" + +# Automatic apps & bashio +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh" +RUN chmod 744 /ha_autoapps.sh && /ha_autoapps.sh "$PACKAGES" && rm /ha_autoapps.sh + +################ +# 4 Entrypoint # +################ + +# Add entrypoint +ENV S6_STAGE2_HOOK=/ha_entrypoint.sh +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh" + +# Entrypoint modifications +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh" +RUN chmod 777 /ha_entrypoint.sh /ha_entrypoint_modif.sh && /ha_entrypoint_modif.sh && rm /ha_entrypoint_modif.sh + +# Avoid config.yaml interference +WORKDIR /config +#ENTRYPOINT ["/lib/systemd/systemd"] +#ENTRYPOINT [ "/usr/bin/env" ] +#CMD [ "/ha_entrypoint.sh" ] +#SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Allow a dockerfile independent from HA +EXPOSE 80 +RUN mkdir -p /data /config + +############ +# 5 Labels # +############ + +ARG BUILD_ARCH +ARG BUILD_DATE +ARG BUILD_DESCRIPTION +ARG BUILD_NAME +ARG BUILD_REF +ARG BUILD_REPOSITORY +ARG BUILD_VERSION +LABEL \ + io.hass.name="${BUILD_NAME}" \ + io.hass.description="${BUILD_DESCRIPTION}" \ + io.hass.arch="${BUILD_ARCH}" \ + io.hass.type="addon" \ + io.hass.version=${BUILD_VERSION} \ + maintainer="alexbelgium (https://github.com/alexbelgium)" \ + org.opencontainers.image.title="${BUILD_NAME}" \ + org.opencontainers.image.description="${BUILD_DESCRIPTION}" \ + org.opencontainers.image.vendor="Home Assistant Add-ons" \ + org.opencontainers.image.authors="alexbelgium (https://github.com/alexbelgium)" \ + org.opencontainers.image.licenses="MIT" \ + org.opencontainers.image.url="https://github.com/alexbelgium" \ + org.opencontainers.image.source="https://github.com/${BUILD_REPOSITORY}" \ + org.opencontainers.image.documentation="https://github.com/${BUILD_REPOSITORY}/blob/main/README.md" \ + org.opencontainers.image.created=${BUILD_DATE} \ + org.opencontainers.image.revision=${BUILD_REF} \ + org.opencontainers.image.version=${BUILD_VERSION} + +################# +# 6 Healthcheck # +################# + +ENV HEALTH_PORT="80" \ + HEALTH_URL="" +HEALTHCHECK \ + --interval=5s \ + --retries=5 \ + --start-period=30s \ + --timeout=25s \ + CMD curl --fail "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1 diff --git a/battybirdnet-pi/README.md b/battybirdnet-pi/README.md new file mode 100644 index 000000000..1c8f8e9a6 --- 
/dev/null +++ b/battybirdnet-pi/README.md @@ -0,0 +1,159 @@ +## ⚠ Open Request : [✨ [REQUEST] BattyBirdnet-Pi x86-64 (opened 2024-07-29)](https://github.com/alexbelgium/hassio-addons/issues/1498) by [@mrcrunchybeans](https://github.com/mrcrunchybeans) +# Home assistant add-on: battybirdnet-pi + +[![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) +[![Donate][paypal-badge]](https://www.paypal.com/donate/?hosted_button_id=DZFULJZTP3UQA) + +![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbattybirdnet-pi%2Fconfig.json) +![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbattybirdnet-pi%2Fconfig.json) +![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fbattybirdnet-pi%2Fconfig.json) + +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade) +[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml) +[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml) + +[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white +[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white + +_Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. 
Thanks!_ + +[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers) + +![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/BirdNET-Pi/stats.png) + +## About + +--- + +[battybirdnet-pi](https://github.com/Nachtzuster/BirdNET-Pi) is an AI solution for continuous avian monitoring and identification originally developed by @mcguirepr89 on github (https://github.com/mcguirepr89/BirdNET-Pi), whose work is continued by @Nachtzuster and other developers on an active fork (https://github.com/Nachtzuster/BirdNET-Pi) + +Features of the addon: +- Robust base image provided by [linuxserver](https://github.com/linuxserver/docker-baseimage-debian) +- Working docker system thanks to https://github.com/gdraheim/docker-systemctl-replacement +- Uses HA pulseaudio server +- Uses HA tmpfs to store temporary files in ram and avoid disk wear +- Exposes all config files to /config to allow remanence and easy access +- Allows modifying the location of the stored bird songs (preferably to an external hdd) +- Supports ingress, to allow secure remote access without exposing ports + +## Configuration + +--- + +Install, then start the addon a first time. +The webui can be reached in two ways: +- Ingress from HA (no password but some functions don't work) +- Direct access with ip:port, the port being the one defined in the birdnet.conf. The username when asked for a password is `birdnet`; the password is the one that you can define in the birdnet.conf (blank by default). This is different from the password in the addon options, which is the one that must be used to access the web terminal + +Web terminal access: username `pi`, password: as defined in the addon options + +You'll need a microphone: either use one connected to HA or the audio stream of an rtsp camera. + +Options can be configured in three ways: + +- Addon options + +```yaml +BIRDSONGS_FOLDER: folder to store birdsongs file # It should be an ssd if you want to avoid clogging of analysis +MQTT_DISABLED : if true, disables automatic mqtt publishing. Only valid if there is a local broker already available +LIVESTREAM_BOOT_ENABLED: start livestream from boot, or from settings +PROCESSED_FOLDER_ENABLED : if enabled, you need to set in the birdnet.conf (or the setting of birdnet) the number of last wav files that will be saved in the temporary folder "/tmp/Processed" within the tmpfs (so no disk wear) in case you want to retrieve them. This amount can be adapted from the addon options +TZ: Etc/UTC specify a timezone to use, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List +pi_password: set the user password to access the web terminal +localdisks: sda1 #put the hardware name of your drive to mount separated by commas, or its label. ex. sda1, sdb1, MYNAS...
+networkdisks: "//SERVER/SHARE" # optional, list of smb servers to mount, separated by commas +cifsusername: "username" # optional, smb username, same for all smb shares +cifspassword: "password" # optional, smb password +cifsdomain: "domain" # optional, allow setting the domain for the smb share +``` + +- Config.yaml +Additional variables can be configured using the config.yaml file found in /config/db21ed7f_battybirdnet-pi/config.yaml using the Filebrowser addon + +- Config_env.yaml +Additional environment variables can be configured there + +## Installation + +--- + +The installation of this add-on is pretty straightforward and no different from installing any other add-on. + +1. Add my add-ons repository to your home assistant instance (in supervisor addons store at top right, or click button below if you have configured my HA) + [![Open your Home Assistant instance and show the add add-on repository dialog with a specific repository URL pre-filled.](https://my.home-assistant.io/badges/supervisor_add_addon_repository.svg)](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons) +1. Install this add-on. +1. Click the `Save` button to store your configuration. +1. Set the add-on options to your preferences. +1. Start the add-on. +1. Check the logs of the add-on to see if everything went well. +1. Open the webUI and adapt the software options. + +## Integration with HA + +--- +### Apprise + +You can use apprise to send notifications with mqtt, then act on those using Home Assistant. +Further information: https://wander.ingstar.com/projects/birdnetpi.html + +### Automatic mqtt + +If mqtt is installed, the addon automatically updates the birdnet topic with each detected species. + +## Using ssl + +--- + +Option 1: Install the Let's Encrypt addon and generate certificates. They are by default certfile.pem and keyfile.pem, stored in /ssl. Just enable ssl from the addon options and it will work. + +Option 2: enable port 80 and define your battybirdnet-pi URL as https. The certificate will be automatically generated by caddy. + +## Improve detections + +--- + +### Gain for card + +Using alsamixer in the Terminal tab, make sure that the sound level is high enough but not too high (not in the red part). +https://github.com/mcguirepr89/BirdNET-Pi/wiki/Adjusting-your-sound-card + +### Ferrite + +Adding ferrite beads led in my case to worse noise. + +### Aux to usb adapters + +Based on my tests, only adapters using KT0210 (such as Ugreen's) work. I couldn't get adapters based on ALC to be detected. + +### Microphone comparison + +Recommended microphones ([full discussion here](https://github.com/mcguirepr89/BirdNET-Pi/discussions/39)): +- Clippy EM272 (https://www.veldshop.nl/en/smart-clippy-em272z1-mono-omni-microphone.html) + ugreen aux to usb connector : best sensitivity with lavalier tech +- Boya By-LM40 : best quality/price +- Hyperx Quadcast : best sensitivity with cardioid tech + +Conclusion: using a mic from Dahua is good enough, EM272 is optimal, but the Boya By-LM40 is a very good compromise as the BirdNET model analyses the 0-15000Hz range + +![image](https://github.com/alexbelgium/hassio-addons/assets/44178713/df992b79-7171-4f73-b0c0-55eb4256cd5b) + +### Denoise ([Full discussion here](https://github.com/mcguirepr89/BirdNET-Pi/discussions/597)) + +Denoising is frowned upon by serious researchers. However, it does seem to significantly increase the quality of detection!
Here is how to do it in HA : +- Using Portainer addon, go in the hassio_audio container, and modify the file /etc/pulse/system.pa to add the line `load-module module-echo-cancel` +- Go in the Terminal addon, and type `ha audio restart` +- Select the echo cancelled device as input device in the addon options + +### High pass + +Should be avoided as the model uses the whole 0-15khz range + +## Common issues + +Not yet available + +## Support + +Create an issue on github + +--- + diff --git a/battybirdnet-pi/apparmor.txt b/battybirdnet-pi/apparmor.txt new file mode 100644 index 000000000..660a9003f --- /dev/null +++ b/battybirdnet-pi/apparmor.txt @@ -0,0 +1,66 @@ +#include + +profile battybirdnet-pi_addon flags=(attach_disconnected,mediate_deleted) { + #include + + capability, + file, + signal, + mount, + umount, + remount, + network udp, + network tcp, + network dgram, + network stream, + network inet, + network inet6, + network netlink raw, + network unix dgram, + + capability setgid, + capability setuid, + capability sys_admin, + capability dac_read_search, + # capability dac_override, + # capability sys_rawio, + +# S6-Overlay + /init ix, + /run/{s6,s6-rc*,service}/** ix, + /package/** ix, + /command/** ix, + /run/{,**} rwk, + /dev/tty rw, + /bin/** ix, + /usr/bin/** ix, + /usr/lib/bashio/** ix, + /etc/s6/** rix, + /run/s6/** rix, + /etc/services.d/** rwix, + /etc/cont-init.d/** rwix, + /etc/cont-finish.d/** rwix, + /init rix, + /var/run/** mrwkl, + /var/run/ mrwkl, + /dev/i2c-1 mrwkl, + # Files required + /dev/fuse mrwkl, + /dev/sda1 mrwkl, + /dev/sdb1 mrwkl, + /dev/nvme0 mrwkl, + /dev/nvme1 mrwkl, + /dev/mmcblk0p1 mrwkl, + /dev/* mrwkl, + /tmp/** mrkwl, + + # Data access + /data/** rw, + + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer=docker-default, + + # docker daemon confinement requires explict allow rule for signal + signal (receive) set=(kill,term) peer=/usr/bin/docker, + +} diff --git a/battybirdnet-pi/build.yaml b/battybirdnet-pi/build.yaml new file mode 100644 index 000000000..717ce5f7b --- /dev/null +++ b/battybirdnet-pi/build.yaml @@ -0,0 +1,5 @@ +--- +build_from: + aarch64: ghcr.io/linuxserver/baseimage-debian:arm64v8-bookworm +codenotary: + signer: alexandrep.github@gmail.com diff --git a/battybirdnet-pi/config.json b/battybirdnet-pi/config.json new file mode 100644 index 000000000..fe7d02f78 --- /dev/null +++ b/battybirdnet-pi/config.json @@ -0,0 +1,124 @@ +{ + "arch": [ + "aarch64" + ], + "audio": true, + "backup": "cold", + "codenotary": "alexandrep.github@gmail.com", + "description": "A realtime acoustic bat & bird classification system for the Raspberry Pi 4/5 built on BattyBirdNET-Analyzer", + "devices": [ + "/dev/dri", + "/dev/dri/card0", + "/dev/dri/card1", + "/dev/dri/renderD128", + "/dev/vchiq", + "/dev/video10", + "/dev/video11", + "/dev/video12", + "/dev/video13", + "/dev/video14", + "/dev/video15", + "/dev/video16", + "/dev/ttyUSB0", + "/dev/sda", + "/dev/sdb", + "/dev/sdc", + "/dev/sdd", + "/dev/sde", + "/dev/sdf", + "/dev/sdg", + "/dev/nvme", + "/dev/nvme0n1p1", + "/dev/nvme0n1p2", + "/dev/mmcblk", + "/dev/fuse", + "/dev/sda1", + "/dev/sdb1", + "/dev/sdc1", + "/dev/sdd1", + "/dev/sde1", + "/dev/sdf1", + "/dev/sdg1", + "/dev/sda2", + "/dev/sdb2", + "/dev/sdc2", + "/dev/sdd2", + "/dev/sde2", + "/dev/sdf2", + "/dev/sdg2", + "/dev/sda3", + "/dev/sdb3", + "/dev/sda4", + "/dev/sdb4", + "/dev/sda5", + "/dev/sda6", + "/dev/sda7", + "/dev/sda8", + "/dev/nvme0", + "/dev/nvme1", + "/dev/nvme2" + ], + "image": 
"ghcr.io/alexbelgium/battybirdnet-pi-{arch}", + "ingress": true, + "ingress_stream": true, + "init": false, + "map": [ + "addon_config:rw", + "media:rw", + "share:rw", + "ssl" + ], + "name": "BattyBirdNET-Pi", + "options": { + "BIRDSONGS_FOLDER": "/config/BirdSongs", + "LIVESTREAM_BOOT_ENABLED": false, + "TZ": "Europe/Paris", + "certfile": "fullchain.pem", + "keyfile": "privkey.pem", + "ssl": false + }, + "panel_admin": false, + "panel_icon": "mdi:bird", + "ports": { + "80/tcp": null, + "8081/tcp": 8081 + }, + "ports_description": { + "80/tcp": "Optional : set to 80 to use caddy's automatic ssl", + "8081/tcp": "Web ui" + }, + "privileged": [ + "SYS_ADMIN", + "DAC_READ_SEARCH" + ], + "schema": { + "BIRDSONGS_FOLDER": "str?", + "LIVESTREAM_BOOT_ENABLED": "bool", + "MQTT_DISABLED": "bool?", + "MQTT_HOST_manual": "str?", + "MQTT_PASSWORD_manual": "password?", + "MQTT_PORT_manual": "int?", + "MQTT_USER_manual": "str?", + "PROCESSED_FOLDER_ENABLED": "bool?", + "TZ": "str?", + "certfile": "str", + "cifsdomain": "str?", + "cifspassword": "str?", + "cifsusername": "str?", + "keyfile": "str", + "localdisks": "str?", + "networkdisks": "str?", + "pi_password": "password", + "ssl": "bool" + }, + "services": [ + "mqtt:want" + ], + "slug": "battybirdnet-pi", + "tmpfs": true, + "udev": true, + "url": "https://github.com/alexbelgium/hassio-addons/tree/master/battybirdnet-pi", + "usb": true, + "version": "0.1", + "video": true +} diff --git a/battybirdnet-pi/icon.png b/battybirdnet-pi/icon.png new file mode 100644 index 000000000..8c85f107a Binary files /dev/null and b/battybirdnet-pi/icon.png differ diff --git a/battybirdnet-pi/logo.png b/battybirdnet-pi/logo.png new file mode 100644 index 000000000..8c85f107a Binary files /dev/null and b/battybirdnet-pi/logo.png differ diff --git a/battybirdnet-pi/rootfs/custom-services.d/00-php_pfm.sh b/battybirdnet-pi/rootfs/custom-services.d/00-php_pfm.sh new file mode 100755 index 000000000..fcdd6e875 --- /dev/null +++ b/battybirdnet-pi/rootfs/custom-services.d/00-php_pfm.sh @@ -0,0 +1,12 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +# Correct /config permissions after startup +chown pi:pi /config + +# Waiting for dbus +until [[ -e /var/run/dbus/system_bus_socket ]]; do + sleep 1s +done +echo "Starting service: php pfm" +exec /usr/sbin/php-fpm* -F diff --git a/battybirdnet-pi/rootfs/custom-services.d/01-avahi.sh b/battybirdnet-pi/rootfs/custom-services.d/01-avahi.sh new file mode 100755 index 000000000..1963c3d25 --- /dev/null +++ b/battybirdnet-pi/rootfs/custom-services.d/01-avahi.sh @@ -0,0 +1,9 @@ +#!/usr/bin/with-contenv bashio +# Waiting for dbus +until [[ -e /var/run/dbus/system_bus_socket ]]; do + sleep 1s +done + +echo "Starting service: avahi daemon" +exec \ + avahi-daemon --no-chroot diff --git a/battybirdnet-pi/rootfs/custom-services.d/02-caddy.sh b/battybirdnet-pi/rootfs/custom-services.d/02-caddy.sh new file mode 100755 index 000000000..511299480 --- /dev/null +++ b/battybirdnet-pi/rootfs/custom-services.d/02-caddy.sh @@ -0,0 +1,21 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash + +# Dependencies +sockfile="empty" +until [[ -e /var/run/dbus/system_bus_socket ]] && [[ -e "$sockfile" ]]; do + sleep 1s + sockfile="$(find /run/php -name "*.sock")" +done + +# Correct fpm.sock +chown caddy:caddy /run/php/php*-fpm.sock +sed -i "s|/run/php/php-fpm.sock|$sockfile|g" /helpers/caddy_ingress.sh +sed -i "s|/run/php/php-fpm.sock|$sockfile|g" /etc/caddy/Caddyfile +sed -i "s|/run/php/php-fpm.sock|$sockfile|g" 
"$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh + +# Update caddyfile with password +/."$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh &>/dev/null || true + +echo "Starting service: caddy" +/usr/bin/caddy run --config /etc/caddy/Caddyfile diff --git a/battybirdnet-pi/rootfs/custom-services.d/02-nginx.sh b/battybirdnet-pi/rootfs/custom-services.d/02-nginx.sh new file mode 100755 index 000000000..08b3e2bd2 --- /dev/null +++ b/battybirdnet-pi/rootfs/custom-services.d/02-nginx.sh @@ -0,0 +1,6 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash +set -e + +echo "Starting service: nginx" +nginx diff --git a/battybirdnet-pi/rootfs/custom-services.d/30-monitoring.sh b/battybirdnet-pi/rootfs/custom-services.d/30-monitoring.sh new file mode 100755 index 000000000..ff9ae0487 --- /dev/null +++ b/battybirdnet-pi/rootfs/custom-services.d/30-monitoring.sh @@ -0,0 +1,92 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash + +echo "Starting service: throttlerecording" +touch "$HOME"/BirdSongs/StreamData/analyzing_now.txt + +# variables for readability +srv="birdnet_recording" +analyzing_now="." +counter=10 +set +u +# shellcheck disable=SC1091 +source /config/birdnet.conf 2>/dev/null + +# Ensure folder exists +ingest_dir="$RECS_DIR/StreamData" + +# Check permissions +mkdir -p "$ingest_dir" +chown -R pi:pi "$ingest_dir" +chmod -R 755 "$ingest_dir" +ingest_dir="$(readlink -f "$ingest_dir")" || true +mkdir -p "$ingest_dir" +chown -R pi:pi "$ingest_dir" +chmod -R 755 "$ingest_dir" + +function apprisealert() { + # Set failed check so it only runs once + touch "$HOME"/BirdNET-Pi/failed_servicescheck + NOTIFICATION="" + STOPPEDSERVICE="
Stopped services: " + services=(birdnet_analysis + chart_viewer + spectrogram_viewer + icecast2 + birdnet_recording + birdnet_log + birdnet_stats) + for i in "${services[@]}"; do + if [[ "$(sudo systemctl is-active "${i}".service)" == "inactive" ]]; then + STOPPEDSERVICE+="${i}; " + fi + done + NOTIFICATION+="$STOPPEDSERVICE" + NOTIFICATION+="
Additional information: " + NOTIFICATION+="
Since: ${LASTCHECK:-unknown}" + NOTIFICATION+="
System: ${SITE_NAME:-$(hostname)}" + NOTIFICATION+="
Available disk space: $(df -h "$(readlink -f "$HOME/BirdSongs")" | awk 'NR==2 {print $4}')" + if [ -n "$BIRDNETPI_URL" ]; then + NOTIFICATION+="
Access your battybirdnet-pi" + fi + TITLE="BirdNET-Analyzer stopped" + "$HOME"/BirdNET-Pi/birdnet/bin/apprise -vv -t "$TITLE" -b "${NOTIFICATION}" --input-format=html --config="$HOME/BirdNET-Pi/apprise.txt" +} + +while true; do + sleep 61 + + # Restart analysis if clogged + ############################ + + if ((counter <= 0)); then + latest="$(cat "$ingest_dir"/analyzing_now.txt)" + if [[ "$latest" == "$analyzing_now" ]]; then + echo "$(date) WARNING no change in analyzing_now for 10 iterations, restarting services" + /."$HOME"/BirdNET-Pi/scripts/restart_services.sh + fi + counter=10 + analyzing_now=$(cat "$ingest_dir"/analyzing_now.txt) + fi + + # Pause recorder to catch-up + ############################ + + wavs="$(find "$ingest_dir" -maxdepth 1 -name '*.wav' | wc -l)" + state="$(systemctl is-active "$srv")" + + bashio::log.green "$(date) INFO ${wavs} wav files waiting in $ingest_dir, $srv state is $state" + + if ((wavs > 100)); then + bashio::log.red "$(date) WARNING too many files in queue, pausing $srv" + sudo systemctl stop "$srv" + sudo systemctl restart birdnet_analysis + if [ -s "$HOME/BirdNET-Pi/apprise.txt" ]; then apprisealert; fi + elif [[ "$state" != "active" ]]; then + bashio::log.yellow "$(date) INFO started $srv service" + sudo systemctl start $srv + sudo systemctl restart birdnet_analysis + fi + + ((counter--)) +done diff --git a/battybirdnet-pi/rootfs/etc/cont-finish.d/savestreamdata.sh b/battybirdnet-pi/rootfs/etc/cont-finish.d/savestreamdata.sh new file mode 100755 index 000000000..215b7f9fd --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-finish.d/savestreamdata.sh @@ -0,0 +1,18 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash + +if [ -d "$HOME"/BirdSongs/StreamData ]; then + bashio::log.fatal "Container stopping, saving temporary files" + + # Stop the services in parallel + systemctl stop birdnet_analysis & + systemctl stop birdnet_recording + + # Check if there are files in StreamData and move them to /data/StreamData + mkdir -p /data/StreamData + if [ "$(ls -A "$HOME"/BirdSongs/StreamData)" ]; then + mv -v "$HOME"/BirdSongs/StreamData/* /data/StreamData/ + fi + + bashio::log.fatal "... files safe, allowing container to stop" +fi diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/01-structure.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/01-structure.sh new file mode 100755 index 000000000..39d3019cd --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/01-structure.sh @@ -0,0 +1,87 @@ +#!/command/with-contenv bashio +# shellcheck shell=bash +set -e + +############### +# SET /CONFIG # +############### + +echo " " +bashio::log.info "Ensuring the file structure is correct :" + +# Define structure +echo "... creating default files" +touch /config/include_species_list.txt # Should be null +for files in apprise.txt exclude_species_list.txt IdentifiedSoFar.txt disk_check_exclude.txt confirmed_species_list.txt blacklisted_images.txt; do + if [ ! -f /config/"$files" ]; then + echo "" > /config/"$files" + fi +done + +# Get BirdSongs folder locations +BIRDSONGS_FOLDER="/config/BirdSongs" +if bashio::config.has_value "BIRDSONGS_FOLDER"; then + BIRDSONGS_FOLDER_OPTION="$(bashio::config "BIRDSONGS_FOLDER")" + echo "... BIRDSONGS_FOLDER set to $BIRDSONGS_FOLDER_OPTION" + mkdir -p "$BIRDSONGS_FOLDER_OPTION" || bashio::log.fatal "...... folder couldn't be created" + chown -R pi:pi "$BIRDSONGS_FOLDER_OPTION" || bashio::log.fatal "...... 
folder couldn't be given permissions for 1000:1000" + if [ -d "$BIRDSONGS_FOLDER_OPTION" ] && [ "$(stat -c '%u:%g' "$BIRDSONGS_FOLDER_OPTION")" == "1000:1000" ]; then + BIRDSONGS_FOLDER="$BIRDSONGS_FOLDER_OPTION" + else + bashio::log.yellow "BIRDSONGS_FOLDER reverted to /config/BirdSongs" + fi +fi + +# Create BirdSongs folder +echo "... creating default folders ; it is highly recommended to store those on a ssd" +mkdir -p "$BIRDSONGS_FOLDER"/By_Date +mkdir -p "$BIRDSONGS_FOLDER"/Charts + +# If tmpfs is installed, use it +if df -T /tmp | grep -q "tmpfs"; then + echo "... tmpfs detected, using it for StreamData and Processed to reduce disk wear" + mkdir -p /tmp/StreamData + mkdir -p /tmp/Processed + rm -r "$HOME"/BirdSongs/StreamData + rm -r "$HOME"/BirdSongs/Processed + sudo -u pi ln -fs /tmp/StreamData "$HOME"/BirdSongs/StreamData + sudo -u pi ln -fs /tmp/Processed "$HOME"/BirdSongs/Processed +fi + +# Permissions for created files and folders +echo "... set permissions to user pi" +chown -R pi:pi /config /etc/birdnet "$BIRDSONGS_FOLDER" /tmp +chmod -R 755 /config /config /etc/birdnet "$BIRDSONGS_FOLDER" /tmp + +# Save default birdnet.conf to perform sanity check +cp "$HOME"/BirdNET-Pi/birdnet.conf "$HOME"/BirdNET-Pi/birdnet.bak + +# Symlink files +echo "... creating symlink" +for files in "$HOME/BirdNET-Pi/birdnet.conf" "$HOME/BirdNET-Pi/blacklisted_images.txt" "$HOME/BirdNET-Pi/scripts/birds.db" "$HOME/BirdNET-Pi/BirdDB.txt" "$HOME/BirdNET-Pi/scripts/disk_check_exclude.txt" "$HOME/BirdNET-Pi/apprise.txt" "$HOME/BirdNET-Pi/exclude_species_list.txt" "$HOME/BirdNET-Pi/include_species_list.txt" "$HOME/BirdNET-Pi/IdentifiedSoFar.txt" "$HOME/BirdNET-Pi/confirmed_species_list.txt"; do + filename="${files##*/}" + if [ ! -f /config/"$filename" ]; then + if [ -f "$files" ]; then + echo "... copying $filename" && sudo -u pi mv "$files" /config/ + else + touch /config/"$filename" + fi + fi + if [ -e "$files" ]; then rm "$files"; fi + sudo -u pi ln -fs /config/"$filename" "$files" || bashio::log.fatal "Symlink creation failed for $filename" + sudo -u pi ln -fs /config/"$filename" /etc/birdnet/"$filename" || bashio::log.fatal "Symlink creation failed for $filename" +done + +# Symlink folders +for folders in By_Date Charts; do + echo "... creating symlink for $BIRDSONGS_FOLDER/$folders" + rm -r "$HOME/BirdSongs/Extracted/${folders:?}" + sudo -u pi ln -fs "$BIRDSONGS_FOLDER"/"$folders" "$HOME/BirdSongs/Extracted/$folders" +done + +# Permissions for created files and folders +echo "... check permissions" +chmod -R 755 /config/* +chmod 777 /config + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/02-restorestreamdata.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/02-restorestreamdata.sh new file mode 100755 index 000000000..61fca2bc6 --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/02-restorestreamdata.sh @@ -0,0 +1,24 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash + +# Check if there are files in "$HOME"/BirdSongs/StreamData and move them to /data/StreamData +if [ -d /data/StreamData ] && [ "$(ls -A /data/StreamData/)" ]; then + + bashio::log.warning "Container was stopped while files were still being analysed, restoring them" + + # Copy files + if [ "$(ls -A /data/StreamData)" ]; then + mv -v /data/StreamData/* "$HOME"/BirdSongs/StreamData/ + fi + echo "... 
done" + echo "" + + # Setting permissions + chown -R pi:pi "$HOME"/BirdSongs + chmod -R 755 "$HOME"/BirdSongs + + # Cleaning folder + rm -r /data/StreamData + +fi + diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/31-checks.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/31-checks.sh new file mode 100755 index 000000000..47ffa5b77 --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/31-checks.sh @@ -0,0 +1,54 @@ +#!/command/with-contenv bashio +# shellcheck shell=bash +set -e + +###################### +# CHECK BIRDNET.CONF # +###################### + +echo " " +bashio::log.info "Checking your birndet.conf file integrity" + +# Set variables +configcurrent="$HOME"/BirdNET-Pi/birdnet.conf +configtemplate="$HOME"/BirdNET-Pi/birdnet.bak + +# Extract variable names from config template and read each one +grep -o '^[^#=]*=' "$configtemplate" | sed 's/=//' | while read -r var; do + # Check if the variable is in configcurrent, if not, append it + if ! grep -q "^$var=" "$configcurrent"; then + # At which line was the variable in the initial file + bashio::log.yellow "...$var was missing from your birdnet.conf file, it was re-added" + grep "^$var=" "$configtemplate" >> "$configcurrent" + fi + # Check for duplicates + if [ "$(grep -c "^$var=" "$configcurrent")" -gt 1 ]; then + bashio::log.error "Duplicate variable $var found in $configcurrent, all were commented out expect for the first one" + awk -v var="$var" '{ if ($0 ~ "^[[:blank:]]*"var && c++ > 0) print "#" $0; else print $0; }' "$configcurrent" > temp && mv temp "$configcurrent" + fi +done + +################ +# CHECK AMIXER # +################ + +# If default capture is set at 0%, increase it to 50% +# current_volume="$(amixer sget Capture | grep -oP '\[\d+%]' | tr -d '[]%' | head -1)" 2>/dev/null || true +# current_volume="${current_volume:-100}" + +# Set the default microphone volume to 50% if it's currently at 0% +# if [[ "$current_volume" -eq 0 ]]; then +# amixer sset Capture 70% +# bashio::log.warning "Microphone was off, volume set to 70%." +# fi + +############## +# CHECK PORT # +############## + +if [[ "$(bashio::addon.port "80")" == 3000 ]]; then + bashio::log.fatal "This is crazy but your port is set to 3000 and streamlit doesn't accept this port! You need to change it from the addon options and restart. Thanks" + sleep infinity +fi + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh new file mode 100755 index 000000000..2d6516a73 --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh @@ -0,0 +1,47 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash +set -e + +if bashio::services.available 'mqtt' && ! bashio::config.true 'MQTT_DISABLED' ; then + bashio::log.green "---" + bashio::log.blue "MQTT addon is active on your system! 
battybirdnet-pi is now automatically configured to send its ouptut to MQTT" + bashio::log.blue "MQTT user : $(bashio::services "mqtt" "username")" + bashio::log.blue "MQTT password : $(bashio::services "mqtt" "password")" + bashio::log.blue "MQTT broker : tcp://$(bashio::services "mqtt" "host"):$(bashio::services "mqtt" "port")" + bashio::log.green "---" + bashio::log.blue "Data will be posted to the topic : 'birdnet'" + bashio::log.blue "Json data : {'Date', 'Time', 'ScientificName', 'CommonName', 'Confidence', 'SpeciesCode', 'ClipName', 'url'}" + bashio::log.blue "---" + + # Apply MQTT settings + sed -i "s|%%mqtt_server%%|$(bashio::services "mqtt" "host")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_port%%|$(bashio::services "mqtt" "port")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_user%%|$(bashio::services "mqtt" "username")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_pass%%|$(bashio::services "mqtt" "password")|g" /helpers/birdnet_to_mqtt.py + + # Copy script + cp /helpers/birdnet_to_mqtt.py /usr/bin/birdnet_to_mqtt.py + cp /helpers/birdnet_to_mqtt.sh /custom-services.d + chmod 777 /usr/bin/birdnet_to_mqtt.py + chmod 777 /custom-services.d/birdnet_to_mqtt.sh +elif bashio::config.has_value "MQTT_HOST_manual" && bashio::config.has_value "MQTT_PORT_manual"; then + bashio::log.green "---" + bashio::log.blue "MQTT is manually configured in the addon options" + bashio::log.blue "battybirdnet-pi is now automatically configured to send its ouptut to MQTT" + bashio::log.green "---" + bashio::log.blue "Data will be posted to the topic : 'birdnet'" + bashio::log.blue "Json data : {'Date', 'Time', 'ScientificName', 'CommonName', 'Confidence', 'SpeciesCode', 'ClipName', 'url'}" + bashio::log.blue "---" + + # Apply MQTT settings + sed -i "s|%%mqtt_server%%|$(bashio::config "MQTT_HOST_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_port%%|$(bashio::config "MQTT_PORT_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_user%%|$(bashio::config "MQTT_USER_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_pass%%|$(bashio::config "MQTT_PASSWORD_manual")|g" /helpers/birdnet_to_mqtt.py + + # Copy script + cp /helpers/birdnet_to_mqtt.py /usr/bin/birdnet_to_mqtt.py + cp /helpers/birdnet_to_mqtt.sh /custom-services.d + chmod +x /usr/bin/birdnet_to_mqtt.py + chmod +x /custom-services.d/birdnet_to_mqtt.sh +fi diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh new file mode 100755 index 000000000..b4fbae83a --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh @@ -0,0 +1,63 @@ +#!/command/with-contenv bashio +# shellcheck shell=bash +set -e + +################ +# ADD FEATURES # +################ + +echo " " +bashio::log.info "Adding optional features" + +# Denoiser +#if bashio::config.true "DENOISER_ANALYSIS_ENABLED"; then +# sed -i "s|ar 48000|ar 48000 -af \"arnndn=m=sample.rnnn\"|g" "$HOME"/BirdNET-Pi/scripts/birdnet_recording.sh +# sed -i "s|ar 48000|ar 48000 -af afftdn=nr=30:nt=w:om=o|g" "$HOME"/BirdNET-Pi/scripts/birdnet_recording.sh +#fi + +# Enable the Processed folder +############################# + +if bashio::config.true "PROCESSED_FOLDER_ENABLED" && ! grep -q "processed_size" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py; then + echo "... 
Enabling the Processed folder : the last 15 wav files will be stored there" + # Adapt config.php + sed -i "/GET\[\"info_site\"\]/a\ \$processed_size = \$_GET\[\"processed_size\"\];" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\$contents = file_get_contents/a\ \$contents = preg_replace\(\"/PROCESSED_SIZE=\.\*/\", \"PROCESSED_SIZE=\$processed_size\", \$contents\);" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i
" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i

Processed folder management

" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i " "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i \"/>" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i
" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i Processed is the directory where the formerly 'Analyzed' files are moved after extractions, mostly for troubleshooting purposes.
" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i This value defines the maximum amount of files that are kept before replacement with new files.
" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i
" "$HOME"/BirdNET-Pi/scripts/config.php + sed -i "/\"success\"/i\
" "$HOME"/BirdNET-Pi/scripts/config.php + # Adapt birdnet_analysis.py - move_to_processed + sed -i "/log.info('handle_reporting_queue done')/a\ os.remove(files.pop(0))" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ while len(files) > processed_size:" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ files.sort(key=os.path.getmtime)" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ files = glob.glob(os.path.join(processed_dir, '*'))" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ os.rename(file_name, os.path.join(processed_dir, os.path.basename(file_name)))" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ processed_dir = os.path.join(get_settings()['RECS_DIR'], 'Processed')" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\def move_to_processed(file_name, processed_size):" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ " "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + # Adapt birdnet_analysis.py - get_processed_size + sed -i "/log.info('handle_reporting_queue done')/a\ return 0" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ except (ValueError, TypeError):" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ return processed_size if isinstance(processed_size, int) else 0" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ processed_size = get_settings().getint('PROCESSED_SIZE')" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ try:" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\def get_processed_size():" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/log.info('handle_reporting_queue done')/a\ " "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + # Modify calls + sed -i "/from subprocess import CalledProcessError/a\import glob" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/from subprocess import CalledProcessError/a\import time" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + # Modify main code + sed -i "/os.remove(file.file_name)/i\ processed_size = get_processed_size()" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/os.remove(file.file_name)/i\ if processed_size > 0:" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/os.remove(file.file_name)/i\ move_to_processed(file.file_name, processed_size)" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/os.remove(file.file_name)/i\ else:" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py + sed -i "/os.remove(file.file_name)/c\ os.remove(file.file_name)" "$HOME"/BirdNET-Pi/scripts/birdnet_analysis.py +fi || true + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/81-modifications.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/81-modifications.sh new file mode 100755 index 000000000..0b8372cfc --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/81-modifications.sh @@ -0,0 +1,66 @@ +#!/command/with-contenv bashio +# shellcheck shell=bash +set -e + +################ +# MODIFY WEBUI # +################ + +echo " " +bashio::log.info "Adapting webui" + +# Remove services tab 
+echo "... removing System Controls from webui as should be used from HA" +sed -i '/>System Controls/d' "$HOME"/BirdNET-Pi/homepage/views.php + +# Remove services tab +echo "... removing Ram drive from webui as it is handled from HA" +sed -i '/Ram drive/{n;s/center"/center" style="display: none;"/;}' "$HOME"/BirdNET-Pi/scripts/service_controls.php +sed -i '/Ram drive/d' "$HOME"/BirdNET-Pi/scripts/service_controls.php + +# Correct services to start as user pi +echo "... correct services to start as pi" +for file in $(find "$HOME"/BirdNET-Pi/templates/birdnet*.service -print0 | xargs -0 basename -a) livestream.service chart_viewer.service chart_viewer.service spectrogram_viewer.service; do + if [[ "$file" != "birdnet_log.service" ]]; then + sed -i "s|ExecStart=|ExecStart=/usr/bin/sudo -u pi |g" "$HOME/BirdNET-Pi/templates/$file" + fi +done + +# Send services log to container logs +echo "... send services log to container logs" +for file in $(find "$HOME"/BirdNET-Pi/templates/birdnet*.service -print0 | xargs -0 basename -a) livestream.service chart_viewer.service chart_viewer.service spectrogram_viewer.service; do + sed -i "/Service/a StandardError=append:/proc/1/fd/1" "$HOME/BirdNET-Pi/templates/$file" + sed -i "/Service/a StandardOutput=append:/proc/1/fd/1" "$HOME/BirdNET-Pi/templates/$file" +done + +# Avoid preselection in include and exclude lists +echo "... avoid preselecting options in include and exclude lists" +sed -i "s|option selected|option disabled|g" "$HOME"/BirdNET-Pi/scripts/include_list.php +sed -i "s|option selected|option disabled|g" "$HOME"/BirdNET-Pi/scripts/exclude_list.php + +# Correct log services to show /proc/1/fd/1 +echo "... show container logs in /logs" +sed -i "/User=pi/d" "$HOME/BirdNET-Pi/templates/birdnet_log.service" +sed -i "s|birdnet_log.sh|cat /proc/1/fd/1|g" "$HOME/BirdNET-Pi/templates/birdnet_log.service" + +# Make sure config is correctly formatted. +echo "... caddyfile modifications" +#Correct instructions +caddy fmt --overwrite /etc/caddy/Caddyfile +#Change port to leave 80 free for certificate requests +sed -i "s|http://|http://:8081|g" /etc/caddy/Caddyfile +sed -i "s|http://|http://:8081|g" "$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh +#Remove default file that blocks 80 +if [ -f /etc/caddy/Caddyfile.original ]; then rm /etc/caddy/Caddyfile.original; fi + +# Improve webui paths to facilitate ingress +echo "... 
correcting webui paths" +sed -i "s|/stats|/stats/|g" "$HOME"/BirdNET-Pi/homepage/views.php +sed -i "s|/log|/log/|g" "$HOME"/BirdNET-Pi/homepage/views.php + +# If port 80 is enabled, make sure it is still 80 +if [ -n "$(bashio::addon.port 80)" ] && [ "$(bashio::addon.port 80)" != 80 ]; then + bashio::log.fatal "The port 80 is enabled, but should still be 80 if you want the automatic ssl certificates generation to work" +fi + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/91-nginx_ingress.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/91-nginx_ingress.sh new file mode 100755 index 000000000..d2749a17b --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/91-nginx_ingress.sh @@ -0,0 +1,37 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash +set -e + +################# +# NGINX SETTING # +################# + +declare ingress_interface +declare ingress_port +declare ingress_entry + +# Variables +ingress_port=$(bashio::addon.ingress_port) +ingress_interface=$(bashio::addon.ip_address) +ingress_entry=$(bashio::addon.ingress_entry) + +# Quits if ingress not active +if [ -z "$ingress_entry" ]; then exit 0; fi + +echo " " +bashio::log.info "Adapting for ingress" +echo "... setting up nginx" +sed -i "s/%%port%%/${ingress_port}/g" /etc/nginx/servers/ingress.conf +sed -i "s/%%interface%%/${ingress_interface}/g" /etc/nginx/servers/ingress.conf +sed -i "s|%%ingress_entry%%|${ingress_entry}|g" /etc/nginx/servers/ingress.conf + +echo "... ensuring restricted area access" +echo "${ingress_entry}" > /ingress_url +sed -i "/function is_authenticated/a if (strpos(\$_SERVER['HTTP_REFERER'], '/api/hassio_ingress') !== false && strpos(\$_SERVER['HTTP_REFERER'], trim(file_get_contents('/ingress_url'))) !== false) { \$ret = true; return \$ret; }" "$HOME"/BirdNET-Pi/scripts/common.php + +echo "... adapt Caddyfile for ingress" +chmod +x /helpers/caddy_ingress.sh +/./helpers/caddy_ingress.sh +sed -i "/sudo caddy fmt --overwrite/i /./helpers/caddy_ingress.sh" "$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/92-ssl.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/92-ssl.sh new file mode 100755 index 000000000..8b6b05c24 --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/92-ssl.sh @@ -0,0 +1,20 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash +set -e + +############### +# SSL SETTING # +############### + +if bashio::config.true 'ssl'; then + bashio::log.info "Ssl is enabled using addon options, setting up nginx" + bashio::config.require.ssl + certfile=$(bashio::config 'certfile') + keyfile=$(bashio::config 'keyfile') + sed -i "2a\ tls /ssl/${certfile} /ssl/${keyfile}" /etc/caddy/Caddyfile + sed -i "s|http://:8081|https://:8081|g" /etc/caddy/Caddyfile + sed -i "s|http://:8081|https://:8081|g" "$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh + sed -i "/https:/a tls /ssl/${certfile} /ssl/${keyfile}" "$HOME"/BirdNET-Pi/scripts/update_caddyfile.sh +fi + +echo " " diff --git a/battybirdnet-pi/rootfs/etc/cont-init.d/99-run.sh b/battybirdnet-pi/rootfs/etc/cont-init.d/99-run.sh new file mode 100755 index 000000000..8ac46778e --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/cont-init.d/99-run.sh @@ -0,0 +1,72 @@ +#!/command/with-contenv bashio +# shellcheck shell=bash +set -e + +############## +# SET SYSTEM # +############## + +echo " " +bashio::log.info "Setting password for the user pi" +echo "pi:$(bashio::config "pi_password")" | sudo chpasswd +echo "... 
done" + +echo " " +bashio::log.info "Starting system services" + +# Set TZ +if bashio::config.has_value 'TZ'; then + TIMEZONE=$(bashio::config 'TZ') + echo "... setting timezone to $TIMEZONE" + ln -snf /usr/share/zoneinfo/"$TIMEZONE" /etc/localtime + echo "$TIMEZONE" >/etc/timezone +fi || (bashio::log.fatal "Error : $TIMEZONE not found. Here is a list of valid timezones : https://manpages.ubuntu.com/manpages/focal/man3/DateTime::TimeZone::Catalog.3pm.html") + +# Correcting systemctl +echo "... correcting systemctl" +mv /helpers/systemctl3.py /bin/systemctl +chmod a+x /bin/systemctl + +# Correcting systemctl +echo "... correcting datetimectl" +mv /helpers/timedatectl /usr/bin/timedatectl +chmod a+x /usr/bin/timedatectl + +# Correct language labels +export "$(grep "^DATABASE_LANG" /config/birdnet.conf)" +# Saving default of en +cp "$HOME"/BirdNET-Pi/model/labels.txt "$HOME"/BirdNET-Pi/model/labels.bak +# Adapt to new language +echo "... adapting labels according to birdnet.conf file to $DATABASE_LANG" +/."$HOME"/BirdNET-Pi/scripts/install_language_label_nm.sh -l "$DATABASE_LANG" + +echo "... starting cron" +systemctl start cron + +# Starting dbus +echo "... starting dbus" +service dbus start + +# Starting journald +# echo "... starting journald" +# systemctl start systemd-journald + +# Starting services +echo "" +bashio::log.info "Starting battybirdnet-pi services" +chmod +x "$HOME"/BirdNET-Pi/scripts/restart_services.sh +"$HOME"/BirdNET-Pi/scripts/restart_services.sh + +if bashio::config.true LIVESTREAM_BOOT_ENABLED; then + echo "... starting livestream" + sudo systemctl enable icecast2 + sudo systemctl start icecast2.service + sudo systemctl enable --now livestream.service +fi + +# Correct the phpsysinfo for the correct gotty service +gottyservice="$(pgrep -l "gotty" | awk '{print $NF}' | head -n 1)" +echo "... 
using $gottyservice in phpsysinfo" +sed -i "s/,gotty,/,${gottyservice:-gotty},/g" "$HOME"/BirdNET-Pi/templates/phpsysinfo.ini + +echo " " diff --git a/fireflyiii/rootfs/etc/nginx/includes/mime.types b/battybirdnet-pi/rootfs/etc/nginx/includes/mime.types similarity index 100% rename from fireflyiii/rootfs/etc/nginx/includes/mime.types rename to battybirdnet-pi/rootfs/etc/nginx/includes/mime.types diff --git a/fireflyiii/rootfs/etc/nginx/includes/proxy_params.conf b/battybirdnet-pi/rootfs/etc/nginx/includes/proxy_params.conf similarity index 86% rename from fireflyiii/rootfs/etc/nginx/includes/proxy_params.conf rename to battybirdnet-pi/rootfs/etc/nginx/includes/proxy_params.conf index 1990d4959..924ba949e 100644 --- a/fireflyiii/rootfs/etc/nginx/includes/proxy_params.conf +++ b/battybirdnet-pi/rootfs/etc/nginx/includes/proxy_params.conf @@ -5,11 +5,12 @@ proxy_redirect off; proxy_send_timeout 86400s; proxy_max_temp_file_size 0; +proxy_hide_header X-Frame-Options; proxy_set_header Accept-Encoding ""; proxy_set_header Connection $connection_upgrade; -proxy_set_header Host $http_host; proxy_set_header Upgrade $http_upgrade; +proxy_set_header Host $http_host; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-NginX-Proxy true; -proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Real-IP $remote_addr; diff --git a/fireflyiii/rootfs/etc/nginx/includes/resolver.conf b/battybirdnet-pi/rootfs/etc/nginx/includes/resolver.conf similarity index 100% rename from fireflyiii/rootfs/etc/nginx/includes/resolver.conf rename to battybirdnet-pi/rootfs/etc/nginx/includes/resolver.conf diff --git a/fireflyiii/rootfs/etc/nginx/includes/server_params.conf b/battybirdnet-pi/rootfs/etc/nginx/includes/server_params.conf similarity index 100% rename from fireflyiii/rootfs/etc/nginx/includes/server_params.conf rename to battybirdnet-pi/rootfs/etc/nginx/includes/server_params.conf diff --git a/fireflyiii/rootfs/etc/nginx/includes/ssl_params.conf b/battybirdnet-pi/rootfs/etc/nginx/includes/ssl_params.conf similarity index 100% rename from fireflyiii/rootfs/etc/nginx/includes/ssl_params.conf rename to battybirdnet-pi/rootfs/etc/nginx/includes/ssl_params.conf diff --git a/battybirdnet-pi/rootfs/etc/nginx/includes/upstream.conf b/battybirdnet-pi/rootfs/etc/nginx/includes/upstream.conf new file mode 100644 index 000000000..6dc04d8b4 --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/nginx/includes/upstream.conf @@ -0,0 +1,3 @@ +upstream backend { + server 127.0.0.1:80; +} diff --git a/fireflyiii/rootfs/etc/nginx/nginx.conf b/battybirdnet-pi/rootfs/etc/nginx/nginx.conf similarity index 65% rename from fireflyiii/rootfs/etc/nginx/nginx.conf rename to battybirdnet-pi/rootfs/etc/nginx/nginx.conf index 7e5bc6f7c..fb597811c 100644 --- a/fireflyiii/rootfs/etc/nginx/nginx.conf +++ b/battybirdnet-pi/rootfs/etc/nginx/nginx.conf @@ -1,3 +1,4 @@ + # Run nginx in foreground. daemon off; @@ -8,7 +9,7 @@ user root; pid /var/run/nginx.pid; # Set number of worker processes. -worker_processes 1; +worker_processes auto; # Enables the use of JIT for regular expressions to speed-up their processing. pcre_jit on; @@ -24,12 +25,33 @@ include /etc/nginx/modules/*.conf; # Max num of simultaneous connections by a worker process. 
events { - worker_connections 512; + worker_connections 8192; } http { include /etc/nginx/includes/mime.types; + # https://emby.media/community/index.php?/topic/93074-how-to-emby-with-nginx-with-windows-specific-tips-and-csp-options/ + server_names_hash_bucket_size 64; + gzip_disable "msie6"; + gzip_comp_level 6; + gzip_min_length 1100; + gzip_buffers 16 8k; + gzip_proxied any; + gzip_types + text/plain + text/css + text/js + text/xml + text/javascript + application/javascript + application/x-javascript + application/json + application/xml + application/rss+xml + image/svg+xml; + proxy_connect_timeout 1h; + log_format hassio '[$time_local] $status ' '$http_x_forwarded_for($remote_addr) ' '$request ($http_user_agent)'; diff --git a/battybirdnet-pi/rootfs/etc/nginx/servers/ingress.conf b/battybirdnet-pi/rootfs/etc/nginx/servers/ingress.conf new file mode 100644 index 000000000..3fd68901e --- /dev/null +++ b/battybirdnet-pi/rootfs/etc/nginx/servers/ingress.conf @@ -0,0 +1,47 @@ + server { + listen %%interface%%:%%port%% default_server; + include /etc/nginx/includes/server_params.conf; + include /etc/nginx/includes/proxy_params.conf; + + proxy_buffering off; + auth_basic_user_file /home/pi/.htpasswd; + + location /log { + # Proxy pass + proxy_pass http://localhost:8082; + } + + location /stats { + # Proxy pass + proxy_pass http://localhost:8082; + } + + location /terminal { + # Proxy pass + proxy_pass http://localhost:8082; + } + + location / { + # Proxy pass + proxy_pass http://localhost:8082; + + # Next three lines allow websockets + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + # Correct base_url + proxy_set_header Accept-Encoding ""; + sub_filter_once off; + sub_filter_types *; + sub_filter /spectrogram %%ingress_entry%%/spectrogram; + sub_filter /By_Date/ %%ingress_entry%%/By_Date/; + sub_filter /Charts/ %%ingress_entry%%/Charts/; + sub_filter /todays %%ingress_entry%%/todays; + sub_filter href=\"/ href=\"%%ingress_entry%%/; + sub_filter src=\"/ src=\"%%ingress_entry%%/; + sub_filter hx-get=\"/ hx-get=\"%%ingress_entry%%/; + sub_filter action=\"/ action=\"%%ingress_entry%%/; + } + +} diff --git a/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.py b/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.py new file mode 100644 index 000000000..63c679e55 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.py @@ -0,0 +1,124 @@ +#! 
/usr/bin/env python3 +# birdnet_to_mqtt.py +# +# Adapted from : https://gist.github.com/deepcoder/c309087c456fc733435b47d83f4113ff +# Adapted from : https://gist.github.com/JuanMeeske/08b839246a62ff38778f701fc1da5554 +# +# monitor the records in the syslog file for info from the birdnet system on birds that it detects +# publish this data to mqtt +# + +import time +import re +import dateparser +import datetime +import json +import logging +import paho.mqtt.client as mqtt +import subprocess + +# Setup basic configuration for logging +logging.basicConfig(level=logging.INFO) + +# this generator function monitors the requested file handle for new lines added at its end +# the newly added line is returned by the function +def file_row_generator(s): + while True : + line = s.readline() + if not line: + time.sleep(0.1) + continue + yield line + +# mqtt server +mqtt_server = "%%mqtt_server%%" # server for mqtt +mqtt_user = "%%mqtt_user%%" # Replace with your MQTT username +mqtt_pass = "%%mqtt_pass%%" # Replace with your MQTT password +mqtt_port = %%mqtt_port%% # port for mqtt + +# mqtt topic for bird heard above threshold will be published +mqtt_topic_confident_birds = 'birdnet' + +# url base for website that will be used to look up info about bird +bird_lookup_url_base = 'http://en.wikipedia.org/wiki/' + +# regular expression patters used to decode the records from birdnet +re_high_clean = re.compile(r'(?<=^\[birdnet_analysis\]\[INFO\] ).*?(?=\.mp3$)') + +syslog = open('/proc/1/fd/1', 'r') + +def on_connect(client, userdata, flags, rc, properties=None): + """ Callback for when the client receives a CONNACK response from the server. """ + if rc == 0: + logging.info("Connected to MQTT Broker!") + else: + logging.error(f"Failed to connect, return code {rc}\n") + +def get_bird_code(scientific_name): + with open('/home/pi/BirdNET-Pi/scripts/ebird.php', 'r') as file: + data = file.read() + + # Extract the array from the PHP file + array_str = re.search(r'\$ebirds = \[(.*?)\];', data, re.DOTALL).group(1) + + # Convert the PHP array to a Python dictionary + bird_dict = {re.search(r'"(.*?)"', line).group(1): re.search(r'=> "(.*?)"', line).group(1) + for line in array_str.split('\n') if '=>' in line} + + # Return the corresponding value for the given bird's scientific name + return bird_dict.get(scientific_name) + +# this little hack is to make each received record for the all birds section unique +# the date and time that the log returns is only down to the 1 second accuracy, do +# you can get multiple records with same date and time, this will make Home Assistant not +# think there is a new reading so we add a incrementing tenth of second to each record received +ts_noise = 0.0 + +#try : +# connect to MQTT server +mqttc = mqtt.Client('birdnet_mqtt') # Create instance of client with client ID +mqttc.username_pw_set(mqtt_user, mqtt_pass) # Use credentials +mqttc.connect(mqtt_server, mqtt_port) # Connect to (broker, port, keepalive-time) +mqttc.on_connect = on_connect +mqttc.loop_start() + +# call the generator function and process each line that is returned +for row in file_row_generator(syslog): + # bird found above confidence level found, process it + if re_high_clean.search(row) : + + # this slacker regular expression work, extracts the data about the bird found from the log line + # I do the parse in two passes, because I did not know the re to do it in one! 
+ + raw_high_bird = re.search(re_high_clean, row) + raw_high_bird = raw_high_bird.group(0) + + # the fields we want are separated by semicolons, so split + high_bird_fields = raw_high_bird.split(';') + + # build a structure in python that will be converted to json + bird = {} + + # human time in this record is in two fields, date and time. They are human format + # combine them together separated by a space and they turn the human data into a python + # timestamp + raw_ts = high_bird_fields[0] + ' ' + high_bird_fields[1] + + #bird['ts'] = str(datetime.datetime.timestamp(dateparser.parse(raw_ts))) + bird['Date'] = high_bird_fields[0] + bird['Time'] = high_bird_fields[1] + bird['ScientificName'] = high_bird_fields[2] + bird['CommonName'] = high_bird_fields[3] + bird['Confidence'] = high_bird_fields[4] + bird['SpeciesCode'] = get_bird_code(high_bird_fields[2]) + bird['ClipName'] = high_bird_fields[11] + + # build a url from scientific name of bird that can be used to lookup info about bird + bird['url'] = bird_lookup_url_base + high_bird_fields[2].replace(' ', '_') + + # convert to json string we can sent to mqtt + json_bird = json.dumps(bird) + + print('Posted to MQTT : ok') + + mqttc.publish(mqtt_topic_confident_birds, json_bird, 1) diff --git a/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.sh b/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.sh new file mode 100755 index 000000000..54045d508 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/birdnet_to_mqtt.sh @@ -0,0 +1,5 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash + +echo "Starting service: mqtt automated publish" +"$PYTHON_VIRTUAL_ENV" /usr/bin/birdnet_to_mqtt.py &>/proc/1/fd/1 diff --git a/battybirdnet-pi/rootfs/helpers/caddy_ingress.sh b/battybirdnet-pi/rootfs/helpers/caddy_ingress.sh new file mode 100755 index 000000000..76fda30e1 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/caddy_ingress.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# shellcheck shell=bash + +# Get values +source /etc/birdnet/birdnet.conf + +# Create ingress configuration for Caddyfile + cat << EOF >> /etc/caddy/Caddyfile +:8082 { + root * ${EXTRACTED} + file_server browse + handle /By_Date/* { + file_server browse + } + handle /Charts/* { + file_server browse + } + reverse_proxy /stream localhost:8000 + php_fastcgi unix//run/php/php-fpm.sock + reverse_proxy /log* localhost:8080 + reverse_proxy /stats* localhost:8501 + reverse_proxy /terminal* localhost:8888 +} +EOF diff --git a/battybirdnet-pi/rootfs/helpers/convert_list.php b/battybirdnet-pi/rootfs/helpers/convert_list.php new file mode 100644 index 000000000..f4259d341 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/convert_list.php @@ -0,0 +1,116 @@ + + + +

+ [convert_list.php: PHP/HTML form — the markup was lost in extraction; only the visible text below survived]
+ This tool will allow to convert on-the-fly species to compensate for model errors. It SHOULD NOT BE USED except if you know what you are doing, instead the model errors should be reported to the owner. However, it is still convenient for systematic biases that are confirmed through careful listening of samples, while waiting for the models to be updated.
+ Specie to convert from :
+ Specie to convert to :
+ Converted Species List
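As a hedged illustration of what the convert_list.php form describes (its markup did not survive above), the sketch below shows how an on-the-fly species conversion could be applied to a detection before it is stored or published. The list path and the "from;to" line format are assumptions for illustration only, not taken from the add-on.

# Minimal sketch, assuming a plain-text list of "ScientificFrom;ScientificTo" lines.
# Both the path and the file format are hypothetical.
import os

CONVERT_LIST = "/home/pi/BirdNET-Pi/converted_species_list.txt"  # hypothetical path

def load_conversions(path=CONVERT_LIST):
    """Return a dict mapping 'convert from' scientific names to 'convert to' names."""
    conversions = {}
    if not os.path.isfile(path):
        return conversions
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line or ";" not in line:
                continue
            src, dst = line.split(";", 1)
            conversions[src.strip()] = dst.strip()
    return conversions

def convert_species(scientific_name, conversions):
    """Swap a detected species on the fly when a conversion is configured for it."""
    return conversions.get(scientific_name, scientific_name)

# Usage: convert_species("Pipistrellus pipistrellus", load_conversions())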
+ + + + diff --git a/battybirdnet-pi/rootfs/helpers/spectral_analysis.py b/battybirdnet-pi/rootfs/helpers/spectral_analysis.py new file mode 100644 index 000000000..5a8351a44 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/spectral_analysis.py @@ -0,0 +1,70 @@ +import numpy as np +import scipy.io.wavfile as wavfile +import matplotlib.pyplot as plt +import os +import glob +import sys # Import the sys module + +from utils.helpers import get_settings + +# Dependencies /usr/bin/pip install numpy scipy matplotlib + +# Define the directory containing the WAV files +conf = get_settings() +input_directory = os.path.join(conf['RECS_DIR'], 'StreamData') +output_directory = os.path.join(conf['RECS_DIR'], 'Extracted/Charts') + +# Ensure the output directory exists +if not os.path.exists(output_directory): + os.makedirs(output_directory) + +# Check if a command-line argument is provided +if len(sys.argv) > 1: + # If an argument is provided, use it as the file to analyze + wav_files = [sys.argv[1]] +else: + # If no argument is provided, analyze all WAV files in the directory + wav_files = glob.glob(os.path.join(input_directory, '*.wav')) + +# Process each file +for file_path in wav_files: + # Load the WAV file + sample_rate, audio_data = wavfile.read(file_path) + + # If stereo, select only one channel + if len(audio_data.shape) > 1: + audio_data = audio_data[:, 0] + + # Apply the Hamming window to the audio data + hamming_window = np.hamming(len(audio_data)) + windowed_data = audio_data * hamming_window + + # Compute the FFT of the windowed audio data + audio_fft = np.fft.fft(windowed_data) + audio_fft = np.abs(audio_fft) + + # Compute the frequencies associated with the FFT values + frequencies = np.fft.fftfreq(len(windowed_data), d=1/sample_rate) + + # Select the range of interest + idx = np.where((frequencies >= 150) & (frequencies <= 15000)) + + # Calculate the saturation threshold based on the bit depth + bit_depth = audio_data.dtype.itemsize * 8 + max_amplitude = 2**(bit_depth - 1) - 1 + saturation_threshold = 0.8 * max_amplitude + + # Plot the spectrum with a logarithmic Y-axis + plt.figure(figsize=(10, 6)) + plt.semilogy(frequencies[idx], audio_fft[idx], label='Spectrum') + plt.axhline(y=saturation_threshold, color='r', linestyle='--', label='Saturation Threshold') + plt.xlabel("Frequency (Hz)") + plt.ylabel("Amplitude (Logarithmic)") + plt.title(f"Frequency Spectrum (150 - 15000 Hz) - {os.path.basename(file_path)}") + plt.legend() + plt.grid(True) + + # Save the plot as a PNG file + output_filename = os.path.basename(file_path).replace('.wav', '_spectrum.png') + plt.savefig(os.path.join(output_directory, output_filename)) + plt.close() # Close the figure to free memory diff --git a/battybirdnet-pi/rootfs/helpers/spectral_analysis.sh b/battybirdnet-pi/rootfs/helpers/spectral_analysis.sh new file mode 100755 index 000000000..fb6378af1 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/spectral_analysis.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# Performs the recording from the specified RTSP stream or soundcard +source /etc/birdnet/birdnet.conf + +# Read the logging level from the configuration option +LOGGING_LEVEL="${LogLevel_BirdnetRecordingService}" +# If empty for some reason default to log level of error +[ -z "$LOGGING_LEVEL" ] && LOGGING_LEVEL='error' +# Additionally if we're at debug or info level then allow printing of script commands and variables +if [ "$LOGGING_LEVEL" == "info" ] || [ "$LOGGING_LEVEL" == "debug" ];then + # Enable printing of commands/variables etc to terminal for 
debugging + set -x +fi + +[ -z "$RECORDING_LENGTH" ] && RECORDING_LENGTH=15 +[ -d "$RECS_DIR"/StreamData ] || mkdir -p "$RECS_DIR"/StreamData + +filename="Spectrum_$(date "+%Y-%m-%d_%H:%M").wav" + +if [ ! -z "$RTSP_STREAM" ];then + # Explode the RSPT steam setting into an array so we can count the number we have + RTSP_STREAMS_EXPLODED_ARRAY=("${RTSP_STREAM//,/ }") + + while true;do + + # Initially start the count off at 1 - our very first stream + RTSP_STREAMS_STARTED_COUNT=1 + FFMPEG_PARAMS="" + + # Loop over the streams + for i in "${RTSP_STREAMS_EXPLODED_ARRAY[@]}" + do + # Map id used to map input to output (first stream being 0), this is 0 based in ffmpeg so decrement our counter (which is more human readable) by 1 + MAP_ID="$((RTSP_STREAMS_STARTED_COUNT-1))" + # Build up the parameters to process the RSTP stream, including mapping for the output + FFMPEG_PARAMS+="-vn -thread_queue_size 512 -i ${i} -map ${MAP_ID}:a:0 -t ${RECORDING_LENGTH} -acodec pcm_s16le -ac 2 -ar 48000 file:${RECS_DIR}/StreamData/$filename " + # Increment counter + ((RTSP_STREAMS_STARTED_COUNT += 1)) + done + + # Make sure were passing something valid to ffmpeg, ffmpeg will run interactive and control our loop by waiting ${RECORDING_LENGTH} between loops because it will stop once that much has been recorded + if [ -n "$FFMPEG_PARAMS" ];then + ffmpeg -hide_banner -loglevel "$LOGGING_LEVEL" -nostdin "$FFMPEG_PARAMS" + fi + + done +else + if pgrep arecord &> /dev/null ;then + echo "Recording" + else + if [ -z "${REC_CARD}" ];then + arecord -f S16_LE -c"${CHANNELS}" -r48000 -t wav --max-file-time "${RECORDING_LENGTH}"\ + --use-strftime "${RECS_DIR}"/StreamData/"$filename" + else + arecord -f S16_LE -c"${CHANNELS}" -r48000 -t wav --max-file-time "${RECORDING_LENGTH}"\ + -D "${REC_CARD}" --use-strftime "${RECS_DIR}"/StreamData/"$filename" + fi + fi +fi + +# Create the spectral analysis +"$PYTHON_VIRTUAL_ENV" "$HOME"/BirdNET-Pi/scripts/spectral_analysis.py diff --git a/battybirdnet-pi/rootfs/helpers/systemctl3.py b/battybirdnet-pi/rootfs/helpers/systemctl3.py new file mode 100644 index 000000000..c29ba2d37 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/systemctl3.py @@ -0,0 +1,6851 @@ +#! /usr/bin/python3 +# type hints are provided in 'types/systemctl3.pyi' +from __future__ import print_function +import threading +import grp +import pwd +import hashlib +import select +import fcntl +import string +import datetime +import socket +import time +import signal +import sys +import os +import errno +import collections +import shlex +import fnmatch +import re +from types import GeneratorType + +__copyright__ = "(C) 2016-2024 Guido U. 
Draheim, licensed under the EUPL" +__version__ = "1.5.8066" + +# | +# | +# | +# | +# | +# | +# | +# | +# | +# | +# | +# | +# | + +import logging +logg = logging.getLogger("systemctl") + + +if sys.version[0] == '3': + basestring = str + xrange = range + +DEBUG_AFTER = False +DEBUG_STATUS = False +DEBUG_BOOTTIME = False +DEBUG_INITLOOP = False +DEBUG_KILLALL = False +DEBUG_FLOCK = False +DebugPrintResult = False +TestListen = False +TestAccept = False + +HINT = (logging.DEBUG + logging.INFO) // 2 +NOTE = (logging.WARNING + logging.INFO) // 2 +DONE = (logging.WARNING + logging.ERROR) // 2 +logging.addLevelName(HINT, "HINT") +logging.addLevelName(NOTE, "NOTE") +logging.addLevelName(DONE, "DONE") + +def logg_debug_flock(format, *args): + if DEBUG_FLOCK: + logg.debug(format, *args) # pragma: no cover +def logg_debug_after(format, *args): + if DEBUG_AFTER: + logg.debug(format, *args) # pragma: no cover + +NOT_A_PROBLEM = 0 # FOUND_OK +NOT_OK = 1 # FOUND_ERROR +NOT_ACTIVE = 2 # FOUND_INACTIVE +NOT_FOUND = 4 # FOUND_UNKNOWN + +# defaults for options +_extra_vars = [] +_force = False +_full = False +_log_lines = 0 +_no_pager = False +_now = False +_no_reload = False +_no_legend = False +_no_ask_password = False +_preset_mode = "all" +_quiet = False +_root = "" +_show_all = False +_user_mode = False +_only_what = [] +_only_type = [] +_only_state = [] +_only_property = [] + +# common default paths +_system_folders = [ + "/etc/systemd/system", + "/run/systemd/system", + "/var/run/systemd/system", + "/usr/local/lib/systemd/system", + "/usr/lib/systemd/system", + "/lib/systemd/system", +] +_user_folders = [ + "{XDG_CONFIG_HOME}/systemd/user", + "/etc/systemd/user", + "{XDG_RUNTIME_DIR}/systemd/user", + "/run/systemd/user", + "/var/run/systemd/user", + "{XDG_DATA_HOME}/systemd/user", + "/usr/local/lib/systemd/user", + "/usr/lib/systemd/user", + "/lib/systemd/user", +] +_init_folders = [ + "/etc/init.d", + "/run/init.d", + "/var/run/init.d", +] +_preset_folders = [ + "/etc/systemd/system-preset", + "/run/systemd/system-preset", + "/var/run/systemd/system-preset", + "/usr/local/lib/systemd/system-preset", + "/usr/lib/systemd/system-preset", + "/lib/systemd/system-preset", +] + +# standard paths +_dev_null = "/dev/null" +_dev_zero = "/dev/zero" +_etc_hosts = "/etc/hosts" +_rc3_boot_folder = "/etc/rc3.d" +_rc3_init_folder = "/etc/init.d/rc3.d" +_rc5_boot_folder = "/etc/rc5.d" +_rc5_init_folder = "/etc/init.d/rc5.d" +_proc_pid_stat = "/proc/{pid}/stat" +_proc_pid_status = "/proc/{pid}/status" +_proc_pid_cmdline= "/proc/{pid}/cmdline" +_proc_pid_dir = "/proc" +_proc_sys_uptime = "/proc/uptime" +_proc_sys_stat = "/proc/stat" + +# default values +SystemCompatibilityVersion = 219 +SysInitTarget = "sysinit.target" +SysInitWait = 5 # max for target +MinimumYield = 0.5 +MinimumTimeoutStartSec = 4 +MinimumTimeoutStopSec = 4 +DefaultTimeoutStartSec = 90 # official value +DefaultTimeoutStopSec = 90 # official value +DefaultTimeoutAbortSec = 3600 # officially it none (usually larget than StopSec) +DefaultMaximumTimeout = 200 # overrides all other +DefaultRestartSec = 0.1 # official value of 100ms +DefaultStartLimitIntervalSec = 10 # official value +DefaultStartLimitBurst = 5 # official value +InitLoopSleep = 5 +MaxLockWait = 0 # equals DefaultMaximumTimeout +DefaultPath = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +ResetLocale = ["LANG", "LANGUAGE", "LC_CTYPE", "LC_NUMERIC", "LC_TIME", "LC_COLLATE", "LC_MONETARY", + "LC_MESSAGES", "LC_PAPER", "LC_NAME", "LC_ADDRESS", "LC_TELEPHONE", "LC_MEASUREMENT", 
+ "LC_IDENTIFICATION", "LC_ALL"] +LocaleConf="/etc/locale.conf" +DefaultListenBacklog=2 + +ExitWhenNoMoreServices = False +ExitWhenNoMoreProcs = False +DefaultUnit = os.environ.get("SYSTEMD_DEFAULT_UNIT", "default.target") # systemd.exe --unit=default.target +DefaultTarget = os.environ.get("SYSTEMD_DEFAULT_TARGET", "multi-user.target") # DefaultUnit fallback +# LogLevel = os.environ.get("SYSTEMD_LOG_LEVEL", "info") # systemd.exe --log-level +# LogTarget = os.environ.get("SYSTEMD_LOG_TARGET", "journal-or-kmsg") # systemd.exe --log-target +# LogLocation = os.environ.get("SYSTEMD_LOG_LOCATION", "no") # systemd.exe --log-location +# ShowStatus = os.environ.get("SYSTEMD_SHOW_STATUS", "auto") # systemd.exe --show-status +DefaultStandardInput=os.environ.get("SYSTEMD_STANDARD_INPUT", "null") +DefaultStandardOutput=os.environ.get("SYSTEMD_STANDARD_OUTPUT", "journal") # systemd.exe --default-standard-output +DefaultStandardError=os.environ.get("SYSTEMD_STANDARD_ERROR", "inherit") # systemd.exe --default-standard-error + +EXEC_SPAWN = False +EXEC_DUP2 = True +REMOVE_LOCK_FILE = False +BOOT_PID_MIN = 0 +BOOT_PID_MAX = -9 +PROC_MAX_DEPTH = 100 +EXPAND_VARS_MAXDEPTH = 20 +EXPAND_KEEP_VARS = True +RESTART_FAILED_UNITS = True +ACTIVE_IF_ENABLED=False + +TAIL_CMDS = ["/bin/tail", "/usr/bin/tail", "/usr/local/bin/tail"] +LESS_CMDS = ["/bin/less", "/usr/bin/less", "/usr/local/bin/less"] +CAT_CMDS = ["/bin/cat", "/usr/bin/cat", "/usr/local/bin/cat"] + +# The systemd default was NOTIFY_SOCKET="/var/run/systemd/notify" +_notify_socket_folder = "{RUN}/systemd" # alias /run/systemd +_journal_log_folder = "{LOG}/journal" + +SYSTEMCTL_DEBUG_LOG = "{LOG}/systemctl.debug.log" +SYSTEMCTL_EXTRA_LOG = "{LOG}/systemctl.log" + +_default_targets = ["poweroff.target", "rescue.target", "sysinit.target", "basic.target", "multi-user.target", "graphical.target", "reboot.target"] +_feature_targets = ["network.target", "remote-fs.target", "local-fs.target", "timers.target", "nfs-client.target"] +_all_common_targets = ["default.target"] + _default_targets + _feature_targets + +# inside a docker we pretend the following +_all_common_enabled = ["default.target", "multi-user.target", "remote-fs.target"] +_all_common_disabled = ["graphical.target", "resue.target", "nfs-client.target"] + +target_requires = {"graphical.target": "multi-user.target", "multi-user.target": "basic.target", "basic.target": "sockets.target"} + +_runlevel_mappings = {} # the official list +_runlevel_mappings["0"] = "poweroff.target" +_runlevel_mappings["1"] = "rescue.target" +_runlevel_mappings["2"] = "multi-user.target" +_runlevel_mappings["3"] = "multi-user.target" +_runlevel_mappings["4"] = "multi-user.target" +_runlevel_mappings["5"] = "graphical.target" +_runlevel_mappings["6"] = "reboot.target" + +_sysv_mappings = {} # by rule of thumb +_sysv_mappings["$local_fs"] = "local-fs.target" +_sysv_mappings["$network"] = "network.target" +_sysv_mappings["$remote_fs"] = "remote-fs.target" +_sysv_mappings["$timer"] = "timers.target" + + +# sections from conf +Unit = "Unit" +Service = "Service" +Socket = "Socket" +Install = "Install" + +# https://tldp.org/LDP/abs/html/exitcodes.html +# https://freedesktop.org/software/systemd/man/systemd.exec.html#id-1.20.8 +EXIT_SUCCESS = 0 +EXIT_FAILURE = 1 + +def strINET(value): + if value == socket.SOCK_DGRAM: + return "UDP" + if value == socket.SOCK_STREAM: + return "TCP" + if value == socket.SOCK_RAW: # pragma: no cover + return "RAW" + if value == socket.SOCK_RDM: # pragma: no cover + return "RDM" + if value == 
socket.SOCK_SEQPACKET: # pragma: no cover + return "SEQ" + return "" # pragma: no cover + +def strYes(value): + if value is True: + return "yes" + if not value: + return "no" + return str(value) +def strE(part): + if not part: + return "" + return str(part) +def strQ(part): + if part is None: + return "" + if isinstance(part, int): + return str(part) + return "'%s'" % part +def shell_cmd(cmd): + return " ".join([strQ(part) for part in cmd]) +def to_intN(value, default = None): + if not value: + return default + try: + return int(value) + except: + return default +def to_int(value, default = 0): + try: + return int(value) + except: + return default +def to_list(value): + if not value: + return [] + if isinstance(value, list): + return value + if isinstance(value, tuple): + return list(value) + return str(value or "").split(",") +def commalist(value): + return list(_commalist(value)) +def _commalist(value): + for val in value: + if not val: + continue + for elem in val.strip().split(","): + yield elem +def int_mode(value): + try: return int(value, 8) + except: return None # pragma: no cover +def unit_of(module): + if "." not in module: + return module + ".service" + return module +def o22(part): + if isinstance(part, basestring): + if len(part) <= 22: + return part + return part[:5] + "..." + part[-14:] + return part # pragma: no cover (is always str) +def o44(part): + if isinstance(part, basestring): + if len(part) <= 44: + return part + return part[:10] + "..." + part[-31:] + return part # pragma: no cover (is always str) +def o77(part): + if isinstance(part, basestring): + if len(part) <= 77: + return part + return part[:20] + "..." + part[-54:] + return part # pragma: no cover (is always str) +def path44(filename): + if not filename: + return "" + x = filename.find("/", 8) + if len(filename) <= 40: + if "/" not in filename: + return ".../" + filename + elif len(filename) <= 44: + return filename + if 0 < x and x < 14: + out = filename[:x+1] + out += "..." + else: + out = filename[:10] + out += "..." 
+ remain = len(filename) - len(out) + y = filename.find("/", remain) + if 0 < y and y < remain+5: + out += filename[y:] + else: + out += filename[remain:] + return out + +def unit_name_escape(text): + # https://www.freedesktop.org/software/systemd/man/systemd.unit.html#id-1.6 + esc = re.sub("([^a-z-AZ.-/])", lambda m: "\\x%02x" % ord(m.group(1)[0]), text) + return esc.replace("/", "-") +def unit_name_unescape(text): + esc = text.replace("-", "/") + return re.sub("\\\\x(..)", lambda m: "%c" % chr(int(m.group(1), 16)), esc) + +def is_good_root(root): + if not root: + return True + return root.strip(os.path.sep).count(os.path.sep) > 1 +def os_path(root, path): + if not root: + return path + if not path: + return path + if is_good_root(root) and path.startswith(root): + return path + while path.startswith(os.path.sep): + path = path[1:] + return os.path.join(root, path) +def path_replace_extension(path, old, new): + if path.endswith(old): + path = path[:-len(old)] + return path + new +def get_exist_path(paths): + for p in paths: + if os.path.exists(p): + return p + return None + +def get_PAGER(): + PAGER = os.environ.get("PAGER", "less") + pager = os.environ.get("SYSTEMD_PAGER", "{PAGER}").format(**locals()) + options = os.environ.get("SYSTEMD_LESS", "FRSXMK") # see 'man timedatectl' + if not pager: pager = "cat" + if "less" in pager and options: + return [pager, "-" + options] + return [pager] + +def os_getlogin(): + """ NOT using os.getlogin() """ + return pwd.getpwuid(os.geteuid()).pw_name + +def get_runtime_dir(): + explicit = os.environ.get("XDG_RUNTIME_DIR", "") + if explicit: return explicit + user = os_getlogin() + return "/tmp/run-"+user +def get_RUN(root = False): + tmp_var = get_TMP(root) + if _root: + tmp_var = _root + if root: + for p in ("/run", "/var/run", "{tmp_var}/run"): + path = p.format(**locals()) + if os.path.isdir(path) and os.access(path, os.W_OK): + return path + os.makedirs(path) # "/tmp/run" + return path + else: + uid = get_USER_ID(root) + for p in ("/run/user/{uid}", "/var/run/user/{uid}", "{tmp_var}/run-{uid}"): + path = p.format(**locals()) + if os.path.isdir(path) and os.access(path, os.W_OK): + return path + os.makedirs(path, 0o700) # "/tmp/run/user/{uid}" + return path +def get_PID_DIR(root = False): + if root: + return get_RUN(root) + else: + return os.path.join(get_RUN(root), "run") # compat with older systemctl.py + +def get_home(): + if False: # pragma: no cover + explicit = os.environ.get("HOME", "") # >> On Unix, an initial ~ (tilde) is replaced by the + if explicit: return explicit # environment variable HOME if it is set; otherwise + uid = os.geteuid() # the current users home directory is looked up in the + # # password directory through the built-in module pwd. + return pwd.getpwuid(uid).pw_name # An initial ~user i looked up directly in the + return os.path.expanduser("~") # password directory. 
<< from docs(os.path.expanduser) +def get_HOME(root = False): + if root: return "/root" + return get_home() +def get_USER_ID(root = False): + ID = 0 + if root: return ID + return os.geteuid() +def get_USER(root = False): + if root: return "root" + uid = os.geteuid() + return pwd.getpwuid(uid).pw_name +def get_GROUP_ID(root = False): + ID = 0 + if root: return ID + return os.getegid() +def get_GROUP(root = False): + if root: return "root" + gid = os.getegid() + return grp.getgrgid(gid).gr_name +def get_TMP(root = False): + TMP = "/tmp" + if root: return TMP + return os.environ.get("TMPDIR", os.environ.get("TEMP", os.environ.get("TMP", TMP))) +def get_VARTMP(root = False): + VARTMP = "/var/tmp" + if root: return VARTMP + return os.environ.get("TMPDIR", os.environ.get("TEMP", os.environ.get("TMP", VARTMP))) +def get_SHELL(root = False): + SHELL = "/bin/sh" + if root: return SHELL + return os.environ.get("SHELL", SHELL) +def get_RUNTIME_DIR(root = False): + RUN = "/run" + if root: return RUN + return os.environ.get("XDG_RUNTIME_DIR", get_runtime_dir()) +def get_CONFIG_HOME(root = False): + CONFIG = "/etc" + if root: return CONFIG + HOME = get_HOME(root) + return os.environ.get("XDG_CONFIG_HOME", HOME + "/.config") +def get_CACHE_HOME(root = False): + CACHE = "/var/cache" + if root: return CACHE + HOME = get_HOME(root) + return os.environ.get("XDG_CACHE_HOME", HOME + "/.cache") +def get_DATA_HOME(root = False): + SHARE = "/usr/share" + if root: return SHARE + HOME = get_HOME(root) + return os.environ.get("XDG_DATA_HOME", HOME + "/.local/share") +def get_LOG_DIR(root = False): + LOGDIR = "/var/log" + if root: return LOGDIR + CONFIG = get_CONFIG_HOME(root) + return os.path.join(CONFIG, "log") +def get_VARLIB_HOME(root = False): + VARLIB = "/var/lib" + if root: return VARLIB + CONFIG = get_CONFIG_HOME(root) + return CONFIG +def expand_path(path, root = False): + HOME = get_HOME(root) + RUN = get_RUN(root) + LOG = get_LOG_DIR(root) + XDG_DATA_HOME=get_DATA_HOME(root) + XDG_CONFIG_HOME=get_CONFIG_HOME(root) + XDG_RUNTIME_DIR=get_RUNTIME_DIR(root) + return os.path.expanduser(path.replace("${", "{").format(**locals())) + +def shutil_chown(path, user, group): + if user or group: + uid, gid = -1, -1 + if user: + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + if group: + gid = grp.getgrnam(group).gr_gid + os.chown(path, uid, gid) +def shutil_fchown(fileno, user, group): + if user or group: + uid, gid = -1, -1 + if user: + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + if group: + gid = grp.getgrnam(group).gr_gid + os.fchown(fileno, uid, gid) +def shutil_setuid(user = None, group = None, xgroups = None): + """ set fork-child uid/gid (returns pw-info env-settings)""" + if group: + gid = grp.getgrnam(group).gr_gid + os.setgid(gid) + logg.debug("setgid %s for %s", gid, strQ(group)) + groups = [gid] + try: + os.setgroups(groups) + logg.debug("setgroups %s < (%s)", groups, group) + except OSError as e: # pragma: no cover (it will occur in non-root mode anyway) + logg.debug("setgroups %s < (%s) : %s", groups, group, e) + if user: + pw = pwd.getpwnam(user) + gid = pw.pw_gid + gname = grp.getgrgid(gid).gr_name + if not group: + os.setgid(gid) + logg.debug("setgid %s for user %s", gid, strQ(user)) + groupnames = [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem] + if xgroups: + groups += [g.gr_gid for g in grp.getgrall() if g.gr_name in xgroups and g.gr_gid not in groups] + if not groups: + if group: + 
gid = grp.getgrnam(group).gr_gid + groups = [gid] + try: + os.setgroups(groups) + logg.debug("setgroups %s > %s ", groups, groupnames) + except OSError as e: # pragma: no cover (it will occur in non-root mode anyway) + logg.debug("setgroups %s > %s : %s", groups, groupnames, e) + uid = pw.pw_uid + os.setuid(uid) + logg.debug("setuid %s for user %s", uid, strQ(user)) + home = pw.pw_dir + shell = pw.pw_shell + logname = pw.pw_name + return {"USER": user, "LOGNAME": logname, "HOME": home, "SHELL": shell} + return {} + +def shutil_truncate(filename): + """ truncates the file (or creates a new empty file)""" + filedir = os.path.dirname(filename) + if not os.path.isdir(filedir): + os.makedirs(filedir) + f = open(filename, "w") + f.write("") + f.close() + +# http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid +def pid_exists(pid): + """Check whether pid exists in the current process table.""" + if pid is None: # pragma: no cover (is never null) + return False + return _pid_exists(int(pid)) +def _pid_exists(pid): + """Check whether pid exists in the current process table. + UNIX only. + """ + if pid < 0: + return False + if pid == 0: + # According to "man 2 kill" PID 0 refers to every process + # in the process group of the calling process. + # On certain systems 0 is a valid PID but we have no way + # to know that in a portable fashion. + raise ValueError('invalid PID 0') + try: + os.kill(pid, 0) + except OSError as err: + if err.errno == errno.ESRCH: + # ESRCH == No such process + return False + elif err.errno == errno.EPERM: + # EPERM clearly means there's a process to deny access to + return True + else: + # According to "man 2 kill" possible error values are + # (EINVAL, EPERM, ESRCH) + raise + else: + return True +def pid_zombie(pid): + """ may be a pid exists but it is only a zombie """ + if pid is None: + return False + return _pid_zombie(int(pid)) +def _pid_zombie(pid): + """ may be a pid exists but it is only a zombie """ + if pid < 0: + return False + if pid == 0: + # According to "man 2 kill" PID 0 refers to every process + # in the process group of the calling process. + # On certain systems 0 is a valid PID but we have no way + # to know that in a portable fashion. + raise ValueError('invalid PID 0') + check = _proc_pid_status.format(**locals()) + try: + for line in open(check): + if line.startswith("State:"): + return "Z" in line + except IOError as e: + if e.errno != errno.ENOENT: + logg.error("%s (%s): %s", check, e.errno, e) + return False + return False + +def checkprefix(cmd): + prefix = "" + for i, c in enumerate(cmd): + if c in "-+!@:": + prefix = prefix + c + else: + newcmd = cmd[i:] + return prefix, newcmd + return prefix, "" + +ExecMode = collections.namedtuple("ExecMode", ["mode", "check", "nouser", "noexpand", "argv0"]) +def exec_path(cmd): + """ Hint: exec_path values are usually not moved by --root (while load_path are)""" + prefix, newcmd = checkprefix(cmd) + check = "-" not in prefix + nouser = "+" in prefix or "!" 
in prefix + noexpand = ":" in prefix + argv0 = "@" in prefix + mode = ExecMode(prefix, check, nouser, noexpand, argv0) + return mode, newcmd +LoadMode = collections.namedtuple("LoadMode", ["mode", "check"]) +def load_path(ref): + """ Hint: load_path values are usually moved by --root (while exec_path are not)""" + prefix, filename = "", ref + while filename.startswith("-"): + prefix = prefix + filename[0] + filename = filename[1:] + check = "-" not in prefix + mode = LoadMode(prefix, check) + return mode, filename + +# https://github.com/phusion/baseimage-docker/blob/rel-0.9.16/image/bin/my_init +def ignore_signals_and_raise_keyboard_interrupt(signame): + signal.signal(signal.SIGTERM, signal.SIG_IGN) + signal.signal(signal.SIGINT, signal.SIG_IGN) + raise KeyboardInterrupt(signame) + +_default_dict_type = collections.OrderedDict +_default_conf_type = collections.OrderedDict + +class SystemctlConfData: + """ A *.service files has a structure similar to an *.ini file so + that data is structured in sections and values. Actually the + values are lists - the raw data is in .getlist(). Otherwise + .get() will return the first line that was encountered. """ + # | + # | + # | + # | + # | + # | + def __init__(self, defaults=None, dict_type=None, conf_type=None, allow_no_value=False): + self._defaults = defaults or {} + self._conf_type = conf_type or _default_conf_type + self._dict_type = dict_type or _default_dict_type + self._allow_no_value = allow_no_value + self._conf = self._conf_type() + self._files = [] + def defaults(self): + return self._defaults + def sections(self): + return list(self._conf.keys()) + def add_section(self, section): + if section not in self._conf: + self._conf[section] = self._dict_type() + def has_section(self, section): + return section in self._conf + def has_option(self, section, option): + if section not in self._conf: + return False + return option in self._conf[section] + def set(self, section, option, value): + if section not in self._conf: + self._conf[section] = self._dict_type() + if value is None: + self._conf[section][option] = [] + elif option not in self._conf[section]: + self._conf[section][option] = [value] + else: + self._conf[section][option].append(value) + def getstr(self, section, option, default = None, allow_no_value = False): + done = self.get(section, option, strE(default), allow_no_value) + if done is None: return strE(default) + return done + def get(self, section, option, default = None, allow_no_value = False): + allow_no_value = allow_no_value or self._allow_no_value + if section not in self._conf: + if default is not None: + return default + if allow_no_value: + return None + logg.warning("section {} does not exist".format(section)) + logg.warning(" have {}".format(self.sections())) + raise AttributeError("section {} does not exist".format(section)) + if option not in self._conf[section]: + if default is not None: + return default + if allow_no_value: + return None + raise AttributeError("option {} in {} does not exist".format(option, section)) + if not self._conf[section][option]: # i.e. 
an empty list + if default is not None: + return default + if allow_no_value: + return None + raise AttributeError("option {} in {} is None".format(option, section)) + return self._conf[section][option][0] # the first line in the list of configs + def getlist(self, section, option, default = None, allow_no_value = False): + allow_no_value = allow_no_value or self._allow_no_value + if section not in self._conf: + if default is not None: + return default + if allow_no_value: + return [] + logg.warning("section {} does not exist".format(section)) + logg.warning(" have {}".format(self.sections())) + raise AttributeError("section {} does not exist".format(section)) + if option not in self._conf[section]: + if default is not None: + return default + if allow_no_value: + return [] + raise AttributeError("option {} in {} does not exist".format(option, section)) + return self._conf[section][option] # returns a list, possibly empty + def filenames(self): + return self._files + +class SystemctlConfigParser(SystemctlConfData): + """ A *.service files has a structure similar to an *.ini file but it is + actually not like it. Settings may occur multiple times in each section + and they create an implicit list. In reality all the settings are + globally uniqute, so that an 'environment' can be printed without + adding prefixes. Settings are continued with a backslash at the end + of the line. """ + # def __init__(self, defaults=None, dict_type=None, allow_no_value=False): + # SystemctlConfData.__init__(self, defaults, dict_type, allow_no_value) + def read(self, filename): + return self.read_sysd(filename) + def read_sysd(self, filename): + initscript = False + initinfo = False + section = "GLOBAL" + nextline = False + name, text = "", "" + if os.path.isfile(filename): + self._files.append(filename) + for orig_line in open(filename): + if nextline: + text += orig_line + if text.rstrip().endswith("\\") or text.rstrip().endswith("\\\n"): + text = text.rstrip() + "\n" + else: + self.set(section, name, text) + nextline = False + continue + line = orig_line.strip() + if not line: + continue + if line.startswith("#"): + continue + if line.startswith(";"): + continue + if line.startswith(".include"): + logg.error("the '.include' syntax is deprecated. 
Use x.service.d/ drop-in files!") + includefile = re.sub(r'^\.include[ ]*', '', line).rstrip() + if not os.path.isfile(includefile): + raise Exception("tried to include file that doesn't exist: %s" % includefile) + self.read_sysd(includefile) + continue + if line.startswith("["): + x = line.find("]") + if x > 0: + section = line[1:x] + self.add_section(section) + continue + m = re.match(r"(\w+) *=(.*)", line) + if not m: + logg.warning("bad ini line: %s", line) + raise Exception("bad ini line") + name, text = m.group(1), m.group(2).strip() + if text.endswith("\\") or text.endswith("\\\n"): + nextline = True + text = text + "\n" + else: + # hint: an empty line shall reset the value-list + self.set(section, name, text and text or None) + return self + def read_sysv(self, filename): + """ an LSB header is scanned and converted to (almost) + equivalent settings of a SystemD ini-style input """ + initscript = False + initinfo = False + section = "GLOBAL" + if os.path.isfile(filename): + self._files.append(filename) + for orig_line in open(filename): + line = orig_line.strip() + if line.startswith("#"): + if " BEGIN INIT INFO" in line: + initinfo = True + section = "init.d" + if " END INIT INFO" in line: + initinfo = False + if initinfo: + m = re.match(r"\S+\s*(\w[\w_-]*):(.*)", line) + if m: + key, val = m.group(1), m.group(2).strip() + self.set(section, key, val) + continue + self.systemd_sysv_generator(filename) + return self + def systemd_sysv_generator(self, filename): + """ see systemd-sysv-generator(8) """ + self.set(Unit, "SourcePath", filename) + description = self.get("init.d", "Description", "") + if description: + self.set(Unit, "Description", description) + check = self.get("init.d", "Required-Start", "") + if check: + for item in check.split(" "): + if item.strip() in _sysv_mappings: + self.set(Unit, "Requires", _sysv_mappings[item.strip()]) + provides = self.get("init.d", "Provides", "") + if provides: + self.set(Install, "Alias", provides) + # if already in multi-user.target then start it there. 
+ runlevels = self.getstr("init.d", "Default-Start", "3 5") + for item in runlevels.split(" "): + if item.strip() in _runlevel_mappings: + self.set(Install, "WantedBy", _runlevel_mappings[item.strip()]) + self.set(Service, "Restart", "no") + self.set(Service, "TimeoutSec", strE(DefaultMaximumTimeout)) + self.set(Service, "KillMode", "process") + self.set(Service, "GuessMainPID", "no") + # self.set(Service, "RemainAfterExit", "yes") + # self.set(Service, "SuccessExitStatus", "5 6") + self.set(Service, "ExecStart", filename + " start") + self.set(Service, "ExecStop", filename + " stop") + if description: # LSB style initscript + self.set(Service, "ExecReload", filename + " reload") + self.set(Service, "Type", "forking") # not "sysv" anymore + +# UnitConfParser = ConfigParser.RawConfigParser +UnitConfParser = SystemctlConfigParser + +class SystemctlSocket: + def __init__(self, conf, sock, skip = False): + self.conf = conf + self.sock = sock + self.skip = skip + def fileno(self): + return self.sock.fileno() + def listen(self, backlog = None): + if backlog is None: + backlog = DefaultListenBacklog + dgram = (self.sock.type == socket.SOCK_DGRAM) + if not dgram and not self.skip: + self.sock.listen(backlog) + def name(self): + return self.conf.name() + def addr(self): + stream = self.conf.get(Socket, "ListenStream", "") + dgram = self.conf.get(Socket, "ListenDatagram", "") + return stream or dgram + def close(self): + self.sock.close() + +class SystemctlConf: + # | + # | + # | + # | + # | + # | + # | + # | + # | + def __init__(self, data, module = None): + self.data = data # UnitConfParser + self.env = {} + self.status = None + self.masked = None + self.module = module + self.nonloaded_path = "" + self.drop_in_files = {} + self._root = _root + self._user_mode = _user_mode + def root_mode(self): + return not self._user_mode + def loaded(self): + files = self.data.filenames() + if self.masked: + return "masked" + if len(files): + return "loaded" + return "" + def filename(self): + """ returns the last filename that was parsed """ + files = self.data.filenames() + if files: + return files[0] + return None + def overrides(self): + """ drop-in files are loaded alphabetically by name, not by full path """ + return [self.drop_in_files[name] for name in sorted(self.drop_in_files)] + def name(self): + """ the unit id or defaults to the file name """ + name = self.module or "" + filename = self.filename() + if filename: + name = os.path.basename(filename) + return self.module or name + def set(self, section, name, value): + return self.data.set(section, name, value) + def get(self, section, name, default, allow_no_value = False): + return self.data.getstr(section, name, default, allow_no_value) + def getlist(self, section, name, default = None, allow_no_value = False): + return self.data.getlist(section, name, default or [], allow_no_value) + def getbool(self, section, name, default = None): + value = self.data.get(section, name, default or "no") + if value: + if value[0] in "TtYy123456789": + return True + return False + +class PresetFile: + # | + # | + def __init__(self): + self._files = [] + self._lines = [] + def filename(self): + """ returns the last filename that was parsed """ + if self._files: + return self._files[-1] + return None + def read(self, filename): + self._files.append(filename) + for line in open(filename): + self._lines.append(line.strip()) + return self + def get_preset(self, unit): + for line in self._lines: + m = re.match(r"(enable|disable)\s+(\S+)", line) + if m: + status, pattern 
= m.group(1), m.group(2) + if fnmatch.fnmatchcase(unit, pattern): + logg.debug("%s %s => %s %s", status, pattern, unit, strQ(self.filename())) + return status + return None + +## with waitlock(conf): self.start() +class waitlock: + # | + # | + # | + def __init__(self, conf): + self.conf = conf # currently unused + self.opened = -1 + self.lockfolder = expand_path(_notify_socket_folder, conf.root_mode()) + try: + folder = self.lockfolder + if not os.path.isdir(folder): + os.makedirs(folder) + except Exception as e: + logg.warning("oops, %s", e) + def lockfile(self): + unit = "" + if self.conf: + unit = self.conf.name() + return os.path.join(self.lockfolder, str(unit or "global") + ".lock") + def __enter__(self): + try: + lockfile = self.lockfile() + lockname = os.path.basename(lockfile) + self.opened = os.open(lockfile, os.O_RDWR | os.O_CREAT, 0o600) + for attempt in xrange(int(MaxLockWait or DefaultMaximumTimeout)): + try: + logg_debug_flock("[%s] %s. trying %s _______ ", os.getpid(), attempt, lockname) + fcntl.flock(self.opened, fcntl.LOCK_EX | fcntl.LOCK_NB) + st = os.fstat(self.opened) + if not st.st_nlink: + logg_debug_flock("[%s] %s. %s got deleted, trying again", os.getpid(), attempt, lockname) + os.close(self.opened) + self.opened = os.open(lockfile, os.O_RDWR | os.O_CREAT, 0o600) + continue + content = "{ 'systemctl': %s, 'lock': '%s' }\n" % (os.getpid(), lockname) + os.write(self.opened, content.encode("utf-8")) + logg_debug_flock("[%s] %s. holding lock on %s", os.getpid(), attempt, lockname) + return True + except IOError as e: + whom = os.read(self.opened, 4096) + os.lseek(self.opened, 0, os.SEEK_SET) + logg.info("[%s] %s. systemctl locked by %s", os.getpid(), attempt, whom.rstrip()) + time.sleep(1) # until MaxLockWait + continue + logg.error("[%s] not able to get the lock to %s", os.getpid(), lockname) + except Exception as e: + logg.warning("[%s] oops %s, %s", os.getpid(), str(type(e)), e) + # TODO# raise Exception("no lock for %s", self.unit or "global") + return False + def __exit__(self, type, value, traceback): + try: + os.lseek(self.opened, 0, os.SEEK_SET) + os.ftruncate(self.opened, 0) + if REMOVE_LOCK_FILE: # an optional implementation + lockfile = self.lockfile() + lockname = os.path.basename(lockfile) + os.unlink(lockfile) # ino is kept allocated because opened by this process + logg.debug("[%s] lockfile removed for %s", os.getpid(), lockname) + fcntl.flock(self.opened, fcntl.LOCK_UN) + os.close(self.opened) # implies an unlock but that has happend like 6 seconds later + self.opened = -1 + except Exception as e: + logg.warning("oops, %s", e) + +SystemctlWaitPID = collections.namedtuple("SystemctlWaitPID", ["pid", "returncode", "signal"]) + +def must_have_failed(waitpid, cmd): + # found to be needed on ubuntu:16.04 to match test result from ubuntu:18.04 and other distros + # .... I have tracked it down that python's os.waitpid() returns an exitcode==0 even when the + # .... underlying process has actually failed with an exitcode<>0. It is unknown where that + # .... bug comes from but it seems a bit serious to trash some very basic unix functionality. + # .... Essentially a parent process does not get the correct exitcode from its own children. 
+ if cmd and cmd[0] == "/bin/kill": + pid = None + for arg in cmd[1:]: + if not arg.startswith("-"): + pid = arg + if pid is None: # unknown $MAINPID + if not waitpid.returncode: + logg.error("waitpid %s did return %s => correcting as 11", cmd, waitpid.returncode) + waitpid = SystemctlWaitPID(waitpid.pid, 11, waitpid.signal) + return waitpid + +def subprocess_waitpid(pid): + run_pid, run_stat = os.waitpid(pid, 0) + return SystemctlWaitPID(run_pid, os.WEXITSTATUS(run_stat), os.WTERMSIG(run_stat)) +def subprocess_testpid(pid): + run_pid, run_stat = os.waitpid(pid, os.WNOHANG) + if run_pid: + return SystemctlWaitPID(run_pid, os.WEXITSTATUS(run_stat), os.WTERMSIG(run_stat)) + else: + return SystemctlWaitPID(pid, None, 0) + +SystemctlUnitName = collections.namedtuple("SystemctlUnitName", ["fullname", "name", "prefix", "instance", "suffix", "component"]) + +def parse_unit(fullname): # -> object(prefix, instance, suffix, ...., name, component) + name, suffix = fullname, "" + has_suffix = fullname.rfind(".") + if has_suffix > 0: + name = fullname[:has_suffix] + suffix = fullname[has_suffix+1:] + prefix, instance = name, "" + has_instance = name.find("@") + if has_instance > 0: + prefix = name[:has_instance] + instance = name[has_instance+1:] + component = "" + has_component = prefix.rfind("-") + if has_component > 0: + component = prefix[has_component+1:] + return SystemctlUnitName(fullname, name, prefix, instance, suffix, component) + +def time_to_seconds(text, maximum): + value = 0. + for part in str(text).split(" "): + item = part.strip() + if item == "infinity": + return maximum + if item.endswith("m"): + try: value += 60 * int(item[:-1]) + except: pass # pragma: no cover + if item.endswith("min"): + try: value += 60 * int(item[:-3]) + except: pass # pragma: no cover + elif item.endswith("ms"): + try: value += int(item[:-2]) / 1000. + except: pass # pragma: no cover + elif item.endswith("s"): + try: value += int(item[:-1]) + except: pass # pragma: no cover + elif item: + try: value += int(item) + except: pass # pragma: no cover + if value > maximum: + return maximum + if not value and text.strip() == "0": + return 0. + if not value: + return 1. 
+ return value +def seconds_to_time(seconds): + seconds = float(seconds) + mins = int(int(seconds) / 60) + secs = int(int(seconds) - (mins * 60)) + msecs = int(int(seconds * 1000) - (secs * 1000 + mins * 60000)) + if mins and secs and msecs: + return "%smin %ss %sms" % (mins, secs, msecs) + elif mins and secs: + return "%smin %ss" % (mins, secs) + elif secs and msecs: + return "%ss %sms" % (secs, msecs) + elif mins and msecs: + return "%smin %sms" % (mins, msecs) + elif mins: + return "%smin" % (mins) + else: + return "%ss" % (secs) + +def getBefore(conf): + result = [] + beforelist = conf.getlist(Unit, "Before", []) + for befores in beforelist: + for before in befores.split(" "): + name = before.strip() + if name and name not in result: + result.append(name) + return result + +def getAfter(conf): + result = [] + afterlist = conf.getlist(Unit, "After", []) + for afters in afterlist: + for after in afters.split(" "): + name = after.strip() + if name and name not in result: + result.append(name) + return result + +def compareAfter(confA, confB): + idA = confA.name() + idB = confB.name() + for after in getAfter(confA): + if after == idB: + logg.debug("%s After %s", idA, idB) + return -1 + for after in getAfter(confB): + if after == idA: + logg.debug("%s After %s", idB, idA) + return 1 + for before in getBefore(confA): + if before == idB: + logg.debug("%s Before %s", idA, idB) + return 1 + for before in getBefore(confB): + if before == idA: + logg.debug("%s Before %s", idB, idA) + return -1 + return 0 + +def conf_sortedAfter(conflist, cmp = compareAfter): + # the normal sorted() does only look at two items + # so if "A after C" and a list [A, B, C] then + # it will see "A = B" and "B = C" assuming that + # "A = C" and the list is already sorted. + # + # To make a totalsorted we have to create a marker + # that informs sorted() that also B has a relation. + # It only works when 'after' has a direction, so + # anything without 'before' is a 'after'. In that + # case we find that "B after C". + class SortTuple: + def __init__(self, rank, conf): + self.rank = rank + self.conf = conf + sortlist = [SortTuple(0, conf) for conf in conflist] + for check in xrange(len(sortlist)): # maxrank = len(sortlist) + changed = 0 + for A in xrange(len(sortlist)): + for B in xrange(len(sortlist)): + if A != B: + itemA = sortlist[A] + itemB = sortlist[B] + before = compareAfter(itemA.conf, itemB.conf) + if before > 0 and itemA.rank <= itemB.rank: + logg_debug_after(" %-30s before %s", itemA.conf.name(), itemB.conf.name()) + itemA.rank = itemB.rank + 1 + changed += 1 + if before < 0 and itemB.rank <= itemA.rank: + logg_debug_after(" %-30s before %s", itemB.conf.name(), itemA.conf.name()) + itemB.rank = itemA.rank + 1 + changed += 1 + if not changed: + logg_debug_after("done in check %s of %s", check, len(sortlist)) + break + # because Requires is almost always the same as the After clauses + # we are mostly done in round 1 as the list is in required order + for conf in conflist: + logg_debug_after(".. 
%s", conf.name()) + for item in sortlist: + logg_debug_after("(%s) %s", item.rank, item.conf.name()) + sortedlist = sorted(sortlist, key = lambda item: -item.rank) + for item in sortedlist: + logg_debug_after("[%s] %s", item.rank, item.conf.name()) + return [item.conf for item in sortedlist] + +class SystemctlListenThread(threading.Thread): + def __init__(self, systemctl): + threading.Thread.__init__(self, name="listen") + self.systemctl = systemctl + self.stopped = threading.Event() + def stop(self): + self.stopped.set() + def run(self): + READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR + READ_WRITE = READ_ONLY | select.POLLOUT + me = os.getpid() + if DEBUG_INITLOOP: # pragma: no cover + logg.info("[%s] listen: new thread", me) + if not self.systemctl._sockets: + return + if DEBUG_INITLOOP: # pragma: no cover + logg.info("[%s] listen: start thread", me) + listen = select.poll() + for sock in self.systemctl._sockets.values(): + listen.register(sock, READ_ONLY) + sock.listen() + logg.debug("[%s] listen: %s :%s", me, sock.name(), sock.addr()) + timestamp = time.time() + while not self.stopped.is_set(): + try: + sleep_sec = InitLoopSleep - (time.time() - timestamp) + if sleep_sec < MinimumYield: + sleep_sec = MinimumYield + sleeping = sleep_sec + while sleeping > 2: + time.sleep(1) # accept signals atleast every second + sleeping = InitLoopSleep - (time.time() - timestamp) + if sleeping < MinimumYield: + sleeping = MinimumYield + break + time.sleep(sleeping) # remainder waits less that 2 seconds + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("[%s] listen: poll", me) + accepting = listen.poll(100) # milliseconds + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("[%s] listen: poll (%s)", me, len(accepting)) + for sock_fileno, event in accepting: + for sock in self.systemctl._sockets.values(): + if sock.fileno() == sock_fileno: + if not self.stopped.is_set(): + if self.systemctl.loop.acquire(): + logg.debug("[%s] listen: accept %s :%s", me, sock.name(), sock_fileno) + self.systemctl.do_accept_socket_from(sock.conf, sock.sock) + except Exception as e: + logg.info("[%s] listen: interrupted - exception %s", me, e) + raise + for sock in self.systemctl._sockets.values(): + try: + listen.unregister(sock) + sock.close() + except Exception as e: + logg.warning("[%s] listen: close socket: %s", me, e) + return + +class Systemctl: + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + # | + def __init__(self): + self.error = NOT_A_PROBLEM # program exitcode or process returncode + # from command line options or the defaults + self._extra_vars = _extra_vars + self._force = _force + self._full = _full + self._init = _init + self._no_ask_password = _no_ask_password + self._no_legend = _no_legend + self._now = _now + self._preset_mode = _preset_mode + self._quiet = _quiet + self._root = _root + self._show_all = _show_all + self._only_what = commalist(_only_what) or [""] + self._only_property = commalist(_only_property) + self._only_state = commalist(_only_state) + self._only_type = commalist(_only_type) + # some common constants that may be changed + self._systemd_version = SystemCompatibilityVersion + self._journal_log_folder = _journal_log_folder + # and the actual internal runtime state + self._loaded_file_sysv = {} # /etc/init.d/name => config data + self._loaded_file_sysd = {} # 
/etc/systemd/system/name.service => config data + self._file_for_unit_sysv = None # name.service => /etc/init.d/name + self._file_for_unit_sysd = None # name.service => /etc/systemd/system/name.service + self._preset_file_list = None # /etc/systemd/system-preset/* => file content + self._default_target = DefaultTarget + self._sysinit_target = None # stores a UnitConf() + self.doExitWhenNoMoreProcs = ExitWhenNoMoreProcs or False + self.doExitWhenNoMoreServices = ExitWhenNoMoreServices or False + self._user_mode = _user_mode + self._user_getlogin = os_getlogin() + self._log_file = {} # init-loop + self._log_hold = {} # init-loop + self._boottime = None # cache self.get_boottime() + self._SYSTEMD_UNIT_PATH = None + self._SYSTEMD_SYSVINIT_PATH = None + self._SYSTEMD_PRESET_PATH = None + self._restarted_unit = {} + self._restart_failed_units = {} + self._sockets = {} + self.loop = threading.Lock() + def user(self): + return self._user_getlogin + def user_mode(self): + return self._user_mode + def user_folder(self): + for folder in self.user_folders(): + if folder: return folder + raise Exception("did not find any systemd/user folder") + def system_folder(self): + for folder in self.system_folders(): + if folder: return folder + raise Exception("did not find any systemd/system folder") + def preset_folders(self): + SYSTEMD_PRESET_PATH = self.get_SYSTEMD_PRESET_PATH() + for path in SYSTEMD_PRESET_PATH.split(":"): + if path.strip(): yield expand_path(path.strip()) + if SYSTEMD_PRESET_PATH.endswith(":"): + for p in _preset_folders: + yield expand_path(p.strip()) + def init_folders(self): + SYSTEMD_SYSVINIT_PATH = self.get_SYSTEMD_SYSVINIT_PATH() + for path in SYSTEMD_SYSVINIT_PATH.split(":"): + if path.strip(): yield expand_path(path.strip()) + if SYSTEMD_SYSVINIT_PATH.endswith(":"): + for p in _init_folders: + yield expand_path(p.strip()) + def user_folders(self): + SYSTEMD_UNIT_PATH = self.get_SYSTEMD_UNIT_PATH() + for path in SYSTEMD_UNIT_PATH.split(":"): + if path.strip(): yield expand_path(path.strip()) + if SYSTEMD_UNIT_PATH.endswith(":"): + for p in _user_folders: + yield expand_path(p.strip()) + def system_folders(self): + SYSTEMD_UNIT_PATH = self.get_SYSTEMD_UNIT_PATH() + for path in SYSTEMD_UNIT_PATH.split(":"): + if path.strip(): yield expand_path(path.strip()) + if SYSTEMD_UNIT_PATH.endswith(":"): + for p in _system_folders: + yield expand_path(p.strip()) + def get_SYSTEMD_UNIT_PATH(self): + if self._SYSTEMD_UNIT_PATH is None: + self._SYSTEMD_UNIT_PATH = os.environ.get("SYSTEMD_UNIT_PATH", ":") + assert self._SYSTEMD_UNIT_PATH is not None + return self._SYSTEMD_UNIT_PATH + def get_SYSTEMD_SYSVINIT_PATH(self): + if self._SYSTEMD_SYSVINIT_PATH is None: + self._SYSTEMD_SYSVINIT_PATH = os.environ.get("SYSTEMD_SYSVINIT_PATH", ":") + assert self._SYSTEMD_SYSVINIT_PATH is not None + return self._SYSTEMD_SYSVINIT_PATH + def get_SYSTEMD_PRESET_PATH(self): + if self._SYSTEMD_PRESET_PATH is None: + self._SYSTEMD_PRESET_PATH = os.environ.get("SYSTEMD_PRESET_PATH", ":") + assert self._SYSTEMD_PRESET_PATH is not None + return self._SYSTEMD_PRESET_PATH + def sysd_folders(self): + """ if --user then these folders are preferred """ + if self.user_mode(): + for folder in self.user_folders(): + yield folder + if True: + for folder in self.system_folders(): + yield folder + def scan_unit_sysd_files(self, module = None): # -> [ unit-names,... 
] + """ reads all unit files, returns the first filename for the unit given """ + if self._file_for_unit_sysd is None: + self._file_for_unit_sysd = {} + for folder in self.sysd_folders(): + if not folder: + continue + folder = os_path(self._root, folder) + if not os.path.isdir(folder): + continue + for name in os.listdir(folder): + path = os.path.join(folder, name) + if os.path.isdir(path): + continue + service_name = name + if service_name not in self._file_for_unit_sysd: + self._file_for_unit_sysd[service_name] = path + logg.debug("found %s sysd files", len(self._file_for_unit_sysd)) + return list(self._file_for_unit_sysd.keys()) + def scan_unit_sysv_files(self, module = None): # -> [ unit-names,... ] + """ reads all init.d files, returns the first filename when unit is a '.service' """ + if self._file_for_unit_sysv is None: + self._file_for_unit_sysv = {} + for folder in self.init_folders(): + if not folder: + continue + folder = os_path(self._root, folder) + if not os.path.isdir(folder): + continue + for name in os.listdir(folder): + path = os.path.join(folder, name) + if os.path.isdir(path): + continue + service_name = name + ".service" # simulate systemd + if service_name not in self._file_for_unit_sysv: + self._file_for_unit_sysv[service_name] = path + logg.debug("found %s sysv files", len(self._file_for_unit_sysv)) + return list(self._file_for_unit_sysv.keys()) + def unit_sysd_file(self, module = None): # -> filename? + """ file path for the given module (systemd) """ + self.scan_unit_sysd_files() + assert self._file_for_unit_sysd is not None + if module and module in self._file_for_unit_sysd: + return self._file_for_unit_sysd[module] + if module and unit_of(module) in self._file_for_unit_sysd: + return self._file_for_unit_sysd[unit_of(module)] + return None + def unit_sysv_file(self, module = None): # -> filename? + """ file path for the given module (sysv) """ + self.scan_unit_sysv_files() + assert self._file_for_unit_sysv is not None + if module and module in self._file_for_unit_sysv: + return self._file_for_unit_sysv[module] + if module and unit_of(module) in self._file_for_unit_sysv: + return self._file_for_unit_sysv[unit_of(module)] + return None + def unit_file(self, module = None): # -> filename? 
+ """ file path for the given module (sysv or systemd) """ + path = self.unit_sysd_file(module) + if path is not None: return path + path = self.unit_sysv_file(module) + if path is not None: return path + return None + def is_sysv_file(self, filename): + """ for routines that have a special treatment for init.d services """ + self.unit_file() # scan all + assert self._file_for_unit_sysd is not None + assert self._file_for_unit_sysv is not None + if not filename: return None + if filename in self._file_for_unit_sysd.values(): return False + if filename in self._file_for_unit_sysv.values(): return True + return None # not True + def is_user_conf(self, conf): + if not conf: # pragma: no cover (is never null) + return False + filename = conf.nonloaded_path or conf.filename() + if filename and "/user/" in filename: + return True + return False + def not_user_conf(self, conf): + """ conf can not be started as user service (when --user)""" + if conf is None: # pragma: no cover (is never null) + return True + if not self.user_mode(): + logg.debug("%s no --user mode >> accept", strQ(conf.filename())) + return False + if self.is_user_conf(conf): + logg.debug("%s is /user/ conf >> accept", strQ(conf.filename())) + return False + # to allow for 'docker run -u user' with system services + user = self.get_User(conf) + if user and user == self.user(): + logg.debug("%s with User=%s >> accept", strQ(conf.filename()), user) + return False + return True + def find_drop_in_files(self, unit): + """ search for some.service.d/extra.conf files """ + result = {} + basename_d = unit + ".d" + for folder in self.sysd_folders(): + if not folder: + continue + folder = os_path(self._root, folder) + override_d = os_path(folder, basename_d) + if not os.path.isdir(override_d): + continue + for name in os.listdir(override_d): + path = os.path.join(override_d, name) + if os.path.isdir(path): + continue + if not path.endswith(".conf"): + continue + if name not in result: + result[name] = path + return result + def load_sysd_template_conf(self, module): # -> conf? + """ read the unit template with a UnitConfParser (systemd) """ + if module and "@" in module: + unit = parse_unit(module) + service = "%s@.service" % unit.prefix + conf = self.load_sysd_unit_conf(service) + if conf: + conf.module = module + return conf + return None + def load_sysd_unit_conf(self, module): # -> conf? + """ read the unit file with a UnitConfParser (systemd) """ + path = self.unit_sysd_file(module) + if not path: return None + assert self._loaded_file_sysd is not None + if path in self._loaded_file_sysd: + return self._loaded_file_sysd[path] + masked = None + if os.path.islink(path) and os.readlink(path).startswith("/dev"): + masked = os.readlink(path) + drop_in_files = {} + data = UnitConfParser() + if not masked: + data.read_sysd(path) + drop_in_files = self.find_drop_in_files(os.path.basename(path)) + # load in alphabetic order, irrespective of location + for name in sorted(drop_in_files): + path = drop_in_files[name] + data.read_sysd(path) + conf = SystemctlConf(data, module) + conf.masked = masked + conf.nonloaded_path = path # if masked + conf.drop_in_files = drop_in_files + conf._root = self._root + self._loaded_file_sysd[path] = conf + return conf + def load_sysv_unit_conf(self, module): # -> conf? 
+ """ read the unit file with a UnitConfParser (sysv) """ + path = self.unit_sysv_file(module) + if not path: return None + assert self._loaded_file_sysv is not None + if path in self._loaded_file_sysv: + return self._loaded_file_sysv[path] + data = UnitConfParser() + data.read_sysv(path) + conf = SystemctlConf(data, module) + conf._root = self._root + self._loaded_file_sysv[path] = conf + return conf + def load_unit_conf(self, module): # -> conf | None(not-found) + """ read the unit file with a UnitConfParser (sysv or systemd) """ + try: + conf = self.load_sysd_unit_conf(module) + if conf is not None: + return conf + conf = self.load_sysd_template_conf(module) + if conf is not None: + return conf + conf = self.load_sysv_unit_conf(module) + if conf is not None: + return conf + except Exception as e: + logg.warning("%s not loaded: %s", module, e) + return None + def default_unit_conf(self, module, description = None): # -> conf + """ a unit conf that can be printed to the user where + attributes are empty and loaded() is False """ + data = UnitConfParser() + data.set(Unit, "Description", description or ("NOT-FOUND " + str(module))) + # assert(not data.loaded()) + conf = SystemctlConf(data, module) + conf._root = self._root + return conf + def get_unit_conf(self, module): # -> conf (conf | default-conf) + """ accept that a unit does not exist + and return a unit conf that says 'not-loaded' """ + conf = self.load_unit_conf(module) + if conf is not None: + return conf + return self.default_unit_conf(module) + def get_unit_type(self, module): + name, ext = os.path.splitext(module) + if ext in [".service", ".socket", ".target"]: + return ext[1:] + return None + def get_unit_section(self, module, default = Service): + return string.capwords(self.get_unit_type(module) or default) + def get_unit_section_from(self, conf, default = Service): + return self.get_unit_section(conf.name(), default) + def match_sysd_templates(self, modules = None, suffix=".service"): # -> generate[ unit ] + """ make a file glob on all known template units (systemd areas). + It returns no modules (!!) if no modules pattern were given. + The module string should contain an instance name already. """ + modules = to_list(modules) + if not modules: + return + self.scan_unit_sysd_files() + assert self._file_for_unit_sysd is not None + for item in sorted(self._file_for_unit_sysd.keys()): + if "@" not in item: + continue + service_unit = parse_unit(item) + for module in modules: + if "@" not in module: + continue + module_unit = parse_unit(module) + if service_unit.prefix == module_unit.prefix: + yield "%s@%s.%s" % (service_unit.prefix, module_unit.instance, service_unit.suffix) + def match_sysd_units(self, modules = None, suffix=".service"): # -> generate[ unit ] + """ make a file glob on all known units (systemd areas). + It returns all modules if no modules pattern were given. + Also a single string as one module pattern may be given. """ + modules = to_list(modules) + self.scan_unit_sysd_files() + assert self._file_for_unit_sysd is not None + for item in sorted(self._file_for_unit_sysd.keys()): + if "." not in item: + pass + elif not modules: + yield item + elif [module for module in modules if fnmatch.fnmatchcase(item, module)]: + yield item + elif [module for module in modules if module+suffix == item]: + yield item + def match_sysv_units(self, modules = None, suffix=".service"): # -> generate[ unit ] + """ make a file glob on all known units (sysv areas). + It returns all modules if no modules pattern were given. 
+ Also a single string as one module pattern may be given. """ + modules = to_list(modules) + self.scan_unit_sysv_files() + assert self._file_for_unit_sysv is not None + for item in sorted(self._file_for_unit_sysv.keys()): + if not modules: + yield item + elif [module for module in modules if fnmatch.fnmatchcase(item, module)]: + yield item + elif [module for module in modules if module+suffix == item]: + yield item + def match_units(self, modules = None, suffix=".service"): # -> [ units,.. ] + """ Helper for about any command with multiple units which can + actually be glob patterns on their respective unit name. + It returns all modules if no modules pattern were given. + Also a single string as one module pattern may be given. """ + found = [] + for unit in self.match_sysd_units(modules, suffix): + if unit not in found: + found.append(unit) + for unit in self.match_sysd_templates(modules, suffix): + if unit not in found: + found.append(unit) + for unit in self.match_sysv_units(modules, suffix): + if unit not in found: + found.append(unit) + return found + def list_service_unit_basics(self): + """ show all the basic loading state of services """ + filename = self.unit_file() # scan all + assert self._file_for_unit_sysd is not None + assert self._file_for_unit_sysv is not None + result = [] + for name, value in self._file_for_unit_sysd.items(): + result += [(name, "SysD", value)] + for name, value in self._file_for_unit_sysv.items(): + result += [(name, "SysV", value)] + return result + def list_service_units(self, *modules): # -> [ (unit,loaded+active+substate,description) ] + """ show all the service units """ + result = {} + active = {} + substate = {} + description = {} + for unit in self.match_units(to_list(modules)): + result[unit] = "not-found" + active[unit] = "inactive" + substate[unit] = "dead" + description[unit] = "" + try: + conf = self.get_unit_conf(unit) + result[unit] = "loaded" + description[unit] = self.get_description_from(conf) + active[unit] = self.get_active_from(conf) + substate[unit] = self.get_substate_from(conf) or "unknown" + except Exception as e: + logg.warning("list-units: %s", e) + if self._only_state: + if result[unit] in self._only_state: + pass + elif active[unit] in self._only_state: + pass + elif substate[unit] in self._only_state: + pass + else: + del result[unit] + return [(unit, result[unit] + " " + active[unit] + " " + substate[unit], description[unit]) for unit in sorted(result)] + def list_units_modules(self, *modules): # -> [ (unit,loaded,description) ] + """ [PATTERN]... -- List loaded units. + If one or more PATTERNs are specified, only units matching one of + them are shown. NOTE: This is the default command.""" + hint = "To show all installed unit files use 'systemctl list-unit-files'." + result = self.list_service_units(*modules) + if self._no_legend: + return result + found = "%s loaded units listed." 
% len(result) + return result + [("", "", ""), (found, "", ""), (hint, "", "")] + def list_service_unit_files(self, *modules): # -> [ (unit,enabled) ] + """ show all the service units and the enabled status""" + logg.debug("list service unit files for %s", modules) + result = {} + enabled = {} + for unit in self.match_units(to_list(modules)): + if self._only_type and self.get_unit_type(unit) not in self._only_type: + continue + result[unit] = None + enabled[unit] = "" + try: + conf = self.get_unit_conf(unit) + if self.not_user_conf(conf): + result[unit] = None + continue + result[unit] = conf + enabled[unit] = self.enabled_from(conf) + except Exception as e: + logg.warning("list-units: %s", e) + return [(unit, enabled[unit]) for unit in sorted(result) if result[unit]] + def each_target_file(self): + folders = self.system_folders() + if self.user_mode(): + folders = self.user_folders() + for folder1 in folders: + folder = os_path(self._root, folder1) + if not os.path.isdir(folder): + continue + for filename in os.listdir(folder): + if filename.endswith(".target"): + yield (filename, os.path.join(folder, filename)) + def list_target_unit_files(self, *modules): # -> [ (unit,enabled) ] + """ show all the target units and the enabled status""" + enabled = {} + targets = {} + for target, filepath in self.each_target_file(): + logg.info("target %s", filepath) + targets[target] = filepath + enabled[target] = "static" + for unit in _all_common_targets: + targets[unit] = None + enabled[unit] = "static" + if unit in _all_common_enabled: + enabled[unit] = "enabled" + if unit in _all_common_disabled: + enabled[unit] = "disabled" + return [(unit, enabled[unit]) for unit in sorted(targets)] + def list_unit_files_modules(self, *modules): # -> [ (unit,enabled) ] + """[PATTERN]... -- List installed unit files + List installed unit files and their enablement state (as reported + by is-enabled). If one or more PATTERNs are specified, only units + whose filename (just the last component of the path) matches one of + them are shown. This command reacts to limitations of --type being + --type=service or --type=target (and --now for some basics).""" + result = [] + if self._now: + basics = self.list_service_unit_basics() + result = [(name, sysv + " " + filename) for name, sysv, filename in basics] + elif self._only_type: + if "target" in self._only_type: + result = self.list_target_unit_files() + if "service" in self._only_type: + result = self.list_service_unit_files() + else: + result = self.list_target_unit_files() + result += self.list_service_unit_files(*modules) + if self._no_legend: + return result + found = "%s unit files listed." 
% len(result) + return [("UNIT FILE", "STATE")] + result + [("", ""), (found, "")] + ## + ## + def get_description(self, unit, default = None): + return self.get_description_from(self.load_unit_conf(unit)) + def get_description_from(self, conf, default = None): # -> text + """ Unit.Description could be empty sometimes """ + if not conf: return default or "" + description = conf.get(Unit, "Description", default or "") + return self.expand_special(description, conf) + def read_pid_file(self, pid_file, default = None): + pid = default + if not pid_file: + return default + if not os.path.isfile(pid_file): + return default + if self.truncate_old(pid_file): + return default + try: + # some pid-files from applications contain multiple lines + for line in open(pid_file): + if line.strip(): + pid = to_intN(line.strip()) + break + except Exception as e: + logg.warning("bad read of pid file '%s': %s", pid_file, e) + return pid + def wait_pid_file(self, pid_file, timeout = None): # -> pid? + """ wait some seconds for the pid file to appear and return the pid """ + timeout = int(timeout or (DefaultTimeoutStartSec/2)) + timeout = max(timeout, (MinimumTimeoutStartSec)) + dirpath = os.path.dirname(os.path.abspath(pid_file)) + for x in xrange(timeout): + if not os.path.isdir(dirpath): + time.sleep(1) # until TimeoutStartSec/2 + continue + pid = self.read_pid_file(pid_file) + if not pid: + time.sleep(1) # until TimeoutStartSec/2 + continue + if not pid_exists(pid): + time.sleep(1) # until TimeoutStartSec/2 + continue + return pid + return None + def get_status_pid_file(self, unit): + """ actual file path of pid file (internal) """ + conf = self.get_unit_conf(unit) + return self.pid_file_from(conf) or self.get_status_file_from(conf) + def pid_file_from(self, conf, default = ""): + """ get the specified pid file path (not a computed default) """ + pid_file = self.get_pid_file(conf) or default + return os_path(self._root, self.expand_special(pid_file, conf)) + def get_pid_file(self, conf, default = None): + return conf.get(Service, "PIDFile", default) + def read_mainpid_from(self, conf, default = None): + """ MAINPID is either the PIDFile content written from the application + or it is the value in the status file written by this systemctl.py code """ + pid_file = self.pid_file_from(conf) + if pid_file: + return self.read_pid_file(pid_file, default) + status = self.read_status_from(conf) + if "MainPID" in status: + return to_intN(status["MainPID"], default) + return default + def clean_pid_file_from(self, conf): + pid_file = self.pid_file_from(conf) + if pid_file and os.path.isfile(pid_file): + try: + os.remove(pid_file) + except OSError as e: + logg.warning("while rm %s: %s", pid_file, e) + self.write_status_from(conf, MainPID=None) + def get_status_file(self, unit): # for testing + conf = self.get_unit_conf(unit) + return self.get_status_file_from(conf) + def get_status_file_from(self, conf, default = None): + status_file = self.get_StatusFile(conf) + # this not a real setting, but do the expand_special anyway + return os_path(self._root, self.expand_special(status_file, conf)) + def get_StatusFile(self, conf, default = None): # -> text + """ file where to store a status mark """ + status_file = conf.get(Service, "StatusFile", default) + if status_file: + return status_file + root = conf.root_mode() + folder = get_PID_DIR(root) + name = "%s.status" % conf.name() + return os.path.join(folder, name) + def clean_status_from(self, conf): + status_file = self.get_status_file_from(conf) + if 
os.path.exists(status_file): + os.remove(status_file) + conf.status = {} + def write_status_from(self, conf, **status): # -> bool(written) + """ if a status_file is known then path is created and the + give status is written as the only content. """ + status_file = self.get_status_file_from(conf) + # if not status_file: return False + dirpath = os.path.dirname(os.path.abspath(status_file)) + if not os.path.isdir(dirpath): + os.makedirs(dirpath) + if conf.status is None: + conf.status = self.read_status_from(conf) + if True: + for key in sorted(status.keys()): + value = status[key] + if key.upper() == "AS": key = "ActiveState" + if key.upper() == "EXIT": key = "ExecMainCode" + if value is None: + try: del conf.status[key] + except KeyError: pass + else: + conf.status[key] = strE(value) + try: + with open(status_file, "w") as f: + for key in sorted(conf.status): + value = conf.status[key] + if key == "MainPID" and str(value) == "0": + logg.warning("ignore writing MainPID=0") + continue + content = "{}={}\n".format(key, str(value)) + logg.debug("writing to %s\n\t%s", status_file, content.strip()) + f.write(content) + except IOError as e: + logg.error("writing STATUS %s: %s\n\t to status file %s", status, e, status_file) + return True + def read_status_from(self, conf): + status_file = self.get_status_file_from(conf) + status = {} + # if not status_file: return status + if not os.path.isfile(status_file): + if DEBUG_STATUS: logg.debug("no status file: %s\n returning %s", status_file, status) + return status + if self.truncate_old(status_file): + if DEBUG_STATUS: logg.debug("old status file: %s\n returning %s", status_file, status) + return status + try: + if DEBUG_STATUS: logg.debug("reading %s", status_file) + for line in open(status_file): + if line.strip(): + m = re.match(r"(\w+)[:=](.*)", line) + if m: + key, value = m.group(1), m.group(2) + if key.strip(): + status[key.strip()] = value.strip() + else: # pragma: no cover + logg.warning("ignored %s", line.strip()) + except: + logg.warning("bad read of status file '%s'", status_file) + return status + def get_status_from(self, conf, name, default = None): + if conf.status is None: + conf.status = self.read_status_from(conf) + return conf.status.get(name, default) + def set_status_from(self, conf, name, value): + if conf.status is None: + conf.status = self.read_status_from(conf) + if value is None: + try: del conf.status[name] + except KeyError: pass + else: + conf.status[name] = value + # + def get_boottime(self): + """ detects the boot time of the container - in general the start time of PID 1 """ + if self._boottime is None: + self._boottime = self.get_boottime_from_proc() + assert self._boottime is not None + return self._boottime + def get_boottime_from_proc(self): + """ detects the latest boot time by looking at the start time of available process""" + pid1 = BOOT_PID_MIN or 0 + pid_max = BOOT_PID_MAX + if pid_max < 0: + pid_max = pid1 - pid_max + for pid in xrange(pid1, pid_max): + proc = _proc_pid_stat.format(**locals()) + try: + if os.path.exists(proc): + # return os.path.getmtime(proc) # did sometimes change + return self.path_proc_started(proc) + except Exception as e: # pragma: no cover + logg.warning("boottime - could not access %s: %s", proc, e) + if DEBUG_BOOTTIME: + logg.debug(" boottime from the oldest entry in /proc [nothing in %s..%s]", pid1, pid_max) + return self.get_boottime_from_old_proc() + def get_boottime_from_old_proc(self): + booted = time.time() + for pid in os.listdir(_proc_pid_dir): + proc = 
_proc_pid_stat.format(**locals()) + try: + if os.path.exists(proc): + # ctime = os.path.getmtime(proc) + ctime = self.path_proc_started(proc) + if ctime < booted: + booted = ctime + except Exception as e: # pragma: no cover + logg.warning("could not access %s: %s", proc, e) + return booted + + # Use uptime, time process running in ticks, and current time to determine process boot time + # You can't use the modified timestamp of the status file because it isn't static. + # ... using clock ticks it is known to be a linear time on Linux + def path_proc_started(self, proc): + # get time process started after boot in clock ticks + with open(proc) as file_stat: + data_stat = file_stat.readline() + file_stat.close() + stat_data = data_stat.split() + started_ticks = stat_data[21] + # man proc(5): "(22) starttime = The time the process started after system boot." + # ".. the value is expressed in clock ticks (divide by sysconf(_SC_CLK_TCK))." + # NOTE: for containers the start time is related to the boot time of host system. + + clkTickInt = os.sysconf_names['SC_CLK_TCK'] + clockTicksPerSec = os.sysconf(clkTickInt) + started_secs = float(started_ticks) / clockTicksPerSec + if DEBUG_BOOTTIME: + logg.debug(" BOOT .. Proc started time: %.3f (%s)", started_secs, proc) + # this value is the start time from the host system + + # Variant 1: + system_uptime = _proc_sys_uptime + with open(system_uptime, "rb") as file_uptime: + data_uptime = file_uptime.readline() + file_uptime.close() + uptime_data = data_uptime.decode().split() + uptime_secs = float(uptime_data[0]) + if DEBUG_BOOTTIME: + logg.debug(" BOOT 1. System uptime secs: %.3f (%s)", uptime_secs, system_uptime) + + # get time now + now = time.time() + started_time = now - (uptime_secs - started_secs) + if DEBUG_BOOTTIME: + logg.debug(" BOOT 1. Proc has been running since: %s" % (datetime.datetime.fromtimestamp(started_time))) + + # Variant 2: + system_stat = _proc_sys_stat + system_btime = 0. + with open(system_stat, "rb") as f: + for line in f: + assert isinstance(line, bytes) + if line.startswith(b"btime"): + system_btime = float(line.decode().split()[1]) + f.closed + if DEBUG_BOOTTIME: + logg.debug(" BOOT 2. System btime secs: %.3f (%s)", system_btime, system_stat) + + started_btime = system_btime + started_secs + if DEBUG_BOOTTIME: + logg.debug(" BOOT 2. 
Proc has been running since: %s" % (datetime.datetime.fromtimestamp(started_btime))) + + # return started_time + return started_btime + + def get_filetime(self, filename): + return os.path.getmtime(filename) + def truncate_old(self, filename): + filetime = self.get_filetime(filename) + boottime = self.get_boottime() + if filetime >= boottime: + if DEBUG_BOOTTIME: + logg.debug(" file time: %s (%s)", datetime.datetime.fromtimestamp(filetime), o22(filename)) + logg.debug(" boot time: %s (%s)", datetime.datetime.fromtimestamp(boottime), "status modified later") + return False # OK + if DEBUG_BOOTTIME: + logg.info(" file time: %s (%s)", datetime.datetime.fromtimestamp(filetime), o22(filename)) + logg.info(" boot time: %s (%s)", datetime.datetime.fromtimestamp(boottime), "status TRUNCATED NOW") + try: + shutil_truncate(filename) + except Exception as e: + logg.warning("while truncating: %s", e) + return True # truncated + def getsize(self, filename): + if filename is None: # pragma: no cover (is never null) + return 0 + if not os.path.isfile(filename): + return 0 + if self.truncate_old(filename): + return 0 + try: + return os.path.getsize(filename) + except Exception as e: + logg.warning("while reading file size: %s\n of %s", e, filename) + return 0 + # + def read_env_file(self, env_file): # -> generate[ (name,value) ] + """ EnvironmentFile= is being scanned """ + mode, env_file = load_path(env_file) + real_file = os_path(self._root, env_file) + if not os.path.exists(real_file): + if mode.check: + logg.error("file does not exist: %s", real_file) + else: + logg.debug("file does not exist: %s", real_file) + return + try: + for real_line in open(os_path(self._root, env_file)): + line = real_line.strip() + if not line or line.startswith("#"): + continue + m = re.match(r"(?:export +)?([\w_]+)[=]'([^']*)'", line) + if m: + yield m.group(1), m.group(2) + continue + m = re.match(r'(?:export +)?([\w_]+)[=]"([^"]*)"', line) + if m: + yield m.group(1), m.group(2) + continue + m = re.match(r'(?:export +)?([\w_]+)[=](.*)', line) + if m: + yield m.group(1), m.group(2) + continue + except Exception as e: + logg.info("while reading %s: %s", env_file, e) + def read_env_part(self, env_part): # -> generate[ (name, value) ] + """ Environment== is being scanned """ + # systemd Environment= spec says it is a space-separated list of + # assignments. In order to use a space or an equals sign in a value + # one should enclose the whole assignment with double quotes: + # Environment="VAR1=word word" VAR2=word3 "VAR3=$word 5 6" + # and the $word is not expanded by other environment variables. + try: + for real_line in env_part.split("\n"): + line = real_line.strip() + for found in re.finditer(r'\s*("[\w_]+=[^"]*"|[\w_]+=\S*)', line): + part = found.group(1) + if part.startswith('"'): + part = part[1:-1] + name, value = part.split("=", 1) + yield name, value + except Exception as e: + logg.info("while reading %s: %s", env_part, e) + def command_of_unit(self, unit): + """ [UNIT]. -- show service settings (experimental) + or use -p VarName to show another property than 'ExecStart' """ + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s could not be found.", unit) + self.error |= NOT_FOUND + return None + if self._only_property: + found = [] + for prop in self._only_property: + found += conf.getlist(Service, prop) + return found + return conf.getlist(Service, "ExecStart") + def environment_of_unit(self, unit): + """ [UNIT]. 
-- show environment parts """ + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s could not be found.", unit) + self.error |= NOT_FOUND + return None + return self.get_env(conf) + def extra_vars(self): + return self._extra_vars # from command line + def get_env(self, conf): + env = os.environ.copy() + for env_part in conf.getlist(Service, "Environment", []): + for name, value in self.read_env_part(self.expand_special(env_part, conf)): + env[name] = value # a '$word' is not special here (lazy expansion) + for env_file in conf.getlist(Service, "EnvironmentFile", []): + for name, value in self.read_env_file(self.expand_special(env_file, conf)): + env[name] = self.expand_env(value, env) # but nonlazy expansion here + logg.debug("extra-vars %s", self.extra_vars()) + for extra in self.extra_vars(): + if extra.startswith("@"): + for name, value in self.read_env_file(extra[1:]): + logg.info("override %s=%s", name, value) + env[name] = self.expand_env(value, env) + else: + for name, value in self.read_env_part(extra): + logg.info("override %s=%s", name, value) + env[name] = value # a '$word' is not special here + return env + def expand_env(self, cmd, env): + def get_env1(m): + name = m.group(1) + if name in env: + return env[name] + namevar = "$%s" % name + logg.debug("can not expand %s", namevar) + return (EXPAND_KEEP_VARS and namevar or "") + def get_env2(m): + name = m.group(1) + if name in env: + return env[name] + namevar = "${%s}" % name + logg.debug("can not expand %s", namevar) + return (EXPAND_KEEP_VARS and namevar or "") + # + maxdepth = EXPAND_VARS_MAXDEPTH + expanded = re.sub(r"[$](\w+)", lambda m: get_env1(m), cmd.replace("\\\n", "")) + for depth in xrange(maxdepth): + new_text = re.sub(r"[$][{](\w+)[}]", lambda m: get_env2(m), expanded) + if new_text == expanded: + return expanded + expanded = new_text + logg.error("shell variable expansion exceeded maxdepth %s", maxdepth) + return expanded + def expand_special(self, cmd, conf): + """ expand %i %t and similar special vars. They are being expanded + before any other expand_env takes place which handles shell-style + $HOME references. 
""" + def xx(arg): return unit_name_unescape(arg) + def yy(arg): return arg + def get_confs(conf): + confs={"%": "%"} + if conf is None: # pragma: no cover (is never null) + return confs + unit = parse_unit(conf.name()) + # + root = conf.root_mode() + VARTMP = get_VARTMP(root) # $TMPDIR # "/var/tmp" + TMP = get_TMP(root) # $TMPDIR # "/tmp" + RUN = get_RUNTIME_DIR(root) # $XDG_RUNTIME_DIR # "/run" + ETC = get_CONFIG_HOME(root) # $XDG_CONFIG_HOME # "/etc" + DAT = get_VARLIB_HOME(root) # $XDG_CONFIG_HOME # "/var/lib" + LOG = get_LOG_DIR(root) # $XDG_CONFIG_HOME/log # "/var/log" + CACHE = get_CACHE_HOME(root) # $XDG_CACHE_HOME # "/var/cache" + HOME = get_HOME(root) # $HOME or ~ # "/root" + USER = get_USER(root) # geteuid().pw_name # "root" + USER_ID = get_USER_ID(root) # geteuid() # 0 + GROUP = get_GROUP(root) # getegid().gr_name # "root" + GROUP_ID = get_GROUP_ID(root) # getegid() # 0 + SHELL = get_SHELL(root) # $SHELL # "/bin/sh" + # confs["b"] = boot_ID + confs["C"] = os_path(self._root, CACHE) # Cache directory root + confs["E"] = os_path(self._root, ETC) # Configuration directory root + confs["F"] = strE(conf.filename()) # EXTRA + confs["f"] = "/%s" % xx(unit.instance or unit.prefix) + confs["h"] = HOME # User home directory + # confs["H"] = host_NAME + confs["i"] = yy(unit.instance) + confs["I"] = xx(unit.instance) # same as %i but escaping undone + confs["j"] = yy(unit.component) # final component of the prefix + confs["J"] = xx(unit.component) # unescaped final component + confs["L"] = os_path(self._root, LOG) + # confs["m"] = machine_ID + confs["n"] = yy(unit.fullname) # Full unit name + confs["N"] = yy(unit.name) # Same as "%n", but with the type suffix removed. + confs["p"] = yy(unit.prefix) # before the first "@" or same as %n + confs["P"] = xx(unit.prefix) # same as %p but escaping undone + confs["s"] = SHELL + confs["S"] = os_path(self._root, DAT) + confs["t"] = os_path(self._root, RUN) + confs["T"] = os_path(self._root, TMP) + confs["g"] = GROUP + confs["G"] = str(GROUP_ID) + confs["u"] = USER + confs["U"] = str(USER_ID) + confs["V"] = os_path(self._root, VARTMP) + return confs + def get_conf1(m): + confs = get_confs(conf) + if m.group(1) in confs: + return confs[m.group(1)] + logg.warning("can not expand %%%s", m.group(1)) + return "" + result = "" + if cmd: + result = re.sub("[%](.)", lambda m: get_conf1(m), cmd) + # ++# logg.info("expanded => %s", result) + return result + def exec_newcmd(self, cmd, env, conf): + mode, exe = exec_path(cmd) + if mode.noexpand: + newcmd = self.split_cmd(exe) + else: + newcmd = self.expand_cmd(exe, env, conf) + if mode.argv0: + if len(newcmd) > 1: + del newcmd[1] # TODO: keep but allow execve calls to pick it up + return mode, newcmd + def split_cmd(self, cmd): + cmd2 = cmd.replace("\\\n", "") + newcmd = [] + for part in shlex.split(cmd2): + newcmd += [part] + return newcmd + def expand_cmd(self, cmd, env, conf): + """ expand ExecCmd statements including %i and $MAINPID """ + cmd2 = cmd.replace("\\\n", "") + # according to documentation, when bar="one two" then the expansion + # of '$bar' is ["one","two"] and '${bar}' becomes ["one two"]. We + # tackle that by expand $bar before shlex, and the rest thereafter. 
+ def get_env1(m): + name = m.group(1) + if name in env: + return env[name] + logg.debug("can not expand $%s", name) + return "" # empty string + def get_env2(m): + name = m.group(1) + if name in env: + return env[name] + logg.debug("can not expand $%s}}", name) + return "" # empty string + cmd3 = re.sub(r"[$](\w+)", lambda m: get_env1(m), cmd2) + newcmd = [] + for part in shlex.split(cmd3): + part2 = self.expand_special(part, conf) + newcmd += [re.sub(r"[$][{](\w+)[}]", lambda m: get_env2(m), part2)] # type: ignore[arg-type] + return newcmd + def remove_service_directories(self, conf, section = Service): + # | + ok = True + nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section) + keepRuntimeDirectory = self.get_RuntimeDirectoryPreserve(conf, section) + if not keepRuntimeDirectory: + root = conf.root_mode() + for name in nameRuntimeDirectory.split(" "): + if not name.strip(): continue + RUN = get_RUNTIME_DIR(root) + path = os.path.join(RUN, name) + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + if RUN == "/run": + for var_run in ("/var/run", "/tmp/run"): + if os.path.isdir(var_run): + var_path = os.path.join(var_run, name) + var_dirpath = os_path(self._root, var_path) + self.do_rm_tree(var_dirpath) + if not ok: + logg.debug("could not fully remove service directory %s", path) + return ok + def do_rm_tree(self, path): + ok = True + if os.path.isdir(path): + for dirpath, dirnames, filenames in os.walk(path, topdown=False): + for item in filenames: + filepath = os.path.join(dirpath, item) + try: + os.remove(filepath) + except Exception as e: # pragma: no cover + logg.debug("not removed file: %s (%s)", filepath, e) + ok = False + for item in dirnames: + dir_path = os.path.join(dirpath, item) + try: + os.rmdir(dir_path) + except Exception as e: # pragma: no cover + logg.debug("not removed dir: %s (%s)", dir_path, e) + ok = False + try: + os.rmdir(path) + except Exception as e: + logg.debug("not removed top dir: %s (%s)", path, e) + ok = False # pragma: no cover + logg.debug("%s rm_tree %s", ok and "done" or "fail", path) + return ok + def get_RuntimeDirectoryPreserve(self, conf, section = Service): + return conf.getbool(section, "RuntimeDirectoryPreserve", "no") + def get_RuntimeDirectory(self, conf, section = Service): + return self.expand_special(conf.get(section, "RuntimeDirectory", ""), conf) + def get_StateDirectory(self, conf, section = Service): + return self.expand_special(conf.get(section, "StateDirectory", ""), conf) + def get_CacheDirectory(self, conf, section = Service): + return self.expand_special(conf.get(section, "CacheDirectory", ""), conf) + def get_LogsDirectory(self, conf, section = Service): + return self.expand_special(conf.get(section, "LogsDirectory", ""), conf) + def get_ConfigurationDirectory(self, conf, section = Service): + return self.expand_special(conf.get(section, "ConfigurationDirectory", ""), conf) + def get_RuntimeDirectoryMode(self, conf, section = Service): + return conf.get(section, "RuntimeDirectoryMode", "") + def get_StateDirectoryMode(self, conf, section = Service): + return conf.get(section, "StateDirectoryMode", "") + def get_CacheDirectoryMode(self, conf, section = Service): + return conf.get(section, "CacheDirectoryMode", "") + def get_LogsDirectoryMode(self, conf, section = Service): + return conf.get(section, "LogsDirectoryMode", "") + def get_ConfigurationDirectoryMode(self, conf, section = Service): + return conf.get(section, "ConfigurationDirectoryMode", "") + def clean_service_directories(self, conf, 
which = ""): + ok = True + section = self.get_unit_section_from(conf) + nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section) + nameStateDirectory = self.get_StateDirectory(conf, section) + nameCacheDirectory = self.get_CacheDirectory(conf, section) + nameLogsDirectory = self.get_LogsDirectory(conf, section) + nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section) + root = conf.root_mode() + for name in nameRuntimeDirectory.split(" "): + if not name.strip(): continue + RUN = get_RUNTIME_DIR(root) + path = os.path.join(RUN, name) + if which in ["all", "runtime", ""]: + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + if RUN == "/run": + for var_run in ("/var/run", "/tmp/run"): + var_path = os.path.join(var_run, name) + var_dirpath = os_path(self._root, var_path) + self.do_rm_tree(var_dirpath) + for name in nameStateDirectory.split(" "): + if not name.strip(): continue + DAT = get_VARLIB_HOME(root) + path = os.path.join(DAT, name) + if which in ["all", "state"]: + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + for name in nameCacheDirectory.split(" "): + if not name.strip(): continue + CACHE = get_CACHE_HOME(root) + path = os.path.join(CACHE, name) + if which in ["all", "cache", ""]: + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + for name in nameLogsDirectory.split(" "): + if not name.strip(): continue + LOGS = get_LOG_DIR(root) + path = os.path.join(LOGS, name) + if which in ["all", "logs"]: + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + for name in nameConfigurationDirectory.split(" "): + if not name.strip(): continue + CONFIG = get_CONFIG_HOME(root) + path = os.path.join(CONFIG, name) + if which in ["all", "configuration", ""]: + dirpath = os_path(self._root, path) + ok = self.do_rm_tree(dirpath) and ok + return ok + def env_service_directories(self, conf): + envs = {} + section = self.get_unit_section_from(conf) + nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section) + nameStateDirectory = self.get_StateDirectory(conf, section) + nameCacheDirectory = self.get_CacheDirectory(conf, section) + nameLogsDirectory = self.get_LogsDirectory(conf, section) + nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section) + root = conf.root_mode() + for name in nameRuntimeDirectory.split(" "): + if not name.strip(): continue + RUN = get_RUNTIME_DIR(root) + path = os.path.join(RUN, name) + envs["RUNTIME_DIRECTORY"] = path + for name in nameStateDirectory.split(" "): + if not name.strip(): continue + DAT = get_VARLIB_HOME(root) + path = os.path.join(DAT, name) + envs["STATE_DIRECTORY"] = path + for name in nameCacheDirectory.split(" "): + if not name.strip(): continue + CACHE = get_CACHE_HOME(root) + path = os.path.join(CACHE, name) + envs["CACHE_DIRECTORY"] = path + for name in nameLogsDirectory.split(" "): + if not name.strip(): continue + LOGS = get_LOG_DIR(root) + path = os.path.join(LOGS, name) + envs["LOGS_DIRECTORY"] = path + for name in nameConfigurationDirectory.split(" "): + if not name.strip(): continue + CONFIG = get_CONFIG_HOME(root) + path = os.path.join(CONFIG, name) + envs["CONFIGURATION_DIRECTORY"] = path + return envs + def create_service_directories(self, conf): + envs = {} + section = self.get_unit_section_from(conf) + nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section) + modeRuntimeDirectory = self.get_RuntimeDirectoryMode(conf, section) + nameStateDirectory = self.get_StateDirectory(conf, 
section) + modeStateDirectory = self.get_StateDirectoryMode(conf, section) + nameCacheDirectory = self.get_CacheDirectory(conf, section) + modeCacheDirectory = self.get_CacheDirectoryMode(conf, section) + nameLogsDirectory = self.get_LogsDirectory(conf, section) + modeLogsDirectory = self.get_LogsDirectoryMode(conf, section) + nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section) + modeConfigurationDirectory = self.get_ConfigurationDirectoryMode(conf, section) + root = conf.root_mode() + user = self.get_User(conf) + group = self.get_Group(conf) + for name in nameRuntimeDirectory.split(" "): + if not name.strip(): continue + RUN = get_RUNTIME_DIR(root) + path = os.path.join(RUN, name) + logg.debug("RuntimeDirectory %s", path) + self.make_service_directory(path, modeRuntimeDirectory) + self.chown_service_directory(path, user, group) + envs["RUNTIME_DIRECTORY"] = path + if RUN == "/run": + for var_run in ("/var/run", "/tmp/run"): + if os.path.isdir(var_run): + var_path = os.path.join(var_run, name) + var_dirpath = os_path(self._root, var_path) + if os.path.isdir(var_dirpath): + if not os.path.islink(var_dirpath): + logg.debug("not a symlink: %s", var_dirpath) + continue + dirpath = os_path(self._root, path) + basepath = os.path.dirname(var_dirpath) + if not os.path.isdir(basepath): + os.makedirs(basepath) + try: + os.symlink(dirpath, var_dirpath) + except Exception as e: + logg.debug("var symlink %s\n\t%s", var_dirpath, e) + for name in nameStateDirectory.split(" "): + if not name.strip(): continue + DAT = get_VARLIB_HOME(root) + path = os.path.join(DAT, name) + logg.debug("StateDirectory %s", path) + self.make_service_directory(path, modeStateDirectory) + self.chown_service_directory(path, user, group) + envs["STATE_DIRECTORY"] = path + for name in nameCacheDirectory.split(" "): + if not name.strip(): continue + CACHE = get_CACHE_HOME(root) + path = os.path.join(CACHE, name) + logg.debug("CacheDirectory %s", path) + self.make_service_directory(path, modeCacheDirectory) + self.chown_service_directory(path, user, group) + envs["CACHE_DIRECTORY"] = path + for name in nameLogsDirectory.split(" "): + if not name.strip(): continue + LOGS = get_LOG_DIR(root) + path = os.path.join(LOGS, name) + logg.debug("LogsDirectory %s", path) + self.make_service_directory(path, modeLogsDirectory) + self.chown_service_directory(path, user, group) + envs["LOGS_DIRECTORY"] = path + for name in nameConfigurationDirectory.split(" "): + if not name.strip(): continue + CONFIG = get_CONFIG_HOME(root) + path = os.path.join(CONFIG, name) + logg.debug("ConfigurationDirectory %s", path) + self.make_service_directory(path, modeConfigurationDirectory) + # not done according the standard + # self.chown_service_directory(path, user, group) + envs["CONFIGURATION_DIRECTORY"] = path + return envs + def make_service_directory(self, path, mode): + ok = True + dirpath = os_path(self._root, path) + if not os.path.isdir(dirpath): + try: + os.makedirs(dirpath) + logg.info("created directory path: %s", dirpath) + except Exception as e: # pragma: no cover + logg.debug("errors directory path: %s\n\t%s", dirpath, e) + ok = False + filemode = int_mode(mode) + if filemode: + try: + os.chmod(dirpath, filemode) + except Exception as e: # pragma: no cover + logg.debug("errors directory path: %s\n\t%s", dirpath, e) + ok = False + else: + logg.debug("path did already exist: %s", dirpath) + if not ok: + logg.debug("could not fully create service directory %s", path) + return ok + def chown_service_directory(self, path, user, 
group): + # the standard defines an optimization so that if the parent + # directory does have the correct user and group then there + # is no other chown on files and subdirectories to be done. + dirpath = os_path(self._root, path) + if not os.path.isdir(dirpath): + logg.debug("chown did not find %s", dirpath) + return True + if user or group: + st = os.stat(dirpath) + st_user = pwd.getpwuid(st.st_uid).pw_name + st_group = grp.getgrgid(st.st_gid).gr_name + change = False + if user and (user.strip() != st_user and user.strip() != str(st.st_uid)): + change = True + if group and (group.strip() != st_group and group.strip() != str(st.st_gid)): + change = True + if change: + logg.debug("do chown %s", dirpath) + try: + ok = self.do_chown_tree(dirpath, user, group) + logg.info("changed %s:%s %s", user, group, ok) + return ok + except Exception as e: + logg.info("oops %s\n\t%s", dirpath, e) + else: + logg.debug("untouched %s", dirpath) + return True + def do_chown_tree(self, path, user, group): + ok = True + uid, gid = -1, -1 + if user: + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + if group: + gid = grp.getgrnam(group).gr_gid + for dirpath, dirnames, filenames in os.walk(path, topdown=False): + for item in filenames: + filepath = os.path.join(dirpath, item) + try: + os.chown(filepath, uid, gid) + except Exception as e: # pragma: no cover + logg.debug("could not set %s:%s on %s\n\t%s", user, group, filepath, e) + ok = False + for item in dirnames: + dir_path = os.path.join(dirpath, item) + try: + os.chown(dir_path, uid, gid) + except Exception as e: # pragma: no cover + logg.debug("could not set %s:%s on %s\n\t%s", user, group, dir_path, e) + ok = False + try: + os.chown(path, uid, gid) + except Exception as e: # pragma: no cover + logg.debug("could not set %s:%s on %s\n\t%s", user, group, path, e) + ok = False + if not ok: + logg.debug("could not chown %s:%s service directory %s", user, group, path) + return ok + def clean_modules(self, *modules): + """ [UNIT]... -- remove the state directories + /// it recognizes --what=all or any of configuration, state, cache, logs, runtime + while an empty value (the default) removes cache and runtime directories""" + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + lines = _log_lines + follow = _force + ok = self.clean_units(units) + return ok and found_all + def clean_units(self, units, what = ""): + if not what: + what = self._only_what[0] + ok = True + for unit in units: + ok = self.clean_unit(unit, what) and ok + return ok + def clean_unit(self, unit, what = ""): + conf = self.load_unit_conf(unit) + if not conf: return False + return self.clean_unit_from(conf, what) + def clean_unit_from(self, conf, what): + if self.is_active_from(conf): + logg.warning("can not clean active unit: %s", conf.name()) + return False + return self.clean_service_directories(conf, what) + def log_modules(self, *modules): + """ [UNIT]... 
-- start 'less' on the log files for the services + /// use '-f' to follow and '-n lines' to limit output using 'tail', + using '--no-pager' just does a full 'cat'""" + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + lines = _log_lines + follow = _force + result = self.log_units(units, lines, follow) + if result: + self.error = result + return False + return found_all + def log_units(self, units, lines = None, follow = False): + result = 0 + for unit in self.sortedAfter(units): + exitcode = self.log_unit(unit, lines, follow) + if exitcode < 0: + return exitcode + if exitcode > result: + result = exitcode + return result + def log_unit(self, unit, lines = None, follow = False): + conf = self.load_unit_conf(unit) + if not conf: return -1 + return self.log_unit_from(conf, lines, follow) + def log_unit_from(self, conf, lines = None, follow = False): + cmd_args = [] + log_path = self.get_journal_log_from(conf) + if follow: + tail_cmd = get_exist_path(TAIL_CMDS) + if tail_cmd is None: + print("tail command not found") + return 1 + cmd = [tail_cmd, "-n", str(lines or 10), "-F", log_path] + logg.debug("journalctl %s -> %s", conf.name(), cmd) + cmd_args = [arg for arg in cmd] # satisfy mypy + return os.execvp(cmd_args[0], cmd_args) + elif lines: + tail_cmd = get_exist_path(TAIL_CMDS) + if tail_cmd is None: + print("tail command not found") + return 1 + cmd = [tail_cmd, "-n", str(lines or 10), log_path] + logg.debug("journalctl %s -> %s", conf.name(), cmd) + cmd_args = [arg for arg in cmd] # satisfy mypy + return os.execvp(cmd_args[0], cmd_args) + elif _no_pager: + cat_cmd = get_exist_path(CAT_CMDS) + if cat_cmd is None: + print("cat command not found") + return 1 + cmd = [cat_cmd, log_path] + logg.debug("journalctl %s -> %s", conf.name(), cmd) + cmd_args = [arg for arg in cmd] # satisfy mypy + return os.execvp(cmd_args[0], cmd_args) + else: + less_cmd = get_exist_path(LESS_CMDS) + if less_cmd is None: + print("less command not found") + return 1 + cmd = [less_cmd, log_path] + logg.debug("journalctl %s -> %s", conf.name(), cmd) + cmd_args = [arg for arg in cmd] # satisfy mypy + return os.execvp(cmd_args[0], cmd_args) + def get_journal_log_from(self, conf): + return os_path(self._root, self.get_journal_log(conf)) + def get_journal_log(self, conf): + """ /var/log/zzz.service.log or /var/log/default.unit.log """ + filename = os.path.basename(strE(conf.filename())) + unitname = (conf.name() or "default")+".unit" + name = filename or unitname + log_folder = expand_path(self._journal_log_folder, conf.root_mode()) + log_file = name.replace(os.path.sep, ".") + ".log" + if log_file.startswith("."): + log_file = "dot."+log_file + return os.path.join(log_folder, log_file) + def open_journal_log(self, conf): + log_file = self.get_journal_log_from(conf) + log_folder = os.path.dirname(log_file) + if not os.path.isdir(log_folder): + os.makedirs(log_folder) + return open(os.path.join(log_file), "a") + def get_WorkingDirectory(self, conf): + return conf.get(Service, "WorkingDirectory", "") + def chdir_workingdir(self, conf): + """ if specified then change the working directory """ + # the original systemd will start in '/' even if User= is given + if self._root: + os.chdir(self._root) + workingdir = self.get_WorkingDirectory(conf) + mode, workingdir = load_path(workingdir) 
+ if workingdir: + into = os_path(self._root, self.expand_special(workingdir, conf)) + try: + logg.debug("chdir workingdir '%s'", into) + os.chdir(into) + return False + except Exception as e: + if mode.check: + logg.error("chdir workingdir '%s': %s", into, e) + return into + else: + logg.debug("chdir workingdir '%s': %s", into, e) + return None + return None + NotifySocket = collections.namedtuple("NotifySocket", ["socket", "socketfile"]) + def get_notify_socket_from(self, conf, socketfile = None, debug = False): + """ creates a notify-socket for the (non-privileged) user """ + notify_socket_folder = expand_path(_notify_socket_folder, conf.root_mode()) + notify_folder = os_path(self._root, notify_socket_folder) + notify_name = "notify." + str(conf.name() or "systemctl") + notify_socket = os.path.join(notify_folder, notify_name) + socketfile = socketfile or notify_socket + if len(socketfile) > 100: + # occurs during testsuite.py for ~user/test.tmp/root path + if debug: + logg.debug("https://unix.stackexchange.com/questions/367008/%s", + "why-is-socket-path-length-limited-to-a-hundred-chars") + logg.debug("old notify socketfile (%s) = %s", len(socketfile), socketfile) + notify_name44 = o44(notify_name) + notify_name77 = o77(notify_name) + socketfile = os.path.join(notify_folder, notify_name77) + if len(socketfile) > 100: + socketfile = os.path.join(notify_folder, notify_name44) + pref = "zz.%i.%s" % (get_USER_ID(), o22(os.path.basename(notify_socket_folder))) + if len(socketfile) > 100: + socketfile = os.path.join(get_TMP(), pref, notify_name) + if len(socketfile) > 100: + socketfile = os.path.join(get_TMP(), pref, notify_name77) + if len(socketfile) > 100: # pragma: no cover + socketfile = os.path.join(get_TMP(), pref, notify_name44) + if len(socketfile) > 100: # pragma: no cover + socketfile = os.path.join(get_TMP(), notify_name44) + if debug: + logg.info("new notify socketfile (%s) = %s", len(socketfile), socketfile) + return socketfile + def notify_socket_from(self, conf, socketfile = None): + socketfile = self.get_notify_socket_from(conf, socketfile, debug=True) + try: + if not os.path.isdir(os.path.dirname(socketfile)): + os.makedirs(os.path.dirname(socketfile)) + if os.path.exists(socketfile): + os.unlink(socketfile) + except Exception as e: + logg.warning("error %s: %s", socketfile, e) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.bind(socketfile) + os.chmod(socketfile, 0o777) # the service my run under some User=setting + return Systemctl.NotifySocket(sock, socketfile) + def read_notify_socket(self, notify, timeout): + notify.socket.settimeout(timeout or DefaultMaximumTimeout) + result = "" + try: + result, client_address = notify.socket.recvfrom(4096) + assert isinstance(result, bytes) + if result: + result = result.decode("utf-8") + result_txt = result.replace("\n", "|") + result_len = len(result) + logg.debug("read_notify_socket(%s):%s", result_len, result_txt) + except socket.timeout as e: + if timeout > 2: + logg.debug("socket.timeout %s", e) + return result + def wait_notify_socket(self, notify, timeout, pid = None, pid_file = None): + if not os.path.exists(notify.socketfile): + logg.info("no $NOTIFY_SOCKET exists") + return {} + # + lapseTimeout = max(3, int(timeout / 100)) + mainpidTimeout = lapseTimeout # Apache sends READY before MAINPID + status = "" + logg.info("wait $NOTIFY_SOCKET, timeout %s (lapse %s)", timeout, lapseTimeout) + waiting = " ---" + results = {} + for attempt in xrange(int(timeout)+1): + if pid and not self.is_active_pid(pid): + 
logg.info("seen dead PID %s", pid) + return results + if not attempt: # first one + time.sleep(1) # until TimeoutStartSec + continue + result = self.read_notify_socket(notify, 1) # sleep max 1 second + for line in result.splitlines(): + # for name, value in self.read_env_part(line) + if "=" not in line: + continue + name, value = line.split("=", 1) + results[name] = value + if name in ["STATUS", "ACTIVESTATE", "MAINPID", "READY"]: + hint="seen notify %s " % (waiting) + logg.debug("%s :%s=%s", hint, name, value) + if status != results.get("STATUS", ""): + mainpidTimeout = lapseTimeout + status = results.get("STATUS", "") + if "READY" not in results: + time.sleep(1) # until TimeoutStart + continue + if "MAINPID" not in results and not pid_file: + mainpidTimeout -= 1 + if mainpidTimeout > 0: + waiting = "%4i" % (-mainpidTimeout) + time.sleep(1) # until TimeoutStart + continue + break # READY and MAINPID + if "READY" not in results: + logg.info(".... timeout while waiting for 'READY=1' status on $NOTIFY_SOCKET") + elif "MAINPID" not in results: + logg.info(".... seen 'READY=1' but no MAINPID update status on $NOTIFY_SOCKET") + logg.debug("notify = %s", results) + try: + notify.socket.close() + except Exception as e: + logg.debug("socket.close %s", e) + return results + def start_modules(self, *modules): + """ [UNIT]... -- start these units + /// SPECIAL: with --now or --init it will + run the init-loop and stop the units afterwards """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + init = self._now or self._init + return self.start_units(units, init) and found_all + def start_units(self, units, init = None): + """ fails if any unit does not start + /// SPECIAL: may run the init-loop and + stop the named units afterwards """ + self.wait_system() + done = True + started_units = [] + for unit in self.sortedAfter(units): + started_units.append(unit) + if not self.start_unit(unit): + done = False + if init: + logg.info("init-loop start") + sig = self.init_loop_until_stop(started_units) + logg.info("init-loop %s", sig) + for unit in reversed(started_units): + self.stop_unit(unit) + return done + def start_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.debug("unit could not be loaded (%s)", unit) + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.start_unit_from(conf) + def get_TimeoutStartSec(self, conf): + timeout = conf.get(Service, "TimeoutSec", strE(DefaultTimeoutStartSec)) + timeout = conf.get(Service, "TimeoutStartSec", timeout) + return time_to_seconds(timeout, DefaultMaximumTimeout) + def get_SocketTimeoutSec(self, conf): + timeout = conf.get(Socket, "TimeoutSec", strE(DefaultTimeoutStartSec)) + return time_to_seconds(timeout, DefaultMaximumTimeout) + def get_RemainAfterExit(self, conf): + return conf.getbool(Service, "RemainAfterExit", "no") + def start_unit_from(self, conf): + if not conf: return False + if self.syntax_check(conf) > 100: return False + with waitlock(conf): + logg.debug(" start unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_start_unit_from(conf) + def do_start_unit_from(self, conf): + if conf.name().endswith(".service"): + return self.do_start_service_from(conf) 
+ elif conf.name().endswith(".socket"): + return self.do_start_socket_from(conf) + elif conf.name().endswith(".target"): + return self.do_start_target_from(conf) + else: + logg.error("start not implemented for unit type: %s", conf.name()) + return False + def do_start_service_from(self, conf): + timeout = self.get_TimeoutStartSec(conf) + doRemainAfterExit = self.get_RemainAfterExit(conf) + runs = conf.get(Service, "Type", "simple").lower() + env = self.get_env(conf) + if not self._quiet: + okee = self.exec_check_unit(conf, env, Service, "Exec") # all... + if not okee and _no_reload: return False + service_directories = self.create_service_directories(conf) + env.update(service_directories) # atleast sshd did check for /run/sshd + # for StopPost on failure: + returncode = 0 + service_result = "success" + if True: + if runs in ["simple", "exec", "forking", "notify", "idle"]: + env["MAINPID"] = strE(self.read_mainpid_from(conf)) + for cmd in conf.getlist(Service, "ExecStartPre", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info(" pre-start %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug(" pre-start done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + if run.returncode and exe.check: + logg.error("the ExecStartPre control process exited with error code") + active = "failed" + self.write_status_from(conf, AS=active) + if self._only_what[0] not in ["none", "keep"]: + self.remove_service_directories(conf) # cleanup that /run/sshd + return False + if runs in ["oneshot"]: + status_file = self.get_status_file_from(conf) + if self.get_status_from(conf, "ActiveState", "unknown") == "active": + logg.warning("the service was already up once") + return True + for cmd in conf.getlist(Service, "ExecStart", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s start %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: # pragma: no cover + os.setsid() # detach child process from parent + self.execve_from(conf, newcmd, env) + run = subprocess_waitpid(forkpid) + if run.returncode and exe.check: + returncode = run.returncode + service_result = "failed" + logg.error("%s start %s (%s) <-%s>", runs, service_result, + run.returncode or "OK", run.signal or "") + break + logg.info("%s start done (%s) <-%s>", runs, + run.returncode or "OK", run.signal or "") + if True: + self.set_status_from(conf, "ExecMainCode", strE(returncode)) + active = returncode and "failed" or "active" + self.write_status_from(conf, AS=active) + elif runs in ["simple", "exec", "idle"]: + status_file = self.get_status_file_from(conf) + pid = self.read_mainpid_from(conf) + if self.is_active_pid(pid): + logg.warning("the service is already running on PID %s", pid) + return True + if doRemainAfterExit: + logg.debug("%s RemainAfterExit -> AS=active", runs) + self.write_status_from(conf, AS="active") + cmdlist = conf.getlist(Service, "ExecStart", []) + for idx, cmd in enumerate(cmdlist): + logg.debug("ExecStart[%s]: %s", idx, cmd) + for cmd in cmdlist: + pid = self.read_mainpid_from(conf) + env["MAINPID"] = strE(pid) + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s start %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: # pragma: no cover + os.setsid() # detach child process from parent + self.execve_from(conf, newcmd, env) + self.write_status_from(conf, MainPID=forkpid) + logg.info("%s started PID %s", runs, forkpid) + env["MAINPID"] 
= strE(forkpid) + time.sleep(MinimumYield) + run = subprocess_testpid(forkpid) + if run.returncode is not None: + logg.info("%s stopped PID %s (%s) <-%s>", runs, run.pid, + run.returncode or "OK", run.signal or "") + if doRemainAfterExit: + self.set_status_from(conf, "ExecMainCode", strE(run.returncode)) + active = run.returncode and "failed" or "active" + self.write_status_from(conf, AS=active) + if run.returncode and exe.check: + service_result = "failed" + break + elif runs in ["notify"]: + # "notify" is the same as "simple" but we create a $NOTIFY_SOCKET + # and wait for startup completion by checking the socket messages + pid_file = self.pid_file_from(conf) + pid = self.read_mainpid_from(conf) + if self.is_active_pid(pid): + logg.error("the service is already running on PID %s", pid) + return False + notify = self.notify_socket_from(conf) + if notify: + env["NOTIFY_SOCKET"] = notify.socketfile + logg.debug("use NOTIFY_SOCKET=%s", notify.socketfile) + if doRemainAfterExit: + logg.debug("%s RemainAfterExit -> AS=active", runs) + self.write_status_from(conf, AS="active") + cmdlist = conf.getlist(Service, "ExecStart", []) + for idx, cmd in enumerate(cmdlist): + logg.debug("ExecStart[%s]: %s", idx, cmd) + mainpid = None + for cmd in cmdlist: + mainpid = self.read_mainpid_from(conf) + env["MAINPID"] = strE(mainpid) + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s start %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: # pragma: no cover + os.setsid() # detach child process from parent + self.execve_from(conf, newcmd, env) + # via NOTIFY # self.write_status_from(conf, MainPID=forkpid) + logg.info("%s started PID %s", runs, forkpid) + mainpid = forkpid + self.write_status_from(conf, MainPID=mainpid) + env["MAINPID"] = strE(mainpid) + time.sleep(MinimumYield) + run = subprocess_testpid(forkpid) + if run.returncode is not None: + logg.info("%s stopped PID %s (%s) <-%s>", runs, run.pid, + run.returncode or "OK", run.signal or "") + if doRemainAfterExit: + self.set_status_from(conf, "ExecMainCode", strE(run.returncode)) + active = run.returncode and "failed" or "active" + self.write_status_from(conf, AS=active) + if run.returncode and exe.check: + service_result = "failed" + break + if service_result in ["success"] and mainpid: + logg.debug("okay, waiting on socket for %ss", timeout) + results = self.wait_notify_socket(notify, timeout, mainpid, pid_file) + if "MAINPID" in results: + new_pid = to_intN(results["MAINPID"]) + if new_pid and new_pid != mainpid: + logg.info("NEW PID %s from sd_notify (was PID %s)", new_pid, mainpid) + self.write_status_from(conf, MainPID=new_pid) + mainpid = new_pid + logg.info("%s start done %s", runs, mainpid) + pid = self.read_mainpid_from(conf) + if pid: + env["MAINPID"] = strE(pid) + else: + service_result = "timeout" # "could not start service" + elif runs in ["forking"]: + pid_file = self.pid_file_from(conf) + for cmd in conf.getlist(Service, "ExecStart", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + if not newcmd: continue + logg.info("%s start %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: # pragma: no cover + os.setsid() # detach child process from parent + self.execve_from(conf, newcmd, env) + logg.info("%s started PID %s", runs, forkpid) + run = subprocess_waitpid(forkpid) + if run.returncode and exe.check: + returncode = run.returncode + service_result = "failed" + logg.info("%s stopped PID %s (%s) <-%s>", runs, run.pid, + run.returncode or "OK", run.signal or "") + if pid_file and 
service_result in ["success"]: + pid = self.wait_pid_file(pid_file) # application PIDFile + logg.info("%s start done PID %s [%s]", runs, pid, pid_file) + if pid: + env["MAINPID"] = strE(pid) + if not pid_file: + time.sleep(MinimumTimeoutStartSec) + logg.warning("No PIDFile for forking %s", strQ(conf.filename())) + status_file = self.get_status_file_from(conf) + self.set_status_from(conf, "ExecMainCode", strE(returncode)) + active = returncode and "failed" or "active" + self.write_status_from(conf, AS=active) + else: + logg.error("unsupported run type '%s'", runs) + return False + # POST sequence + if not self.is_active_from(conf): + logg.warning("%s start not active", runs) + # according to the systemd documentation, a failed start-sequence + # should execute the ExecStopPost sequence allowing some cleanup. + env["SERVICE_RESULT"] = service_result + for cmd in conf.getlist(Service, "ExecStopPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-fail %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-fail done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + if self._only_what[0] not in ["none", "keep"]: + self.remove_service_directories(conf) + return False + else: + for cmd in conf.getlist(Service, "ExecStartPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-start %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-start done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + return True + def listen_modules(self, *modules): + """ [UNIT]... 
-- listen socket units""" + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.listen_units(units) and found_all + def listen_units(self, units): + """ fails if any socket does not start """ + self.wait_system() + done = True + started_units = [] + active_units = [] + for unit in self.sortedAfter(units): + started_units.append(unit) + if not self.listen_unit(unit): + done = False + else: + active_units.append(unit) + if active_units: + logg.info("init-loop start") + sig = self.init_loop_until_stop(started_units) + logg.info("init-loop %s", sig) + for unit in reversed(started_units): + pass # self.stop_unit(unit) + return done + def listen_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.debug("unit could not be loaded (%s)", unit) + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.listen_unit_from(conf) + def listen_unit_from(self, conf): + if not conf: return False + with waitlock(conf): + logg.debug(" listen unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_listen_unit_from(conf) + def do_listen_unit_from(self, conf): + if conf.name().endswith(".socket"): + return self.do_start_socket_from(conf) + else: + logg.error("listen not implemented for unit type: %s", conf.name()) + return False + def do_accept_socket_from(self, conf, sock): + logg.debug("%s: accepting %s", conf.name(), sock.fileno()) + service_unit = self.get_socket_service_from(conf) + service_conf = self.load_unit_conf(service_unit) + if service_conf is None or TestAccept: # pragma: no cover + if sock.type == socket.SOCK_STREAM: + conn, addr = sock.accept() + data = conn.recv(1024) + logg.debug("%s: '%s'", conf.name(), data) + conn.send(b"ERROR: "+data.upper()) + conn.close() + return False + if sock.type == socket.SOCK_DGRAM: + data, sender = sock.recvfrom(1024) + logg.debug("%s: '%s'", conf.name(), data) + sock.sendto(b"ERROR: "+data.upper(), sender) + return False + logg.error("can not accept socket type %s", strINET(sock.type)) + return False + return self.do_start_service_from(service_conf) + def get_socket_service_from(self, conf): + socket_unit = conf.name() + accept = conf.getbool(Socket, "Accept", "no") + service_type = accept and "@.service" or ".service" + service_name = path_replace_extension(socket_unit, ".socket", service_type) + service_unit = conf.get(Socket, Service, service_name) + logg.debug("socket %s -> service %s", socket_unit, service_unit) + return service_unit + def do_start_socket_from(self, conf): + runs = "socket" + timeout = self.get_SocketTimeoutSec(conf) + accept = conf.getbool(Socket, "Accept", "no") + stream = conf.get(Socket, "ListenStream", "") + service_unit = self.get_socket_service_from(conf) + service_conf = self.load_unit_conf(service_unit) + if service_conf is None: + logg.debug("unit could not be loaded (%s)", service_unit) + logg.error("Unit %s not found.", service_unit) + return False + env = self.get_env(conf) + if not self._quiet: + okee = self.exec_check_unit(conf, env, Socket, "Exec") # all... 
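+ # note (added comment): exec_check_unit pre-validates the Exec* lines of the
+ # [Socket] section before any process is forked, mirroring the pre-flight
+ # check done in do_start_service_from; with _no_reload set, a failed check
+ # aborts the socket start right here.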
+ if not okee and _no_reload: return False + if True: + for cmd in conf.getlist(Socket, "ExecStartPre", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info(" pre-start %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug(" pre-start done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + if run.returncode and exe.check: + logg.error("the ExecStartPre control process exited with error code") + active = "failed" + self.write_status_from(conf, AS=active) + return False + # service_directories = self.create_service_directories(conf) + # env.update(service_directories) + listening=False + if not accept: + sock = self.create_socket(conf) + if sock and TestListen: + listening=True + self._sockets[conf.name()] = SystemctlSocket(conf, sock) + service_result = "success" + state = sock and "active" or "failed" + self.write_status_from(conf, AS=state) + if not listening: + # we do not listen but have the service started right away + done = self.do_start_service_from(service_conf) + service_result = done and "success" or "failed" + if not self.is_active_from(service_conf): + service_result = "failed" + state = service_result + if service_result in ["success"]: + state = "active" + self.write_status_from(conf, AS=state) + # POST sequence + if service_result in ["failed"]: + # according to the systemd documentation, a failed start-sequence + # should execute the ExecStopPost sequence allowing some cleanup. + env["SERVICE_RESULT"] = service_result + for cmd in conf.getlist(Socket, "ExecStopPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-fail %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-fail done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + return False + else: + for cmd in conf.getlist(Socket, "ExecStartPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-start %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-start done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + return True + def create_socket(self, conf): + unsupported = ["ListenUSBFunction", "ListenMessageQueue", "ListenNetlink"] + unsupported += ["ListenSpecial", "ListenFIFO", "ListenSequentialPacket"] + for item in unsupported: + if conf.get(Socket, item, ""): + logg.warning("%s: %s sockets are not implemented", conf.name(), item) + self.error |= NOT_OK + return None + vListenDatagram = conf.get(Socket, "ListenDatagram", "") + vListenStream = conf.get(Socket, "ListenStream", "") + address = vListenStream or vListenDatagram + m = re.match(r"(/.*)", address) + if m: + path = m.group(1) + sock = self.create_unix_socket(conf, path, not vListenStream) + self.set_status_from(conf, "path", path) + return sock + m = re.match(r"(\d+[.]\d*[.]\d*[.]\d+):(\d+)", address) + if m: + addr, port = m.group(1), m.group(2) + sock = self.create_port_ipv4_socket(conf, addr, port, not vListenStream) + self.set_status_from(conf, "port", port) + self.set_status_from(conf, "addr", addr) + return sock + m = re.match(r"\[([0-9a-fA-F:]*)\]:(\d+)", address) + if m: + addr, port = m.group(1), m.group(2) + sock = self.create_port_ipv6_socket(conf, addr, port, not vListenStream) + self.set_status_from(conf, 
"port", port) + self.set_status_from(conf, "addr", addr) + return sock + m = re.match(r"(\d+)$", address) + if m: + port = m.group(1) + sock = self.create_port_socket(conf, port, not vListenStream) + self.set_status_from(conf, "port", port) + return sock + if re.match("@.*", address): + logg.warning("%s: abstract namespace socket not implemented (%s)", conf.name(), address) + return None + if re.match("vsock:.*", address): + logg.warning("%s: virtual machine socket not implemented (%s)", conf.name(), address) + return None + logg.error("%s: unknown socket address type (%s)", conf.name(), address) + return None + def create_unix_socket(self, conf, path, dgram): + sock_stream = dgram and socket.SOCK_DGRAM or socket.SOCK_STREAM + sock = socket.socket(socket.AF_UNIX, sock_stream) + try: + dirmode = conf.get(Socket, "DirectoryMode", "0755") + mode = conf.get(Socket, "SocketMode", "0666") + user = conf.get(Socket, "SocketUser", "") + group = conf.get(Socket, "SocketGroup", "") + symlinks = conf.getlist(Socket, "SymLinks", []) + dirpath = os.path.dirname(path) + if not os.path.isdir(dirpath): + os.makedirs(dirpath, int(dirmode, 8)) + if os.path.exists(path): + os.unlink(path) + sock.bind(path) + os.fchmod(sock.fileno(), int(mode, 8)) + shutil_fchown(sock.fileno(), user, group) + if symlinks: + logg.warning("%s: symlinks for socket not implemented (%s)", conf.name(), path) + except Exception as e: + logg.error("%s: create socket failed [%s]: %s", conf.name(), path, e) + sock.close() + return None + return sock + def create_port_socket(self, conf, port, dgram): + inet = dgram and socket.SOCK_DGRAM or socket.SOCK_STREAM + sock = socket.socket(socket.AF_INET, inet) + try: + sock.bind(('', int(port))) + logg.info("%s: bound socket at %s %s:%s", conf.name(), strINET(inet), "*", port) + except Exception as e: + logg.error("%s: create socket failed (%s:%s): %s", conf.name(), "*", port, e) + sock.close() + return None + return sock + def create_port_ipv4_socket(self, conf, addr, port, dgram): + inet = dgram and socket.SOCK_DGRAM or socket.SOCK_STREAM + sock = socket.socket(socket.AF_INET, inet) + try: + sock.bind((addr, int(port))) + logg.info("%s: bound socket at %s %s:%s", conf.name(), strINET(inet), addr, port) + except Exception as e: + logg.error("%s: create socket failed (%s:%s): %s", conf.name(), addr, port, e) + sock.close() + return None + return sock + def create_port_ipv6_socket(self, conf, addr, port, dgram): + inet = dgram and socket.SOCK_DGRAM or socket.SOCK_STREAM + sock = socket.socket(socket.AF_INET6, inet) + try: + sock.bind((addr, int(port))) + logg.info("%s: bound socket at %s [%s]:%s", conf.name(), strINET(inet), addr, port) + except Exception as e: + logg.error("%s: create socket failed ([%s]:%s): %s", conf.name(), addr, port, e) + sock.close() + return None + return sock + def extend_exec_env(self, env): + env = env.copy() + # implant DefaultPath into $PATH + path = env.get("PATH", DefaultPath) + parts = path.split(os.pathsep) + for part in DefaultPath.split(os.pathsep): + if part and part not in parts: + parts.append(part) + env["PATH"] = str(os.pathsep).join(parts) + # reset locale to system default + for name in ResetLocale: + if name in env: + del env[name] + locale = {} + path = env.get("LOCALE_CONF", LocaleConf) + parts = path.split(os.pathsep) + for part in parts: + if os.path.isfile(part): + for var, val in self.read_env_file("-"+part): + locale[var] = val + env[var] = val + if "LANG" not in locale: + env["LANG"] = locale.get("LANGUAGE", locale.get("LC_CTYPE", "C")) + return 
env + def expand_list(self, group_lines, conf): + result = [] + for line in group_lines: + for item in line.split(): + if item: + result.append(self.expand_special(item, conf)) + return result + def get_User(self, conf): + return self.expand_special(conf.get(Service, "User", ""), conf) + def get_Group(self, conf): + return self.expand_special(conf.get(Service, "Group", ""), conf) + def get_SupplementaryGroups(self, conf): + return self.expand_list(conf.getlist(Service, "SupplementaryGroups", []), conf) + def skip_journal_log(self, conf): + if self.get_unit_type(conf.name()) not in ["service"]: + return True + std_out = conf.get(Service, "StandardOutput", DefaultStandardOutput) + std_err = conf.get(Service, "StandardError", DefaultStandardError) + out, err = False, False + if std_out in ["null"]: out = True + if std_out.startswith("file:"): out = True + if std_err in ["inherit"]: std_err = std_out + if std_err in ["null"]: err = True + if std_err.startswith("file:"): err = True + if std_err.startswith("append:"): err = True + return out and err + def dup2_journal_log(self, conf): + msg = "" + std_inp = conf.get(Service, "StandardInput", DefaultStandardInput) + std_out = conf.get(Service, "StandardOutput", DefaultStandardOutput) + std_err = conf.get(Service, "StandardError", DefaultStandardError) + inp, out, err = None, None, None + if std_inp in ["null"]: + inp = open(_dev_null, "r") + elif std_inp.startswith("file:"): + fname = std_inp[len("file:"):] + if os.path.exists(fname): + inp = open(fname, "r") + else: + inp = open(_dev_zero, "r") + else: + inp = open(_dev_zero, "r") + assert inp is not None + try: + if std_out in ["null"]: + out = open(_dev_null, "w") + elif std_out.startswith("file:"): + fname = std_out[len("file:"):] + fdir = os.path.dirname(fname) + if not os.path.exists(fdir): + os.makedirs(fdir) + out = open(fname, "w") + elif std_out.startswith("append:"): + fname = std_out[len("append:"):] + fdir = os.path.dirname(fname) + if not os.path.exists(fdir): + os.makedirs(fdir) + out = open(fname, "a") + except Exception as e: + msg += "\n%s: %s" % (fname, e) + if out is None: + out = self.open_journal_log(conf) + err = out + assert out is not None + try: + if std_err in ["inherit"]: + err = out + elif std_err in ["null"]: + err = open(_dev_null, "w") + elif std_err.startswith("file:"): + fname = std_err[len("file:"):] + fdir = os.path.dirname(fname) + if not os.path.exists(fdir): + os.makedirs(fdir) + err = open(fname, "w") + elif std_err.startswith("append:"): + fname = std_err[len("append:"):] + fdir = os.path.dirname(fname) + if not os.path.exists(fdir): + os.makedirs(fdir) + err = open(fname, "a") + except Exception as e: + msg += "\n%s: %s" % (fname, e) + if err is None: + err = self.open_journal_log(conf) + assert err is not None + if msg: + err.write("ERROR:") + err.write(msg.strip()) + err.write("\n") + if EXEC_DUP2: + os.dup2(inp.fileno(), sys.stdin.fileno()) + os.dup2(out.fileno(), sys.stdout.fileno()) + os.dup2(err.fileno(), sys.stderr.fileno()) + def execve_from(self, conf, cmd, env): + """ this code is commonly run in a child process // returns exit-code""" + # | + runs = conf.get(Service, "Type", "simple").lower() + # logg.debug("%s process for %s => %s", runs, strE(conf.name()), strQ(conf.filename())) + self.dup2_journal_log(conf) + cmd_args = [] + # + runuser = self.get_User(conf) + rungroup = self.get_Group(conf) + xgroups = self.get_SupplementaryGroups(conf) + envs = shutil_setuid(runuser, rungroup, xgroups) + badpath = self.chdir_workingdir(conf) # some dirs 
need setuid before + if badpath: + logg.error("(%s): bad workingdir: '%s'", shell_cmd(cmd), badpath) + sys.exit(1) + env = self.extend_exec_env(env) + env.update(envs) # set $HOME to ~$USER + try: + if EXEC_SPAWN: + cmd_args = [arg for arg in cmd] # satisfy mypy + exitcode = os.spawnvpe(os.P_WAIT, cmd[0], cmd_args, env) + sys.exit(exitcode) + else: # pragma: no cover + os.execve(cmd[0], cmd, env) + sys.exit(11) # pragma: no cover (can not be reached / bug like mypy#8401) + except Exception as e: + logg.error("(%s): %s", shell_cmd(cmd), e) + sys.exit(1) + def test_start_unit(self, unit): + """ helper function to test the code that is normally forked off """ + conf = self.load_unit_conf(unit) + if not conf: return None + env = self.get_env(conf) + for cmd in conf.getlist(Service, "ExecStart", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + self.execve_from(conf, newcmd, env) + return None + def stop_modules(self, *modules): + """ [UNIT]... -- stop these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.stop_units(units) and found_all + def stop_units(self, units): + """ fails if any unit fails to stop """ + self.wait_system() + done = True + for unit in self.sortedBefore(units): + if not self.stop_unit(unit): + done = False + return done + def stop_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.stop_unit_from(conf) + + def get_TimeoutStopSec(self, conf): + timeout = conf.get(Service, "TimeoutSec", strE(DefaultTimeoutStartSec)) + timeout = conf.get(Service, "TimeoutStopSec", timeout) + return time_to_seconds(timeout, DefaultMaximumTimeout) + def stop_unit_from(self, conf): + if not conf: return False + if self.syntax_check(conf) > 100: return False + with waitlock(conf): + logg.info(" stop unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_stop_unit_from(conf) + def do_stop_unit_from(self, conf): + if conf.name().endswith(".service"): + return self.do_stop_service_from(conf) + elif conf.name().endswith(".socket"): + return self.do_stop_socket_from(conf) + elif conf.name().endswith(".target"): + return self.do_stop_target_from(conf) + else: + logg.error("stop not implemented for unit type: %s", conf.name()) + return False + def do_stop_service_from(self, conf): + # | + timeout = self.get_TimeoutStopSec(conf) + runs = conf.get(Service, "Type", "simple").lower() + env = self.get_env(conf) + if not self._quiet: + okee = self.exec_check_unit(conf, env, Service, "ExecStop") + if not okee and _no_reload: return False + service_directories = self.env_service_directories(conf) + env.update(service_directories) + returncode = 0 + service_result = "success" + if runs in ["oneshot"]: + status_file = self.get_status_file_from(conf) + if self.get_status_from(conf, "ActiveState", "unknown") == "inactive": + logg.warning("the service is already down once") + return True + for cmd in conf.getlist(Service, "ExecStop", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s stop %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = 
subprocess_waitpid(forkpid) + if run.returncode and exe.check: + returncode = run.returncode + service_result = "failed" + break + if True: + if returncode: + self.set_status_from(conf, "ExecStopCode", strE(returncode)) + self.write_status_from(conf, AS="failed") + else: + self.clean_status_from(conf) # "inactive" + # fallback Stop => Kill for ["simple","notify","forking"] + elif not conf.getlist(Service, "ExecStop", []): + logg.info("no ExecStop => systemctl kill") + if True: + self.do_kill_unit_from(conf) + self.clean_pid_file_from(conf) + self.clean_status_from(conf) # "inactive" + elif runs in ["simple", "exec", "notify", "idle"]: + status_file = self.get_status_file_from(conf) + size = os.path.exists(status_file) and os.path.getsize(status_file) + logg.info("STATUS %s %s", status_file, size) + pid = 0 + for cmd in conf.getlist(Service, "ExecStop", []): + env["MAINPID"] = strE(self.read_mainpid_from(conf)) + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s stop %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + run = must_have_failed(run, newcmd) # TODO: a workaround + # self.write_status_from(conf, MainPID=run.pid) # no ExecStop + if run.returncode and exe.check: + returncode = run.returncode + service_result = "failed" + break + pid = to_intN(env.get("MAINPID")) + if pid: + if self.wait_vanished_pid(pid, timeout): + self.clean_pid_file_from(conf) + self.clean_status_from(conf) # "inactive" + else: + logg.info("%s sleep as no PID was found on Stop", runs) + time.sleep(MinimumTimeoutStopSec) + pid = self.read_mainpid_from(conf) + if not pid or not pid_exists(pid) or pid_zombie(pid): + self.clean_pid_file_from(conf) + self.clean_status_from(conf) # "inactive" + elif runs in ["forking"]: + status_file = self.get_status_file_from(conf) + pid_file = self.pid_file_from(conf) + for cmd in conf.getlist(Service, "ExecStop", []): + # active = self.is_active_from(conf) + if pid_file: + new_pid = self.read_mainpid_from(conf) + if new_pid: + env["MAINPID"] = strE(new_pid) + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("fork stop %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + if run.returncode and exe.check: + returncode = run.returncode + service_result = "failed" + break + pid = to_intN(env.get("MAINPID")) + if pid: + if self.wait_vanished_pid(pid, timeout): + self.clean_pid_file_from(conf) + else: + logg.info("%s sleep as no PID was found on Stop", runs) + time.sleep(MinimumTimeoutStopSec) + pid = self.read_mainpid_from(conf) + if not pid or not pid_exists(pid) or pid_zombie(pid): + self.clean_pid_file_from(conf) + if returncode: + if os.path.isfile(status_file): + self.set_status_from(conf, "ExecStopCode", strE(returncode)) + self.write_status_from(conf, AS="failed") + else: + self.clean_status_from(conf) # "inactive" + else: + logg.error("unsupported run type '%s'", runs) + return False + # POST sequence + if not self.is_active_from(conf): + env["SERVICE_RESULT"] = service_result + for cmd in conf.getlist(Service, "ExecStopPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-stop %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-stop done (%s) <-%s>", + run.returncode or "OK", run.signal or "") 
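+ # note (added comment): once the ExecStopPost hooks have run, the service
+ # directories created at start (RuntimeDirectory-style folders) are removed
+ # again below, unless self._only_what selects "none" or "keep".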
+ if self._only_what[0] not in ["none", "keep"]: + self.remove_service_directories(conf) + return service_result == "success" + def do_stop_socket_from(self, conf): + runs = "socket" + timeout = self.get_SocketTimeoutSec(conf) + accept = conf.getbool(Socket, "Accept", "no") + service_unit = self.get_socket_service_from(conf) + service_conf = self.load_unit_conf(service_unit) + if service_conf is None: + logg.debug("unit could not be loaded (%s)", service_unit) + logg.error("Unit %s not found.", service_unit) + return False + env = self.get_env(conf) + if not self._quiet: + okee = self.exec_check_unit(conf, env, Socket, "ExecStop") + if not okee and _no_reload: return False + if not accept: + # we do not listen but have the service started right away + done = self.do_stop_service_from(service_conf) + service_result = done and "success" or "failed" + else: + done = self.do_stop_service_from(service_conf) + service_result = done and "success" or "failed" + # service_directories = self.env_service_directories(conf) + # env.update(service_directories) + # POST sequence + if not self.is_active_from(conf): + env["SERVICE_RESULT"] = service_result + for cmd in conf.getlist(Socket, "ExecStopPost", []): + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("post-stop %s", shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + logg.debug("post-stop done (%s) <-%s>", + run.returncode or "OK", run.signal or "") + return service_result == "success" + def wait_vanished_pid(self, pid, timeout): + if not pid: + return True + if not self.is_active_pid(pid): + return True + logg.info("wait for PID %s to vanish (%ss)", pid, timeout) + for x in xrange(int(timeout)): + time.sleep(1) # until TimeoutStopSec + if not self.is_active_pid(pid): + logg.info("wait for PID %s is done (%s.)", pid, x) + return True + logg.info("wait for PID %s failed (%s.)", pid, timeout) + return False + def reload_modules(self, *modules): + """ [UNIT]... 
-- reload these units """ + self.wait_system() + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.reload_units(units) and found_all + def reload_units(self, units): + """ fails if any unit fails to reload """ + self.wait_system() + done = True + for unit in self.sortedAfter(units): + if not self.reload_unit(unit): + done = False + return done + def reload_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.reload_unit_from(conf) + def reload_unit_from(self, conf): + if not conf: return False + if self.syntax_check(conf) > 100: return False + with waitlock(conf): + logg.info(" reload unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_reload_unit_from(conf) + def do_reload_unit_from(self, conf): + if conf.name().endswith(".service"): + return self.do_reload_service_from(conf) + elif conf.name().endswith(".socket"): + service_unit = self.get_socket_service_from(conf) + service_conf = self.load_unit_conf(service_unit) + if service_conf: + return self.do_reload_service_from(service_conf) + else: + logg.error("no %s found for unit type: %s", service_unit, conf.name()) + return False + elif conf.name().endswith(".target"): + return self.do_reload_target_from(conf) + else: + logg.error("reload not implemented for unit type: %s", conf.name()) + return False + def do_reload_service_from(self, conf): + runs = conf.get(Service, "Type", "simple").lower() + env = self.get_env(conf) + if not self._quiet: + okee = self.exec_check_unit(conf, env, Service, "ExecReload") + if not okee and _no_reload: return False + initscript = conf.filename() + if self.is_sysv_file(initscript): + status_file = self.get_status_file_from(conf) + if initscript: + newcmd = [initscript, "reload"] + env["SYSTEMCTL_SKIP_REDIRECT"] = "yes" + logg.info("%s reload %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: nocover + run = subprocess_waitpid(forkpid) + self.set_status_from(conf, "ExecReloadCode", run.returncode) + if run.returncode: + self.write_status_from(conf, AS="failed") + return False + else: + self.write_status_from(conf, AS="active") + return True + service_directories = self.env_service_directories(conf) + env.update(service_directories) + if runs in ["simple", "exec", "notify", "forking", "idle"]: + if not self.is_active_from(conf): + logg.info("no reload on inactive service %s", conf.name()) + return True + for cmd in conf.getlist(Service, "ExecReload", []): + env["MAINPID"] = strE(self.read_mainpid_from(conf)) + exe, newcmd = self.exec_newcmd(cmd, env, conf) + logg.info("%s reload %s", runs, shell_cmd(newcmd)) + forkpid = os.fork() + if not forkpid: + self.execve_from(conf, newcmd, env) # pragma: no cover + run = subprocess_waitpid(forkpid) + if run.returncode and exe.check: + logg.error("Job for %s failed because the control process exited with error code. 
(%s)", + conf.name(), run.returncode) + return False + time.sleep(MinimumYield) + return True + elif runs in ["oneshot"]: + logg.debug("ignored run type '%s' for reload", runs) + return True + else: + logg.error("unsupported run type '%s'", runs) + return False + def restart_modules(self, *modules): + """ [UNIT]... -- restart these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.restart_units(units) and found_all + def restart_units(self, units): + """ fails if any unit fails to restart """ + self.wait_system() + done = True + for unit in self.sortedAfter(units): + if not self.restart_unit(unit): + done = False + return done + def restart_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.restart_unit_from(conf) + def restart_unit_from(self, conf): + if not conf: return False + if self.syntax_check(conf) > 100: return False + with waitlock(conf): + if conf.name().endswith(".service"): + logg.info(" restart service %s => %s", conf.name(), strQ(conf.filename())) + if not self.is_active_from(conf): + return self.do_start_unit_from(conf) + else: + return self.do_restart_unit_from(conf) + else: + return self.do_restart_unit_from(conf) + def do_restart_unit_from(self, conf): + logg.info("(restart) => stop/start %s", conf.name()) + self.do_stop_unit_from(conf) + return self.do_start_unit_from(conf) + def try_restart_modules(self, *modules): + """ [UNIT]... -- try-restart these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.try_restart_units(units) and found_all + def try_restart_units(self, units): + """ fails if any module fails to try-restart """ + self.wait_system() + done = True + for unit in self.sortedAfter(units): + if not self.try_restart_unit(unit): + done = False + return done + def try_restart_unit(self, unit): + """ only do 'restart' if 'active' """ + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + with waitlock(conf): + logg.info(" try-restart unit %s => %s", conf.name(), strQ(conf.filename())) + if self.is_active_from(conf): + return self.do_restart_unit_from(conf) + return True + def reload_or_restart_modules(self, *modules): + """ [UNIT]... 
-- reload-or-restart these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.reload_or_restart_units(units) and found_all + def reload_or_restart_units(self, units): + """ fails if any unit does not reload-or-restart """ + self.wait_system() + done = True + for unit in self.sortedAfter(units): + if not self.reload_or_restart_unit(unit): + done = False + return done + def reload_or_restart_unit(self, unit): + """ do 'reload' if specified, otherwise do 'restart' """ + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.reload_or_restart_unit_from(conf) + def reload_or_restart_unit_from(self, conf): + """ do 'reload' if specified, otherwise do 'restart' """ + if not conf: return False + with waitlock(conf): + logg.info(" reload-or-restart unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_reload_or_restart_unit_from(conf) + def do_reload_or_restart_unit_from(self, conf): + if not self.is_active_from(conf): + # try: self.stop_unit_from(conf) + # except Exception as e: pass + return self.do_start_unit_from(conf) + elif conf.getlist(Service, "ExecReload", []): + logg.info("found service to have ExecReload -> 'reload'") + return self.do_reload_unit_from(conf) + else: + logg.info("found service without ExecReload -> 'restart'") + return self.do_restart_unit_from(conf) + def reload_or_try_restart_modules(self, *modules): + """ [UNIT]... -- reload-or-try-restart these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.reload_or_try_restart_units(units) and found_all + def reload_or_try_restart_units(self, units): + """ fails if any unit fails to reload-or-try-restart """ + self.wait_system() + done = True + for unit in self.sortedAfter(units): + if not self.reload_or_try_restart_unit(unit): + done = False + return done + def reload_or_try_restart_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.reload_or_try_restart_unit_from(conf) + def reload_or_try_restart_unit_from(self, conf): + with waitlock(conf): + logg.info(" reload-or-try-restart unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_reload_or_try_restart_unit_from(conf) + def do_reload_or_try_restart_unit_from(self, conf): + if conf.getlist(Service, "ExecReload", []): + return self.do_reload_unit_from(conf) + elif not self.is_active_from(conf): + return True + else: + return self.do_restart_unit_from(conf) + def kill_modules(self, *modules): + """ [UNIT]... 
-- kill these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.kill_units(units) and found_all + def kill_units(self, units): + """ fails if any unit could not be killed """ + self.wait_system() + done = True + for unit in self.sortedBefore(units): + if not self.kill_unit(unit): + done = False + return done + def kill_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.kill_unit_from(conf) + def kill_unit_from(self, conf): + if not conf: return False + with waitlock(conf): + logg.info(" kill unit %s => %s", conf.name(), strQ(conf.filename())) + return self.do_kill_unit_from(conf) + def do_kill_unit_from(self, conf): + started = time.time() + doSendSIGKILL = self.get_SendSIGKILL(conf) + doSendSIGHUP = self.get_SendSIGHUP(conf) + useKillMode = self.get_KillMode(conf) + useKillSignal = self.get_KillSignal(conf) + kill_signal = getattr(signal, useKillSignal) + timeout = self.get_TimeoutStopSec(conf) + status_file = self.get_status_file_from(conf) + size = os.path.exists(status_file) and os.path.getsize(status_file) + logg.info("STATUS %s %s", status_file, size) + mainpid = self.read_mainpid_from(conf) + self.clean_status_from(conf) # clear RemainAfterExit and TimeoutStartSec + if not mainpid: + if useKillMode in ["control-group"]: + logg.warning("no main PID %s", strQ(conf.filename())) + logg.warning("and there is no control-group here") + else: + logg.info("no main PID %s", strQ(conf.filename())) + return False + if not pid_exists(mainpid) or pid_zombie(mainpid): + logg.debug("ignoring children when mainpid is already dead") + # because we list child processes, not processes in control-group + return True + pidlist = self.pidlist_of(mainpid) # here + if pid_exists(mainpid): + logg.info("stop kill PID %s", mainpid) + self._kill_pid(mainpid, kill_signal) + if useKillMode in ["control-group"]: + if len(pidlist) > 1: + logg.info("stop control-group PIDs %s", pidlist) + for pid in pidlist: + if pid != mainpid: + self._kill_pid(pid, kill_signal) + if doSendSIGHUP: + logg.info("stop SendSIGHUP to PIDs %s", pidlist) + for pid in pidlist: + self._kill_pid(pid, signal.SIGHUP) + # wait for the processes to have exited + while True: + dead = True + for pid in pidlist: + if pid_exists(pid) and not pid_zombie(pid): + dead = False + break + if dead: + break + if time.time() > started + timeout: + logg.info("service PIDs not stopped after %s", timeout) + break + time.sleep(1) # until TimeoutStopSec + if dead or not doSendSIGKILL: + logg.info("done kill PID %s %s", mainpid, dead and "OK") + return dead + if useKillMode in ["control-group", "mixed"]: + logg.info("hard kill PIDs %s", pidlist) + for pid in pidlist: + if pid != mainpid: + self._kill_pid(pid, signal.SIGKILL) + time.sleep(MinimumYield) + # useKillMode in [ "control-group", "mixed", "process" ] + if pid_exists(mainpid): + logg.info("hard kill PID %s", mainpid) + self._kill_pid(mainpid, signal.SIGKILL) + time.sleep(MinimumYield) + dead = not pid_exists(mainpid) or pid_zombie(mainpid) + logg.info("done hard kill PID %s %s", mainpid, dead and "OK") + return dead + def _kill_pid(self, pid, kill_signal = 
None): + try: + sig = kill_signal or signal.SIGTERM + os.kill(pid, sig) + except OSError as e: + if e.errno == errno.ESRCH or e.errno == errno.ENOENT: + logg.debug("kill PID %s => No such process", pid) + return True + else: + logg.error("kill PID %s => %s", pid, str(e)) + return False + return not pid_exists(pid) or pid_zombie(pid) + def is_active_modules(self, *modules): + """ [UNIT].. -- check if these units are in active state + implements True if all is-active = True """ + # systemctl returns multiple lines, one for each argument + # "active" when is_active + # "inactive" when not is_active + # "unknown" when not enabled + # The return code is set to + # 0 when "active" + # 1 when unit is not found + # 3 when any "inactive" or "unknown" + # However: # TODO! BUG in original systemctl! + # documentation says " exit code 0 if at least one is active" + # and "Unless --quiet is specified, print the unit state" + # | + units = [] + results = [] + for module in modules: + units = self.match_units(to_list(module)) + if not units: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + self.error |= NOT_ACTIVE + results += ["inactive"] + continue + for unit in units: + active = self.get_active_unit(unit) + enabled = self.enabled_unit(unit) + if enabled != "enabled" and ACTIVE_IF_ENABLED: + active = "inactive" # "unknown" + results += [active] + break + # how it should work: + status = "active" in results + # how 'systemctl' works: + non_active = [result for result in results if result != "active"] + if non_active: + self.error |= NOT_ACTIVE + if non_active: + self.error |= NOT_OK # status + if _quiet: + return [] + return results + def is_active_from(self, conf): + """ used in try-restart/other commands to check if needed. """ + if not conf: return False + return self.get_active_from(conf) == "active" + def active_pid_from(self, conf): + if not conf: return False + pid = self.read_mainpid_from(conf) + return self.is_active_pid(pid) + def is_active_pid(self, pid): + """ returns pid if the pid is still an active process """ + if pid and pid_exists(pid) and not pid_zombie(pid): + return pid # usually a string (not null) + return None + def get_active_unit(self, unit): + """ returns 'active' 'inactive' 'failed' 'unknown' """ + conf = self.load_unit_conf(unit) + if not conf: + logg.warning("Unit %s not found.", unit) + return "unknown" + else: + return self.get_active_from(conf) + def get_active_from(self, conf): + if conf.name().endswith(".service"): + return self.get_active_service_from(conf) + elif conf.name().endswith(".socket"): + service_unit = self.get_socket_service_from(conf) + service_conf = self.load_unit_conf(service_unit) + return self.get_active_service_from(service_conf) + elif conf.name().endswith(".target"): + return self.get_active_target_from(conf) + else: + logg.debug("is-active not implemented for unit type: %s", conf.name()) + return "unknown" # TODO: "inactive" ? + def get_active_service_from(self, conf): + """ returns 'active' 'inactive' 'failed' 'unknown' """ + # used in try-restart/other commands to check if needed. 
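+ # note (added comment): resolution order below: a configured PIDFile that is
+ # missing means "inactive"; otherwise a recorded ActiveState in the status
+ # file wins; as a last resort the MainPID is probed, where a dead or zombie
+ # PID maps to "failed".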
+ if not conf: return "unknown" + pid_file = self.pid_file_from(conf) + if pid_file: # application PIDFile + if not os.path.exists(pid_file): + return "inactive" + status_file = self.get_status_file_from(conf) + if self.getsize(status_file): + state = self.get_status_from(conf, "ActiveState", "") + if state: + if DEBUG_STATUS: + logg.info("get_status_from %s => %s", conf.name(), state) + return state + pid = self.read_mainpid_from(conf) + if DEBUG_STATUS: + logg.debug("pid_file '%s' => PID %s", pid_file or status_file, strE(pid)) + if pid: + if not pid_exists(pid) or pid_zombie(pid): + return "failed" + return "active" + else: + return "inactive" + def get_active_target_from(self, conf): + """ returns 'active' 'inactive' 'failed' 'unknown' """ + return self.get_active_target(conf.name()) + def get_active_target(self, target): + """ returns 'active' 'inactive' 'failed' 'unknown' """ + if target in self.get_active_target_list(): + status = self.is_system_running() + if status in ["running"]: + return "active" + return "inactive" + else: + services = self.target_default_services(target) + result = "active" + for service in services: + conf = self.load_unit_conf(service) + if conf: + state = self.get_active_from(conf) + if state in ["failed"]: + result = state + elif state not in ["active"]: + result = state + return result + def get_active_target_list(self): + current_target = self.get_default_target() + target_list = self.get_target_list(current_target) + target_list += [DefaultUnit] # upper end + target_list += [SysInitTarget] # lower end + return target_list + def get_substate_from(self, conf): + """ returns 'running' 'exited' 'dead' 'failed' 'plugged' 'mounted' """ + if not conf: return None + pid_file = self.pid_file_from(conf) + if pid_file: + if not os.path.exists(pid_file): + return "dead" + status_file = self.get_status_file_from(conf) + if self.getsize(status_file): + state = self.get_status_from(conf, "ActiveState", "") + if state: + if state in ["active"]: + return self.get_status_from(conf, "SubState", "running") + else: + return self.get_status_from(conf, "SubState", "dead") + pid = self.read_mainpid_from(conf) + if DEBUG_STATUS: + logg.debug("pid_file '%s' => PID %s", pid_file or status_file, strE(pid)) + if pid: + if not pid_exists(pid) or pid_zombie(pid): + return "failed" + return "running" + else: + return "dead" + def is_failed_modules(self, *modules): + """ [UNIT]... -- check if these units are in failes state + implements True if any is-active = True """ + units = [] + results = [] + for module in modules: + units = self.match_units(to_list(module)) + if not units: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + results += ["inactive"] + continue + for unit in units: + active = self.get_active_unit(unit) + enabled = self.enabled_unit(unit) + if enabled != "enabled" and ACTIVE_IF_ENABLED: + active = "inactive" + results += [active] + break + if "failed" in results: + self.error = 0 + else: + self.error |= NOT_OK + if _quiet: + return [] + return results + def is_failed_from(self, conf): + if conf is None: return True + return self.get_active_from(conf) == "failed" + def reset_failed_modules(self, *modules): + """ [UNIT]... 
-- Reset failed state for all, one, or more units """ + units = [] + status = True + for module in modules: + units = self.match_units(to_list(module)) + if not units: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + return False + for unit in units: + if not self.reset_failed_unit(unit): + logg.error("Unit %s could not be reset.", unit_of(module)) + status = False + break + return status + def reset_failed_unit(self, unit): + conf = self.load_unit_conf(unit) + if not conf: + logg.warning("Unit %s not found.", unit) + return False + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.reset_failed_from(conf) + def reset_failed_from(self, conf): + if conf is None: return True + if not self.is_failed_from(conf): return False + done = False + status_file = self.get_status_file_from(conf) + if status_file and os.path.exists(status_file): + try: + os.remove(status_file) + done = True + logg.debug("done rm %s", status_file) + except Exception as e: + logg.error("while rm %s: %s", status_file, e) + pid_file = self.pid_file_from(conf) + if pid_file and os.path.exists(pid_file): + try: + os.remove(pid_file) + done = True + logg.debug("done rm %s", pid_file) + except Exception as e: + logg.error("while rm %s: %s", pid_file, e) + return done + def status_modules(self, *modules): + """ [UNIT]... check the status of these units. + """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + result = self.status_units(units) + # if not found_all: + # self.error |= NOT_OK | NOT_ACTIVE # 3 + # # same as (dead) # original behaviour + return result + def status_units(self, units): + """ concatenates the status output of all units + and the last non-successful statuscode """ + status = 0 + result = "" + for unit in units: + status1, result1 = self.status_unit(unit) + if status1: status = status1 + if result: result += "\n\n" + result += result1 + if status: + self.error |= NOT_OK | NOT_ACTIVE # 3 + return result + def status_unit(self, unit): + conf = self.get_unit_conf(unit) + result = "%s - %s" % (unit, self.get_description_from(conf)) + loaded = conf.loaded() + if loaded: + filename = str(conf.filename()) + enabled = self.enabled_from(conf) + result += "\n Loaded: {loaded} ({filename}, {enabled})".format(**locals()) + for path in conf.overrides(): + result += "\n Drop-In: {path}".format(**locals()) + else: + result += "\n Loaded: failed" + return 3, result + active = self.get_active_from(conf) + substate = self.get_substate_from(conf) + result += "\n Active: {} ({})".format(active, substate) + if active == "active": + return 0, result + else: + return 3, result + def cat_modules(self, *modules): + """ [UNIT]... 
show the *.system file for these" + """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + result = self.cat_units(units) + if not found_all: + self.error |= NOT_OK + return result + def cat_units(self, units): + done = True + result = "" + for unit in units: + text = self.cat_unit(unit) + if not text: + done = False + else: + if result: + result += "\n\n" + result += text + if not done: + self.error = NOT_OK + return result + def cat_unit(self, unit): + try: + unit_file = self.unit_file(unit) + if unit_file: + return open(unit_file).read() + logg.error("No files found for %s", unit) + except Exception as e: + print("Unit {} is not-loaded: {}".format(unit, e)) + self.error |= NOT_OK + return None + ## + ## + def load_preset_files(self, module = None): # -> [ preset-file-names,... ] + """ reads all preset files, returns the scanned files """ + if self._preset_file_list is None: + self._preset_file_list = {} + assert self._preset_file_list is not None + for folder in self.preset_folders(): + if not folder: + continue + if self._root: + folder = os_path(self._root, folder) + if not os.path.isdir(folder): + continue + for name in os.listdir(folder): + if not name.endswith(".preset"): + continue + if name not in self._preset_file_list: + path = os.path.join(folder, name) + if os.path.isdir(path): + continue + preset = PresetFile().read(path) + self._preset_file_list[name] = preset + logg.debug("found %s preset files", len(self._preset_file_list)) + return sorted(self._preset_file_list.keys()) + def get_preset_of_unit(self, unit): + """ [UNIT] check the *.preset of this unit + """ + self.load_preset_files() + assert self._preset_file_list is not None + for filename in sorted(self._preset_file_list.keys()): + preset = self._preset_file_list[filename] + status = preset.get_preset(unit) + if status: + return status + return None + def preset_modules(self, *modules): + """ [UNIT]... 
-- set 'enabled' when in *.preset + """ + if self.user_mode(): + logg.warning("preset makes no sense in --user mode") + return True + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.preset_units(units) and found_all + def preset_units(self, units): + """ fails if any unit could not be changed """ + self.wait_system() + fails = 0 + found = 0 + for unit in units: + status = self.get_preset_of_unit(unit) + if not status: continue + found += 1 + if status.startswith("enable"): + if self._preset_mode == "disable": continue + logg.info("preset enable %s", unit) + if not self.enable_unit(unit): + logg.warning("failed to enable %s", unit) + fails += 1 + if status.startswith("disable"): + if self._preset_mode == "enable": continue + logg.info("preset disable %s", unit) + if not self.disable_unit(unit): + logg.warning("failed to disable %s", unit) + fails += 1 + return not fails and not not found + def preset_all_modules(self, *modules): + """ 'preset' all services + enable or disable services according to *.preset files + """ + if self.user_mode(): + logg.warning("preset-all makes no sense in --user mode") + return True + found_all = True + units = self.match_units() # TODO: how to handle module arguments + return self.preset_units(units) and found_all + def wanted_from(self, conf, default = None): + if not conf: return default + return conf.get(Install, "WantedBy", default, True) + def enablefolders(self, wanted): + if self.user_mode(): + for folder in self.user_folders(): + yield self.default_enablefolder(wanted, folder) + if True: + for folder in self.system_folders(): + yield self.default_enablefolder(wanted, folder) + def enablefolder(self, wanted): + if self.user_mode(): + user_folder = self.user_folder() + return self.default_enablefolder(wanted, user_folder) + else: + return self.default_enablefolder(wanted) + def default_enablefolder(self, wanted, basefolder = None): + basefolder = basefolder or self.system_folder() + if not wanted: + return wanted + if not wanted.endswith(".wants"): + wanted = wanted + ".wants" + return os.path.join(basefolder, wanted) + def enable_modules(self, *modules): + """ [UNIT]... 
-- enable these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + logg.info("matched %s", unit) # ++ + if unit not in units: + units += [unit] + return self.enable_units(units) and found_all + def enable_units(self, units): + self.wait_system() + done = True + for unit in units: + if not self.enable_unit(unit): + done = False + elif self._now: + self.start_unit(unit) + return done + def enable_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + unit_file = conf.filename() + if unit_file is None: + logg.error("Unit file %s not found.", unit) + return False + if self.is_sysv_file(unit_file): + if self.user_mode(): + logg.error("Initscript %s not for --user mode", unit) + return False + return self.enable_unit_sysv(unit_file) + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.enable_unit_from(conf) + def enable_unit_from(self, conf): + wanted = self.wanted_from(conf) + if not wanted and not self._force: + logg.debug("%s has no target", conf.name()) + return False # "static" is-enabled + target = wanted or self.get_default_target() + folder = self.enablefolder(target) + if self._root: + folder = os_path(self._root, folder) + if not os.path.isdir(folder): + os.makedirs(folder) + source = conf.filename() + if not source: # pragma: no cover (was checked before) + logg.debug("%s has no real file", conf.name()) + return False + symlink = os.path.join(folder, conf.name()) + if True: + _f = self._force and "-f" or "" + logg.info("ln -s {_f} '{source}' '{symlink}'".format(**locals())) + if self._force and os.path.islink(symlink): + os.remove(target) + if not os.path.islink(symlink): + os.symlink(source, symlink) + return True + def rc3_root_folder(self): + old_folder = os_path(self._root, _rc3_boot_folder) + new_folder = os_path(self._root, _rc3_init_folder) + if os.path.isdir(old_folder): # pragma: no cover + return old_folder + return new_folder + def rc5_root_folder(self): + old_folder = os_path(self._root, _rc5_boot_folder) + new_folder = os_path(self._root, _rc5_init_folder) + if os.path.isdir(old_folder): # pragma: no cover + return old_folder + return new_folder + def enable_unit_sysv(self, unit_file): + # a "multi-user.target"/rc3 is also started in /rc5 + rc3 = self._enable_unit_sysv(unit_file, self.rc3_root_folder()) + rc5 = self._enable_unit_sysv(unit_file, self.rc5_root_folder()) + return rc3 and rc5 + def _enable_unit_sysv(self, unit_file, rc_folder): + name = os.path.basename(unit_file) + nameS = "S50"+name + nameK = "K50"+name + if not os.path.isdir(rc_folder): + os.makedirs(rc_folder) + # do not double existing entries + for found in os.listdir(rc_folder): + m = re.match(r"S\d\d(.*)", found) + if m and m.group(1) == name: + nameS = found + m = re.match(r"K\d\d(.*)", found) + if m and m.group(1) == name: + nameK = found + target = os.path.join(rc_folder, nameS) + if not os.path.exists(target): + os.symlink(unit_file, target) + target = os.path.join(rc_folder, nameK) + if not os.path.exists(target): + os.symlink(unit_file, target) + return True + def disable_modules(self, *modules): + """ [UNIT]... 
-- disable these units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.disable_units(units) and found_all + def disable_units(self, units): + self.wait_system() + done = True + for unit in units: + if not self.disable_unit(unit): + done = False + elif self._now: + self.stop_unit(unit) + return done + def disable_unit(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + unit_file = conf.filename() + if unit_file is None: + logg.error("Unit file %s not found.", unit) + return False + if self.is_sysv_file(unit_file): + if self.user_mode(): + logg.error("Initscript %s not for --user mode", unit) + return False + return self.disable_unit_sysv(unit_file) + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + return self.disable_unit_from(conf) + def disable_unit_from(self, conf): + wanted = self.wanted_from(conf) + if not wanted and not self._force: + logg.debug("%s has no target", conf.name()) + return False # "static" is-enabled + target = wanted or self.get_default_target() + for folder in self.enablefolders(target): + if self._root: + folder = os_path(self._root, folder) + symlink = os.path.join(folder, conf.name()) + if os.path.exists(symlink): + try: + _f = self._force and "-f" or "" + logg.info("rm {_f} '{symlink}'".format(**locals())) + if os.path.islink(symlink) or self._force: + os.remove(symlink) + except IOError as e: + logg.error("disable %s: %s", symlink, e) + except OSError as e: + logg.error("disable %s: %s", symlink, e) + return True + def disable_unit_sysv(self, unit_file): + rc3 = self._disable_unit_sysv(unit_file, self.rc3_root_folder()) + rc5 = self._disable_unit_sysv(unit_file, self.rc5_root_folder()) + return rc3 and rc5 + def _disable_unit_sysv(self, unit_file, rc_folder): + # a "multi-user.target"/rc3 is also started in /rc5 + name = os.path.basename(unit_file) + nameS = "S50"+name + nameK = "K50"+name + # do not forget the existing entries + for found in os.listdir(rc_folder): + m = re.match(r"S\d\d(.*)", found) + if m and m.group(1) == name: + nameS = found + m = re.match(r"K\d\d(.*)", found) + if m and m.group(1) == name: + nameK = found + target = os.path.join(rc_folder, nameS) + if os.path.exists(target): + os.unlink(target) + target = os.path.join(rc_folder, nameK) + if os.path.exists(target): + os.unlink(target) + return True + def is_enabled_sysv(self, unit_file): + name = os.path.basename(unit_file) + target = os.path.join(self.rc3_root_folder(), "S50%s" % name) + if os.path.exists(target): + return True + return False + def is_enabled_modules(self, *modules): + """ [UNIT]... 
-- check if these units are enabled + returns True if any of them is enabled.""" + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.is_enabled_units(units) # and found_all + def is_enabled_units(self, units): + """ true if any is enabled, and a list of infos """ + result = False + infos = [] + for unit in units: + infos += [self.enabled_unit(unit)] + if self.is_enabled(unit): + result = True + if not result: + self.error |= NOT_OK + return infos + def is_enabled(self, unit): + conf = self.load_unit_conf(unit) + if conf is None: + logg.error("Unit %s not found.", unit) + return False + unit_file = conf.filename() + if not unit_file: + logg.error("Unit %s not found.", unit) + return False + if self.is_sysv_file(unit_file): + return self.is_enabled_sysv(unit_file) + state = self.get_enabled_from(conf) + if state in ["enabled", "static"]: + return True + return False # ["disabled", "masked"] + def enabled_unit(self, unit): + conf = self.get_unit_conf(unit) + return self.enabled_from(conf) + def enabled_from(self, conf): + unit_file = strE(conf.filename()) + if self.is_sysv_file(unit_file): + state = self.is_enabled_sysv(unit_file) + if state: + return "enabled" + return "disabled" + return self.get_enabled_from(conf) + def get_enabled_from(self, conf): + if conf.masked: + return "masked" + wanted = self.wanted_from(conf) + target = wanted or self.get_default_target() + for folder in self.enablefolders(target): + if self._root: + folder = os_path(self._root, folder) + target = os.path.join(folder, conf.name()) + if os.path.isfile(target): + return "enabled" + if not wanted: + return "static" + return "disabled" + def mask_modules(self, *modules): + """ [UNIT]... 
-- mask non-startable units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.mask_units(units) and found_all + def mask_units(self, units): + self.wait_system() + done = True + for unit in units: + if not self.mask_unit(unit): + done = False + return done + def mask_unit(self, unit): + unit_file = self.unit_file(unit) + if not unit_file: + logg.error("Unit %s not found.", unit) + return False + if self.is_sysv_file(unit_file): + logg.error("Initscript %s can not be masked", unit) + return False + conf = self.get_unit_conf(unit) + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + folder = self.mask_folder() + if self._root: + folder = os_path(self._root, folder) + if not os.path.isdir(folder): + os.makedirs(folder) + target = os.path.join(folder, os.path.basename(unit_file)) + dev_null = _dev_null + if True: + _f = self._force and "-f" or "" + logg.debug("ln -s {_f} {dev_null} '{target}'".format(**locals())) + if self._force and os.path.islink(target): + os.remove(target) + if not os.path.exists(target): + os.symlink(dev_null, target) + logg.info("Created symlink {target} -> {dev_null}".format(**locals())) + return True + elif os.path.islink(target): + logg.debug("mask symlink does already exist: %s", target) + return True + else: + logg.error("mask target does already exist: %s", target) + return False + def mask_folder(self): + for folder in self.mask_folders(): + if folder: return folder + raise Exception("did not find any systemd/system folder") + def mask_folders(self): + if self.user_mode(): + for folder in self.user_folders(): + yield folder + if True: + for folder in self.system_folders(): + yield folder + def unmask_modules(self, *modules): + """ [UNIT]... -- unmask non-startable units """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s not found.", unit_of(module)) + self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.unmask_units(units) and found_all + def unmask_units(self, units): + self.wait_system() + done = True + for unit in units: + if not self.unmask_unit(unit): + done = False + return done + def unmask_unit(self, unit): + unit_file = self.unit_file(unit) + if not unit_file: + logg.error("Unit %s not found.", unit) + return False + if self.is_sysv_file(unit_file): + logg.error("Initscript %s can not be un/masked", unit) + return False + conf = self.get_unit_conf(unit) + if self.not_user_conf(conf): + logg.error("Unit %s not for --user mode", unit) + return False + folder = self.mask_folder() + if self._root: + folder = os_path(self._root, folder) + target = os.path.join(folder, os.path.basename(unit_file)) + if True: + _f = self._force and "-f" or "" + logg.info("rm {_f} '{target}'".format(**locals())) + if os.path.islink(target): + os.remove(target) + return True + elif not os.path.exists(target): + logg.debug("Symlink did not exist anymore: %s", target) + return True + else: + logg.warning("target is not a symlink: %s", target) + return True + def list_dependencies_modules(self, *modules): + """ [UNIT]... 
show the dependency tree" + """ + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.list_dependencies_units(units) # and found_all + def list_dependencies_units(self, units): + result = [] + for unit in units: + if result: + result += ["", ""] + result += self.list_dependencies_unit(unit) + return result + def list_dependencies_unit(self, unit): + result = [] + for line in self.list_dependencies(unit, ""): + result += [line] + return result + def list_dependencies(self, unit, indent = None, mark = None, loop = []): + mapping = {} + mapping["Requires"] = "required to start" + mapping["Wants"] = "wanted to start" + mapping["Requisite"] = "required started" + mapping["Bindsto"] = "binds to start" + mapping["PartOf"] = "part of started" + mapping[".requires"] = ".required to start" + mapping[".wants"] = ".wanted to start" + mapping["PropagateReloadTo"] = "(to be reloaded as well)" + mapping["Conflicts"] = "(to be stopped on conflict)" + restrict = ["Requires", "Requisite", "ConsistsOf", "Wants", + "BindsTo", ".requires", ".wants"] + indent = indent or "" + mark = mark or "" + deps = self.get_dependencies_unit(unit) + conf = self.get_unit_conf(unit) + if not conf.loaded(): + if not self._show_all: + return + yield "%s(%s): %s" % (indent, unit, mark) + else: + yield "%s%s: %s" % (indent, unit, mark) + for stop_recursion in ["Conflict", "conflict", "reloaded", "Propagate"]: + if stop_recursion in mark: + return + for dep in deps: + if dep in loop: + logg.debug("detected loop at %s", dep) + continue + new_loop = loop + list(deps.keys()) + new_indent = indent + "| " + new_mark = deps[dep] + if not self._show_all: + if new_mark not in restrict: + continue + if new_mark in mapping: + new_mark = mapping[new_mark] + restrict = ["Requires", "Wants", "Requisite", "BindsTo", "PartOf", "ConsistsOf", + ".requires", ".wants"] + for line in self.list_dependencies(dep, new_indent, new_mark, new_loop): + yield line + def get_dependencies_unit(self, unit, styles = None): + styles = styles or ["Requires", "Wants", "Requisite", "BindsTo", "PartOf", "ConsistsOf", + ".requires", ".wants", "PropagateReloadTo", "Conflicts", ] + conf = self.get_unit_conf(unit) + deps = {} + for style in styles: + if style.startswith("."): + for folder in self.sysd_folders(): + if not folder: + continue + require_path = os.path.join(folder, unit + style) + if self._root: + require_path = os_path(self._root, require_path) + if os.path.isdir(require_path): + for required in os.listdir(require_path): + if required not in deps: + deps[required] = style + else: + for requirelist in conf.getlist(Unit, style, []): + for required in requirelist.strip().split(" "): + deps[required.strip()] = style + return deps + def get_required_dependencies(self, unit, styles = None): + styles = styles or ["Requires", "Wants", "Requisite", "BindsTo", + ".requires", ".wants"] + return self.get_dependencies_unit(unit, styles) + def get_start_dependencies(self, unit, styles = None): # pragma: no cover + """ the list of services to be started as well / TODO: unused """ + styles = styles or ["Requires", "Wants", "Requisite", "BindsTo", "PartOf", "ConsistsOf", + ".requires", ".wants"] + deps = {} + unit_deps = self.get_dependencies_unit(unit) + for dep_unit, dep_style in unit_deps.items(): + if dep_style in styles: + if dep_unit 
in deps: + if dep_style not in deps[dep_unit]: + deps[dep_unit].append(dep_style) + else: + deps[dep_unit] = [dep_style] + next_deps = self.get_start_dependencies(dep_unit) + for dep, styles in next_deps.items(): + for style in styles: + if dep in deps: + if style not in deps[dep]: + deps[dep].append(style) + else: + deps[dep] = [style] + return deps + def list_start_dependencies_modules(self, *modules): + """ [UNIT]... show the dependency tree (experimental)" + """ + return self.list_start_dependencies_units(list(modules)) + def list_start_dependencies_units(self, units): + unit_order = [] + deps = {} + for unit in units: + unit_order.append(unit) + # unit_deps = self.get_start_dependencies(unit) # TODO + unit_deps = self.get_dependencies_unit(unit) + for dep_unit, styles in unit_deps.items(): + dep_styles = to_list(styles) + for dep_style in dep_styles: + if dep_unit in deps: + if dep_style not in deps[dep_unit]: + deps[dep_unit].append(dep_style) + else: + deps[dep_unit] = [dep_style] + deps_conf = [] + for dep in deps: + if dep in unit_order: + continue + conf = self.get_unit_conf(dep) + if conf.loaded(): + deps_conf.append(conf) + for unit in unit_order: + deps[unit] = ["Requested"] + conf = self.get_unit_conf(unit) + if conf.loaded(): + deps_conf.append(conf) + result = [] + sortlist = conf_sortedAfter(deps_conf, cmp=compareAfter) + for item in sortlist: + line = (item.name(), "(%s)" % (" ".join(deps[item.name()]))) + result.append(line) + return result + def sortedAfter(self, unitlist): + """ get correct start order for the unit list (ignoring masked units) """ + conflist = [self.get_unit_conf(unit) for unit in unitlist] + if True: + conflist = [] + for unit in unitlist: + conf = self.get_unit_conf(unit) + if conf.masked: + logg.debug("ignoring masked unit %s", unit) + continue + conflist.append(conf) + sortlist = conf_sortedAfter(conflist) + return [item.name() for item in sortlist] + def sortedBefore(self, unitlist): + """ get correct start order for the unit list (ignoring masked units) """ + conflist = [self.get_unit_conf(unit) for unit in unitlist] + if True: + conflist = [] + for unit in unitlist: + conf = self.get_unit_conf(unit) + if conf.masked: + logg.debug("ignoring masked unit %s", unit) + continue + conflist.append(conf) + sortlist = conf_sortedAfter(reversed(conflist)) + return [item.name() for item in reversed(sortlist)] + def daemon_reload_target(self): + """ reload does will only check the service files here. + The returncode will tell the number of warnings, + and it is over 100 if it can not continue even + for the relaxed systemctl.py style of execution. 
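+            For example (derived from syntax_check_service below): a unit file
+            without a [Service] section, or without any ExecStart on a
+            non-oneshot service, counts as a blocking problem (101), while a
+            merely relative ExecStart path adds a single warning.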
""" + errors = 0 + for unit in self.match_units(): + try: + conf = self.get_unit_conf(unit) + except Exception as e: + logg.error("%s: can not read unit file %s\n\t%s", + unit, strQ(conf.filename()), e) + continue + errors += self.syntax_check(conf) + if errors: + logg.warning(" (%s) found %s problems", errors, errors % 100) + return True # errors + def syntax_check(self, conf): + filename = conf.filename() + if filename and filename.endswith(".service"): + return self.syntax_check_service(conf) + return 0 + def syntax_check_service(self, conf, section = Service): + unit = conf.name() + if not conf.data.has_section(Service): + logg.error(" %s: a .service file without [Service] section", unit) + return 101 + errors = 0 + haveType = conf.get(section, "Type", "simple") + haveExecStart = conf.getlist(section, "ExecStart", []) + haveExecStop = conf.getlist(section, "ExecStop", []) + haveExecReload = conf.getlist(section, "ExecReload", []) + usedExecStart = [] + usedExecStop = [] + usedExecReload = [] + if haveType not in ["simple", "exec", "forking", "notify", "oneshot", "dbus", "idle"]: + logg.error(" %s: Failed to parse service type, ignoring: %s", unit, haveType) + errors += 100 + for line in haveExecStart: + mode, exe = exec_path(line) + if not exe.startswith("/"): + if mode.check: + logg.error(" %s: %s Executable path is not absolute.", unit, section) + else: + logg.warning("%s: %s Executable path is not absolute.", unit, section) + logg.info("%s: %s exe = %s", unit, section, exe) + errors += 1 + usedExecStart.append(line) + for line in haveExecStop: + mode, exe = exec_path(line) + if not exe.startswith("/"): + if mode.check: + logg.error(" %s: %s Executable path is not absolute.", unit, section) + else: + logg.warning("%s: %s Executable path is not absolute.", unit, section) + logg.info("%s: %s exe = %s", unit, section, exe) + errors += 1 + usedExecStop.append(line) + for line in haveExecReload: + mode, exe = exec_path(line) + if not exe.startswith("/"): + if mode.check: + logg.error(" %s: %s Executable path is not absolute.", unit, section) + else: + logg.warning("%s: %s Executable path is not absolute.", unit, section) + logg.info("%s: %s exe = %s", unit, section, exe) + errors += 1 + usedExecReload.append(line) + if haveType in ["simple", "exec", "notify", "forking", "idle"]: + if not usedExecStart and not usedExecStop: + logg.error(" %s: %s lacks both ExecStart and ExecStop= setting. Refusing.", unit, section) + errors += 101 + elif not usedExecStart and haveType != "oneshot": + logg.error(" %s: %s has no ExecStart= setting, which is only allowed for Type=oneshot services. Refusing.", unit, section) + errors += 101 + if len(usedExecStart) > 1 and haveType != "oneshot": + logg.error(" %s: there may be only one %s ExecStart statement (unless for 'oneshot' services)." + + "\n\t\t\tYou can use ExecStartPre / ExecStartPost to add additional commands.", unit, section) + errors += 1 + if len(usedExecStop) > 1 and haveType != "oneshot": + logg.info(" %s: there should be only one %s ExecStop statement (unless for 'oneshot' services)." + + "\n\t\t\tYou can use ExecStopPost to add additional commands (also executed on failed Start)", unit, section) + if len(usedExecReload) > 1: + logg.info(" %s: there should be only one %s ExecReload statement." 
+ + "\n\t\t\tUse ' ; ' for multiple commands (ExecReloadPost or ExedReloadPre do not exist)", unit, section) + if len(usedExecReload) > 0 and "/bin/kill " in usedExecReload[0]: + logg.warning(" %s: the use of /bin/kill is not recommended for %s ExecReload as it is asynchronous." + + "\n\t\t\tThat means all the dependencies will perform the reload simultaneously / out of order.", unit, section) + if conf.getlist(Service, "ExecRestart", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecRestart (ignored)", unit, section) + if conf.getlist(Service, "ExecRestartPre", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecRestartPre (ignored)", unit, section) + if conf.getlist(Service, "ExecRestartPost", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecRestartPost (ignored)", unit, section) + if conf.getlist(Service, "ExecReloadPre", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecReloadPre (ignored)", unit, section) + if conf.getlist(Service, "ExecReloadPost", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecReloadPost (ignored)", unit, section) + if conf.getlist(Service, "ExecStopPre", []): # pragma: no cover + logg.error(" %s: there no such thing as an %s ExecStopPre (ignored)", unit, section) + for env_file in conf.getlist(Service, "EnvironmentFile", []): + if env_file.startswith("-"): continue + if not os.path.isfile(os_path(self._root, self.expand_special(env_file, conf))): + logg.error(" %s: Failed to load environment files: %s", unit, env_file) + errors += 101 + return errors + def exec_check_unit(self, conf, env, section = Service, exectype = ""): + if conf is None: # pragma: no cover (is never null) + return True + if not conf.data.has_section(section): + return True # pragma: no cover + haveType = conf.get(section, "Type", "simple") + if self.is_sysv_file(conf.filename()): + return True # we don't care about that + unit = conf.name() + abspath = 0 + notexists = 0 + badusers = 0 + badgroups = 0 + for execs in ["ExecStartPre", "ExecStart", "ExecStartPost", "ExecStop", "ExecStopPost", "ExecReload"]: + if not execs.startswith(exectype): + continue + for cmd in conf.getlist(section, execs, []): + mode, newcmd = self.exec_newcmd(cmd, env, conf) + if not newcmd: + continue + exe = newcmd[0] + if not exe: + continue + if exe[0] != "/": + logg.error(" %s: Exec is not an absolute path: %s=%s", unit, execs, cmd) + abspath += 1 + if not os.path.isfile(exe): + logg.error(" %s: Exec command does not exist: (%s) %s", unit, execs, exe) + if mode.check: + notexists += 1 + newexe1 = os.path.join("/usr/bin", exe) + newexe2 = os.path.join("/bin", exe) + if os.path.exists(newexe1): + logg.error(" %s: but this does exist: %s %s", unit, " " * len(execs), newexe1) + elif os.path.exists(newexe2): + logg.error(" %s: but this does exist: %s %s", unit, " " * len(execs), newexe2) + users = [conf.get(section, "User", ""), conf.get(section, "SocketUser", "")] + groups = [conf.get(section, "Group", ""), conf.get(section, "SocketGroup", "")] + conf.getlist(section, "SupplementaryGroups") + for user in users: + if user: + try: pwd.getpwnam(self.expand_special(user, conf)) + except Exception as e: + logg.error(" %s: User does not exist: %s (%s)", unit, user, getattr(e, "__doc__", "")) + badusers += 1 + for group in groups: + if group: + try: grp.getgrnam(self.expand_special(group, conf)) + except Exception as e: + logg.error(" %s: Group does not exist: %s (%s)", unit, group, getattr(e, 
"__doc__", "")) + badgroups += 1 + tmpproblems = 0 + for setting in ("RootDirectory", "RootImage", "BindPaths", "BindReadOnlyPaths", + "ReadWritePaths", "ReadOnlyPaths", "TemporaryFileSystem"): + setting_value = conf.get(section, setting, "") + if setting_value: + logg.info("%s: %s private directory remounts ignored: %s=%s", unit, section, setting, setting_value) + tmpproblems += 1 + for setting in ("PrivateTmp", "PrivateDevices", "PrivateNetwork", "PrivateUsers", "DynamicUser", + "ProtectSystem", "ProjectHome", "ProtectHostname", "PrivateMounts", "MountAPIVFS"): + setting_yes = conf.getbool(section, setting, "no") + if setting_yes: + logg.info("%s: %s private directory option is ignored: %s=yes", unit, section, setting) + tmpproblems += 1 + if not abspath and not notexists and not badusers and not badgroups: + return True + if True: + filename = strE(conf.filename()) + if len(filename) > 44: filename = o44(filename) + logg.error(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") + if abspath: + logg.error(" The SystemD ExecXY commands must always be absolute paths by definition.") + time.sleep(1) + if notexists: + logg.error(" Oops, %s executable paths were not found in the current environment. Refusing.", notexists) + time.sleep(1) + if badusers or badgroups: + logg.error(" Oops, %s user names and %s group names were not found. Refusing.", badusers, badgroups) + time.sleep(1) + if tmpproblems: + logg.info(" Note, %s private directory settings are ignored. The application should not depend on it.", tmpproblems) + time.sleep(1) + logg.error(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") + return False + def show_modules(self, *modules): + """ [PATTERN]... -- Show properties of one or more units + Show properties of one or more units (or the manager itself). + If no argument is specified, properties of the manager will be + shown. If a unit name is specified, properties of the unit is + shown. By default, empty properties are suppressed. Use --all to + show those too. To select specific properties to show, use + --property=. This command is intended to be used whenever + computer-parsable output is required. Use status if you are looking + for formatted human-readable output. + / + NOTE: only a subset of properties is implemented """ + notfound = [] + units = [] + found_all = True + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + units += [module] + # self.error |= NOT_FOUND + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + return self.show_units(units) + notfound # and found_all + def show_units(self, units): + logg.debug("show --property=%s", ",".join(self._only_property)) + result = [] + for unit in units: + if result: result += [""] + for var, value in self.show_unit_items(unit): + if self._only_property: + if var not in self._only_property: + continue + else: + if not value and not self._show_all: + continue + result += ["%s=%s" % (var, value)] + return result + def show_unit_items(self, unit): + """ [UNIT]... -- show properties of a unit. 
+ """ + logg.info("try read unit %s", unit) + conf = self.get_unit_conf(unit) + for entry in self.each_unit_items(unit, conf): + yield entry + def each_unit_items(self, unit, conf): + loaded = conf.loaded() + if not loaded: + loaded = "not-loaded" + if "NOT-FOUND" in self.get_description_from(conf): + loaded = "not-found" + names = {unit: 1, conf.name(): 1} + yield "Id", conf.name() + yield "Names", " ".join(sorted(names.keys())) + yield "Description", self.get_description_from(conf) # conf.get(Unit, "Description") + yield "PIDFile", self.get_pid_file(conf) # not self.pid_file_from w/o default location + yield "PIDFilePath", self.pid_file_from(conf) + yield "MainPID", strE(self.active_pid_from(conf)) # status["MainPID"] or PIDFile-read + yield "SubState", self.get_substate_from(conf) or "unknown" # status["SubState"] or notify-result + yield "ActiveState", self.get_active_from(conf) or "unknown" # status["ActiveState"] + yield "LoadState", loaded + yield "UnitFileState", self.enabled_from(conf) + yield "StatusFile", self.get_StatusFile(conf) + yield "StatusFilePath", self.get_status_file_from(conf) + yield "JournalFile", self.get_journal_log(conf) + yield "JournalFilePath", self.get_journal_log_from(conf) + yield "NotifySocket", self.get_notify_socket_from(conf) + yield "User", self.get_User(conf) or "" + yield "Group", self.get_Group(conf) or "" + yield "SupplementaryGroups", " ".join(self.get_SupplementaryGroups(conf)) + yield "TimeoutStartUSec", seconds_to_time(self.get_TimeoutStartSec(conf)) + yield "TimeoutStopUSec", seconds_to_time(self.get_TimeoutStopSec(conf)) + yield "NeedDaemonReload", "no" + yield "SendSIGKILL", strYes(self.get_SendSIGKILL(conf)) + yield "SendSIGHUP", strYes(self.get_SendSIGHUP(conf)) + yield "KillMode", strE(self.get_KillMode(conf)) + yield "KillSignal", strE(self.get_KillSignal(conf)) + yield "StartLimitBurst", strE(self.get_StartLimitBurst(conf)) + yield "StartLimitIntervalSec", seconds_to_time(self.get_StartLimitIntervalSec(conf)) + yield "RestartSec", seconds_to_time(self.get_RestartSec(conf)) + yield "RemainAfterExit", strYes(self.get_RemainAfterExit(conf)) + yield "WorkingDirectory", strE(self.get_WorkingDirectory(conf)) + env_parts = [] + for env_part in conf.getlist(Service, "Environment", []): + env_parts.append(self.expand_special(env_part, conf)) + if env_parts: + yield "Environment", " ".join(env_parts) + env_files = [] + for env_file in conf.getlist(Service, "EnvironmentFile", []): + env_files.append(self.expand_special(env_file, conf)) + if env_files: + yield "EnvironmentFile", " ".join(env_files) + def get_SendSIGKILL(self, conf): + return conf.getbool(Service, "SendSIGKILL", "yes") + def get_SendSIGHUP(self, conf): + return conf.getbool(Service, "SendSIGHUP", "no") + def get_KillMode(self, conf): + return conf.get(Service, "KillMode", "control-group") + def get_KillSignal(self, conf): + return conf.get(Service, "KillSignal", "SIGTERM") + # + igno_centos = ["netconsole", "network"] + igno_opensuse = ["raw", "pppoe", "*.local", "boot.*", "rpmconf*", "postfix*"] + igno_ubuntu = ["mount*", "umount*", "ondemand", "*.local"] + igno_always = ["network*", "dbus*", "systemd-*", "kdump*", "kmod*"] + igno_always += ["purge-kernels.service", "after-local.service", "dm-event.*"] # as on opensuse + igno_targets = ["remote-fs.target"] + def _ignored_unit(self, unit, ignore_list): + for ignore in ignore_list: + if fnmatch.fnmatchcase(unit, ignore): + return True # ignore + if fnmatch.fnmatchcase(unit, ignore+".service"): + return True # ignore + return False + 
def default_services_modules(self, *modules): + """ show the default services + This is used internally to know the list of service to be started in the 'get-default' + target runlevel when the container is started through default initialisation. It will + ignore a number of services - use '--all' to show a longer list of services and + use '--all --force' if not even a minimal filter shall be used. + """ + results = [] + targets = modules or [self.get_default_target()] + for target in targets: + units = self.target_default_services(target) + logg.debug(" %s # %s", " ".join(units), target) + for unit in units: + if unit not in results: + results.append(unit) + return results + def target_default_services(self, target = None, sysv = "S"): + """ get the default services for a target - this will ignore a number of services, + use '--all' and --force' to get more services. + """ + igno = self.igno_centos + self.igno_opensuse + self.igno_ubuntu + self.igno_always + if self._show_all: + igno = self.igno_always + if self._force: + igno = [] + logg.debug("ignored services filter for default.target:\n\t%s", igno) + default_target = target or self.get_default_target() + return self.enabled_target_services(default_target, sysv, igno) + def enabled_target_services(self, target, sysv = "S", igno = []): + units = [] + if self.user_mode(): + targetlist = self.get_target_list(target) + logg.debug("check for %s user services : %s", target, targetlist) + for targets in targetlist: + for unit in self.enabled_target_user_local_units(targets, ".target", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.required_target_units(targets, ".socket", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_user_local_units(targets, ".socket", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.required_target_units(targets, ".service", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_user_local_units(targets, ".service", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_user_system_units(targets, ".service", igno): + if unit not in units: + units.append(unit) + else: + targetlist = self.get_target_list(target) + logg.debug("check for %s system services: %s", target, targetlist) + for targets in targetlist: + for unit in self.enabled_target_configured_system_units(targets, ".target", igno + self.igno_targets): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.required_target_units(targets, ".socket", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_installed_system_units(targets, ".socket", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.required_target_units(targets, ".service", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_installed_system_units(targets, ".service", igno): + if unit not in units: + units.append(unit) + for targets in targetlist: + for unit in self.enabled_target_sysv_units(targets, sysv, igno): + if unit not in units: + units.append(unit) + return units + def enabled_target_user_local_units(self, target, unit_kind = ".service", igno = []): + units = [] + for basefolder in self.user_folders(): 
+ if not basefolder: + continue + folder = self.default_enablefolder(target, basefolder) + if self._root: + folder = os_path(self._root, folder) + if os.path.isdir(folder): + for unit in sorted(os.listdir(folder)): + path = os.path.join(folder, unit) + if os.path.isdir(path): continue + if self._ignored_unit(unit, igno): + continue # ignore + if unit.endswith(unit_kind): + units.append(unit) + return units + def enabled_target_user_system_units(self, target, unit_kind = ".service", igno = []): + units = [] + for basefolder in self.system_folders(): + if not basefolder: + continue + folder = self.default_enablefolder(target, basefolder) + if self._root: + folder = os_path(self._root, folder) + if os.path.isdir(folder): + for unit in sorted(os.listdir(folder)): + path = os.path.join(folder, unit) + if os.path.isdir(path): continue + if self._ignored_unit(unit, igno): + continue # ignore + if unit.endswith(unit_kind): + conf = self.load_unit_conf(unit) + if conf is None: + pass + elif self.not_user_conf(conf): + pass + else: + units.append(unit) + return units + def enabled_target_installed_system_units(self, target, unit_type = ".service", igno = []): + units = [] + for basefolder in self.system_folders(): + if not basefolder: + continue + folder = self.default_enablefolder(target, basefolder) + if self._root: + folder = os_path(self._root, folder) + if os.path.isdir(folder): + for unit in sorted(os.listdir(folder)): + path = os.path.join(folder, unit) + if os.path.isdir(path): continue + if self._ignored_unit(unit, igno): + continue # ignore + if unit.endswith(unit_type): + units.append(unit) + return units + def enabled_target_configured_system_units(self, target, unit_type = ".service", igno = []): + units = [] + if True: + folder = self.default_enablefolder(target) + if self._root: + folder = os_path(self._root, folder) + if os.path.isdir(folder): + for unit in sorted(os.listdir(folder)): + path = os.path.join(folder, unit) + if os.path.isdir(path): continue + if self._ignored_unit(unit, igno): + continue # ignore + if unit.endswith(unit_type): + units.append(unit) + return units + def enabled_target_sysv_units(self, target, sysv = "S", igno = []): + units = [] + folders = [] + if target in ["multi-user.target", DefaultUnit]: + folders += [self.rc3_root_folder()] + if target in ["graphical.target"]: + folders += [self.rc5_root_folder()] + for folder in folders: + if not os.path.isdir(folder): + logg.warning("non-existent %s", folder) + continue + for unit in sorted(os.listdir(folder)): + path = os.path.join(folder, unit) + if os.path.isdir(path): continue + m = re.match(sysv+r"\d\d(.*)", unit) + if m: + service = m.group(1) + unit = service + ".service" + if self._ignored_unit(unit, igno): + continue # ignore + units.append(unit) + return units + def required_target_units(self, target, unit_type, igno): + units = [] + deps = self.get_required_dependencies(target) + for unit in sorted(deps): + if self._ignored_unit(unit, igno): + continue # ignore + if unit.endswith(unit_type): + if unit not in units: + units.append(unit) + return units + def get_target_conf(self, module): # -> conf (conf | default-conf) + """ accept that a unit does not exist + and return a unit conf that says 'not-loaded' """ + conf = self.load_unit_conf(module) + if conf is not None: + return conf + target_conf = self.default_unit_conf(module) + if module in target_requires: + target_conf.set(Unit, "Requires", target_requires[module]) + return target_conf + def get_target_list(self, module): + """ the Requires= in 
target units are only accepted if known """ + target = module + if "." not in target: target += ".target" + targets = [target] + conf = self.get_target_conf(module) + requires = conf.get(Unit, "Requires", "") + while requires in target_requires: + targets = [requires] + targets + requires = target_requires[requires] + logg.debug("the %s requires %s", module, targets) + return targets + def default_system(self, arg = True): + """ start units for default system level + This will go through the enabled services in the default 'multi-user.target'. + However some services are ignored as being known to be installation garbage + from unintended services. Use '--all' so start all of the installed services + and with '--all --force' even those services that are otherwise wrong. + /// SPECIAL: with --now or --init the init-loop is run and afterwards + a system_halt is performed with the enabled services to be stopped.""" + self.sysinit_status(SubState = "initializing") + logg.info("system default requested - %s", arg) + init = self._now or self._init + return self.start_system_default(init = init) + def start_system_default(self, init = False): + """ detect the default.target services and start them. + When --init is given then the init-loop is run and + the services are stopped again by 'systemctl halt'.""" + target = self.get_default_target() + services = self.start_target_system(target, init) + logg.info("%s system is up", target) + if init: + logg.info("init-loop start") + sig = self.init_loop_until_stop(services) + logg.info("init-loop %s", sig) + self.stop_system_default() + return not not services + def start_target_system(self, target, init = False): + services = self.target_default_services(target, "S") + self.sysinit_status(SubState = "starting") + self.start_units(services) + return services + def do_start_target_from(self, conf): + target = conf.name() + # services = self.start_target_system(target) + services = self.target_default_services(target, "S") + units = [service for service in services if not self.is_running_unit(service)] + logg.debug("start %s is starting %s from %s", target, units, services) + return self.start_units(units) + def stop_system_default(self): + """ detect the default.target services and stop them. 
+ This is commonly run through 'systemctl halt' or + at the end of a 'systemctl --init default' loop.""" + target = self.get_default_target() + services = self.stop_target_system(target) + logg.info("%s system is down", target) + return not not services + def stop_target_system(self, target): + services = self.target_default_services(target, "K") + self.sysinit_status(SubState = "stopping") + self.stop_units(services) + return services + def do_stop_target_from(self, conf): + target = conf.name() + # services = self.stop_target_system(target) + services = self.target_default_services(target, "K") + units = [service for service in services if self.is_running_unit(service)] + logg.debug("stop %s is stopping %s from %s", target, units, services) + return self.stop_units(units) + def do_reload_target_from(self, conf): + target = conf.name() + return self.reload_target_system(target) + def reload_target_system(self, target): + services = self.target_default_services(target, "S") + units = [service for service in services if self.is_running_unit(service)] + return self.reload_units(units) + def halt_target(self, arg = True): + """ stop units from default system level """ + logg.info("system halt requested - %s", arg) + done = self.stop_system_default() + try: + os.kill(1, signal.SIGQUIT) # exit init-loop on no_more_procs + except Exception as e: + logg.warning("SIGQUIT to init-loop on PID-1: %s", e) + return done + def system_get_default(self): + """ get current default run-level""" + return self.get_default_target() + def get_targets_folder(self): + return os_path(self._root, self.mask_folder()) + def get_default_target_file(self): + targets_folder = self.get_targets_folder() + return os.path.join(targets_folder, DefaultUnit) + def get_default_target(self, default_target = None): + """ get current default run-level""" + current = default_target or self._default_target + default_target_file = self.get_default_target_file() + if os.path.islink(default_target_file): + current = os.path.basename(os.readlink(default_target_file)) + return current + def set_default_modules(self, *modules): + """ set current default run-level""" + if not modules: + logg.debug(".. no runlevel given") + self.error |= NOT_OK + return "Too few arguments" + current = self.get_default_target() + default_target_file = self.get_default_target_file() + msg = "" + for module in modules: + if module == current: + continue + targetfile = None + for targetname, targetpath in self.each_target_file(): + if targetname == module: + targetfile = targetpath + if not targetfile: + self.error |= NOT_OK | NOT_ACTIVE # 3 + msg = "No such runlevel %s" % (module) + continue + # + if os.path.islink(default_target_file): + os.unlink(default_target_file) + if not os.path.isdir(os.path.dirname(default_target_file)): + os.makedirs(os.path.dirname(default_target_file)) + os.symlink(targetfile, default_target_file) + msg = "Created symlink from %s -> %s" % (default_target_file, targetfile) + logg.debug("%s", msg) + return msg + def init_modules(self, *modules): + """ [UNIT*] -- init loop: '--init default' or '--init start UNIT*' + The systemctl init service will start the enabled 'default' services, + and then wait for any zombies to be reaped. When a SIGINT is received + then a clean shutdown of the enabled services is ensured. A Control-C in + in interactive mode will also run 'stop' on all the enabled services. // + When a UNIT name is given then only that one is started instead of the + services in the 'default.target'. 
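+        (For example, a hypothetical 'systemctl.py init mosquitto.service' starts just
+        that unit, runs the init-loop, and stops it again on SIGTERM or Control-C.)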
Using 'init UNIT' is better than + '--init start UNIT' because the UNIT is also stopped cleanly even when + it was never enabled in the system. + /// SPECIAL: when using --now then only the init-loop is started, + with the reap-zombies function and waiting for an interrupt. + (and no unit is started/stoppped wether given or not). + """ + if self._now: + result = self.init_loop_until_stop([]) + return not not result + if not modules: + # like 'systemctl --init default' + if self._now or self._show_all: + logg.debug("init default --now --all => no_more_procs") + self.doExitWhenNoMoreProcs = True + return self.start_system_default(init = True) + # + # otherwise quit when all the init-services have died + self.doExitWhenNoMoreServices = True + if self._now or self._show_all: + logg.debug("init services --now --all => no_more_procs") + self.doExitWhenNoMoreProcs = True + found_all = True + units = [] + for module in modules: + matched = self.match_units(to_list(module)) + if not matched: + logg.error("Unit %s could not be found.", unit_of(module)) + found_all = False + continue + for unit in matched: + if unit not in units: + units += [unit] + logg.info("init %s -> start %s", ",".join(modules), ",".join(units)) + done = self.start_units(units, init = True) + logg.info("-- init is done") + return done # and found_all + def start_log_files(self, units): + self._log_file = {} + self._log_hold = {} + for unit in units: + conf = self.load_unit_conf(unit) + if not conf: continue + if self.skip_journal_log(conf): continue + log_path = self.get_journal_log_from(conf) + try: + opened = os.open(log_path, os.O_RDONLY | os.O_NONBLOCK) + self._log_file[unit] = opened + self._log_hold[unit] = b"" + except Exception as e: + logg.error("can not open %s log: %s\n\t%s", unit, log_path, e) + def read_log_files(self, units): + self.print_log_files(units) + def print_log_files(self, units, stdout = 1): + BUFSIZE=8192 + printed = 0 + for unit in units: + if unit in self._log_file: + new_text = b"" + while True: + buf = os.read(self._log_file[unit], BUFSIZE) + if not buf: break + new_text += buf + continue + text = self._log_hold[unit] + new_text + if not text: continue + lines = text.split(b"\n") + if not text.endswith(b"\n"): + self._log_hold[unit] = lines[-1] + lines = lines[:-1] + for line in lines: + prefix = unit.encode("utf-8") + content = prefix+b": "+line+b"\n" + try: + os.write(stdout, content) + try: + os.fsync(stdout) + except Exception: + pass + printed += 1 + except BlockingIOError: + pass + return printed + def stop_log_files(self, units): + for unit in units: + try: + if unit in self._log_file: + if self._log_file[unit]: + os.close(self._log_file[unit]) + except Exception as e: + logg.error("can not close log: %s\n\t%s", unit, e) + self._log_file = {} + self._log_hold = {} + + def get_StartLimitBurst(self, conf): + defaults = DefaultStartLimitBurst + return to_int(conf.get(Service, "StartLimitBurst", strE(defaults)), defaults) # 5 + def get_StartLimitIntervalSec(self, conf, maximum = None): + maximum = maximum or 999 + defaults = DefaultStartLimitIntervalSec + interval = conf.get(Service, "StartLimitIntervalSec", strE(defaults)) # 10s + return time_to_seconds(interval, maximum) + def get_RestartSec(self, conf, maximum = None): + maximum = maximum or DefaultStartLimitIntervalSec + delay = conf.get(Service, "RestartSec", strE(DefaultRestartSec)) + return time_to_seconds(delay, maximum) + def restart_failed_units(self, units, maximum = None): + """ This function will restart failed units. 
+ / + NOTE that with standard settings the LimitBurst implementation has no effect. If + the InitLoopSleep is ticking at the Default of 5sec and the LimitBurst Default + is 5x within a Default 10secs time frame then within those 10sec only 2 loop + rounds have come here checking for possible restarts. You can directly shorten + the interval ('-c InitLoopSleep=1') or have it indirectly shorter from the + service descriptor's RestartSec ("RestartSec=2s"). + """ + global InitLoopSleep + me = os.getpid() + maximum = maximum or DefaultStartLimitIntervalSec + restartDelay = MinimumYield + for unit in units: + now = time.time() + try: + conf = self.load_unit_conf(unit) + if not conf: continue + restartPolicy = conf.get(Service, "Restart", "no") + if restartPolicy in ["no", "on-success"]: + logg.debug("[%s] [%s] Current NoCheck (Restart=%s)", me, unit, restartPolicy) + continue + restartSec = self.get_RestartSec(conf) + if restartSec == 0: + if InitLoopSleep > 1: + logg.warning("[%s] set InitLoopSleep from %ss to 1 (caused by RestartSec=0!)", + unit, InitLoopSleep) + InitLoopSleep = 1 + elif restartSec > 0.9 and restartSec < InitLoopSleep: + restartSleep = int(restartSec + 0.2) + if restartSleep < InitLoopSleep: + logg.warning("[%s] set InitLoopSleep from %ss to %s (caused by RestartSec=%.3fs)", + unit, InitLoopSleep, restartSleep, restartSec) + InitLoopSleep = restartSleep + isUnitState = self.get_active_from(conf) + isUnitFailed = isUnitState in ["failed"] + logg.debug("[%s] [%s] Current Status: %s (%s)", me, unit, isUnitState, isUnitFailed) + if not isUnitFailed: + if unit in self._restart_failed_units: + del self._restart_failed_units[unit] + continue + limitBurst = self.get_StartLimitBurst(conf) + limitSecs = self.get_StartLimitIntervalSec(conf) + if limitBurst > 1 and limitSecs >= 1: + try: + if unit not in self._restarted_unit: + self._restarted_unit[unit] = [] + # we want to register restarts from now on + restarted = self._restarted_unit[unit] + logg.debug("[%s] [%s] Current limitSecs=%ss limitBurst=%sx (restarted %sx)", + me, unit, limitSecs, limitBurst, len(restarted)) + oldest = 0. + interval = 0. 
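+                        # Editor's note: the block below prunes restart timestamps older than
+                        # limitSecs from the window; if limitBurst entries still remain, the
+                        # unit's status is written as "error" and no further restart is scheduled.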
+ if len(restarted) >= limitBurst: + logg.debug("[%s] [%s] restarted %s", + me, unit, ["%.3fs" % (t - now) for t in restarted]) + while len(restarted): + oldest = restarted[0] + interval = time.time() - oldest + if interval > limitSecs: + restarted = restarted[1:] + continue + break + self._restarted_unit[unit] = restarted + logg.debug("[%s] [%s] ratelimit %s", + me, unit, ["%.3fs" % (t - now) for t in restarted]) + # all values in restarted have a time below limitSecs + if len(restarted) >= limitBurst: + logg.info("[%s] [%s] Blocking Restart - oldest %s is %s ago (allowed %s)", + me, unit, oldest, interval, limitSecs) + self.write_status_from(conf, AS="error") + unit = "" # dropped out + continue + except Exception as e: + logg.error("[%s] burst exception %s", unit, e) + if unit: # not dropped out + if unit not in self._restart_failed_units: + self._restart_failed_units[unit] = now + restartSec + logg.debug("[%s] [%s] restart scheduled in %+.3fs", + me, unit, (self._restart_failed_units[unit] - now)) + except Exception as e: + logg.error("[%s] [%s] An error occurred while restart checking: %s", me, unit, e) + if not self._restart_failed_units: + self.error |= NOT_OK + return [] + # NOTE: this function is only called from InitLoop when "running" + # let's check if any of the restart_units has its restartSec expired + now = time.time() + restart_done = [] + logg.debug("[%s] Restart checking %s", + me, ["%+.3fs" % (t - now) for t in self._restart_failed_units.values()]) + for unit in sorted(self._restart_failed_units): + restartAt = self._restart_failed_units[unit] + if restartAt > now: + continue + restart_done.append(unit) + try: + conf = self.load_unit_conf(unit) + if not conf: continue + isUnitState = self.get_active_from(conf) + isUnitFailed = isUnitState in ["failed"] + logg.debug("[%s] [%s] Restart Status: %s (%s)", me, unit, isUnitState, isUnitFailed) + if isUnitFailed: + logg.debug("[%s] [%s] --- restarting failed unit...", me, unit) + self.restart_unit(unit) + logg.debug("[%s] [%s] --- has been restarted.", me, unit) + if unit in self._restarted_unit: + self._restarted_unit[unit].append(time.time()) + except Exception as e: + logg.error("[%s] [%s] An error occurred while restarting: %s", me, unit, e) + for unit in restart_done: + if unit in self._restart_failed_units: + del self._restart_failed_units[unit] + logg.debug("[%s] Restart remaining %s", + me, ["%+.3fs" % (t - now) for t in self._restart_failed_units.values()]) + return restart_done + + def init_loop_until_stop(self, units): + """ this is the init-loop - it checks for any zombies to be reaped and + waits for an interrupt. When a SIGTERM /SIGINT /Control-C signal + is received then the signal name is returned. Any other signal will + just raise an Exception like one would normally expect. 
As a special + the 'systemctl halt' emits SIGQUIT which puts it into no_more_procs mode.""" + signal.signal(signal.SIGQUIT, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt("SIGQUIT")) + signal.signal(signal.SIGINT, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt("SIGINT")) + signal.signal(signal.SIGTERM, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt("SIGTERM")) + result = None + # + self.start_log_files(units) + logg.debug("start listen") + listen = SystemctlListenThread(self) + logg.debug("starts listen") + listen.start() + logg.debug("started listen") + self.sysinit_status(ActiveState = "active", SubState = "running") + timestamp = time.time() + while True: + try: + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("DONE InitLoop (sleep %ss)", InitLoopSleep) + sleep_sec = InitLoopSleep - (time.time() - timestamp) + if sleep_sec < MinimumYield: + sleep_sec = MinimumYield + sleeping = sleep_sec + while sleeping > 2: + time.sleep(1) # accept signals atleast every second + sleeping = InitLoopSleep - (time.time() - timestamp) + if sleeping < MinimumYield: + sleeping = MinimumYield + break + time.sleep(sleeping) # remainder waits less that 2 seconds + timestamp = time.time() + self.loop.acquire() + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("NEXT InitLoop (after %ss)", sleep_sec) + self.read_log_files(units) + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("reap zombies - check current processes") + running = self.reap_zombies() + if DEBUG_INITLOOP: # pragma: no cover + logg.debug("reap zombies - init-loop found %s running procs", running) + if self.doExitWhenNoMoreServices: + active = False + for unit in units: + conf = self.load_unit_conf(unit) + if not conf: continue + if self.is_active_from(conf): + active = True + if not active: + logg.info("no more services - exit init-loop") + break + if self.doExitWhenNoMoreProcs: + if not running: + logg.info("no more procs - exit init-loop") + break + if RESTART_FAILED_UNITS: + self.restart_failed_units(units) + self.loop.release() + except KeyboardInterrupt as e: + if e.args and e.args[0] == "SIGQUIT": + # the original systemd puts a coredump on that signal. 
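+                    # Editor's note: here it is handled gracefully instead - the loop keeps
+                    # running until reap_zombies() reports no remaining processes, at which
+                    # point the doExitWhenNoMoreProcs check above exits the init-loop.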
+ logg.info("SIGQUIT - switch to no more procs check") + self.doExitWhenNoMoreProcs = True + continue + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + logg.info("interrupted - exit init-loop") + result = str(e) or "STOPPED" + break + except Exception as e: + logg.info("interrupted - exception %s", e) + raise + self.sysinit_status(ActiveState = None, SubState = "degraded") + try: self.loop.release() + except: pass + listen.stop() + listen.join(2) + self.read_log_files(units) + self.read_log_files(units) + self.stop_log_files(units) + logg.debug("done - init loop") + return result + def reap_zombies_target(self): + """ -- check to reap children (internal) """ + running = self.reap_zombies() + return "remaining {running} process".format(**locals()) + def reap_zombies(self): + """ check to reap children """ + selfpid = os.getpid() + running = 0 + for pid_entry in os.listdir(_proc_pid_dir): + pid = to_intN(pid_entry) + if pid is None: + continue + if pid == selfpid: + continue + proc_status = _proc_pid_status.format(**locals()) + if os.path.isfile(proc_status): + zombie = False + ppid = -1 + try: + for line in open(proc_status): + m = re.match(r"State:\s*Z.*", line) + if m: zombie = True + m = re.match(r"PPid:\s*(\d+)", line) + if m: ppid = int(m.group(1)) + except IOError as e: + logg.warning("%s : %s", proc_status, e) + continue + if zombie and ppid == os.getpid(): + logg.info("reap zombie %s", pid) + try: os.waitpid(pid, os.WNOHANG) + except OSError as e: + logg.warning("reap zombie %s: %s", e.strerror) + if os.path.isfile(proc_status): + if pid > 1: + running += 1 + return running # except PID 0 and PID 1 + def sysinit_status(self, **status): + conf = self.sysinit_target() + self.write_status_from(conf, **status) + def sysinit_target(self): + if not self._sysinit_target: + self._sysinit_target = self.default_unit_conf(SysInitTarget, "System Initialization") + assert self._sysinit_target is not None + return self._sysinit_target + def is_system_running(self): + conf = self.sysinit_target() + if not self.is_running_unit_from(conf): + time.sleep(MinimumYield) + if not self.is_running_unit_from(conf): + return "offline" + status = self.read_status_from(conf) + return status.get("SubState", "unknown") + def is_system_running_info(self): + state = self.is_system_running() + if state not in ["running"]: + self.error |= NOT_OK # 1 + if self._quiet: + return None + return state + def wait_system(self, target = None): + target = target or SysInitTarget + for attempt in xrange(int(SysInitWait)): + state = self.is_system_running() + if "init" in state: + if target in [SysInitTarget, "basic.target"]: + logg.info("system not initialized - wait %s", target) + time.sleep(1) + continue + if "start" in state or "stop" in state: + if target in ["basic.target"]: + logg.info("system not running - wait %s", target) + time.sleep(1) + continue + if "running" not in state: + logg.info("system is %s", state) + break + def is_running_unit_from(self, conf): + status_file = self.get_status_file_from(conf) + pid_file = self.pid_file_from(conf) + return self.getsize(status_file) > 0 or self.getsize(pid_file) > 0 + def is_running_unit(self, unit): + conf = self.get_unit_conf(unit) + return self.is_running_unit_from(conf) + def pidlist_of(self, pid): + if not pid: + return [] + pidlist = [pid] + pids = [pid] + for depth in xrange(PROC_MAX_DEPTH): + for pid_entry in os.listdir(_proc_pid_dir): + pid = to_intN(pid_entry) + if pid is None: + continue + proc_status = 
_proc_pid_status.format(**locals()) + if os.path.isfile(proc_status): + try: + for line in open(proc_status): + if line.startswith("PPid:"): + ppid_text = line[len("PPid:"):].strip() + try: ppid = int(ppid_text) + except: continue + if ppid in pidlist and pid not in pids: + pids += [pid] + except IOError as e: + logg.warning("%s : %s", proc_status, e) + continue + if len(pids) != len(pidlist): + pidlist = pids[:] + continue + return pids + def echo(self, *targets): + line = " ".join(*targets) + logg.info(" == echo == %s", line) + return line + def killall(self, *targets): + mapping = {} + mapping[":3"] = signal.SIGQUIT + mapping[":QUIT"] = signal.SIGQUIT + mapping[":6"] = signal.SIGABRT + mapping[":ABRT"] = signal.SIGABRT + mapping[":9"] = signal.SIGKILL + mapping[":KILL"] = signal.SIGKILL + sig = signal.SIGTERM + for target in targets: + if target.startswith(":"): + if target in mapping: + sig = mapping[target] + else: # pragma: no cover + logg.error("unsupported %s", target) + continue + for pid_entry in os.listdir(_proc_pid_dir): + pid = to_intN(pid_entry) + if pid: + try: + cmdline = _proc_pid_cmdline.format(**locals()) + cmd = open(cmdline).read().split("\0") + if DEBUG_KILLALL: logg.debug("cmdline %s", cmd) + found = None + cmd_exe = os.path.basename(cmd[0]) + if DEBUG_KILLALL: logg.debug("cmd.exe '%s'", cmd_exe) + if fnmatch.fnmatchcase(cmd_exe, target): found = "exe" + if len(cmd) > 1 and cmd_exe.startswith("python"): + X = 1 + while cmd[X].startswith("-"): X += 1 # atleast '-u' unbuffered + cmd_arg = os.path.basename(cmd[X]) + if DEBUG_KILLALL: logg.debug("cmd.arg '%s'", cmd_arg) + if fnmatch.fnmatchcase(cmd_arg, target): found = "arg" + if cmd_exe.startswith("coverage") or cmd_arg.startswith("coverage"): + x = cmd.index("--") + if x > 0 and x+1 < len(cmd): + cmd_run = os.path.basename(cmd[x+1]) + if DEBUG_KILLALL: logg.debug("cmd.run '%s'", cmd_run) + if fnmatch.fnmatchcase(cmd_run, target): found = "run" + if found: + if DEBUG_KILLALL: logg.debug("%s found %s %s", found, pid, [c for c in cmd]) + if pid != os.getpid(): + logg.debug(" kill -%s %s # %s", sig, pid, target) + os.kill(pid, sig) + except Exception as e: + logg.error("kill -%s %s : %s", sig, pid, e) + return True + def force_ipv4(self, *args): + """ only ipv4 localhost in /etc/hosts """ + logg.debug("checking hosts sysconf for '::1 localhost'") + lines = [] + sysconf_hosts = os_path(self._root, _etc_hosts) + for line in open(sysconf_hosts): + if "::1" in line: + newline = re.sub("\\slocalhost\\s", " ", line) + if line != newline: + logg.info("%s: '%s' => '%s'", _etc_hosts, line.rstrip(), newline.rstrip()) + line = newline + lines.append(line) + f = open(sysconf_hosts, "w") + for line in lines: + f.write(line) + f.close() + def force_ipv6(self, *args): + """ only ipv4 localhost in /etc/hosts """ + logg.debug("checking hosts sysconf for '127.0.0.1 localhost'") + lines = [] + sysconf_hosts = os_path(self._root, _etc_hosts) + for line in open(sysconf_hosts): + if "127.0.0.1" in line: + newline = re.sub("\\slocalhost\\s", " ", line) + if line != newline: + logg.info("%s: '%s' => '%s'", _etc_hosts, line.rstrip(), newline.rstrip()) + line = newline + lines.append(line) + f = open(sysconf_hosts, "w") + for line in lines: + f.write(line) + f.close() + def help_modules(self, *args): + """[command] -- show this help + """ + lines = [] + okay = True + prog = os.path.basename(sys.argv[0]) + if not args: + argz = {} + for name in dir(self): + arg = None + if name.startswith("system_"): + arg = name[len("system_"):].replace("_", "-") 
+ if name.startswith("show_"): + arg = name[len("show_"):].replace("_", "-") + if name.endswith("_of_unit"): + arg = name[:-len("_of_unit")].replace("_", "-") + if name.endswith("_modules"): + arg = name[:-len("_modules")].replace("_", "-") + if arg: + argz[arg] = name + lines.append("%s command [options]..." % prog) + lines.append("") + lines.append("Commands:") + for arg in sorted(argz): + name = argz[arg] + method = getattr(self, name) + doc = "..." + doctext = getattr(method, "__doc__") + if doctext: + doc = doctext + elif not self._show_all: + continue # pragma: no cover + firstline = doc.split("\n")[0] + doc_text = firstline.strip() + if "--" not in firstline: + doc_text = "-- " + doc_text + lines.append(" %s %s" % (arg, firstline.strip())) + return lines + for arg in args: + arg = arg.replace("-", "_") + func1 = getattr(self.__class__, arg+"_modules", None) + func2 = getattr(self.__class__, arg+"_of_unit", None) + func3 = getattr(self.__class__, "show_"+arg, None) + func4 = getattr(self.__class__, "system_"+arg, None) + func5 = None + if arg.startswith("__"): + func5 = getattr(self.__class__, arg[2:], None) + func = func1 or func2 or func3 or func4 or func5 + if func is None: + print("error: no such command '%s'" % arg) + okay = False + else: + doc_text = "..." + doc = getattr(func, "__doc__", "") + if doc: + doc_text = doc.replace("\n", "\n\n", 1).strip() + if "--" not in doc_text: + doc_text = "-- " + doc_text + else: + func_name = arg # FIXME + logg.debug("__doc__ of %s is none", func_name) + if not self._show_all: continue + lines.append("%s %s %s" % (prog, arg, doc_text)) + if not okay: + self.help_modules() + self.error |= NOT_OK + return [] + return lines + def systemd_version(self): + """ the version line for systemd compatibility """ + return "systemd %s\n - via systemctl.py %s" % (self._systemd_version, __version__) + def systemd_features(self): + """ the info line for systemd features """ + features1 = "-PAM -AUDIT -SELINUX -IMA -APPARMOR -SMACK" + features2 = " +SYSVINIT -UTMP -LIBCRYPTSETUP -GCRYPT -GNUTLS" + features3 = " -ACL -XZ -LZ4 -SECCOMP -BLKID -ELFUTILS -KMOD -IDN" + return features1+features2+features3 + def version_info(self): + return [self.systemd_version(), self.systemd_features()] + def test_float(self): + return 0. 
# "Unknown result type" + +def print_begin(argv, args): + script = os.path.realpath(argv[0]) + system = _user_mode and " --user" or " --system" + init = _init and " --init" or "" + logg.info("EXEC BEGIN %s %s%s%s", script, " ".join(args), system, init) + if _root and not is_good_root(_root): + root44 = path44(_root) + logg.warning("the --root=%s should have atleast three levels /tmp/test_123/root", root44) + +def print_begin2(args): + logg.debug("======= systemctl.py %s", " ".join(args)) + +def is_not_ok(result): + if DebugPrintResult: + logg.log(HINT, "EXEC END %s", result) + if result is False: + return NOT_OK + return 0 + +def print_str(result): + if result is None: + if DebugPrintResult: + logg.debug(" END %s", result) + return + print(result) + if DebugPrintResult: + result1 = result.split("\n")[0][:-20] + if result == result1: + logg.log(HINT, "EXEC END '%s'", result) + else: + logg.log(HINT, "EXEC END '%s...'", result1) + logg.debug(" END '%s'", result) +def print_str_list(result): + if result is None: + if DebugPrintResult: + logg.debug(" END %s", result) + return + shown = 0 + for element in result: + print(element) + shown += 1 + if DebugPrintResult: + logg.log(HINT, "EXEC END %i items", shown) + logg.debug(" END %s", result) +def print_str_list_list(result): + shown = 0 + for element in result: + print("\t".join([str(elem) for elem in element])) + shown += 1 + if DebugPrintResult: + logg.log(HINT, "EXEC END %i items", shown) + logg.debug(" END %s", result) +def print_str_dict(result): + if result is None: + if DebugPrintResult: + logg.debug(" END %s", result) + return + shown = 0 + for key in sorted(result.keys()): + element = result[key] + print("%s=%s" % (key, element)) + shown += 1 + if DebugPrintResult: + logg.log(HINT, "EXEC END %i items", shown) + logg.debug(" END %s", result) +def print_str_dict_dict(result): + if result is None: + if DebugPrintResult: + logg.debug(" END %s", result) + return + shown = 0 + for key in sorted(result): + element = result[key] + for name in sorted(element): + value = element[name] + print("%s [%s] %s" % (key, value, name)) + shown += 1 + if DebugPrintResult: + logg.log(HINT, "EXEC END %i items", shown) + logg.debug(" END %s", result) + +def run(command, *modules): + exitcode = 0 + if command in ["help"]: + print_str_list(systemctl.help_modules(*modules)) + elif command in ["cat"]: + print_str(systemctl.cat_modules(*modules)) + elif command in ["clean"]: + exitcode = is_not_ok(systemctl.clean_modules(*modules)) + elif command in ["command"]: + print_str_list(systemctl.command_of_unit(*modules)) + elif command in ["daemon-reload"]: + exitcode = is_not_ok(systemctl.daemon_reload_target()) + elif command in ["default"]: + exitcode = is_not_ok(systemctl.default_system()) + elif command in ["default-services"]: + print_str_list(systemctl.default_services_modules(*modules)) + elif command in ["disable"]: + exitcode = is_not_ok(systemctl.disable_modules(*modules)) + elif command in ["enable"]: + exitcode = is_not_ok(systemctl.enable_modules(*modules)) + elif command in ["environment"]: + print_str_dict(systemctl.environment_of_unit(*modules)) + elif command in ["get-default"]: + print_str(systemctl.get_default_target()) + elif command in ["get-preset"]: + print_str(systemctl.get_preset_of_unit(*modules)) + elif command in ["halt"]: + exitcode = is_not_ok(systemctl.halt_target()) + elif command in ["init"]: + exitcode = is_not_ok(systemctl.init_modules(*modules)) + elif command in ["is-active"]: + print_str_list(systemctl.is_active_modules(*modules)) 
+ elif command in ["is-enabled"]: + print_str_list(systemctl.is_enabled_modules(*modules)) + elif command in ["is-failed"]: + print_str_list(systemctl.is_failed_modules(*modules)) + elif command in ["is-system-running"]: + print_str(systemctl.is_system_running_info()) + elif command in ["kill"]: + exitcode = is_not_ok(systemctl.kill_modules(*modules)) + elif command in ["list-start-dependencies"]: + print_str_list_list(systemctl.list_start_dependencies_modules(*modules)) + elif command in ["list-dependencies"]: + print_str_list(systemctl.list_dependencies_modules(*modules)) + elif command in ["list-unit-files"]: + print_str_list_list(systemctl.list_unit_files_modules(*modules)) + elif command in ["list-units"]: + print_str_list_list(systemctl.list_units_modules(*modules)) + elif command in ["listen"]: + exitcode = is_not_ok(systemctl.listen_modules(*modules)) + elif command in ["log", "logs"]: + exitcode = is_not_ok(systemctl.log_modules(*modules)) + elif command in ["mask"]: + exitcode = is_not_ok(systemctl.mask_modules(*modules)) + elif command in ["preset"]: + exitcode = is_not_ok(systemctl.preset_modules(*modules)) + elif command in ["preset-all"]: + exitcode = is_not_ok(systemctl.preset_all_modules()) + elif command in ["reap-zombies"]: + print_str(systemctl.reap_zombies_target()) + elif command in ["reload"]: + exitcode = is_not_ok(systemctl.reload_modules(*modules)) + elif command in ["reload-or-restart"]: + exitcode = is_not_ok(systemctl.reload_or_restart_modules(*modules)) + elif command in ["reload-or-try-restart"]: + exitcode = is_not_ok(systemctl.reload_or_try_restart_modules(*modules)) + elif command in ["reset-failed"]: + exitcode = is_not_ok(systemctl.reset_failed_modules(*modules)) + elif command in ["restart"]: + exitcode = is_not_ok(systemctl.restart_modules(*modules)) + elif command in ["set-default"]: + print_str(systemctl.set_default_modules(*modules)) + elif command in ["show"]: + print_str_list(systemctl.show_modules(*modules)) + elif command in ["start"]: + exitcode = is_not_ok(systemctl.start_modules(*modules)) + elif command in ["status"]: + print_str(systemctl.status_modules(*modules)) + elif command in ["stop"]: + exitcode = is_not_ok(systemctl.stop_modules(*modules)) + elif command in ["try-restart"]: + exitcode = is_not_ok(systemctl.try_restart_modules(*modules)) + elif command in ["unmask"]: + exitcode = is_not_ok(systemctl.unmask_modules(*modules)) + elif command in ["version"]: + print_str_list(systemctl.version_info()) + elif command in ["__cat_unit"]: + print_str(systemctl.cat_unit(*modules)) + elif command in ["__get_active_unit"]: + print_str(systemctl.get_active_unit(*modules)) + elif command in ["__get_description"]: + print_str(systemctl.get_description(*modules)) + elif command in ["__get_status_file"]: + print_str(systemctl.get_status_file(modules[0])) + elif command in ["__get_status_pid_file", "__get_pid_file"]: + print_str(systemctl.get_status_pid_file(modules[0])) + elif command in ["__disable_unit"]: + exitcode = is_not_ok(systemctl.disable_unit(*modules)) + elif command in ["__enable_unit"]: + exitcode = is_not_ok(systemctl.enable_unit(*modules)) + elif command in ["__is_enabled"]: + exitcode = is_not_ok(systemctl.is_enabled(*modules)) + elif command in ["__killall"]: + exitcode = is_not_ok(systemctl.killall(*modules)) + elif command in ["__kill_unit"]: + exitcode = is_not_ok(systemctl.kill_unit(*modules)) + elif command in ["__load_preset_files"]: + print_str_list(systemctl.load_preset_files(*modules)) + elif command in ["__mask_unit"]: + 
exitcode = is_not_ok(systemctl.mask_unit(*modules)) + elif command in ["__read_env_file"]: + print_str_list_list(list(systemctl.read_env_file(*modules))) + elif command in ["__reload_unit"]: + exitcode = is_not_ok(systemctl.reload_unit(*modules)) + elif command in ["__reload_or_restart_unit"]: + exitcode = is_not_ok(systemctl.reload_or_restart_unit(*modules)) + elif command in ["__reload_or_try_restart_unit"]: + exitcode = is_not_ok(systemctl.reload_or_try_restart_unit(*modules)) + elif command in ["__reset_failed_unit"]: + exitcode = is_not_ok(systemctl.reset_failed_unit(*modules)) + elif command in ["__restart_unit"]: + exitcode = is_not_ok(systemctl.restart_unit(*modules)) + elif command in ["__start_unit"]: + exitcode = is_not_ok(systemctl.start_unit(*modules)) + elif command in ["__stop_unit"]: + exitcode = is_not_ok(systemctl.stop_unit(*modules)) + elif command in ["__try_restart_unit"]: + exitcode = is_not_ok(systemctl.try_restart_unit(*modules)) + elif command in ["__test_start_unit"]: + systemctl.test_start_unit(*modules) + elif command in ["__unmask_unit"]: + exitcode = is_not_ok(systemctl.unmask_unit(*modules)) + elif command in ["__show_unit_items"]: + print_str_list_list(list(systemctl.show_unit_items(*modules))) + else: + logg.error("Unknown operation %s", command) + return EXIT_FAILURE + # + exitcode |= systemctl.error + return exitcode + +if __name__ == "__main__": + import optparse + _o = optparse.OptionParser("%prog [options] command [name...]", + epilog="use 'help' command for more information") + _o.add_option("--version", action="store_true", + help="Show package version") + _o.add_option("--system", action="store_true", default=False, + help="Connect to system manager (default)") # overrides --user + _o.add_option("--user", action="store_true", default=_user_mode, + help="Connect to user service manager") + # _o.add_option("-H", "--host", metavar="[USER@]HOST", + # help="Operate on remote host*") + # _o.add_option("-M", "--machine", metavar="CONTAINER", + # help="Operate on local container*") + _o.add_option("-t", "--type", metavar="TYPE", action="append", dest="only_type", default=_only_type, + help="List units of a particual type") + _o.add_option("--state", metavar="STATE", action="append", dest="only_state", default=_only_state, + help="List units with particular LOAD or SUB or ACTIVE state") + _o.add_option("-p", "--property", metavar="NAME", action="append", dest="only_property", default=_only_property, + help="Show only properties by this name") + _o.add_option("--what", metavar="TYPE", action="append", dest="only_what", default=_only_what, + help="Defines the service directories to be cleaned (configuration, state, cache, logs, runtime)") + _o.add_option("-a", "--all", action="store_true", dest="show_all", default=_show_all, + help="Show all loaded units/properties, including dead empty ones. 
To list all units installed on the system, use the 'list-unit-files' command instead") + _o.add_option("-l", "--full", action="store_true", default=_full, + help="Don't ellipsize unit names on output (never ellipsized)") + _o.add_option("--reverse", action="store_true", + help="Show reverse dependencies with 'list-dependencies' (ignored)") + _o.add_option("--job-mode", metavar="MODE", + help="Specify how to deal with already queued jobs, when queuing a new job (ignored)") + _o.add_option("--show-types", action="store_true", + help="When showing sockets, explicitly show their type (ignored)") + _o.add_option("-i", "--ignore-inhibitors", action="store_true", + help="When shutting down or sleeping, ignore inhibitors (ignored)") + _o.add_option("--kill-who", metavar="WHO", + help="Who to send signal to (ignored)") + _o.add_option("-s", "--signal", metavar="SIG", + help="Which signal to send (ignored)") + _o.add_option("--now", action="store_true", default=_now, + help="Start or stop unit in addition to enabling or disabling it") + _o.add_option("-q", "--quiet", action="store_true", default=_quiet, + help="Suppress output") + _o.add_option("--no-block", action="store_true", default=False, + help="Do not wait until operation finished (ignored)") + _o.add_option("--no-legend", action="store_true", default=_no_legend, + help="Do not print a legend (column headers and hints)") + _o.add_option("--no-wall", action="store_true", default=False, + help="Don't send wall message before halt/power-off/reboot (ignored)") + _o.add_option("--no-reload", action="store_true", default=_no_reload, + help="Don't reload daemon after en-/dis-abling unit files") + _o.add_option("--no-ask-password", action="store_true", default=_no_ask_password, + help="Do not ask for system passwords") + # _o.add_option("--global", action="store_true", dest="globally", default=_globally, + # help="Enable/disable unit files globally") # for all user logins + # _o.add_option("--runtime", action="store_true", + # help="Enable unit files only temporarily until next reboot") + _o.add_option("-f", "--force", action="store_true", default=_force, + help="When enabling unit files, override existing symblinks / When shutting down, execute action immediately") + _o.add_option("--preset-mode", metavar="TYPE", default=_preset_mode, + help="Apply only enable, only disable, or all presets [%default]") + _o.add_option("--root", metavar="PATH", default=_root, + help="Enable unit files in the specified root directory (used for alternative root prefix)") + _o.add_option("-n", "--lines", metavar="NUM", + help="Number of journal entries to show") + _o.add_option("-o", "--output", metavar="CAT", + help="change journal output mode [short, ..., cat] (ignored)") + _o.add_option("--plain", action="store_true", + help="Print unit dependencies as a list instead of a tree (ignored)") + _o.add_option("--no-pager", action="store_true", + help="Do not pipe output into pager (mostly ignored)") + _o.add_option("--no-warn", action="store_true", + help="Do not generate certain warnings (ignored)") + # + _o.add_option("-c", "--config", metavar="NAME=VAL", action="append", default=[], + help="..override internal variables (InitLoopSleep,SysInitTarget) {%default}") + _o.add_option("-e", "--extra-vars", "--environment", metavar="NAME=VAL", action="append", default=[], + help="..override settings in the syntax of 'Environment='") + _o.add_option("-v", "--verbose", action="count", default=0, + help="..increase debugging information level") + _o.add_option("-4", "--ipv4", 
action="store_true", default=False, + help="..only keep ipv4 localhost in /etc/hosts") + _o.add_option("-6", "--ipv6", action="store_true", default=False, + help="..only keep ipv6 localhost in /etc/hosts") + _o.add_option("-1", "--init", action="store_true", default=False, + help="..keep running as init-process (default if PID 1)") + opt, args = _o.parse_args() + logging.basicConfig(level = max(0, logging.FATAL - 10 * opt.verbose)) + logg.setLevel(max(0, logging.ERROR - 10 * opt.verbose)) + # + _extra_vars = opt.extra_vars + _force = opt.force + _full = opt.full + _log_lines = opt.lines + _no_pager = opt.no_pager + _no_reload = opt.no_reload + _no_legend = opt.no_legend + _no_ask_password = opt.no_ask_password + _now = opt.now + _preset_mode = opt.preset_mode + _quiet = opt.quiet + _root = opt.root + _show_all = opt.show_all + _only_state = opt.only_state + _only_type = opt.only_type + _only_property = opt.only_property + _only_what = opt.only_what + # being PID 1 (or 0) in a container will imply --init + _pid = os.getpid() + _init = opt.init or _pid in [1, 0] + _user_mode = opt.user + if os.geteuid() and _pid in [1, 0]: + _user_mode = True + if opt.system: + _user_mode = False # override --user + # + for setting in opt.config: + nam, val = setting, "1" + if "=" in setting: + nam, val = setting.split("=", 1) + elif nam.startswith("no-") or nam.startswith("NO-"): + nam, val = nam[3:], "0" + elif nam.startswith("No") or nam.startswith("NO"): + nam, val = nam[2:], "0" + if nam in globals(): + old = globals()[nam] + if old is False or old is True: + logg.debug("yes %s=%s", nam, val) + globals()[nam] = (val in ("true", "True", "TRUE", "yes", "y", "Y", "YES", "1")) + logg.debug("... _show_all=%s", _show_all) + elif isinstance(old, float): + logg.debug("num %s=%s", nam, val) + globals()[nam] = float(val) + logg.debug("... MinimumYield=%s", MinimumYield) + elif isinstance(old, int): + logg.debug("int %s=%s", nam, val) + globals()[nam] = int(val) + logg.debug("... InitLoopSleep=%s", InitLoopSleep) + elif isinstance(old, basestring): + logg.debug("str %s=%s", nam, val) + globals()[nam] = val.strip() + logg.debug("... SysInitTarget=%s", SysInitTarget) + elif isinstance(old, list): + logg.debug("str %s+=[%s]", nam, val) + globals()[nam] += val.strip().split(",") + logg.debug("... 
_extra_vars=%s", _extra_vars) + else: + logg.warning("(ignored) unknown target type -c '%s' : %s", nam, type(old)) + else: + logg.warning("(ignored) unknown target config -c '%s' : no such variable", nam) + # + systemctl_debug_log = os_path(_root, expand_path(SYSTEMCTL_DEBUG_LOG, not _user_mode)) + systemctl_extra_log = os_path(_root, expand_path(SYSTEMCTL_EXTRA_LOG, not _user_mode)) + if os.access(systemctl_extra_log, os.W_OK): + loggfile = logging.FileHandler(systemctl_extra_log) + loggfile.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) + logg.addHandler(loggfile) + logg.setLevel(max(0, logging.INFO - 10 * opt.verbose)) + if os.access(systemctl_debug_log, os.W_OK): + loggfile = logging.FileHandler(systemctl_debug_log) + loggfile.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) + logg.addHandler(loggfile) + logg.setLevel(logging.DEBUG) + # + print_begin(sys.argv, args) + # + systemctl = Systemctl() + if opt.version: + args = ["version"] + if not args: + if _init: + args = ["default"] + else: + args = ["list-units"] + print_begin2(args) + command = args[0] + modules = args[1:] + try: + modules.remove("service") + except ValueError: + pass + if opt.ipv4: + systemctl.force_ipv4() + elif opt.ipv6: + systemctl.force_ipv6() + sys.exit(run(command, *modules)) diff --git a/battybirdnet-pi/rootfs/helpers/timedatectl b/battybirdnet-pi/rootfs/helpers/timedatectl new file mode 100644 index 000000000..482f0d75b --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/timedatectl @@ -0,0 +1,72 @@ +#!/bin/bash + +# Function to show the current timezone, with two alternative methods +show_timezone() { + # Check if the /etc/timezone file exists + if [ -f /etc/timezone ]; then + timezone=$(cat /etc/timezone) + elif [ -f /etc/localtime ]; then + timezone=$(readlink /etc/localtime) + timezone=${timezone/\/usr\/share\/zoneinfo\//} + else + timezone="Cannot determine timezone." + fi + echo "$timezone" +} + +# Function to set the timezone +set_timezone() { + new_timezone="$1" + echo "$new_timezone" | sudo tee /etc/timezone >/dev/null + sudo ln -sf /usr/share/zoneinfo/"$new_timezone" /etc/localtime + if [ -f /etc/environment ]; then sudo sed -i "/TZ/c\TZ=$new_timezone" /etc/environment; fi + if [ -d /var/run/s6/container_environment ]; then echo "$new_timezone" | sudo tee /var/run/s6/container_environment/TZ > /dev/null; fi + echo "$new_timezone" +} + +# Main script +case "$1" in + "set-ntp") + case "$2" in + "false") + sudo systemctl stop systemd-timesyncd + sudo systemctl disable systemd-timesyncd + echo "NTP disabled" + ;; + "true") + sudo systemctl start systemd-timesyncd + sudo systemctl enable systemd-timesyncd + echo "NTP enabled" + ;; + *) + echo "Invalid argument for set-ntp. Use 'false' or 'true'." 
+ ;; + esac + ;; + "show") + show_timezone + ;; + "set-timezone") + set_timezone "$2" + ;; + *) + # Get values + local_time="$(date)" + utc_time="$(date -u)" + time_zone="$(show_timezone)" + # Check if NTP is used + if sudo systemctl status systemd-timesyncd | grep -q " active"; then + ntp_status="yes" + ntp_service="active" + else + ntp_status="no" + ntp_service="inactive" + fi + # Print the information + echo "Local time: $local_time" + echo "Universal time: $utc_time" + echo "Time zone: $time_zone" + echo "Network time on: $ntp_status" + echo "NTP service: $ntp_service" + ;; +esac diff --git a/battybirdnet-pi/rootfs/helpers/views.add b/battybirdnet-pi/rootfs/helpers/views.add new file mode 100644 index 000000000..8514078a0 --- /dev/null +++ b/battybirdnet-pi/rootfs/helpers/views.add @@ -0,0 +1,27 @@ + if($_GET['view'] == "Converted"){ + ensure_authenticated(); + if(isset($_GET['species']) && isset($_GET['add'])){ + $file = './scripts/convert_species_list.txt'; + $str = file_get_contents("$file"); + $str = preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "\n", $str); + file_put_contents("$file", "$str"); + // Write $_GET['species'] to the file + file_put_contents("./scripts/convert_species_list.txt", htmlspecialchars_decode($_GET['species'], ENT_QUOTES)."\n", FILE_APPEND); + } elseif (isset($_GET['species']) && isset($_GET['del'])){ + $file = './scripts/convert_species_list.txt'; + $str = file_get_contents("$file"); + $str = preg_replace('/^\h*\v+/m', '', $str); + file_put_contents("$file", "$str"); + foreach($_GET['species'] as $selectedOption) { + $content = file_get_contents("./scripts/convert_species_list.txt"); + $newcontent = str_replace($selectedOption, "", "$content"); + $newcontent = str_replace(htmlspecialchars_decode($selectedOption, ENT_QUOTES), "", "$content"); + file_put_contents("./scripts/convert_species_list.txt", "$newcontent"); + } + $file = './scripts/convert_species_list.txt'; + $str = file_get_contents("$file"); + $str = preg_replace('/^\h*\v+/m', '', $str); + file_put_contents("$file", "$str"); + } + include('./scripts/convert_list.php'); + } diff --git a/battybirdnet-pi/stats.png b/battybirdnet-pi/stats.png new file mode 100644 index 000000000..b13bc2efb Binary files /dev/null and b/battybirdnet-pi/stats.png differ diff --git a/battybirdnet-pi/updater.json b/battybirdnet-pi/updater.json new file mode 100644 index 000000000..caf58ceaa --- /dev/null +++ b/battybirdnet-pi/updater.json @@ -0,0 +1,8 @@ +{ + "last_update": "27-07-2024", + "repository": "alexbelgium/hassio-addons", + "slug": "battybattybirdnet-pi", + "source": "github", + "upstream_repo": "rdz-oss/Battybattybirdnet-pi", + "upstream_version": "" +} diff --git a/bazarr/stats.png b/bazarr/stats.png index 68091889c..a3976bdbc 100644 Binary files a/bazarr/stats.png and b/bazarr/stats.png differ diff --git a/binance-trading-bot/stats.png b/binance-trading-bot/stats.png index c9d4f5100..7bcf2d9c4 100644 Binary files a/binance-trading-bot/stats.png and b/binance-trading-bot/stats.png differ diff --git a/birdnet-go/HAINTEGRATION.md b/birdnet-go/HAINTEGRATION.md new file mode 100644 index 000000000..e58562e2a --- /dev/null +++ b/birdnet-go/HAINTEGRATION.md @@ -0,0 +1,248 @@ +# BirdNET-Go Addon: Home Assistant Integration + +BirdNET-Go can be integrated with Home Assistant using a MQTT Broker. + +## MQTT Configuration + +Your Home Assistant must be setup with MQTT and BirdNET-Go MQTT integration must be enabled. Modify the BirdNET-Go config.yaml file to enable MQTT. 
If you are using the Mosquitto Broker addon, you will see a log message during the BirdNET-Go startup showing the internal MQTT server details needed for configuration similar to below. + +```text +BirdNET-Go log snipped showing MQTT details: +/etc/cont-init.d/33-mqtt.sh: executing +--- +MQTT addon is active on your system! Add the MQTT details below to the Birdnet-go config.yaml : +MQTT user : addons +MQTT password : Ri5ahV1aipeiw0aelerooteixai5ohtoeNg6oo3mo0thi5te0phiezuge4Phoore +MQTT broker : tcp://core-mosquitto:1883 +--- + +Edit this section of config.yaml found in addon_configs/db21ed7f_birdnet-go/: + mqtt: + enabled: true # true to enable MQTT + broker: tcp://core-mosquitto:1883 # MQTT (tcp://host:port) + topic: birdnet # MQTT topic + username: addons # MQTT username + password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # MQTT password +``` + +## BirdNET-Go MQTT Sensors + +Add the [MQTT sensor](https://www.home-assistant.io/integrations/sensor.mqtt/) yaml configuration below to your Home Assistant configuration.yaml file. Reload the configuration and once BirdNET-Go publishes a new finding to MQTT the new BirdNET-Go sensors should show that latest finding data. + +```yaml +mqtt: + sensor: + - name: "Birdnet-Go" + state_topic: "birdnet" + value_template: "{{ today_at(value_json.Time) }}" + json_attributes_topic: "birdnet" + json_attributes_template: "{{ value_json | tojson }}" + - name: "Birdnet-Go Bird Image Url" + state_topic: "birdnet" + value_template: "{{ value_json.BirdImage.URL }}" + - name: "Birdnet-Go Clip Name" + state_topic: "birdnet" + value_template: "{{ value_json.ClipName }}" + - name: "Birdnet-Go Common Name" + state_topic: "birdnet" + value_template: "{{ value_json.CommonName }}" + - name: "Birdnet-Go Confidence" + state_topic: "birdnet" + value_template: "{{ (value_json.Confidence | float * 100) | round(2) }}" + unit_of_measurement: "%" + - name: "Birdnet-Go Date" + state_topic: "birdnet" + value_template: "{{ value_json.Date }}" + - name: "Birdnet-Go ProcessingTime" + state_topic: "birdnet" + value_template: "{{ (value_json.ProcessingTime | float / 1000000000 ) | round(4) }}" + unit_of_measurement: "s" + - name: "Birdnet-Go Scientific Name" + state_topic: "birdnet" + value_template: "{{ value_json.ScientificName }}" + - name: "Birdnet-Go Sensitivity" + state_topic: "birdnet" + value_template: "{{ value_json.Sensitivity }}" + - name: "Birdnet-Go Source" + state_topic: "birdnet" + value_template: "{{ value_json.Source }}" + - name: "Birdnet-Go Species Code" + state_topic: "birdnet" + value_template: "{{ value_json.SpeciesCode }}" + - name: "Birdnet-Go Threshold" + state_topic: "birdnet" + value_template: "{{ value_json.Threshold }}" + - name: "Birdnet-Go Time" + state_topic: "birdnet" + value_template: "{{ today_at(value_json.Time) }}" +``` + +![BirdNET-go MQTT](./images/ha_birdnet_mqtt_sensor.png) + +## BirdNET-Go Events Sensor[^1] + +Then create a new template sensor using the configuration below. 
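+Before creating the sensor, it can be worth checking that detections are actually reaching the broker. A minimal sketch, assuming the Mosquitto broker details shown above (`core-mosquitto:1883`, user `addons`, topic `birdnet`); adjust the host, credentials and topic to your own setup:
+
+```bash
+# Print every BirdNET-Go detection as it is published to the broker.
+# Host, port, user and topic are the values reported in the add-on log above;
+# replace <password> with your own MQTT password.
+mosquitto_sub -h core-mosquitto -p 1883 -u addons -P '<password>' -t birdnet -v
+```
+
+Each message is a single JSON object whose fields (`CommonName`, `Confidence`, `Time`, and so on) are the ones the sensors above read through `value_json`. The template sensor below appends each detection to a `bird_events` attribute and clears that list at midnight; the dashboard cards further down read from this attribute.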
+ +```yaml +- trigger: + - platform: mqtt + topic: "birdnet" + - platform: time + at: "00:00:00" + id: reset + sensor: + - unique_id: c893533c-3c06-4ebe-a5bb-da833da0a947 + name: BirdNET-Go Events + state: > + {% if trigger.id == 'reset' %} + {{ now() }} + {% else %} + {{ today_at(trigger.payload_json.Time) }} + {% endif %} + attributes: + bird_events: > + {% if trigger.id == 'reset' %} + {{ [] }} + {% else %} + {% set time = trigger.payload_json.Time %} + {% set name = trigger.payload_json.CommonName %} + {% set confidence = trigger.payload_json.Confidence|round(2) * 100 ~ '%' %} + {% set current = this.attributes.get('bird_events', []) %} + {% set new = dict(time=time, name=name, confidence=confidence) %} + {{ current + [new] }} + {% endif %} +``` + +### BirdNET-Go Dashboard Cards + +There are two versions listed below. The first example will link the Bird Name to Wikipedia. The other example will link to All About Birds. You will need to modify the Confidence link to match your Home Assistant setup. + +![BirdNET-go Markdown Card Wikipedia](./images/ha_birdnet_markdown_card_wikipedia.png) + +```yaml +type: markdown +title: BirdNET (Wikipedia) +content: >- + Time|  Bird Name|Number Today|    Max + [Confidence](http://ip_address_of_HA:8080/) + + :---|:---|:---:|:---: + + {%- set t = now() %} + + {%- set bird_list = state_attr('sensor.birdnet_go_events','bird_events') | + sort(attribute='time', reverse=true) | map(attribute='name') | unique | list + %} + + {%- set bird_objects = state_attr('sensor.birdnet_go_events','bird_events') | + sort(attribute='time', reverse=true) %} + + {%- for thisbird in bird_list or [] %} + + {%- set ubird = ((bird_objects | selectattr("name", "equalto", thisbird)) | + list)[0] %} + + {%- set ubird_count = ((bird_objects | selectattr("name", "equalto", + thisbird)) | list) | length %} + + {%- set ubird_max_confidence = ((bird_objects | selectattr("name", "equalto", + thisbird)) | map(attribute='confidence') | map('replace', '%', '') | + map('float') | max | round(0)) %} + + {%- if ubird_max_confidence > 70 %} + + {{ubird.time}} + |  [{{ubird.name}}](https://en.wikipedia.org/wiki/{{ubird.name | + replace(' ', '_')}}) | {{ubird_count}} | {{ ubird_max_confidence }} % + + {%- endif %} + + {%- endfor %} +card_mod: + style: + $: | + .card-header { + display: flex !important; + align-items: center; + } + .card-header:before { + content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'%3E%3Cpath fill='%23000' d='m23 11.5l-3.05-1.13c-.26-1.15-.91-1.81-.91-1.81a4.19 4.19 0 0 0-5.93 0l-1.48 1.48L5 3c-1 4 0 8 2.45 11.22L2 19.5s8.89 2 14.07-2.05c2.76-2.16 3.38-3.42 3.77-4.75zm-5.29.22c-.39.39-1.03.39-1.42 0a.996.996 0 0 1 0-1.41c.39-.39 1.03-.39 1.42 0s.39 1.02 0 1.41'/%3E%3C/svg%3E"); + height: 42px; + width: 42px; + margin-top: 0px; + padding-left: 0px; + padding-right: 14px; + } + @media (prefers-color-scheme: dark) { + .card-header:before { + content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'%3E%3Cpath fill='%23E1E1E1' d='m23 11.5l-3.05-1.13c-.26-1.15-.91-1.81-.91-1.81a4.19 4.19 0 0 0-5.93 0l-1.48 1.48L5 3c-1 4 0 8 2.45 11.22L2 19.5s8.89 2 14.07-2.05c2.76-2.16 3.38-3.42 3.77-4.75zm-5.29.22c-.39.39-1.03.39-1.42 0a.996.996 0 0 1 0-1.41c.39-.39 1.03-.39 1.42 0s.39 1.02 0 1.41'/%3E%3C/svg%3E"); + height: 42px; + width: 42px; + margin-top: 0px; + padding-left: 0px; + padding-right: 14px; + } + } +``` + +![BirdNET-go Markdown Card All About 
Birds](./images/ha_birdnet_markdown_card_all_about_birds.png) + +```yaml +type: markdown +title: BirdNET (All About Birds) +content: >- + Time|  Bird Name|Number Today|    Max + [Confidence](http://ip_address_of_HA:8080/) + + :---|:---|:---:|:---: + + {%- set t = now() %} + + {%- set bird_list = state_attr('sensor.birdnet_go_events','bird_events') | + sort(attribute='time', reverse=true) | map(attribute='name') | unique | list + %} + + {%- set bird_objects = state_attr('sensor.birdnet_go_events','bird_events') | + sort(attribute='time', reverse=true) %} + + {%- for thisbird in bird_list or [] %} + + {%- set ubird = ((bird_objects | selectattr("name", "equalto", thisbird)) | + list)[0] %} + + {%- set ubird_count = ((bird_objects | selectattr("name", "equalto", + thisbird)) | list) | length %} + + {%- set ubird_max_confidence = ((bird_objects | selectattr("name", "equalto", + thisbird)) | map(attribute='confidence') | map('replace', '%', '') | + map('float') | max | round(0)) %} + + {%- if ubird_max_confidence > 70 %} + + {{ubird.time}} + |  [{{ubird.name}}](https://www.allaboutbirds.org/guide/{{ubird.name + | replace(' ', '_')}}) | {{ubird_count}} | {{ ubird_max_confidence }} % + + {%- endif %} + + {%- endfor %} +card_mod: + style: + $: | + .card-header { + display: flex !important; + align-items: center; + } + .card-header:before { + content: url("data:image/svg+xml;base64,PHN2ZyBpZD0iTGF5ZXJfMSIgZGF0YS1uYW1lPSJMYXllciAxIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyMDYuODcgMTE2LjY2Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2Y0ZTUwNTt9LmNscy0ye2ZpbGw6I2UzMWUyNjt9LmNscy0ze2ZpbGw6I2ZmZjt9PC9zdHlsZT48L2RlZnM+PHBhdGggZD0iTTIwNi4zNywxNi42OHMtMTYuNDQtNC4zNC0yMi43Ni00LjljMCwwLTI1LDEzLjUtMzIsMThhMTkuMTYsMTkuMTYsMCwwLDAtOC42NywxMy44OWwzNS43MS0yNi4zMmgyOEMyMDcuMzEsMTcuMzksMjA2LjM3LDE2LjY4LDIwNi4zNywxNi42OFoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBkPSJNMTQ4LjU1LDI3LjMzYzcuMzItNC45LDMyLjYyLTE4LjczLDMyLjYyLTE4LjczbDAsMEEzMC42OSwzMC42OSwwLDAsMCwxNTktLjYzYTQ0LjIzLDQ0LjIzLDAsMCwwLTIwLjcxLDVIMGMwLDMuNzEsNS42LDYuNTYsMTIuMTQsNi41Nkg1Mi4zNkw4Ni42MiwzNS4xMlY3MS4zN2MwLDE1LjczLDguMjYsMjkuNDQsMjEuNzgsMzcuMzVTMTI4LjY4LDExNiwxMzguNjMsMTE2VjQ2Ljg3QzEzOC42Myw0MC43OCwxNDAuNDcsMzIuNzMsMTQ4LjU1LDI3LjMzWk0xNjcuODcsOGEyLjUxLDIuNTEsMCwxLDEtMi41MSwyLjUxQTIuNTEsMi41MSwwLDAsMSwxNjcuODcsOFptLTI5LjEzLDEzLDE1LjY5LTguNjgsNi44OS41N0wxMzguNzQsMjUuMzZaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBhdGggY2xhc3M9ImNscy0xIiBkPSJNNTIuMzYsMTAuOTFIMTEwYy0xMi44OSwwLTIzLjQsMTAuMzUtMjMuNCwyNC4yMVoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBjbGFzcz0iY2xzLTIiIGQ9Ik0xNzgsMTAuMzNBMzEuNzEsMzEuNzEsMCwwLDAsMTU3Ljc4LDIuOVYtLjYxbDEuMjUsMEEzMC42MywzMC42MywwLDAsMSwxODEuMTcsOC42WiIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMCAwLjYzKSIvPjxwYXRoIGNsYXNzPSJjbHMtMiIgZD0iTTE3OC42MywxNy4zOWwtMjUsMTguNDNzLS4yOS0yLjcsMy40Ny01Ljc0LDI2LjUtMTguMywyNi41LTE4LjNaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBhdGggY2xhc3M9ImNscy0zIiBkPSJNMTI4LjE0LDY0LjQ3VjUyLjE1YzAtNS4xOC0yLjExLTguNzctNi45My0xMi4xOEwxMDAuNzksMjUuNTRhMTQuMzIsMTQuMzIsMCwwLDAsMiwyMVoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBjbGFzcz0iY2xzLTMiIGQ9Ik0xMjguMTQsNjQuNDdWNTIuMTVjMC01LjE4LTIuMTEtOC43Ny02LjkzLTEyLjE4TDEwMC43OSwyNS41NGExNC4zMiwxNC4zMiwwLDAsMCwyLDIxWiIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMCAwLjYzKSIvPjxwYXRoIGNsYXNzPSJjbHMtMyIgZD0iTTE1MS41OSwyOS44MmM3LTQuNTQsMzItMTgsMzItMThhMTYuMjQsMTYuMjQsMCwwLDAtMi40MS0zLjE1bDAsMHMtMjUuMywxMy44My0zMi42MiwxOC43My05LjU3LDEyLjE3LTkuODcsMThsLS4wNSwxLjUxLDQuMjktMy4xNkExOS4xNiwxOS4xNiwwLDAsMSwxNTEuNTksMjkuODJaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjM
pIi8+PHBhdGggY2xhc3M9ImNscy0zIiBkPSJNMTY3Ljg3LDhhMi41MSwyLjUxLDAsMSwxLTIuNTEsMi41MUEyLjUxLDIuNTEsMCwwLDEsMTY3Ljg3LDhaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBvbHlnb24gY2xhc3M9ImNscy0zIiBwb2ludHM9IjEzOC43NCAyMS41NyAxNTQuNDMgMTIuODkgMTYxLjMyIDEzLjQ1IDEzOC43NCAyNS45OCAxMzguNzQgMjEuNTciLz48L3N2Zz4="); + height: 20px; + width: 60px; + margin-top: -10px; + padding-left: 8px; + padding-right: 18px; + } +``` + +## Footnotes + +[^1]: [Displaying Birdnet-go detections](https://community.home-assistant.io/t/displaying-birdnet-go-detections/713611/22) diff --git a/birdnet-go/README.md b/birdnet-go/README.md index fb2014ea6..82619b9e7 100644 --- a/birdnet-go/README.md +++ b/birdnet-go/README.md @@ -56,6 +56,7 @@ Additional environment variables can be configured there The installation of this add-on is pretty straightforward and not different in comparison to installing any other add-on. 1. Add my add-ons repository to your home assistant instance (in supervisor addons store at top right, or click button below if you have configured my HA) + [![Open your Home Assistant instance and show the add add-on repository dialog with a specific repository URL pre-filled.](https://my.home-assistant.io/badges/supervisor_add_addon_repository.svg)](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons) 1. Install this add-on. 1. Click the `Save` button to store your configuration. @@ -64,194 +65,9 @@ The installation of this add-on is pretty straightforward and not different in c 1. Check the logs of the add-on to see if everything went well. 1. Open the webUI and adapt the software options -## Integration with HA[^1] +## Integration with HA -Birdnet-Go can be integrated with Home Assistant using a MQTT Broker. - -### Birdnet-Go Events Sensor - -Your Home Assistant must be setup with MQTT and Birdnet-Go MQTT integration must be enabled. Modify the Birdnet-Go config.yaml file to enable MQTT. If you are using the Mosquitto Broker addon, you will see a log message during the Birdnet-Go startup showing the internal MQTT server details needed for configuration similar to below. - -```text -Birdnet-Go log snipped showing MQTT details: -/etc/cont-init.d/33-mqtt.sh: executing ---- -MQTT addon is active on your system! Add the MQTT details below to the Birdnet-go config.yaml : -MQTT user : addons -MQTT password : Ri5ahV1aipeiw0aelerooteixai5ohtoeNg6oo3mo0thi5te0phiezuge4Phoore -MQTT broker : tcp://core-mosquitto:1883 ---- - -Edit this section of config.yaml found in addon_configs/db21ed7f_birdnet-go/: - mqtt: - enabled: true # true to enable MQTT - broker: tcp://core-mosquitto:1883 # MQTT (tcp://host:port) - topic: birdnet # MQTT topic - username: addons # MQTT username - password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # MQTT password -``` - -Then create a new template sensor using the configuration below. 
- -```yaml -- trigger: - - platform: mqtt - topic: "birdnet" - - platform: time - at: "00:00:00" - id: reset - sensor: - - unique_id: c893533c-3c06-4ebe-a5bb-da833da0a947 - name: BirdNET-Go Events - state: > - {% if trigger.id == 'reset' %} - {{ now() }} - {% else %} - {{ today_at(trigger.payload_json.Time) }} - {% endif %} - attributes: - bird_events: > - {% if trigger.id == 'reset' %} - {{ [] }} - {% else %} - {% set time = trigger.payload_json.Time %} - {% set name = trigger.payload_json.CommonName %} - {% set confidence = trigger.payload_json.Confidence|round(2) * 100 ~ '%' %} - {% set current = this.attributes.get('bird_events', []) %} - {% set new = dict(time=time, name=name, confidence=confidence) %} - {{ current + [new] }} - {% endif %} -``` - -### Birdnet-Go Dashboard Cards - -There are two versions listed below. One will link the Bird Name to Wikipedia the other one will link to All About Birds. You will need to modify the Confidence link to match your Home Assistant setup. - -![Birdnet-go Markdown Card Wikipedia](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/birdnet-go/images/ha_birdnet_markdown_card_wikipedia.png) - -```yaml -type: markdown -title: Birdnet (Wikipedia) -content: >- - Time|  Bird Name|Number Today|    Max - [Confidence](http://192.168.1.25:8081/) - - :---|:---|:---:|:---: - - {%- set t = now() %} - - {%- set bird_list = state_attr('sensor.birdnet_go_events','bird_events') | - sort(attribute='time', reverse=true) | map(attribute='name') | unique | list - %} - - {%- set bird_objects = state_attr('sensor.birdnet_go_events','bird_events') | - sort(attribute='time', reverse=true) %} - - {%- for thisbird in bird_list or [] %} - - {%- set ubird = ((bird_objects | selectattr("name", "equalto", thisbird)) | - list)[0] %} - - {%- set ubird_count = ((bird_objects | selectattr("name", "equalto", - thisbird)) | list) | length %} - - {%- set ubird_max_confidence = ((bird_objects | selectattr("name", "equalto", - thisbird)) | map(attribute='confidence') | map('replace', '%', '') | - map('float') | max | round(0)) %} - - {%- if ubird_max_confidence > 70 %} - - {{ubird.time}} - |  [{{ubird.name}}](https://en.wikipedia.org/wiki/{{ubird.name | - replace(' ', '_')}}) | {{ubird_count}} | {{ ubird_max_confidence }} % - - {%- endif %} - - {%- endfor %} -card_mod: - style: - $: | - .card-header { - display: flex !important; - align-items: center; - } - .card-header:before { - content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'%3E%3Cpath fill='%23000' d='m23 11.5l-3.05-1.13c-.26-1.15-.91-1.81-.91-1.81a4.19 4.19 0 0 0-5.93 0l-1.48 1.48L5 3c-1 4 0 8 2.45 11.22L2 19.5s8.89 2 14.07-2.05c2.76-2.16 3.38-3.42 3.77-4.75zm-5.29.22c-.39.39-1.03.39-1.42 0a.996.996 0 0 1 0-1.41c.39-.39 1.03-.39 1.42 0s.39 1.02 0 1.41'/%3E%3C/svg%3E"); - height: 42px; - width: 42px; - margin-top: 0px; - padding-left: 0px; - padding-right: 14px; - } - @media (prefers-color-scheme: dark) { - .card-header:before { - content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'%3E%3Cpath fill='%23E1E1E1' d='m23 11.5l-3.05-1.13c-.26-1.15-.91-1.81-.91-1.81a4.19 4.19 0 0 0-5.93 0l-1.48 1.48L5 3c-1 4 0 8 2.45 11.22L2 19.5s8.89 2 14.07-2.05c2.76-2.16 3.38-3.42 3.77-4.75zm-5.29.22c-.39.39-1.03.39-1.42 0a.996.996 0 0 1 0-1.41c.39-.39 1.03-.39 1.42 0s.39 1.02 0 1.41'/%3E%3C/svg%3E"); - height: 42px; - width: 42px; - margin-top: 0px; - padding-left: 0px; - padding-right: 14px; - } - } -``` - -![Birdnet-go Markdown Card All About 
Birds](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/birdnet-go/images/ha_birdnet_markdown_card_all_about_birds.png) - -```yaml -type: markdown -title: Birdnet (All About Birds) -content: >- - Time|  Bird Name|Number Today|    Max - [Confidence](http://ip_address_of_HA:8080/) - - :---|:---|:---:|:---: - - {%- set t = now() %} - - {%- set bird_list = state_attr('sensor.birdnet_go_events','bird_events') | - sort(attribute='time', reverse=true) | map(attribute='name') | unique | list - %} - - {%- set bird_objects = state_attr('sensor.birdnet_go_events','bird_events') | - sort(attribute='time', reverse=true) %} - - {%- for thisbird in bird_list or [] %} - - {%- set ubird = ((bird_objects | selectattr("name", "equalto", thisbird)) | - list)[0] %} - - {%- set ubird_count = ((bird_objects | selectattr("name", "equalto", - thisbird)) | list) | length %} - - {%- set ubird_max_confidence = ((bird_objects | selectattr("name", "equalto", - thisbird)) | map(attribute='confidence') | map('replace', '%', '') | - map('float') | max | round(0)) %} - - {%- if ubird_max_confidence > 70 %} - - {{ubird.time}} - |  [{{ubird.name}}](https://www.allaboutbirds.org/guide/{{ubird.name - | replace(' ', '_')}}) | {{ubird_count}} | {{ ubird_max_confidence }} % - - {%- endif %} - - {%- endfor %} -card_mod: - style: - $: | - .card-header { - display: flex !important; - align-items: center; - } - .card-header:before { - content: url("data:image/svg+xml;base64,PHN2ZyBpZD0iTGF5ZXJfMSIgZGF0YS1uYW1lPSJMYXllciAxIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyMDYuODcgMTE2LjY2Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2Y0ZTUwNTt9LmNscy0ye2ZpbGw6I2UzMWUyNjt9LmNscy0ze2ZpbGw6I2ZmZjt9PC9zdHlsZT48L2RlZnM+PHBhdGggZD0iTTIwNi4zNywxNi42OHMtMTYuNDQtNC4zNC0yMi43Ni00LjljMCwwLTI1LDEzLjUtMzIsMThhMTkuMTYsMTkuMTYsMCwwLDAtOC42NywxMy44OWwzNS43MS0yNi4zMmgyOEMyMDcuMzEsMTcuMzksMjA2LjM3LDE2LjY4LDIwNi4zNywxNi42OFoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBkPSJNMTQ4LjU1LDI3LjMzYzcuMzItNC45LDMyLjYyLTE4LjczLDMyLjYyLTE4LjczbDAsMEEzMC42OSwzMC42OSwwLDAsMCwxNTktLjYzYTQ0LjIzLDQ0LjIzLDAsMCwwLTIwLjcxLDVIMGMwLDMuNzEsNS42LDYuNTYsMTIuMTQsNi41Nkg1Mi4zNkw4Ni42MiwzNS4xMlY3MS4zN2MwLDE1LjczLDguMjYsMjkuNDQsMjEuNzgsMzcuMzVTMTI4LjY4LDExNiwxMzguNjMsMTE2VjQ2Ljg3QzEzOC42Myw0MC43OCwxNDAuNDcsMzIuNzMsMTQ4LjU1LDI3LjMzWk0xNjcuODcsOGEyLjUxLDIuNTEsMCwxLDEtMi41MSwyLjUxQTIuNTEsMi41MSwwLDAsMSwxNjcuODcsOFptLTI5LjEzLDEzLDE1LjY5LTguNjgsNi44OS41N0wxMzguNzQsMjUuMzZaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBhdGggY2xhc3M9ImNscy0xIiBkPSJNNTIuMzYsMTAuOTFIMTEwYy0xMi44OSwwLTIzLjQsMTAuMzUtMjMuNCwyNC4yMVoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBjbGFzcz0iY2xzLTIiIGQ9Ik0xNzgsMTAuMzNBMzEuNzEsMzEuNzEsMCwwLDAsMTU3Ljc4LDIuOVYtLjYxbDEuMjUsMEEzMC42MywzMC42MywwLDAsMSwxODEuMTcsOC42WiIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMCAwLjYzKSIvPjxwYXRoIGNsYXNzPSJjbHMtMiIgZD0iTTE3OC42MywxNy4zOWwtMjUsMTguNDNzLS4yOS0yLjcsMy40Ny01Ljc0LDI2LjUtMTguMywyNi41LTE4LjNaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBhdGggY2xhc3M9ImNscy0zIiBkPSJNMTI4LjE0LDY0LjQ3VjUyLjE1YzAtNS4xOC0yLjExLTguNzctNi45My0xMi4xOEwxMDAuNzksMjUuNTRhMTQuMzIsMTQuMzIsMCwwLDAsMiwyMVoiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDAgMC42MykiLz48cGF0aCBjbGFzcz0iY2xzLTMiIGQ9Ik0xMjguMTQsNjQuNDdWNTIuMTVjMC01LjE4LTIuMTEtOC43Ny02LjkzLTEyLjE4TDEwMC43OSwyNS41NGExNC4zMiwxNC4zMiwwLDAsMCwyLDIxWiIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMCAwLjYzKSIvPjxwYXRoIGNsYXNzPSJjbHMtMyIgZD0iTTE1MS41OSwyOS44MmM3LTQuNTQsMzItMTgsMzItMThhMTYuMjQsMTYuMjQsMCwwLDAtMi40MS0zLjE1bDAsMHMtMjUuMywxMy44My0zMi42MiwxOC43My05LjU3LDEyLjE3LTkuODcsMThsLS4wNSwxLjUxLDQuMjktMy4xNkExOS4
xNiwxOS4xNiwwLDAsMSwxNTEuNTksMjkuODJaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBhdGggY2xhc3M9ImNscy0zIiBkPSJNMTY3Ljg3LDhhMi41MSwyLjUxLDAsMSwxLTIuNTEsMi41MUEyLjUxLDIuNTEsMCwwLDEsMTY3Ljg3LDhaIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgwIDAuNjMpIi8+PHBvbHlnb24gY2xhc3M9ImNscy0zIiBwb2ludHM9IjEzOC43NCAyMS41NyAxNTQuNDMgMTIuODkgMTYxLjMyIDEzLjQ1IDEzOC43NCAyNS45OCAxMzguNzQgMjEuNTciLz48L3N2Zz4="); - height: 20px; - width: 60px; - margin-top: -10px; - padding-left: 8px; - padding-right: 18px; - } -``` +Home Assistant Integration instructions are found here, [Birdnet-Go Addon: Home Assistant Integration](./HAINTEGRATION.md) ## Setting up a RTSP Source using VLC @@ -322,6 +138,3 @@ Create an issue on github ![illustration](https://raw.githubusercontent.com/tphakala/birdnet-go/main/doc/BirdNET-Go-dashboard.webp) -## Footnotes - -[^1]: [Displaying Birdnet-go detections](https://community.home-assistant.io/t/displaying-birdnet-go-detections/713611/22) diff --git a/birdnet-go/images/ha_birdnet_mqtt_sensor.png b/birdnet-go/images/ha_birdnet_mqtt_sensor.png new file mode 100644 index 000000000..0ac0291a9 Binary files /dev/null and b/birdnet-go/images/ha_birdnet_mqtt_sensor.png differ diff --git a/birdnet-go/stats.png b/birdnet-go/stats.png index e778dcc1a..43d66bf00 100644 Binary files a/birdnet-go/stats.png and b/birdnet-go/stats.png differ diff --git a/birdnet-pi/CHANGELOG.md b/birdnet-pi/CHANGELOG.md index c3f78aff5..378e42a75 100644 --- a/birdnet-pi/CHANGELOG.md +++ b/birdnet-pi/CHANGELOG.md @@ -1,5 +1,19 @@ +## 0.13-75 (08-08-2024) +- Fix : improve symlinks logic + +## 0.13-73 (07-08-2024) +- [REMOVE] : SPECIES_CONVERTER_ENABLED option removed, please instead us "exclude species" and "change detection" +- [FEAT] : Improve dark mode +- [FEAT] : Species confirmation + +## 0.13-71 (14-07-2024) +- [FEAT] : Add manual MQTT options + +## 0.13-69 (12-07-2024) +- [FEAT] : limit a specific number of audio samples per species + ## 0.13-68 (10-07-2024) -- [FIX] : correct mqtt posting, switch to service +- [FIX] : correct mqtt posting, switch to service ## 0.13-65 (08-07-2024) - [FEAT] : publish mqtt to homeassistant if a server is found diff --git a/birdnet-pi/Dockerfile b/birdnet-pi/Dockerfile index 778d3441a..583cf8ccb 100644 --- a/birdnet-pi/Dockerfile +++ b/birdnet-pi/Dockerfile @@ -103,6 +103,20 @@ RUN \ apt-get clean all && \ rm -rf /var/lib/apt/lists/* +# Add PR +RUN \ + cd /home/pi/BirdNET-Pi && \ + git fetch origin pull/158/head:158 && \ + git checkout main && \ + git branch -D f-merge-prs || true && \ + git checkout -b f-merge-prs && \ + git merge 158 --no-edit && \ + git stash && \ + git checkout main && \ + git merge f-merge-prs && \ + git branch -d f-merge-prs && \ + git branch -d 158 + # Update with my repo if more recent #WORKDIR /home/pi #RUN \ diff --git a/birdnet-pi/README.md b/birdnet-pi/README.md index 67ee46d3e..b9aee2dd8 100644 --- a/birdnet-pi/README.md +++ b/birdnet-pi/README.md @@ -1,3 +1,5 @@ +## ⚠ Open Request : [✨ [REQUEST] BattyBirdnet-Pi x86-64 (opened 2024-07-29)](https://github.com/alexbelgium/hassio-addons/issues/1498) by [@mrcrunchybeans](https://github.com/mrcrunchybeans) +## ⚠ Open Issue : [🐛 [BirdNET-Pi] The Detection in the Overview is permanently refreshing (opened 2024-08-07)](https://github.com/alexbelgium/hassio-addons/issues/1506) by [@UlrichThiess](https://github.com/UlrichThiess) # Home assistant add-on: birdnet-pi [![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) @@ -56,7 +58,6 @@ Options can be configured through three ways : BIRDSONGS_FOLDER: folder to 
store birdsongs file # It should be an ssd if you want to avoid clogging of analysis MQTT_DISABLED : if true, disables automatic mqtt publishing. Only valid if there is a local broker already available LIVESTREAM_BOOT_ENABLED: start livestream from boot, or from settings -SPECIES_CONVERTER_ENABLED: true/false. if enabled, will create a new setting in the birdnet options where you can specify birds to convert. It will convert on the fly the specie when detected PROCESSED_FOLDER_ENABLED : if enabled, you need to set in the birdnet.conf (or the setting of birdnet) the number of last wav files that will be saved in the temporary folder "/tmp/Processed" within the tmpfs (so no disk wear) in case you want to retrieve them. This amount can be adapted from the addon options TZ: Etc/UTC specify a timezone to use, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List pi_password: set the user password to access the web terminal diff --git a/birdnet-pi/config.json b/birdnet-pi/config.json index 288a4ad20..cd87c25f6 100644 --- a/birdnet-pi/config.json +++ b/birdnet-pi/config.json @@ -61,7 +61,6 @@ ], "image": "ghcr.io/alexbelgium/birdnet-pi-{arch}", "ingress": true, - "ingress_stream": true, "init": false, "map": [ "addon_config:rw", @@ -94,10 +93,13 @@ ], "schema": { "BIRDSONGS_FOLDER": "str?", - "MQTT_DISABLED": "bool?", "LIVESTREAM_BOOT_ENABLED": "bool", + "MQTT_DISABLED": "bool?", + "MQTT_HOST_manual": "str?", + "MQTT_PASSWORD_manual": "password?", + "MQTT_PORT_manual": "int?", + "MQTT_USER_manual": "str?", "PROCESSED_FOLDER_ENABLED": "bool?", - "SPECIES_CONVERTER_ENABLED": "bool?", "TZ": "str?", "certfile": "str", "cifsdomain": "str?", @@ -117,6 +119,6 @@ "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/birdnet-pi", "usb": true, - "version": "0.13-68", + "version": "0.13-75", "video": true } diff --git a/birdnet-pi/rootfs/custom-services.d/30-monitoring.sh b/birdnet-pi/rootfs/custom-services.d/30-monitoring.sh index 07137b371..4364481fb 100755 --- a/birdnet-pi/rootfs/custom-services.d/30-monitoring.sh +++ b/birdnet-pi/rootfs/custom-services.d/30-monitoring.sh @@ -24,7 +24,7 @@ mkdir -p "$ingest_dir" chown -R pi:pi "$ingest_dir" chmod -R 755 "$ingest_dir" -function apprisemessage() { +function apprisealert() { # Set failed check so it only runs once touch "$HOME"/BirdNET-Pi/failed_servicescheck NOTIFICATION="" @@ -77,13 +77,15 @@ while true; do bashio::log.green "$(date) INFO ${wavs} wav files waiting in $ingest_dir, $srv state is $state" - if ((wavs > 100)) && [[ "$state" == "active" ]]; then + if ((wavs > 100)); then + bashio::log.red "$(date) WARNING too many files in queue, pausing $srv" sudo systemctl stop "$srv" - bashio::log.red "$(date) WARNING stopped $srv service" + sudo systemctl restart birdnet_analysis if [ -s "$HOME/BirdNET-Pi/apprise.txt" ]; then apprisealert; fi - elif ((wavs <= 100)) && [[ "$state" != "active" ]]; then - sudo systemctl start $srv + elif [[ "$state" != "active" ]]; then bashio::log.yellow "$(date) INFO started $srv service" + sudo systemctl start $srv + sudo systemctl restart birdnet_analysis fi ((counter--)) diff --git a/birdnet-pi/rootfs/etc/cont-init.d/01-structure.sh b/birdnet-pi/rootfs/etc/cont-init.d/01-structure.sh index dda92e81e..1cacc1248 100755 --- a/birdnet-pi/rootfs/etc/cont-init.d/01-structure.sh +++ b/birdnet-pi/rootfs/etc/cont-init.d/01-structure.sh @@ -11,12 +11,12 @@ bashio::log.info "Ensuring the file structure is correct :" # Define structure echo "... 
creating default files" -touch /config/apprise.txt -touch /config/include_species_list.txt -touch /config/exclude_species_list.txt -if [ ! -f /config/IdentifiedSoFar.txt ]; then echo "" > /config/IdentifiedSoFar.txt; fi -if [ ! -f /config/disk_check_exclude.txt ]; then echo "" > /config/disk_check_exclude.txt; fi # Using touch caused an issue with stats.php -if [ ! -f /config/confirmed_species_list.txt ]; then echo "" > /config/confirmed_species_list.txt; fi # Using touch caused an issue with stats.php +touch /config/include_species_list.txt # Should be null +for files in apprise.txt exclude_species_list.txt IdentifiedSoFar.txt disk_check_exclude.txt confirmed_species_list.txt blacklisted_images.txt whitelist_species_list.txt; do + if [ ! -f /config/"$files" ]; then + echo "" > /config/"$files" + fi +done # Get BirdSongs folder locations BIRDSONGS_FOLDER="/config/BirdSongs" @@ -58,7 +58,7 @@ cp "$HOME"/BirdNET-Pi/birdnet.conf "$HOME"/BirdNET-Pi/birdnet.bak # Symlink files echo "... creating symlink" -for files in "$HOME/BirdNET-Pi/birdnet.conf" "$HOME/BirdNET-Pi/scripts/birds.db" "$HOME/BirdNET-Pi/BirdDB.txt" "$HOME/BirdNET-Pi/scripts/disk_check_exclude.txt" "$HOME/BirdNET-Pi/apprise.txt" "$HOME/BirdNET-Pi/exclude_species_list.txt" "$HOME/BirdNET-Pi/include_species_list.txt" "$HOME/BirdNET-Pi/IdentifiedSoFar.txt" "$HOME/BirdNET-Pi/confirmed_species_list.txt"; do +for files in "$HOME/BirdNET-Pi/birdnet.conf" "$HOME/BirdNET-Pi/scripts/whitelist_species_list.txt" "$HOME/BirdNET-Pi/blacklisted_images.txt" "$HOME/BirdNET-Pi/scripts/birds.db" "$HOME/BirdNET-Pi/BirdDB.txt" "$HOME/BirdNET-Pi/scripts/disk_check_exclude.txt" "$HOME/BirdNET-Pi/apprise.txt" "$HOME/BirdNET-Pi/exclude_species_list.txt" "$HOME/BirdNET-Pi/include_species_list.txt" "$HOME/BirdNET-Pi/IdentifiedSoFar.txt" "$HOME/BirdNET-Pi/scripts/confirmed_species_list.txt"; do filename="${files##*/}" if [ ! -f /config/"$filename" ]; then if [ -f "$files" ]; then @@ -68,7 +68,8 @@ for files in "$HOME/BirdNET-Pi/birdnet.conf" "$HOME/BirdNET-Pi/scripts/birds.db" fi fi if [ -e "$files" ]; then rm "$files"; fi - sudo -u pi ln -fs /config/"$filename" "$files" || bashio::log.fatal "Symlink creation failed for $filename" + sudo -u pi ln -fs /config/"$filename" "$HOME/BirdNET-Pi/$filename" || bashio::log.fatal "Symlink creation failed for $filename" + sudo -u pi ln -fs /config/"$filename" "$HOME/BirdNET-Pi/scripts/$filename" || bashio::log.fatal "Symlink creation failed for $filename" sudo -u pi ln -fs /config/"$filename" /etc/birdnet/"$filename" || bashio::log.fatal "Symlink creation failed for $filename" done diff --git a/birdnet-pi/rootfs/etc/cont-init.d/31-checks.sh b/birdnet-pi/rootfs/etc/cont-init.d/31-checks.sh index 47ffa5b77..2330c6956 100755 --- a/birdnet-pi/rootfs/etc/cont-init.d/31-checks.sh +++ b/birdnet-pi/rootfs/etc/cont-init.d/31-checks.sh @@ -28,20 +28,6 @@ grep -o '^[^#=]*=' "$configtemplate" | sed 's/=//' | while read -r var; do fi done -################ -# CHECK AMIXER # -################ - -# If default capture is set at 0%, increase it to 50% -# current_volume="$(amixer sget Capture | grep -oP '\[\d+%]' | tr -d '[]%' | head -1)" 2>/dev/null || true -# current_volume="${current_volume:-100}" - -# Set the default microphone volume to 50% if it's currently at 0% -# if [[ "$current_volume" -eq 0 ]]; then -# amixer sset Capture 70% -# bashio::log.warning "Microphone was off, volume set to 70%." 
-# fi - ############## # CHECK PORT # ############## @@ -51,4 +37,22 @@ if [[ "$(bashio::addon.port "80")" == 3000 ]]; then sleep infinity fi +################## +# PERFORM UPDATE # +################## + +echo " " +bashio::log.info "Performing potential updates" + +# Adapt update_birdnet_snippets +sed -i "s|systemctl list-unit-files|false \&\& echo|g" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh +sed -i "/systemctl /d" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh +sed -i "/find /d" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh +sed -i "/set -x/d" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh +sed -i "/restart_services/d" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh +sed -i "s|/etc/birdnet/birdnet.conf|/config/birdnet.conf|g" "$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh + +# Execute update_birdnet_snippets +/."$HOME"/BirdNET-Pi/scripts/update_birdnet_snippets.sh + echo " " diff --git a/birdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh b/birdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh index f97cfd537..8f6f41342 100755 --- a/birdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh +++ b/birdnet-pi/rootfs/etc/cont-init.d/33-mqtt.sh @@ -24,5 +24,24 @@ if bashio::services.available 'mqtt' && ! bashio::config.true 'MQTT_DISABLED' ; cp /helpers/birdnet_to_mqtt.sh /custom-services.d chmod 777 /usr/bin/birdnet_to_mqtt.py chmod 777 /custom-services.d/birdnet_to_mqtt.sh +elif bashio::config.has_value "MQTT_HOST_manual" && bashio::config.has_value "MQTT_PORT_manual"; then + bashio::log.green "---" + bashio::log.blue "MQTT is manually configured in the addon options" + bashio::log.blue "Birdnet-pi is now automatically configured to send its ouptut to MQTT" + bashio::log.green "---" + bashio::log.blue "Data will be posted to the topic : 'birdnet'" + bashio::log.blue "Json data : {'Date', 'Time', 'ScientificName', 'CommonName', 'Confidence', 'SpeciesCode', 'ClipName', 'url'}" + bashio::log.blue "---" + # Apply MQTT settings + sed -i "s|%%mqtt_server%%|$(bashio::config "MQTT_HOST_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_port%%|$(bashio::config "MQTT_PORT_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_user%%|$(bashio::config "MQTT_USER_manual")|g" /helpers/birdnet_to_mqtt.py + sed -i "s|%%mqtt_pass%%|$(bashio::config "MQTT_PASSWORD_manual")|g" /helpers/birdnet_to_mqtt.py + + # Copy script + cp /helpers/birdnet_to_mqtt.py /usr/bin/birdnet_to_mqtt.py + cp /helpers/birdnet_to_mqtt.sh /custom-services.d + chmod +x /usr/bin/birdnet_to_mqtt.py + chmod +x /custom-services.d/birdnet_to_mqtt.sh fi diff --git a/birdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh b/birdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh index 96398d9cf..b4fbae83a 100755 --- a/birdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh +++ b/birdnet-pi/rootfs/etc/cont-init.d/71-newfeatures.sh @@ -15,70 +15,6 @@ bashio::log.info "Adding optional features" # sed -i "s|ar 48000|ar 48000 -af afftdn=nr=30:nt=w:om=o|g" "$HOME"/BirdNET-Pi/scripts/birdnet_recording.sh #fi -# Add species conversion system -############################### -if bashio::config.true "SPECIES_CONVERTER_ENABLED"; then - echo "... 
adding feature of SPECIES_CONVERTER, a new tab is added to your Tools" - touch /config/convert_species_list.txt - chown pi:pi /config/convert_species_list.txt - sudo -u pi ln -fs /config/convert_species_list.txt "$HOME"/BirdNET-Pi/ - sudo -u pi ln -fs /config/convert_species_list.txt "$HOME"/BirdNET-Pi/scripts/ - # Not useful - sed -i "/exclude_species_list.txt/a sudo -u pi ln -fs /config/convert_species_list.txt $HOME/BirdNET-Pi/scripts/" "$HOME"/BirdNET-Pi/scripts/clear_all_data.sh - sed -i "/exclude_species_list.txt/a sudo -u pi ln -fs /config/convert_species_list.txt $HOME/BirdNET-Pi/scripts/" "$HOME"/BirdNET-Pi/scripts/install_services.sh - # Modify views.php if not already done - if ! grep -q "Converted" "$HOME"/BirdNET-Pi/homepage/views.php; then - # Add button - # shellcheck disable=SC2016 - sed -i '/Excluded Species List/a\ ' "$HOME"/BirdNET-Pi/homepage/views.php - # Flag to indicate whether we've found the target line - found_target=false - # Read the original file line by line - while IFS= read -r line; do - if [[ $line == *"if(\$_GET['view'] == \"File\"){"* ]]; then - found_target=true - fi - if $found_target; then - echo "$line" >> "$HOME"/BirdNET-Pi/homepage/views.php.temp - fi - done < "$HOME"/BirdNET-Pi/homepage/views.php - # Remove the extracted lines from the original file - # shellcheck disable=SC2016 - sed -i '/if(\$_GET\['\''view'\''\] == "File"){/,$d' "$HOME"/BirdNET-Pi/homepage/views.php - # Add new text - cat "/helpers/views.add" >> "$HOME"/BirdNET-Pi/homepage/views.php - cat "$HOME"/BirdNET-Pi/homepage/views.php.temp >> "$HOME"/BirdNET-Pi/homepage/views.php - # Clean up: Remove the temporary file - rm "$HOME"/BirdNET-Pi/homepage/views.php.temp - fi - - # Add the converter script - if [ ! -f "$HOME"/BirdNET-Pi/scripts/convert_list.php ]; then - mv -f /helpers/convert_list.php "$HOME"/BirdNET-Pi/scripts/convert_list.php - chown pi:pi "$HOME"/BirdNET-Pi/scripts/convert_list.php - chmod 664 "$HOME"/BirdNET-Pi/scripts/convert_list.php - fi - - # Change server - if ! 
grep -q "converted_entry" "$HOME"/BirdNET-Pi/scripts/server.py; then - sed -i "/INTERPRETER, M_INTERPRETER, INCLUDE_LIST, EXCLUDE_LIST/c INTERPRETER, M_INTERPRETER, INCLUDE_LIST, EXCLUDE_LIST, CONVERT_LIST = (None, None, None, None, None)" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/global INCLUDE_LIST, EXCLUDE_LIST/c\ global INCLUDE_LIST, EXCLUDE_LIST, CONVERT_LIST, CONVERT_DICT" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/exclude_species_list.txt/a\ CONVERT_DICT = {row.split(';')[0]: row.split(';')[1] for row in CONVERT_LIST}" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/exclude_species_list.txt/a\ CONVERT_LIST = loadCustomSpeciesList(os.path.expanduser(\"~/BirdNET-Pi/convert_species_list.txt\"))" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "s|entry\[0\]|converted_entry|g" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "s|if converted_entry in|if entry\[0\] in|g" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ converted_entry = entry[0]" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ else :" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ log.info('WARNING : ' + entry[0] + ' converted to ' + converted_entry)" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ converted_entry = CONVERT_DICT.get(entry[0], entry[0])" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ if entry[0] in CONVERT_DICT:" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/for entry in entries/a\ if entry[1] >= conf.getfloat('CONFIDENCE'):" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "/converted_entry in INCLUDE_LIST or len(INCLUDE_LIST)/c\ if ((converted_entry in INCLUDE_LIST or len(INCLUDE_LIST) == 0)" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "s| d = Detection| d = Detection|g" "$HOME"/BirdNET-Pi/scripts/server.py - sed -i "s| confident_detections| confident_detections|g" "$HOME"/BirdNET-Pi/scripts/server.py - fi -fi || true - # Enable the Processed folder ############################# diff --git a/birdnet-pi/stats.png b/birdnet-pi/stats.png index 7a53c4767..32a2c985e 100644 Binary files a/birdnet-pi/stats.png and b/birdnet-pi/stats.png differ diff --git a/birdnet-pi/updater.json b/birdnet-pi/updater.json index 8b3a6507f..da5f4a6ce 100644 --- a/birdnet-pi/updater.json +++ b/birdnet-pi/updater.json @@ -1,7 +1,7 @@ { "last_update": "22-06-2024", "repository": "alexbelgium/hassio-addons", - "slug": "birdnet-go", + "slug": "birdnet-pi", "source": "github", "upstream_repo": "Nachtzuster/BirdNET-Pi", "upstream_version": "0.1" diff --git a/browserless_chrome/CHANGELOG.md b/browserless_chrome/CHANGELOG.md index fbf1bfcc7..235a044f5 100644 --- a/browserless_chrome/CHANGELOG.md +++ b/browserless_chrome/CHANGELOG.md @@ -1,4 +1,7 @@ +## 2.16.0 (20-07-2024) +- Update to latest version from browserless/chrome (changelog : https://github.com/browserless/chrome/releases) + ## 2.15.0 (06-07-2024) - Update to latest version from browserless/chrome (changelog : https://github.com/browserless/chrome/releases) diff --git a/browserless_chrome/config.json b/browserless_chrome/config.json index 51cea7a70..129ffba69 100644 --- a/browserless_chrome/config.json +++ b/browserless_chrome/config.json @@ -88,6 +88,6 @@ "slug": "browserless_chrome", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/browserless_chrome", - "version": "2.15.0", + "version": "2.16.0", "webui": "[PROTO:ssl]://[HOST]:[PORT:3000]/docs" } diff --git a/browserless_chrome/stats.png b/browserless_chrome/stats.png 
index a055e500d..d11f79410 100644 Binary files a/browserless_chrome/stats.png and b/browserless_chrome/stats.png differ diff --git a/browserless_chrome/updater.json b/browserless_chrome/updater.json index c6e35d311..f5a86a8ae 100644 --- a/browserless_chrome/updater.json +++ b/browserless_chrome/updater.json @@ -1,9 +1,9 @@ { "github_tagfilter": "v", - "last_update": "06-07-2024", + "last_update": "20-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "browserless_chrome", "source": "github", "upstream_repo": "browserless/chrome", - "upstream_version": "2.15.0" + "upstream_version": "2.16.0" } diff --git a/calibre/CHANGELOG.md b/calibre/CHANGELOG.md index 4d0fee55c..4692daae8 100644 --- a/calibre/CHANGELOG.md +++ b/calibre/CHANGELOG.md @@ -1,4 +1,13 @@ +## 7.16.0 (03-08-2024) +- Update to latest version from linuxserver/docker-calibre (changelog : https://github.com/linuxserver/docker-calibre/releases) + +## 7.15.0 (20-07-2024) +- Update to latest version from linuxserver/docker-calibre (changelog : https://github.com/linuxserver/docker-calibre/releases) + +## 7.14.0 (13-07-2024) +- Update to latest version from linuxserver/docker-calibre (changelog : https://github.com/linuxserver/docker-calibre/releases) + ## 7.13.0 (29-06-2024) - Update to latest version from linuxserver/docker-calibre (changelog : https://github.com/linuxserver/docker-calibre/releases) diff --git a/calibre/config.json b/calibre/config.json index 2cc2901ce..ae9afcd5c 100644 --- a/calibre/config.json +++ b/calibre/config.json @@ -119,6 +119,6 @@ "slug": "calibre", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/calibre", - "version": "7.13.0", + "version": "7.16.0", "video": true } diff --git a/calibre/stats.png b/calibre/stats.png index 028934af3..81a95f30a 100644 Binary files a/calibre/stats.png and b/calibre/stats.png differ diff --git a/calibre/updater.json b/calibre/updater.json index dfd15bc78..af2e6c131 100644 --- a/calibre/updater.json +++ b/calibre/updater.json @@ -1,9 +1,9 @@ { "github_fulltag": "false", - "last_update": "29-06-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "calibre", "source": "github", "upstream_repo": "linuxserver/docker-calibre", - "upstream_version": "7.13.0" + "upstream_version": "7.16.0" } diff --git a/calibre_web/CHANGELOG.md b/calibre_web/CHANGELOG.md index 4549f4fcd..8af3585c4 100644 --- a/calibre_web/CHANGELOG.md +++ b/calibre_web/CHANGELOG.md @@ -1,4 +1,7 @@ +## 0.6.22 (13-07-2024) +- Update to latest version from linuxserver/docker-calibre-web (changelog : https://github.com/linuxserver/docker-calibre-web/releases) + ## 0.6.21-7 (12-01-2024) - Minor bugs fixed diff --git a/calibre_web/config.json b/calibre_web/config.json index b80c7fe9c..3771457e9 100644 --- a/calibre_web/config.json +++ b/calibre_web/config.json @@ -118,6 +118,6 @@ "slug": "calibre-web", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/calibre_web", - "version": "0.6.21-7", + "version": "0.6.22", "video": true } diff --git a/calibre_web/updater.json b/calibre_web/updater.json index 637d823bd..c4d2decd6 100644 --- a/calibre_web/updater.json +++ b/calibre_web/updater.json @@ -1,9 +1,9 @@ { - "last_update": "28-10-2023", + "last_update": "13-07-2024", "paused": false, "repository": "alexbelgium/hassio-addons", "slug": "calibre-web", "source": "github", "upstream_repo": "linuxserver/docker-calibre-web", - "upstream_version": "0.6.21" + "upstream_version": "0.6.22" } diff --git 
a/changedetection.io/CHANGELOG.md b/changedetection.io/CHANGELOG.md index 0e8c19cef..c3b0b888a 100644 --- a/changedetection.io/CHANGELOG.md +++ b/changedetection.io/CHANGELOG.md @@ -1,3 +1,14 @@ + +## 0.46.2 (03-08-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) +## 0.46.1-2 (23-07-2024) +- Minor bugs fixed + +## 0.46.1 (20-07-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.26 (13-07-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) ## 0.45.25-2 (08-07-2024) - Minor bugs fixed diff --git a/changedetection.io/README.md b/changedetection.io/README.md index 630dac21b..d86a6dc0f 100644 --- a/changedetection.io/README.md +++ b/changedetection.io/README.md @@ -16,7 +16,7 @@ _Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. Thanks!_ -[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers) +[![Stargazers repo roster for @alexbelgium/hassio-addons](https://reporoster.com/stars/alexbelgium/hassio-addons)](https://github.com/alexbelgium/hassio-addons/stargazers) ![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/changedetection.io/stats.png) @@ -30,7 +30,17 @@ This addon is based on the [docker image](https://github.com/linuxserver/docker- ### Main app -Webui can be found at `:5000`. +The Web UI can be found at `:5000`; it is also accessible from the add-on page. + +#### Sidebar shortcut + +You can add a shortcut pointing to your Changedetection.io instance with the following steps: +1. Go to ⚙ Settings > Dashboards +2. Click ➕ Add Dashboard in the bottom corner +3. Select the Webpage option, and paste the Web UI URL you got from the add-on page. +4. Fill in a title for the sidebar item, an icon (suggestion: `mdi:vector-difference`), and a **relative URL** for that panel (e.g. `change-detection`), then confirm. + +### Configurable options ```yaml PGID: user @@ -39,17 +49,15 @@ TZ: Etc/UTC specify a timezone to use, see https://en.wikipedia.org/wiki/List_of BASE_URL: Specify the full URL (including protocol) when running behind a reverse proxy ``` -### Connect to browserless chrome (from @RhysMcW) +### Connect to browserless Chrome (from @RhysMcW) -In HA use the File Editor addon (or Filebrowser) and edit the Changedetection.io config.yaml : /homeassistant/addons_config/changedetection.io/config.yaml - -Add the following line to the end of the file: +In HA, use the File Editor add-on (or Filebrowser) and edit the Changedetection.io config file at `/homeassistant/addons_config/changedetection.io/config.yaml`. Add the following line to the end of it: `PLAYWRIGHT_DRIVER_URL: ws://db21ed7f-browserless-chrome.local.hass.io:3000/chromium?launch={"defaultViewport":{"height":720,"width":1280},"headless":false,"stealth":true}&blockAds=true` Remember to also add a blank line at the end of the file, as YAML requires. The "db21ed7f-browserless-chrome.local.hass.io" hostname was obtained from the HA CLI using arp, but you should also be able to use your HA IP address. 
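For reference, here is a minimal, illustrative sketch of how the tail of that `config.yaml` could look once the line has been appended; the hostname and the `TZ` value are placeholders taken from the snippets above, so keep the options you already have and substitute your own values:

```yaml
# /homeassistant/addons_config/changedetection.io/config.yaml (illustrative sketch)
# ... your existing add-on options (PGID, TZ, BASE_URL, ...) stay as they are ...
TZ: Etc/UTC
# Appended line pointing changedetection.io at the browserless Chrome add-on.
# Quoting the value is optional but keeps the embedded JSON fragment intact;
# swap the hostname for your own instance or for your HA IP address.
PLAYWRIGHT_DRIVER_URL: 'ws://db21ed7f-browserless-chrome.local.hass.io:3000/chromium?launch={"defaultViewport":{"height":720,"width":1280},"headless":false,"stealth":true}&blockAds=true'
# (keep one blank line after this last entry, as noted above)
```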
-Then restart the Changedetection.io addon - after that you can use the browser options in Changedetection.io. +Then restart the Changedetection.io add-on - after that you can use the browser options in Changedetection.io. ## Installation diff --git a/changedetection.io/config.json b/changedetection.io/config.json index 8f1a5c19f..ccf722e70 100644 --- a/changedetection.io/config.json +++ b/changedetection.io/config.json @@ -6,6 +6,7 @@ "codenotary": "alexandrep.github@gmail.com", "description": "web page monitoring, notification and change detection", "environment": { + "LC_ALL": "en_US.UTF-8", "TIMEOUT": "60000" }, "image": "ghcr.io/alexbelgium/changedetection.io-{arch}", @@ -35,6 +36,6 @@ "slug": "changedetection.io", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/changedetection.io", - "version": "0.45.25-2", + "version": "0.46.2", "webui": "http://[HOST]:[PORT:5000]" } diff --git a/changedetection.io/stats.png b/changedetection.io/stats.png index e1c1a9402..62934cd3d 100644 Binary files a/changedetection.io/stats.png and b/changedetection.io/stats.png differ diff --git a/changedetection.io/updater.json b/changedetection.io/updater.json index b3bb539fb..167e45cd0 100644 --- a/changedetection.io/updater.json +++ b/changedetection.io/updater.json @@ -1,9 +1,9 @@ { "github_fulltag": "false", - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "changedetection.io", "source": "github", "upstream_repo": "linuxserver/docker-changedetection.io", - "upstream_version": "0.45.25" + "upstream_version": "0.46.2" } diff --git a/cloudcommander/CHANGELOG.md b/cloudcommander/CHANGELOG.md index aef364dfb..98dd39ec2 100644 --- a/cloudcommander/CHANGELOG.md +++ b/cloudcommander/CHANGELOG.md @@ -1,4 +1,7 @@ +## 17.4.3 (03-08-2024) +- Update to latest version from coderaiser/cloudcmd (changelog : https://github.com/coderaiser/cloudcmd/releases) + ## 17.4.2 (06-07-2024) - Update to latest version from coderaiser/cloudcmd (changelog : https://github.com/coderaiser/cloudcmd/releases) diff --git a/cloudcommander/config.json b/cloudcommander/config.json index 4b7f8e5dd..c1e562766 100644 --- a/cloudcommander/config.json +++ b/cloudcommander/config.json @@ -104,5 +104,5 @@ "slug": "cloudcommander", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/cloudcommander", - "version": "17.4.2" + "version": "17.4.3" } diff --git a/cloudcommander/stats.png b/cloudcommander/stats.png index 72c534f3b..b844bf4cb 100644 Binary files a/cloudcommander/stats.png and b/cloudcommander/stats.png differ diff --git a/cloudcommander/updater.json b/cloudcommander/updater.json index bf104cc6a..f6adbeda1 100644 --- a/cloudcommander/updater.json +++ b/cloudcommander/updater.json @@ -1,8 +1,8 @@ { - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "cloudcommander", "source": "github", "upstream_repo": "coderaiser/cloudcmd", - "upstream_version": "17.4.2" + "upstream_version": "17.4.3" } diff --git a/codex/CHANGELOG.md b/codex/CHANGELOG.md index ac0dd5d19..522425d19 100644 --- a/codex/CHANGELOG.md +++ b/codex/CHANGELOG.md @@ -1,4 +1,15 @@ +## 1.6.16 (03-08-2024) +- Update to latest version from ajslater/codex (changelog : https://github.com/ajslater/codex/releases) +## 1.6.15 (24-07-2024) +- Minor bugs fixed + +## 1.6.9 (20-07-2024) +- Update to latest version from ajslater/codex (changelog : https://github.com/ajslater/codex/releases) + +## 1.6.8 (13-07-2024) +- 
Update to latest version from ajslater/codex (changelog : https://github.com/ajslater/codex/releases) + ## 1.6.3 (29-06-2024) - Update to latest version from ajslater/codex (changelog : https://github.com/ajslater/codex/releases) diff --git a/codex/config.json b/codex/config.json index 0c03d3f9a..15b125d45 100644 --- a/codex/config.json +++ b/codex/config.json @@ -106,5 +106,5 @@ "slug": "codex", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "1.6.3" + "version": "1.6.16" } diff --git a/codex/stats.png b/codex/stats.png index 640303aff..a76fc0b57 100644 Binary files a/codex/stats.png and b/codex/stats.png differ diff --git a/codex/updater.json b/codex/updater.json index afb707d89..509bd6e31 100644 --- a/codex/updater.json +++ b/codex/updater.json @@ -1,9 +1,9 @@ { "github_beta": "true", - "last_update": "29-06-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "codex", "source": "github", "upstream_repo": "ajslater/codex", - "upstream_version": "1.6.3" + "upstream_version": "1.6.16" } diff --git a/comixed/stats.png b/comixed/stats.png index f6d4271ef..bb079c18e 100644 Binary files a/comixed/stats.png and b/comixed/stats.png differ diff --git a/elasticsearch/CHANGELOG.md b/elasticsearch/CHANGELOG.md index 5f2bd05aa..abfb873f7 100644 --- a/elasticsearch/CHANGELOG.md +++ b/elasticsearch/CHANGELOG.md @@ -1,3 +1,6 @@ +- BREAKING CHANGE : upgrade to v8.14.3. You'll need to rebuild your indexes + +## v7 - Implemented healthcheck - WARNING : update to supervisor 2022.11 before installing - Add codenotary sign diff --git a/elasticsearch/config.json b/elasticsearch/config.json index 2a697caad..abf2b7dc5 100644 --- a/elasticsearch/config.json +++ b/elasticsearch/config.json @@ -88,5 +88,5 @@ "startup": "services", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/elasticsearch", - "version": "7.17.9" + "version": "8.14.3" } diff --git a/elasticsearch/stats.png b/elasticsearch/stats.png index 357705ea4..49031f0ea 100644 Binary files a/elasticsearch/stats.png and b/elasticsearch/stats.png differ diff --git a/emby/CHANGELOG.md b/emby/CHANGELOG.md index b3a8a1f1a..45c35502b 100644 --- a/emby/CHANGELOG.md +++ b/emby/CHANGELOG.md @@ -1,4 +1,10 @@ +## 4.9.0.28 (27-07-2024) +- Update to latest version from linuxserver/docker-emby (changelog : https://github.com/linuxserver/docker-emby/releases) + +## 4.9.0.27 (20-07-2024) +- Update to latest version from linuxserver/docker-emby (changelog : https://github.com/linuxserver/docker-emby/releases) + ## 4.9.0.26 (29-06-2024) - Update to latest version from linuxserver/docker-emby (changelog : https://github.com/linuxserver/docker-emby/releases) diff --git a/emby/Dockerfile b/emby/Dockerfile index a68ee66e4..e34477a42 100644 --- a/emby/Dockerfile +++ b/emby/Dockerfile @@ -16,7 +16,7 @@ ARG BUILD_FROM ARG BUILD_VERSION -ARG BUILD_UPSTREAM="4.9.0.26" +ARG BUILD_UPSTREAM="4.9.0.28" FROM ${BUILD_FROM} ################## diff --git a/emby/config.json b/emby/config.json index 59d3b9ff6..ca1545091 100644 --- a/emby/config.json +++ b/emby/config.json @@ -122,6 +122,6 @@ "slug": "emby_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/emby", - "version": "4.9.0.26", + "version": "4.9.0.28", "video": true } diff --git a/emby/stats.png b/emby/stats.png index 258370587..322fd642f 100644 Binary files a/emby/stats.png and b/emby/stats.png differ diff --git a/emby/updater.json b/emby/updater.json index b8f8eb512..57663c3ff 100644 --- 
a/emby/updater.json +++ b/emby/updater.json @@ -1,9 +1,9 @@ { "github_beta": "true", - "last_update": "29-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "emby", "source": "github", "upstream_repo": "linuxserver/docker-emby", - "upstream_version": "4.9.0.26" + "upstream_version": "4.9.0.28" } diff --git a/enedisgateway2mqtt/stats.png b/enedisgateway2mqtt/stats.png index 1518daa25..0af712929 100644 Binary files a/enedisgateway2mqtt/stats.png and b/enedisgateway2mqtt/stats.png differ diff --git a/enedisgateway2mqtt_dev/CHANGELOG.md b/enedisgateway2mqtt_dev/CHANGELOG.md index 9cf3599b5..28ba04632 100644 --- a/enedisgateway2mqtt_dev/CHANGELOG.md +++ b/enedisgateway2mqtt_dev/CHANGELOG.md @@ -1,4 +1,7 @@ +## 1.0.0rc14 (03-08-2024) +- Update to latest version from m4dm4rtig4n/myelectricaldata (changelog : https://github.com/m4dm4rtig4n/myelectricaldata/releases) + ## 1.0.0rc7 (08-06-2024) - Update to latest version from m4dm4rtig4n/myelectricaldata (changelog : https://github.com/m4dm4rtig4n/myelectricaldata/releases) diff --git a/enedisgateway2mqtt_dev/config.json b/enedisgateway2mqtt_dev/config.json index cb9cac332..cf05988c8 100644 --- a/enedisgateway2mqtt_dev/config.json +++ b/enedisgateway2mqtt_dev/config.json @@ -97,5 +97,5 @@ "slug": "enedisgateway2mqtt_dev", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "1.0.0rc7" + "version": "1.0.0rc14" } diff --git a/enedisgateway2mqtt_dev/stats.png b/enedisgateway2mqtt_dev/stats.png index cdc02be69..673d79017 100644 Binary files a/enedisgateway2mqtt_dev/stats.png and b/enedisgateway2mqtt_dev/stats.png differ diff --git a/enedisgateway2mqtt_dev/updater.json b/enedisgateway2mqtt_dev/updater.json index de3979d60..539989665 100644 --- a/enedisgateway2mqtt_dev/updater.json +++ b/enedisgateway2mqtt_dev/updater.json @@ -1,9 +1,9 @@ { "github_beta": "true", - "last_update": "08-06-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "enedisgateway2mqtt", "source": "github", "upstream_repo": "m4dm4rtig4n/myelectricaldata", - "upstream_version": "1.0.0rc7" + "upstream_version": "1.0.0rc14" } diff --git a/epicgamesfree/CHANGELOG.md b/epicgamesfree/CHANGELOG.md index 21133145f..822b8676d 100644 --- a/epicgamesfree/CHANGELOG.md +++ b/epicgamesfree/CHANGELOG.md @@ -1,4 +1,10 @@ +## debian-2024-07-31 (2024-07-31) +- Update to latest version from charlocharlie/epicgames-freegames + +## debian-2024-07-19 (2024-07-19) +- Update to latest version from charlocharlie/epicgames-freegames + ## debian-2024-06-16 (2024-06-16) - Update to latest version from charlocharlie/epicgames-freegames - The last release changelog mentions that automatic redemption is not possible anymore due to epic improvement on automation detection diff --git a/epicgamesfree/config.json b/epicgamesfree/config.json index 192a3338d..90be34bc8 100644 --- a/epicgamesfree/config.json +++ b/epicgamesfree/config.json @@ -82,6 +82,6 @@ "slug": "epicgamesfree", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "debian-2024-06-16", + "version": "debian-2024-07-31", "webui": "[PROTO:ssl]://[HOST]:[PORT:3000]" } diff --git a/epicgamesfree/stats.png b/epicgamesfree/stats.png index 864a19ca4..0d6d7dbc8 100644 Binary files a/epicgamesfree/stats.png and b/epicgamesfree/stats.png differ diff --git a/epicgamesfree/updater.json b/epicgamesfree/updater.json index b77a2e29d..786f096d2 100644 --- a/epicgamesfree/updater.json +++ b/epicgamesfree/updater.json @@ -2,10 +2,10 @@ 
"dockerhub_by_date": true, "dockerhub_list_size": 2, "github_exclude": "-", - "last_update": "2024-06-16", + "last_update": "2024-07-31", "repository": "alexbelgium/hassio-addons", "slug": "epicgamesfree", "source": "dockerhub", "upstream_repo": "charlocharlie/epicgames-freegames", - "upstream_version": "debian-2024-06-16" + "upstream_version": "debian-2024-07-31" } diff --git a/filebrowser/stats.png b/filebrowser/stats.png index 9d7b939f6..258a4fb42 100644 Binary files a/filebrowser/stats.png and b/filebrowser/stats.png differ diff --git a/fireflyiii/CHANGELOG.md b/fireflyiii/CHANGELOG.md index fb519ea39..d2d1e028e 100644 --- a/fireflyiii/CHANGELOG.md +++ b/fireflyiii/CHANGELOG.md @@ -1,4 +1,7 @@ +## 6.1.19 (27-07-2024) +- Update to latest version from firefly-iii/firefly-iii (changelog : https://github.com/firefly-iii/firefly-iii/releases) + ## 6.1.18 (22-06-2024) - Update to latest version from firefly-iii/firefly-iii (changelog : https://github.com/firefly-iii/firefly-iii/releases) ## 6.1.17 (16-06-2024) diff --git a/fireflyiii/config.json b/fireflyiii/config.json index c280c5ef3..709a86868 100644 --- a/fireflyiii/config.json +++ b/fireflyiii/config.json @@ -71,8 +71,7 @@ "image": "ghcr.io/alexbelgium/fireflyiii-{arch}", "map": [ "config:rw", - "share:rw", - "ssl" + "share:rw" ], "name": "Firefly iii", "options": { @@ -82,10 +81,12 @@ "silent": "true" }, "ports": { - "8080/tcp": 3473 + "8080/tcp": 3473, + "8443/tcp": null }, "ports_description": { - "8080/tcp": "web interface" + "8080/tcp": "web interface", + "8443/tcp": "ssl web interface" }, "schema": { "APP_KEY": "str", @@ -106,6 +107,6 @@ "startup": "services", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "6.1.18", + "version": "6.1.19", "webui": "[PROTO:ssl]://[HOST]:[PORT:8080]" } diff --git a/fireflyiii/rootfs/etc/nginx/includes/upstream.conf b/fireflyiii/rootfs/etc/nginx/includes/upstream.conf deleted file mode 100644 index b292326bd..000000000 --- a/fireflyiii/rootfs/etc/nginx/includes/upstream.conf +++ /dev/null @@ -1,3 +0,0 @@ -upstream backend { - server 127.0.0.1:8080; -} diff --git a/fireflyiii/rootfs/etc/nginx/servers/ingress.conf b/fireflyiii/rootfs/etc/nginx/servers/ingress.conf deleted file mode 100644 index 04c7b1a32..000000000 --- a/fireflyiii/rootfs/etc/nginx/servers/ingress.conf +++ /dev/null @@ -1,21 +0,0 @@ -server { - listen %%interface%%:%%port%% default_server; - - include /etc/nginx/includes/server_params.conf; - include /etc/nginx/includes/proxy_params.conf; - - client_max_body_size 0; - - location / { - root /var/www/firefly-iii/public/; - } - - location ~* \.php(?:$|/) { - include snippets/fastcgi-php.conf; - fastcgi_param SCRIPT_FILENAME $request_filename; - fastcgi_param modHeadersAvailable true; #Avoid sending the security headers twice - fastcgi_pass unix:/run/php/php8.0-fpm.sock; - } -} - - diff --git a/fireflyiii/stats.png b/fireflyiii/stats.png index 07afece87..a0bbf3b17 100644 Binary files a/fireflyiii/stats.png and b/fireflyiii/stats.png differ diff --git a/fireflyiii/updater.json b/fireflyiii/updater.json index 32a66415c..8c6f5a188 100644 --- a/fireflyiii/updater.json +++ b/fireflyiii/updater.json @@ -1,8 +1,8 @@ { - "last_update": "22-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "fireflyiii", "source": "github", "upstream_repo": "firefly-iii/firefly-iii", - "upstream_version": "6.1.18" + "upstream_version": "6.1.19" } diff --git a/fireflyiii_data_importer/CHANGELOG.md b/fireflyiii_data_importer/CHANGELOG.md 
index c4099a0ed..633f6bad6 100644 --- a/fireflyiii_data_importer/CHANGELOG.md +++ b/fireflyiii_data_importer/CHANGELOG.md @@ -1,4 +1,7 @@ +## 1.5.3 (03-08-2024) +- Update to latest version from firefly-iii/data-importer (changelog : https://github.com/firefly-iii/data-importer/releases) + ## 20240414.1 (29-06-2024) - Update to latest version from firefly-iii/data-importer (changelog : https://github.com/firefly-iii/data-importer/releases) diff --git a/fireflyiii_data_importer/Dockerfile b/fireflyiii_data_importer/Dockerfile index 5586534f5..a2fdef855 100644 --- a/fireflyiii_data_importer/Dockerfile +++ b/fireflyiii_data_importer/Dockerfile @@ -16,7 +16,7 @@ ARG BUILD_FROM ARG BUILD_VERSION -ARG BUILD_UPSTREAM="20240414.1" +ARG BUILD_UPSTREAM="1.5.3" FROM ${BUILD_FROM} ################## diff --git a/fireflyiii_data_importer/config.json b/fireflyiii_data_importer/config.json index a4bef460e..4080d25dd 100644 --- a/fireflyiii_data_importer/config.json +++ b/fireflyiii_data_importer/config.json @@ -103,6 +103,6 @@ "slug": "fireflyiii_data_importer", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "20240414.1", + "version": "1.5.3", "webui": "[PROTO:ssl]://[HOST]:[PORT:8080]" } diff --git a/fireflyiii_data_importer/stats.png b/fireflyiii_data_importer/stats.png index f451384ff..82f5a408a 100644 Binary files a/fireflyiii_data_importer/stats.png and b/fireflyiii_data_importer/stats.png differ diff --git a/fireflyiii_data_importer/updater.json b/fireflyiii_data_importer/updater.json index d9cfac654..06a1e162c 100644 --- a/fireflyiii_data_importer/updater.json +++ b/fireflyiii_data_importer/updater.json @@ -1,8 +1,8 @@ { - "last_update": "29-06-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "fireflyiii_data_importer", "source": "github", "upstream_repo": "firefly-iii/data-importer", - "upstream_version": "20240414.1" + "upstream_version": "1.5.3" } diff --git a/fireflyiii_fints_importer/stats.png b/fireflyiii_fints_importer/stats.png index 9c7e09d41..1e4209363 100644 Binary files a/fireflyiii_fints_importer/stats.png and b/fireflyiii_fints_importer/stats.png differ diff --git a/flaresolverr/stats.png b/flaresolverr/stats.png index 44edc45c4..940a0343e 100644 Binary files a/flaresolverr/stats.png and b/flaresolverr/stats.png differ diff --git a/flexget/CHANGELOG.md b/flexget/CHANGELOG.md index 3c2f7d794..bbb8f20ec 100644 --- a/flexget/CHANGELOG.md +++ b/flexget/CHANGELOG.md @@ -1,4 +1,10 @@ +## 3.11.41 (20-07-2024) +- Update to latest version from wiserain/flexget + +## 3.11.40 (13-07-2024) +- Update to latest version from wiserain/flexget + ## 3.11.39 (22-06-2024) - Update to latest version from wiserain/flexget diff --git a/flexget/config.json b/flexget/config.json index ab42b1160..71eafd22c 100644 --- a/flexget/config.json +++ b/flexget/config.json @@ -97,6 +97,6 @@ "slug": "flexget", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "3.11.39", + "version": "3.11.41", "webui": "[PROTO:ssl]://[HOST]:[PORT:5050]" } diff --git a/flexget/stats.png b/flexget/stats.png index f56addd8b..c6d0ff4ef 100644 Binary files a/flexget/stats.png and b/flexget/stats.png differ diff --git a/flexget/updater.json b/flexget/updater.json index cca633450..6bd6edb52 100644 --- a/flexget/updater.json +++ b/flexget/updater.json @@ -1,9 +1,9 @@ { "dockerhub_list_size": "10", - "last_update": "22-06-2024", + "last_update": "20-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "flexget", "source": 
"dockerhub", "upstream_repo": "wiserain/flexget", - "upstream_version": "3.11.39" + "upstream_version": "3.11.41" } diff --git a/free_games_claimer/stats.png b/free_games_claimer/stats.png index 60eb425c3..14f8d24dd 100644 Binary files a/free_games_claimer/stats.png and b/free_games_claimer/stats.png differ diff --git a/gazpar2mqtt/stats.png b/gazpar2mqtt/stats.png index c6ba749fb..e791f34d9 100644 Binary files a/gazpar2mqtt/stats.png and b/gazpar2mqtt/stats.png differ diff --git a/gitea/stats.png b/gitea/stats.png index 431c77d74..1da851614 100644 Binary files a/gitea/stats.png and b/gitea/stats.png differ diff --git a/grav/stats.png b/grav/stats.png index b1f5d9b57..94007f41d 100644 Binary files a/grav/stats.png and b/grav/stats.png differ diff --git a/guacamole/stats.png b/guacamole/stats.png index cfc257684..03c40f211 100644 Binary files a/guacamole/stats.png and b/guacamole/stats.png differ diff --git a/immich/CHANGELOG.md b/immich/CHANGELOG.md index 57495b9de..90c1f618b 100644 --- a/immich/CHANGELOG.md +++ b/immich/CHANGELOG.md @@ -1,4 +1,16 @@ +## 1.111.0 (03-08-2024) +- Update to latest version from imagegenius/docker-immich (changelog : https://github.com/imagegenius/docker-immich/releases) + +## 1.110.0 (27-07-2024) +- Update to latest version from imagegenius/docker-immich (changelog : https://github.com/imagegenius/docker-immich/releases) + +## 1.109.2 (20-07-2024) +- Update to latest version from imagegenius/docker-immich (changelog : https://github.com/imagegenius/docker-immich/releases) + +## 1.108.0 (13-07-2024) +- Update to latest version from imagegenius/docker-immich (changelog : https://github.com/imagegenius/docker-immich/releases) + ## 1.107.2 (06-07-2024) - Update to latest version from imagegenius/docker-immich (changelog : https://github.com/imagegenius/docker-immich/releases) ## 1.106.4-3 (24-06-2024) diff --git a/immich/config.json b/immich/config.json index 066449428..83b0e7d3b 100644 --- a/immich/config.json +++ b/immich/config.json @@ -141,6 +141,6 @@ "slug": "immich", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "1.107.2", + "version": "1.111.0", "webui": "http://[HOST]:[PORT:8080]" } diff --git a/immich/stats.png b/immich/stats.png index 2d0258259..b7d704073 100644 Binary files a/immich/stats.png and b/immich/stats.png differ diff --git a/immich/updater.json b/immich/updater.json index 285451e2d..09875aff9 100644 --- a/immich/updater.json +++ b/immich/updater.json @@ -1,9 +1,9 @@ { "github_beta": "false", - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "immich", "source": "github", "upstream_repo": "imagegenius/docker-immich", - "upstream_version": "1.107.2" + "upstream_version": "1.111.0" } diff --git a/inadyn/stats.png b/inadyn/stats.png index e580e62d2..c49a6cc0e 100644 Binary files a/inadyn/stats.png and b/inadyn/stats.png differ diff --git a/jackett/CHANGELOG.md b/jackett/CHANGELOG.md index 84829cdf0..794d873d4 100644 --- a/jackett/CHANGELOG.md +++ b/jackett/CHANGELOG.md @@ -1,3 +1,15 @@ + +## 0.22.402 (03-08-2024) +- Update to latest version from linuxserver/docker-jackett (changelog : https://github.com/linuxserver/docker-jackett/releases) + +## 0.22.372 (27-07-2024) +- Update to latest version from linuxserver/docker-jackett (changelog : https://github.com/linuxserver/docker-jackett/releases) + +## 0.22.344 (20-07-2024) +- Update to latest version from linuxserver/docker-jackett (changelog : https://github.com/linuxserver/docker-jackett/releases) + 
+## 0.22.320 (13-07-2024) +- Update to latest version from linuxserver/docker-jackett (changelog : https://github.com/linuxserver/docker-jackett/releases) ## 0.22.277-2 (06-07-2024) - Minor bugs fixed diff --git a/jackett/config.json b/jackett/config.json index 5a0b33e66..96bcf437a 100644 --- a/jackett/config.json +++ b/jackett/config.json @@ -107,6 +107,6 @@ "slug": "jackett_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/jackett", - "version": "0.22.277-2", + "version": "0.22.402", "webui": "http://[HOST]:[PORT:9117]" } diff --git a/jackett/stats.png b/jackett/stats.png index e2c132d54..3fc0ca23f 100644 Binary files a/jackett/stats.png and b/jackett/stats.png differ diff --git a/jackett/updater.json b/jackett/updater.json index b6f2a9bbb..20d65189e 100644 --- a/jackett/updater.json +++ b/jackett/updater.json @@ -1,8 +1,8 @@ { - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "jackett", "source": "github", "upstream_repo": "linuxserver/docker-jackett", - "upstream_version": "0.22.277" + "upstream_version": "0.22.402" } diff --git a/jellyfin/CHANGELOG.md b/jellyfin/CHANGELOG.md index c7f1c0188..adec52a2b 100644 --- a/jellyfin/CHANGELOG.md +++ b/jellyfin/CHANGELOG.md @@ -1,4 +1,7 @@ +## 10.9.8 (27-07-2024) +- Update to latest version from linuxserver/docker-jellyfin (changelog : https://github.com/linuxserver/docker-jellyfin/releases) + ## 10.9.7 (29-06-2024) - Update to latest version from linuxserver/docker-jellyfin (changelog : https://github.com/linuxserver/docker-jellyfin/releases) diff --git a/jellyfin/config.json b/jellyfin/config.json index d4ae5004b..9ad74a5ef 100644 --- a/jellyfin/config.json +++ b/jellyfin/config.json @@ -124,6 +124,6 @@ "slug": "jellyfin", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "10.9.7", + "version": "10.9.8", "video": true } diff --git a/jellyfin/stats.png b/jellyfin/stats.png index c1cfd9514..8e7b422f1 100644 Binary files a/jellyfin/stats.png and b/jellyfin/stats.png differ diff --git a/jellyfin/updater.json b/jellyfin/updater.json index 807eac165..edc29c654 100644 --- a/jellyfin/updater.json +++ b/jellyfin/updater.json @@ -1,9 +1,9 @@ { "github_fulltag": "false", - "last_update": "29-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "jellyfin", "source": "github", "upstream_repo": "linuxserver/docker-jellyfin", - "upstream_version": "10.9.7" + "upstream_version": "10.9.8" } diff --git a/jellyseerr/stats.png b/jellyseerr/stats.png index 881b6b832..918866e44 100644 Binary files a/jellyseerr/stats.png and b/jellyseerr/stats.png differ diff --git a/joal/stats.png b/joal/stats.png index 459d23b64..bc480629c 100644 Binary files a/joal/stats.png and b/joal/stats.png differ diff --git a/joplin/CHANGELOG.md b/joplin/CHANGELOG.md index 3846c4a34..1f8d05349 100644 --- a/joplin/CHANGELOG.md +++ b/joplin/CHANGELOG.md @@ -1,4 +1,7 @@ +## 3.0.1 (27-07-2024) +- Update to latest version from etechonomy/joplin-server (changelog : https://github.com/etechonomy/joplin-server/releases) + ## 2.14.2 (20-01-2024) - Update to latest version from etechonomy/joplin-server diff --git a/joplin/config.json b/joplin/config.json index bdbae6f46..64cefbebf 100644 --- a/joplin/config.json +++ b/joplin/config.json @@ -107,6 +107,6 @@ "slug": "joplin", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "2.14.2", + "version": "3.0.1", "webui": "[PROTO:ssl]://[HOST]:[PORT:22300]" } 
diff --git a/joplin/stats.png b/joplin/stats.png index e69de29bb..a7bfb0f82 100644 Binary files a/joplin/stats.png and b/joplin/stats.png differ diff --git a/joplin/updater.json b/joplin/updater.json index a23432462..0fd8bf098 100644 --- a/joplin/updater.json +++ b/joplin/updater.json @@ -1,8 +1,8 @@ { - "last_update": "20-01-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "joplin", "source": "github", "upstream_repo": "etechonomy/joplin-server", - "upstream_version": "2.14.2" + "upstream_version": "3.0.1" } diff --git a/lidarr/CHANGELOG.md b/lidarr/CHANGELOG.md index d2d3541de..dd33649d5 100644 --- a/lidarr/CHANGELOG.md +++ b/lidarr/CHANGELOG.md @@ -1,4 +1,7 @@ +## 2.4.3.4248 (20-07-2024) +- Update to latest version from linuxserver/docker-lidarr (changelog : https://github.com/linuxserver/docker-lidarr/releases) + ## 2.3.3.4204 (18-05-2024) - Update to latest version from linuxserver/docker-lidarr (changelog : https://github.com/linuxserver/docker-lidarr/releases) diff --git a/lidarr/config.json b/lidarr/config.json index 1d6ce94f4..272b6063e 100644 --- a/lidarr/config.json +++ b/lidarr/config.json @@ -105,6 +105,6 @@ "slug": "lidarr_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/blob/master/lidarr/Readme.md", - "version": "2.3.3.4204", + "version": "2.4.3.4248", "webui": "[PROTO:ssl]://[HOST]:[PORT:8686]" } diff --git a/lidarr/stats.png b/lidarr/stats.png index 4bd42b2d2..c44bf030b 100644 Binary files a/lidarr/stats.png and b/lidarr/stats.png differ diff --git a/lidarr/updater.json b/lidarr/updater.json index 5288b7bae..cf0efeaa5 100644 --- a/lidarr/updater.json +++ b/lidarr/updater.json @@ -1,8 +1,8 @@ { - "last_update": "18-05-2024", + "last_update": "20-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "lidarr", "source": "github", "upstream_repo": "linuxserver/docker-lidarr", - "upstream_version": "2.3.3.4204" + "upstream_version": "2.4.3.4248" } diff --git a/linkwarden/CHANGELOG.md b/linkwarden/CHANGELOG.md index e7d6066fa..9e617f23a 100644 --- a/linkwarden/CHANGELOG.md +++ b/linkwarden/CHANGELOG.md @@ -1,2 +1,12 @@ + +## 2.6.2 (27-07-2024) +- Update to latest version from linkwarden/linkwarden (changelog : https://github.com/linkwarden/linkwarden/releases) +## 2.6.0-3 (21-07-2024) +- Minor bugs fixed +## 2.6.0-2 (21-07-2024) +- Minor bugs fixed + +## 2.6.0 (20-07-2024) +- Update to latest version from linkwarden/linkwarden (changelog : https://github.com/linkwarden/linkwarden/releases) ## 2.5.3-2 (08-05-2024) - Using latest tag diff --git a/linkwarden/README.md b/linkwarden/README.md index 7f46e4449..b0cc58b58 100644 --- a/linkwarden/README.md +++ b/linkwarden/README.md @@ -1,3 +1,4 @@ + # Home assistant add-on: linkwarden [![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) @@ -40,8 +41,15 @@ Options can be configured through two ways : ```yaml "NEXTAUTH_SECRET": mandatory, must be filled at start "NEXTAUTH_URL": optional, only if linkwarden is kept externally +"NEXT_PUBLIC_DISABLE_REGISTRATION": If set to true, registration will be disabled. +"NEXT_PUBLIC_CREDENTIALS_ENABLED": If set to true, users will be able to login with username and password. "STORAGE_FOLDER": optional, is /config/library by default "DATABASE_URL": optional, if kept blank an internal database will be used. 
If an external database is used, modify according to this design postgresql://postgres:homeassistant@localhost:5432/linkwarden +"NEXT_PUBLIC_AUTHENTIK_ENABLED": If set to true, Authentik will be enabled and you'll need to define the variables below. +"AUTHENTIK_CUSTOM_NAME": Optionally set a custom provider name. (name on the button) +"AUTHENTIK_ISSUER": This is the "OpenID Configuration Issuer" shown in the Provider Overview. Note that you must delete the "/" at the end of the URL. Should look like: `https://authentik.my-doma.in/application/o/linkwarden` +"AUTHENTIK_CLIENT_ID": Client ID copied from the Provider Overview screen in Authentik +"AUTHENTIK_CLIENT_SECRET": Client Secret copied from the Provider Overview screen in Authentik ``` - Config.yaml @@ -64,106 +72,16 @@ The installation of this add-on is pretty straightforward and not different in c 1. Check the logs of the add-on to see if everything went well. 1. Open the webUI and adapt the software options -## Integration with HA +## Integration with Authentik -Use the [linkwarden integration](https://www.home-assistant.io/integrations/linkwarden/) +Follow the instruction from the Linkwarden docs page. https://docs.linkwarden.app/self-hosting/sso-oauth#authentik -You can use the following snippets to check and set the alternate speeds (the HA integration above is not needed for this) -```bash -shell_command: - toggle_torrent_speed: curl -X POST https://:8081/api/v2/transfer/toggleSpeedLimitsMode -k -sensor: - - platform: command_line - name: get_torrent_speed - command: curl https://:8081/api/v2/transfer/speedLimitsMode -k -``` - -If you're not using the SSL option, you can skip the -k parameter and use http instead of https in the URL - -These lines will expose a `sensor.get_torrent_speed` that updates every 60 seconds and returns 1 if the alternate speed mode is enabled, 0 otherwise, and a `shell_command.toggle_torrent_speed` that you can call as a Service in your automations ## Common issues
- ### ipv6 issues with openvpn (@happycoo) -Add this code to your .ovpn config - -```bash -# don't route lan through vpn -route 192.168.1.0 255.255.255.0 net_gateway - -# deactivate ipv6 -pull-filter ignore "dhcp-option DNS6" -pull-filter ignore "tun-ipv6" -pull-filter ignore "ifconfig-ipv6" -``` -
- -
- ### Monitored folders (@FaliseDotCom) - -- go to config\addons_config\linkwarden -- find (or create) the file watched_folders.json -- paste or adjust to the following: - -```json -{ - "folder/to/watch": { - "add_torrent_params": { - "category": "", - "content_layout": "Original", - "download_limit": -1, - "download_path": "[folder/for/INCOMPLETE_downloads]", - "operating_mode": "AutoManaged", - "ratio_limit": -2, - "save_path": "[folder/for/COMPLETED_downloads]", - "seeding_time_limit": -2, - "skip_checking": false, - "stopped": false, - "tags": [ - ], - "upload_limit": -1, - "use_auto_tmm": false, - "use_download_path": true - }, - "recursive": false - } -} -``` -
- -
- ### nginx error code (@Nanianmichaels) - -> [cont-init.d] 30-nginx.sh: executing... -> [cont-init.d] 30-nginx.sh: exited 1. - -Wait a couple minutes and restart addon, it could be a temporary unavailability of github - -### Local mount with invalid argument (@antonio1475) - -> [cont-init.d] 00-local_mounts.sh: executing... -> Local Disks mounting... -> mount: mounting /dev/sda1 on /mnt/sda1 failed: Invalid argument -> [19:19:44] FATAL: Unable to mount local drives! Please check the name. -> [cont-init.d] 00-local_mounts.sh: exited 0. - -Try to mount by putting the partition label in the "localdisks" options instead of the hardware name -
- -
- ### Loss of metadata fetching with openvpn after several days (@almico) - -Add `ping-restart 60` to your config.ovpn -
- -
- ### Downloads info are empty on small scale window (@aviadlevy) - -When my window size width is lower than 960 pixels my downloads are empty. -Solution is to reset the Vuetorrent settings. -
+ ## Support diff --git a/linkwarden/config.json b/linkwarden/config.json index 444b78ed2..de3f77e44 100644 --- a/linkwarden/config.json +++ b/linkwarden/config.json @@ -6,8 +6,15 @@ "codenotary": "alexandrep.github@gmail.com", "description": "collaborative bookmark manager to collect, organize, and preserve webpages and articles", "environment": { + "AUTHENTIK_CLIENT_ID": "CLIENT_ID", + "AUTHENTIK_CLIENT_SECRET": "CLIENT_SECRET", + "AUTHENTIK_CUSTOM_NAME": "Authentik", + "AUTHENTIK_ISSUER": "https://authentik.my-doma.in/application/o/linkwarden", "DATABASE_URL": "postgresql://postgres:homeassistant@localhost:5432/linkwarden", "NEXTAUTH_URL": "http://localhost:3000/api/v1/auth", + "NEXT_PUBLIC_AUTHENTIK_ENABLED": "false", + "NEXT_PUBLIC_CREDENTIALS_ENABLED": "true", + "NEXT_PUBLIC_DISABLE_REGISTRATION": "false", "STORAGE_FOLDER": "/config/library" }, "image": "ghcr.io/alexbelgium/linkwarden-{arch}", @@ -23,13 +30,20 @@ "3000/tcp": "webui" }, "schema": { + "AUTHENTIK_CLIENT_ID": "str?", + "AUTHENTIK_CLIENT_SECRET": "str?", + "AUTHENTIK_CUSTOM_NAME": "str?", + "AUTHENTIK_ISSUER": "str?", "DATABASE_URL": "str?", "NEXTAUTH_SECRET": "str", "NEXTAUTH_URL": "str?", + "NEXT_PUBLIC_AUTHENTIK_ENABLED": "bool?", + "NEXT_PUBLIC_CREDENTIALS_ENABLED": "bool?", + "NEXT_PUBLIC_DISABLE_REGISTRATION": "bool?", "STORAGE_FOLDER": "str?" }, "slug": "linkwarden", "url": "https://github.com/alexbelgium/hassio-addons/tree/master/linkwarden", - "version": "2.5.3-2", + "version": "2.6.2", "webui": "[PROTO:ssl]://[HOST]:[PORT:3000]" } diff --git a/linkwarden/stats.png b/linkwarden/stats.png index f438497b6..547373ae8 100644 Binary files a/linkwarden/stats.png and b/linkwarden/stats.png differ diff --git a/linkwarden/updater.json b/linkwarden/updater.json index c5d6a32fa..5a117f1f7 100644 --- a/linkwarden/updater.json +++ b/linkwarden/updater.json @@ -1,8 +1,8 @@ { - "last_update": "24-02-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "linkwarden", "source": "github", "upstream_repo": "linkwarden/linkwarden", - "upstream_version": "2.5.3" + "upstream_version": "2.6.2" } diff --git a/mealie/CHANGELOG.md b/mealie/CHANGELOG.md index efcf6bbfe..83689837d 100644 --- a/mealie/CHANGELOG.md +++ b/mealie/CHANGELOG.md @@ -1,4 +1,7 @@ +## v1.11.0 (03-08-2024) +- Update to latest version from hay-kot/mealie (changelog : https://github.com/hay-kot/mealie/releases) + ## v1.10.2 (06-07-2024) - Update to latest version from hay-kot/mealie (changelog : https://github.com/hay-kot/mealie/releases) diff --git a/mealie/config.json b/mealie/config.json index 34afea61c..8f6533947 100644 --- a/mealie/config.json +++ b/mealie/config.json @@ -109,6 +109,6 @@ "slug": "mealie", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "v1.10.2", + "version": "v1.11.0", "webui": "[PROTO:ssl]://[HOST]:[PORT:9001]" } diff --git a/mealie/stats.png b/mealie/stats.png index b3cbe6cba..bc9d373ab 100644 Binary files a/mealie/stats.png and b/mealie/stats.png differ diff --git a/mealie/updater.json b/mealie/updater.json index 287268985..af3e7ff1c 100644 --- a/mealie/updater.json +++ b/mealie/updater.json @@ -1,10 +1,10 @@ { "github_beta": "true", "github_fulltag": "true", - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "mealie", "source": "github", "upstream_repo": "hay-kot/mealie", - "upstream_version": "v1.10.2" + "upstream_version": "v1.11.0" } diff --git a/mylar3/stats.png b/mylar3/stats.png index 92e630c91..e73154a16 
100644 Binary files a/mylar3/stats.png and b/mylar3/stats.png differ diff --git a/navidrome/stats.png b/navidrome/stats.png index 197b392db..7eb97b725 100644 Binary files a/navidrome/stats.png and b/navidrome/stats.png differ diff --git a/nextcloud/CHANGELOG.md b/nextcloud/CHANGELOG.md index 1ac7ce690..5041d871a 100644 --- a/nextcloud/CHANGELOG.md +++ b/nextcloud/CHANGELOG.md @@ -1,4 +1,7 @@ +## 29.0.4 (27-07-2024) +- Update to latest version from linuxserver/docker-nextcloud (changelog : https://github.com/linuxserver/docker-nextcloud/releases) + ## 29.0.3 (29-06-2024) - Update to latest version from linuxserver/docker-nextcloud (changelog : https://github.com/linuxserver/docker-nextcloud/releases) ## 29.0.2-3 (25-06-2024) diff --git a/nextcloud/config.json b/nextcloud/config.json index 85a989a9d..5f315efeb 100644 --- a/nextcloud/config.json +++ b/nextcloud/config.json @@ -77,7 +77,8 @@ ], "environment": { "NEXTCLOUD_PATH": "/data/config/www/nextcloud", - "SKIP_DATA_DIRECTORY_PERMISSION_CHECK": "yes" + "SKIP_DATA_DIRECTORY_PERMISSION_CHECK": "yes", + "TRUSTED_PROXIES": "**" }, "hassio_api": true, "image": "ghcr.io/alexbelgium/nextcloud_ocr-{arch}", @@ -151,6 +152,6 @@ "uart": true, "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/nextcloud", - "version": "29.0.3", + "version": "29.0.4", "webui": "https://[HOST]:[PORT:443]" } diff --git a/nextcloud/stats.png b/nextcloud/stats.png index a37bf22f1..4aaefce13 100644 Binary files a/nextcloud/stats.png and b/nextcloud/stats.png differ diff --git a/nextcloud/updater.json b/nextcloud/updater.json index d27c14f5e..41cbdbf9c 100644 --- a/nextcloud/updater.json +++ b/nextcloud/updater.json @@ -1,8 +1,8 @@ { - "last_update": "29-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "nextcloud", "source": "github", "upstream_repo": "linuxserver/docker-nextcloud", - "upstream_version": "29.0.3" + "upstream_version": "29.0.4" } diff --git a/nzbget/CHANGELOG.md b/nzbget/CHANGELOG.md index d6ce76318..30dd50a99 100644 --- a/nzbget/CHANGELOG.md +++ b/nzbget/CHANGELOG.md @@ -1,3 +1,15 @@ + +## v24.1-ls151 (03-08-2024) +- Update to latest version from linuxserver/docker-nzbget (changelog : https://github.com/linuxserver/docker-nzbget/releases) + +## v24.1-ls150 (27-07-2024) +- Update to latest version from linuxserver/docker-nzbget (changelog : https://github.com/linuxserver/docker-nzbget/releases) + +## v24.1-ls149 (20-07-2024) +- Update to latest version from linuxserver/docker-nzbget (changelog : https://github.com/linuxserver/docker-nzbget/releases) + +## v24.1-ls148 (13-07-2024) +- Update to latest version from linuxserver/docker-nzbget (changelog : https://github.com/linuxserver/docker-nzbget/releases) ## v24.1-ls147-2 (06-07-2024) - Minor bugs fixed diff --git a/nzbget/config.json b/nzbget/config.json index 2538881d8..df1336867 100644 --- a/nzbget/config.json +++ b/nzbget/config.json @@ -107,5 +107,5 @@ "slug": "nzbget", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "v24.1-ls147-2" + "version": "v24.1-ls151" } diff --git a/nzbget/stats.png b/nzbget/stats.png index bca8561c6..b51e4556a 100644 Binary files a/nzbget/stats.png and b/nzbget/stats.png differ diff --git a/nzbget/updater.json b/nzbget/updater.json index c29fbce85..4a79057ae 100644 --- a/nzbget/updater.json +++ b/nzbget/updater.json @@ -1,9 +1,9 @@ { "github_fulltag": "true", - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": 
"nzbget", "source": "github", "upstream_repo": "linuxserver/docker-nzbget", - "upstream_version": "v24.1-ls147" + "upstream_version": "v24.1-ls151" } diff --git a/omada/CHANGELOG.md b/omada/CHANGELOG.md index 0bb42ab4c..9a3a4cc3d 100644 --- a/omada/CHANGELOG.md +++ b/omada/CHANGELOG.md @@ -1,3 +1,15 @@ + +## 5.1-2024-08-01 (2024-08-01) +- Update to latest version from mbentley/omada-controller + +## 5.14.26.1-2024-07-26 (2024-07-26) +- Update to latest version from mbentley/omada-controller + +## 5.14.26.1-2024-07-17 (2024-07-17) +- Update to latest version from mbentley/omada-controller + +## 5.13-2024-07-12 (2024-07-12) +- Update to latest version from mbentley/omada-controller ## 5.13-2024-07-03-2 (06-07-2024) - Minor bugs fixed diff --git a/omada/README.md b/omada/README.md index f9f7dc526..53e99148d 100644 --- a/omada/README.md +++ b/omada/README.md @@ -1,4 +1,3 @@ -## ⚠ Open Request : [✨ [REQUEST] Omada (additional ports) (opened 2024-03-05)](https://github.com/alexbelgium/hassio-addons/issues/1287) by [@ttocsr](https://github.com/ttocsr) Recommendation : please backup your database and migrated to this addon https://github.com/jkunczik/home-assistant-omada It is dedicated and seems in active development ; it should be more stable diff --git a/omada/config.json b/omada/config.json index 0842e62d7..c95d845d2 100644 --- a/omada/config.json +++ b/omada/config.json @@ -106,6 +106,6 @@ "slug": "omada", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "5.13-2024-07-03-2", + "version": "5.1-2024-08-01", "webui": "http://[HOST]:[PORT:8088]" } diff --git a/omada/stats.png b/omada/stats.png index b8c738c0a..e150bd6ac 100644 Binary files a/omada/stats.png and b/omada/stats.png differ diff --git a/omada/updater.json b/omada/updater.json index 39610c40d..5f04048a6 100644 --- a/omada/updater.json +++ b/omada/updater.json @@ -3,11 +3,11 @@ "dockerhub_list_size": 20, "github_exclude": "-", "github_tagfilter": "5.1", - "last_update": "2024-07-03", + "last_update": "2024-08-01", "paused": false, "repository": "alexbelgium/hassio-addons", "slug": "omada", "source": "dockerhub", "upstream_repo": "mbentley/omada-controller", - "upstream_version": "5.13-2024-07-03" + "upstream_version": "5.1-2024-08-01" } diff --git a/omada_v3/CHANGELOG.md b/omada_v3/CHANGELOG.md index fb6c3df99..fd944be28 100644 --- a/omada_v3/CHANGELOG.md +++ b/omada_v3/CHANGELOG.md @@ -1,4 +1,7 @@ +## 5.3-amd64-2024-08-01 (2024-08-01) +- Update to latest version from mbentley/omada-controller + ## 5.3-amd64-2024-06-27 (2024-06-27) - Update to latest version from mbentley/omada-controller diff --git a/omada_v3/config.json b/omada_v3/config.json index 24af5e9eb..acde46878 100644 --- a/omada_v3/config.json +++ b/omada_v3/config.json @@ -107,6 +107,6 @@ "slug": "omada_v3", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "5.3-amd64-2024-06-27", + "version": "5.3-amd64-2024-08-01", "webui": "http://[HOST]:[PORT:8088]" } diff --git a/omada_v3/stats.png b/omada_v3/stats.png index c0086a120..ee8e381b4 100644 Binary files a/omada_v3/stats.png and b/omada_v3/stats.png differ diff --git a/omada_v3/updater.json b/omada_v3/updater.json index d75796f61..1b35a5c56 100644 --- a/omada_v3/updater.json +++ b/omada_v3/updater.json @@ -1,11 +1,11 @@ { "dockerhub_by_date": true, "github_tagfilter": "5.3-amd64", - "last_update": "2024-06-27", + "last_update": "2024-08-01", "paused": false, "repository": "alexbelgium/hassio-addons", "slug": "omada_v3", "source": "dockerhub", "upstream_repo": 
"mbentley/omada-controller", - "upstream_version": "5.3-amd64-2024-06-27" + "upstream_version": "5.3-amd64-2024-08-01" } diff --git a/ombi/CHANGELOG.md b/ombi/CHANGELOG.md index 3b2c9b412..0c93e9950 100644 --- a/ombi/CHANGELOG.md +++ b/ombi/CHANGELOG.md @@ -1,4 +1,7 @@ +## 4.44.1 (27-07-2024) +- Update to latest version from linuxserver/docker-ombi (changelog : https://github.com/linuxserver/docker-ombi/releases) + ## 4.43.5 (26-08-2023) - Update to latest version from linuxserver/docker-ombi diff --git a/ombi/config.json b/ombi/config.json index cace8865e..d4f0caf98 100644 --- a/ombi/config.json +++ b/ombi/config.json @@ -88,6 +88,6 @@ "slug": "ombi", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/ombi", - "version": "4.43.5", + "version": "4.44.1", "webui": "[PROTO:ssl]://[HOST]:[PORT:3579]" } diff --git a/ombi/stats.png b/ombi/stats.png index a64b22d4c..f58064a90 100644 Binary files a/ombi/stats.png and b/ombi/stats.png differ diff --git a/ombi/updater.json b/ombi/updater.json index 5479895e6..b98f754ce 100644 --- a/ombi/updater.json +++ b/ombi/updater.json @@ -1,8 +1,8 @@ { - "last_update": "26-08-2023", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "ombi", "source": "github", "upstream_repo": "linuxserver/docker-ombi", - "upstream_version": "4.43.5" + "upstream_version": "4.44.1" } diff --git a/openproject/stats.png b/openproject/stats.png index 93cfe2b41..d3a479d83 100644 Binary files a/openproject/stats.png and b/openproject/stats.png differ diff --git a/organizr/stats.png b/organizr/stats.png index 1b557713d..7beb9fd0a 100644 Binary files a/organizr/stats.png and b/organizr/stats.png differ diff --git a/overseerr/stats.png b/overseerr/stats.png index 3eab67608..a985831c3 100644 Binary files a/overseerr/stats.png and b/overseerr/stats.png differ diff --git a/papermerge/stats.png b/papermerge/stats.png index bd8a5103c..ea2a034bb 100644 Binary files a/papermerge/stats.png and b/papermerge/stats.png differ diff --git a/photoprism/CHANGELOG.md b/photoprism/CHANGELOG.md index a45846443..66c0857b9 100644 --- a/photoprism/CHANGELOG.md +++ b/photoprism/CHANGELOG.md @@ -1,3 +1,6 @@ + +## ubuntu-2024-07-11 (2024-07-11) +- Update to latest version from photoprism/photoprism ## ubuntu-2024-05-31-3 (10-06-2024) - Minor bugs fixed diff --git a/photoprism/config.json b/photoprism/config.json index e1be0a94e..a4333cba7 100644 --- a/photoprism/config.json +++ b/photoprism/config.json @@ -132,6 +132,6 @@ "slug": "photoprism", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "ubuntu-2024-05-31-3", + "version": "ubuntu-2024-07-11", "video": true } diff --git a/photoprism/stats.png b/photoprism/stats.png index d35a3d9ef..5987dbfb4 100644 Binary files a/photoprism/stats.png and b/photoprism/stats.png differ diff --git a/photoprism/updater.json b/photoprism/updater.json index 642199580..63402a3fc 100644 --- a/photoprism/updater.json +++ b/photoprism/updater.json @@ -1,11 +1,11 @@ { "dockerhub_by_date": "true", "github_fulltag": "true", - "last_update": "2024-05-31", + "last_update": "2024-07-11", "paused": false, "repository": "alexbelgium/hassio-addons", "slug": "photoprism", "source": "dockerhub", "upstream_repo": "photoprism/photoprism", - "upstream_version": "ubuntu-2024-05-31" + "upstream_version": "ubuntu-2024-07-11" } diff --git a/piwigo/CHANGELOG.md b/piwigo/CHANGELOG.md index e5aaca217..aa195750b 100644 --- a/piwigo/CHANGELOG.md +++ b/piwigo/CHANGELOG.md @@ -1,4 +1,7 @@ +## 14.5.0 (20-07-2024) +- 
Update to latest version from linuxserver/docker-piwigo (changelog : https://github.com/linuxserver/docker-piwigo/releases) + ## 14.4.0 (20-04-2024) - Update to latest version from linuxserver/docker-piwigo (changelog : https://github.com/linuxserver/docker-piwigo/releases) diff --git a/piwigo/config.json b/piwigo/config.json index 1f9e6fb2c..88d5058ec 100644 --- a/piwigo/config.json +++ b/piwigo/config.json @@ -104,6 +104,6 @@ "slug": "piwigo", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "14.4.0", + "version": "14.5.0", "webui": "[PROTO:ssl]://[HOST]:[PORT:80]" } diff --git a/piwigo/stats.png b/piwigo/stats.png index cf5d40f7c..fb9378e77 100644 Binary files a/piwigo/stats.png and b/piwigo/stats.png differ diff --git a/piwigo/updater.json b/piwigo/updater.json index 4a295984f..e2dece425 100644 --- a/piwigo/updater.json +++ b/piwigo/updater.json @@ -1,8 +1,8 @@ { - "last_update": "20-04-2024", + "last_update": "20-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "piwigo", "source": "github", "upstream_repo": "linuxserver/docker-piwigo", - "upstream_version": "14.4.0" + "upstream_version": "14.5.0" } diff --git a/plex/CHANGELOG.md b/plex/CHANGELOG.md index 9e49a759c..2b1540a54 100644 --- a/plex/CHANGELOG.md +++ b/plex/CHANGELOG.md @@ -1,3 +1,12 @@ + +## 1.40.4.8679-424562606-ls224 (27-07-2024) +- Update to latest version from linuxserver/docker-plex (changelog : https://github.com/linuxserver/docker-plex/releases) + +## 1.40.4.8679-424562606-ls223 (20-07-2024) +- Update to latest version from linuxserver/docker-plex (changelog : https://github.com/linuxserver/docker-plex/releases) + +## 1.40.4.8679-424562606-ls222 (13-07-2024) +- Update to latest version from linuxserver/docker-plex (changelog : https://github.com/linuxserver/docker-plex/releases) ## 1.40.3.8555-fef15d30c-ls220-2 (06-07-2024) - Minor bugs fixed diff --git a/plex/config.json b/plex/config.json index 13e0c4af0..2138148f8 100644 --- a/plex/config.json +++ b/plex/config.json @@ -136,7 +136,7 @@ "slug": "plex_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/plex", - "version": "1.40.3.8555-fef15d30c-ls220-2", + "version": "1.40.4.8679-424562606-ls224", "video": true, "webui": "[PROTO:ssl]://[HOST]:[PORT:32400]/web" } diff --git a/plex/stats.png b/plex/stats.png index c7d763367..c29520e3c 100644 Binary files a/plex/stats.png and b/plex/stats.png differ diff --git a/plex/updater.json b/plex/updater.json index 844612e8a..23b5be70a 100644 --- a/plex/updater.json +++ b/plex/updater.json @@ -1,9 +1,9 @@ { "github_fulltag": "true", - "last_update": "06-07-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "plex", "source": "github", "upstream_repo": "linuxserver/docker-plex", - "upstream_version": "1.40.3.8555-fef15d30c-ls220" + "upstream_version": "1.40.4.8679-424562606-ls224" } diff --git a/portainer/stats.png b/portainer/stats.png index aa1ecffb5..ab5c82406 100644 Binary files a/portainer/stats.png and b/portainer/stats.png differ diff --git a/portainer_agent/stats.png b/portainer_agent/stats.png index 51933ecfc..20f8f4294 100644 Binary files a/portainer_agent/stats.png and b/portainer_agent/stats.png differ diff --git a/postgres/stats.png b/postgres/stats.png index a8b4bc455..36bec67bf 100644 Binary files a/postgres/stats.png and b/postgres/stats.png differ diff --git a/prowlarr/CHANGELOG.md b/prowlarr/CHANGELOG.md index 793bcfa75..71a9e3308 100644 --- a/prowlarr/CHANGELOG.md +++ b/prowlarr/CHANGELOG.md @@ -1,3 
+1,15 @@ + +## develop-1.21.2.4649-ls185 (03-08-2024) +- Update to latest version from linuxserver/docker-prowlarr (changelog : https://github.com/linuxserver/docker-prowlarr/releases) + +## 1.21.2.4649-ls80 (27-07-2024) +- Update to latest version from linuxserver/docker-prowlarr (changelog : https://github.com/linuxserver/docker-prowlarr/releases) + +## nightly-1.21.1.4626-ls25 (20-07-2024) +- Update to latest version from linuxserver/docker-prowlarr (changelog : https://github.com/linuxserver/docker-prowlarr/releases) + +## nightly-1.21.0.4615-ls19 (13-07-2024) +- Update to latest version from linuxserver/docker-prowlarr (changelog : https://github.com/linuxserver/docker-prowlarr/releases) ## nightly-1.20.1.4597-ls10-2 (06-07-2024) - Minor bugs fixed diff --git a/prowlarr/config.json b/prowlarr/config.json index 66469071f..e0e960594 100644 --- a/prowlarr/config.json +++ b/prowlarr/config.json @@ -105,6 +105,6 @@ "slug": "prowlarr", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "nightly-1.20.1.4597-ls10-2", + "version": "develop-1.21.2.4649-ls185", "webui": "[PROTO:ssl]://[HOST]:[PORT:9696]" } diff --git a/prowlarr/stats.png b/prowlarr/stats.png index f2670cc6d..e69de29bb 100644 Binary files a/prowlarr/stats.png and b/prowlarr/stats.png differ diff --git a/prowlarr/updater.json b/prowlarr/updater.json index b287b822b..4ff96f158 100644 --- a/prowlarr/updater.json +++ b/prowlarr/updater.json @@ -1,10 +1,10 @@ { "github_beta": "true", "github_fulltag": "true", - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "prowlarr", "source": "github", "upstream_repo": "linuxserver/docker-prowlarr", - "upstream_version": "nightly-1.20.1.4597-ls10" + "upstream_version": "develop-1.21.2.4649-ls185" } diff --git a/qbittorrent/stats.png b/qbittorrent/stats.png index 541994da5..ed77ff706 100644 Binary files a/qbittorrent/stats.png and b/qbittorrent/stats.png differ diff --git a/radarr/CHANGELOG.md b/radarr/CHANGELOG.md index d7360b4dc..18fe30d3a 100644 --- a/radarr/CHANGELOG.md +++ b/radarr/CHANGELOG.md @@ -1,4 +1,7 @@ +## 5.8.3.8933 (27-07-2024) +- Update to latest version from linuxserver/docker-radarr (changelog : https://github.com/linuxserver/docker-radarr/releases) + ## 5.7.0.8882 (22-06-2024) - Update to latest version from linuxserver/docker-radarr (changelog : https://github.com/linuxserver/docker-radarr/releases) diff --git a/radarr/config.json b/radarr/config.json index 86ae020db..c37310e3c 100644 --- a/radarr/config.json +++ b/radarr/config.json @@ -111,5 +111,5 @@ "slug": "radarr_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/radarr", - "version": "5.7.0.8882" + "version": "5.8.3.8933" } diff --git a/radarr/stats.png b/radarr/stats.png index 10e69cdd7..6fe3136ba 100644 Binary files a/radarr/stats.png and b/radarr/stats.png differ diff --git a/radarr/updater.json b/radarr/updater.json index 821633e63..593807704 100644 --- a/radarr/updater.json +++ b/radarr/updater.json @@ -1,8 +1,8 @@ { - "last_update": "22-06-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "radarr", "source": "github", "upstream_repo": "linuxserver/docker-radarr", - "upstream_version": "5.7.0.8882" + "upstream_version": "5.8.3.8933" } diff --git a/readarr/CHANGELOG.md b/readarr/CHANGELOG.md index 7421d3957..cf5af7bf9 100644 --- a/readarr/CHANGELOG.md +++ b/readarr/CHANGELOG.md @@ -1,3 +1,12 @@ + +## 0.4.0.2593 (27-07-2024) +- Update to latest version 
from linuxserver/docker-readarr (changelog : https://github.com/linuxserver/docker-readarr/releases) + +## 0.3.32.2587 (20-07-2024) +- Update to latest version from linuxserver/docker-readarr (changelog : https://github.com/linuxserver/docker-readarr/releases) + +## 0.3.31.2578 (13-07-2024) +- Update to latest version from linuxserver/docker-readarr (changelog : https://github.com/linuxserver/docker-readarr/releases) ## 0.3.30.2576-2 (06-07-2024) - Minor bugs fixed diff --git a/readarr/config.json b/readarr/config.json index fa58593f3..175c22ca4 100644 --- a/readarr/config.json +++ b/readarr/config.json @@ -111,5 +111,5 @@ "slug": "readarr_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/readarr", - "version": "0.3.30.2576-2" + "version": "0.4.0.2593" } diff --git a/readarr/stats.png b/readarr/stats.png index eddea5d43..06e6b7945 100644 Binary files a/readarr/stats.png and b/readarr/stats.png differ diff --git a/readarr/updater.json b/readarr/updater.json index 979bbedb7..4eceffad6 100644 --- a/readarr/updater.json +++ b/readarr/updater.json @@ -1,9 +1,9 @@ { "github_beta": "true", - "last_update": "06-07-2024", + "last_update": "27-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "readarr", "source": "github", "upstream_repo": "linuxserver/docker-readarr", - "upstream_version": "0.3.30.2576" + "upstream_version": "0.4.0.2593" } diff --git a/requestrr/stats.png b/requestrr/stats.png index 3f1c5baf7..60fa66f02 100644 Binary files a/requestrr/stats.png and b/requestrr/stats.png differ diff --git a/resiliosync/stats.png b/resiliosync/stats.png index 922770a04..e8962b776 100644 Binary files a/resiliosync/stats.png and b/resiliosync/stats.png differ diff --git a/sabnzbd/stats.png b/sabnzbd/stats.png index a28f410e8..36c817d85 100644 Binary files a/sabnzbd/stats.png and b/sabnzbd/stats.png differ diff --git a/scrutiny/CHANGELOG.md b/scrutiny/CHANGELOG.md index 42ee0e740..b5f4515eb 100644 --- a/scrutiny/CHANGELOG.md +++ b/scrutiny/CHANGELOG.md @@ -1,3 +1,5 @@ +## v0.8.1-4 (30-07-2024) +- Minor bugs fixed ## v0.8.1-3 (11-06-2024) - Minor bugs fixed ## v0.8.1-2 (13-04-2024) diff --git a/scrutiny/README.md b/scrutiny/README.md index 1cad5798c..1d979a341 100644 --- a/scrutiny/README.md +++ b/scrutiny/README.md @@ -110,7 +110,7 @@ rest: value_template: '{{ value_json.data.smart_results[0].attrs["198"].raw_value }}' binary_sensor: - name: "HDD - SMART Status" - value_template: "{{ bool(value_json.data.smart_results[0].Status) }}" + value_template: "{{ 1 if value_json.data.smart_results[0].Status in [1, 2] else 0 }}" device_class: problem ``` diff --git a/scrutiny/config.json b/scrutiny/config.json index cfe8449aa..2a161b18d 100644 --- a/scrutiny/config.json +++ b/scrutiny/config.json @@ -114,5 +114,5 @@ "slug": "scrutiny", "udev": true, "url": "https://github.com/AnalogJ/scrutiny", - "version": "v0.8.1-3" + "version": "v0.8.1-4" } diff --git a/scrutiny/stats.png b/scrutiny/stats.png index 61519ab1d..5422b8efb 100644 Binary files a/scrutiny/stats.png and b/scrutiny/stats.png differ diff --git a/scrutiny_fa/CHANGELOG.md b/scrutiny_fa/CHANGELOG.md index 5564fa6c9..d798828a6 100644 --- a/scrutiny_fa/CHANGELOG.md +++ b/scrutiny_fa/CHANGELOG.md @@ -1,3 +1,5 @@ +## v0.8.1-4 (30-07-2024) +- Minor bugs fixed ## v0.8.1-3 (11-06-2024) - Minor bugs fixed ## v0.8.1-2 (13-04-2024) diff --git a/scrutiny_fa/config.json b/scrutiny_fa/config.json index fb3d42da5..ac7fea52e 100644 --- a/scrutiny_fa/config.json +++ b/scrutiny_fa/config.json @@ -49,5 +49,5 @@ "slug": 
"scrutiny_fa", "udev": true, "url": "https://github.com/AnalogJ/scrutiny", - "version": "v0.8.1-3" + "version": "v0.8.1-4" } diff --git a/scrutiny_fa/stats.png b/scrutiny_fa/stats.png index e1ab7ca71..cdc1e5143 100644 Binary files a/scrutiny_fa/stats.png and b/scrutiny_fa/stats.png differ diff --git a/seafile/stats.png b/seafile/stats.png index aeaaed3f7..facd72079 100644 Binary files a/seafile/stats.png and b/seafile/stats.png differ diff --git a/signalk/CHANGELOG.md b/signalk/CHANGELOG.md index 14e9d5b73..d7eb96bee 100644 --- a/signalk/CHANGELOG.md +++ b/signalk/CHANGELOG.md @@ -1,3 +1,5 @@ +## 2.8.3-7 (14-07-2024) +- Minor bugs fixed ## 2.8.3-6 (11-07-2024) - Add permissions for ttyUSB diff --git a/signalk/config.json b/signalk/config.json index fb972a39e..160f065c1 100644 --- a/signalk/config.json +++ b/signalk/config.json @@ -7,6 +7,8 @@ "codenotary": "alexandrep.github@gmail.com", "description": "An implementation of a Signal K central server for boats", "devices": [ + "/dev/can0", + "/dev/can1", "/dev/ttyUSB0", "/dev/ttyUSB1" ], @@ -17,6 +19,7 @@ "SSLPORT": "3443", "TCPSTREAMPORT": "8375" }, + "full_access": true, "image": "ghcr.io/alexbelgium/signalk-{arch}", "map": [ "addon_config:rw" @@ -44,6 +47,6 @@ "uart": true, "url": "https://github.com/alexbelgium/hassio-addons", "usb": true, - "version": "2.8.3-6", + "version": "2.8.3-7", "webui": "http://[HOST]:[PORT:3000]" } diff --git a/signalk/stats.png b/signalk/stats.png index 008d8974e..edd43664b 100644 Binary files a/signalk/stats.png and b/signalk/stats.png differ diff --git a/sonarr/CHANGELOG.md b/sonarr/CHANGELOG.md index 64453fbba..d25325324 100644 --- a/sonarr/CHANGELOG.md +++ b/sonarr/CHANGELOG.md @@ -1,3 +1,15 @@ + +## 4.0.8.2008 (03-08-2024) +- Update to latest version from linuxserver/docker-sonarr (changelog : https://github.com/linuxserver/docker-sonarr/releases) + +## 4.0.8.1929 (27-07-2024) +- Update to latest version from linuxserver/docker-sonarr (changelog : https://github.com/linuxserver/docker-sonarr/releases) + +## 4.0.8.1902 (20-07-2024) +- Update to latest version from linuxserver/docker-sonarr (changelog : https://github.com/linuxserver/docker-sonarr/releases) + +## 4.0.6.1847 (13-07-2024) +- Update to latest version from linuxserver/docker-sonarr (changelog : https://github.com/linuxserver/docker-sonarr/releases) ## 4.0.6.1805-2 (06-07-2024) - Minor bugs fixed diff --git a/sonarr/Dockerfile b/sonarr/Dockerfile index 7c5e45104..94a2e4713 100644 --- a/sonarr/Dockerfile +++ b/sonarr/Dockerfile @@ -16,7 +16,7 @@ ARG BUILD_FROM ARG BUILD_VERSION -ARG BUILD_UPSTREAM="4.0.6.1805" +ARG BUILD_UPSTREAM="4.0.8.2008" FROM ${BUILD_FROM} ################## diff --git a/sonarr/README.md b/sonarr/README.md index 4e9bf3f45..55c3c8d58 100644 --- a/sonarr/README.md +++ b/sonarr/README.md @@ -1,4 +1,3 @@ -## ⚠ Open Request : [✨ [REQUEST] Sonarr HA calendar integration l (opened 2024-03-06)](https://github.com/alexbelgium/hassio-addons/issues/1289) by [@FaliseDotCom](https://github.com/FaliseDotCom) # Home assistant add-on: Sonarr diff --git a/sonarr/config.json b/sonarr/config.json index 74c8994ab..a2b2a51b0 100644 --- a/sonarr/config.json +++ b/sonarr/config.json @@ -111,5 +111,5 @@ "slug": "sonarr_nas", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/sonarr", - "version": "4.0.6.1805-2" + "version": "4.0.8.2008" } diff --git a/sonarr/stats.png b/sonarr/stats.png index 7d564ff16..6fbcf916e 100644 Binary files a/sonarr/stats.png and b/sonarr/stats.png differ diff --git a/sonarr/updater.json 
b/sonarr/updater.json index 0c822cca8..2fb5a1004 100644 --- a/sonarr/updater.json +++ b/sonarr/updater.json @@ -1,9 +1,9 @@ { "github_beta": true, - "last_update": "06-07-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "sonarr", "source": "github", "upstream_repo": "linuxserver/docker-sonarr", - "upstream_version": "4.0.6.1805" + "upstream_version": "4.0.8.2008" } diff --git a/sponsorblockcast/stats.png b/sponsorblockcast/stats.png index b824beb66..498250b68 100644 Binary files a/sponsorblockcast/stats.png and b/sponsorblockcast/stats.png differ diff --git a/spotweb/stats.png b/spotweb/stats.png index 0a0d2b9c1..b6e041d5e 100644 Binary files a/spotweb/stats.png and b/spotweb/stats.png differ diff --git a/tandoor_recipes/CHANGELOG.md b/tandoor_recipes/CHANGELOG.md index 6dbc75da5..6103fa1af 100644 --- a/tandoor_recipes/CHANGELOG.md +++ b/tandoor_recipes/CHANGELOG.md @@ -1,4 +1,7 @@ +## 1.5.18 (13-07-2024) +- Update to latest version from TandoorRecipes/recipes (changelog : https://github.com/TandoorRecipes/recipes/releases) + ## 1.5.17 (13-04-2024) - Update to latest version from TandoorRecipes/recipes (changelog : https://github.com/TandoorRecipes/recipes/releases) diff --git a/tandoor_recipes/config.json b/tandoor_recipes/config.json index d4fbea4f8..05a5b303d 100644 --- a/tandoor_recipes/config.json +++ b/tandoor_recipes/config.json @@ -113,6 +113,6 @@ "slug": "tandoor_recipes", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "1.5.17", + "version": "1.5.18", "webui": "http://[HOST]:[PORT:8080]" } diff --git a/tandoor_recipes/stats.png b/tandoor_recipes/stats.png index 305d5436d..cbe39ad39 100644 Binary files a/tandoor_recipes/stats.png and b/tandoor_recipes/stats.png differ diff --git a/tandoor_recipes/updater.json b/tandoor_recipes/updater.json index 7f27dcd23..703f15122 100644 --- a/tandoor_recipes/updater.json +++ b/tandoor_recipes/updater.json @@ -1,8 +1,8 @@ { - "last_update": "13-04-2024", + "last_update": "13-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "tandoor_recipes", "source": "github", "upstream_repo": "TandoorRecipes/recipes", - "upstream_version": "1.5.17" + "upstream_version": "1.5.18" } diff --git a/tdarr/stats.png b/tdarr/stats.png index bd6fa247e..3fced75b1 100644 Binary files a/tdarr/stats.png and b/tdarr/stats.png differ diff --git a/teamspeak/stats.png b/teamspeak/stats.png index 063998963..23079dc77 100644 Binary files a/teamspeak/stats.png and b/teamspeak/stats.png differ diff --git a/transmission/README.md b/transmission/README.md index 557fbe09b..3763ae1a7 100644 --- a/transmission/README.md +++ b/transmission/README.md @@ -1,3 +1,4 @@ +## ⚠ Open Issue : [🐛 [Transmission NAS] Cannot move downloaded data to other CIFS directory (opened 2024-07-25)](https://github.com/alexbelgium/hassio-addons/issues/1494) by [@hiagocosta](https://github.com/hiagocosta) # Home assistant add-on: Transmission diff --git a/transmission/stats.png b/transmission/stats.png index 1eff7154c..4efd48c3b 100644 Binary files a/transmission/stats.png and b/transmission/stats.png differ diff --git a/transmission_openvpn/stats.png b/transmission_openvpn/stats.png index 28a465dc3..2ce6133c6 100644 Binary files a/transmission_openvpn/stats.png and b/transmission_openvpn/stats.png differ diff --git a/ubooquity/stats.png b/ubooquity/stats.png index ae805b194..fd521bcfb 100644 Binary files a/ubooquity/stats.png and b/ubooquity/stats.png differ diff --git a/unpackerr/CHANGELOG.md b/unpackerr/CHANGELOG.md index 
e296c9da5..c2602b10f 100644 --- a/unpackerr/CHANGELOG.md +++ b/unpackerr/CHANGELOG.md @@ -1,3 +1,9 @@ + +## 0.14.5 (03-08-2024) +- Update to latest version from Unpackerr/unpackerr (changelog : https://github.com/Unpackerr/unpackerr/releases) + +## 0.14.0 (13-07-2024) +- Update to latest version from Unpackerr/unpackerr (changelog : https://github.com/Unpackerr/unpackerr/releases) ## 0.13.1-8 (13-03-2024) - Minor bugs fixed ## 0.13.1-7 (13-03-2024) diff --git a/unpackerr/config.json b/unpackerr/config.json index 68f5a823a..bdbe29601 100644 --- a/unpackerr/config.json +++ b/unpackerr/config.json @@ -101,5 +101,5 @@ "slug": "unpackerr", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "0.13.1-8" + "version": "0.14.5" } diff --git a/unpackerr/stats.png b/unpackerr/stats.png index f9454a0b4..fe51d8b7c 100644 Binary files a/unpackerr/stats.png and b/unpackerr/stats.png differ diff --git a/unpackerr/updater.json b/unpackerr/updater.json index 8eca2ac34..79a539352 100644 --- a/unpackerr/updater.json +++ b/unpackerr/updater.json @@ -1,10 +1,10 @@ { "dockerhub_by_date": true, "fulltag": true, - "last_update": "27-01-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "unpackerr", "source": "github", "upstream_repo": "Unpackerr/unpackerr", - "upstream_version": "0.13.1" + "upstream_version": "0.14.5" } diff --git a/webtop_kde/stats.png b/webtop_kde/stats.png index 799ea371a..9b794b298 100644 Binary files a/webtop_kde/stats.png and b/webtop_kde/stats.png differ diff --git a/webtrees/stats.png b/webtrees/stats.png index 06d739648..7ac1c3bf3 100644 Binary files a/webtrees/stats.png and b/webtrees/stats.png differ diff --git a/wger/stats.png b/wger/stats.png index 4115e5362..a4870cf78 100644 Binary files a/wger/stats.png and b/wger/stats.png differ diff --git a/whatsapper/CHANGELOG.md b/whatsapper/CHANGELOG.md index 33824573d..8e496ad76 100644 --- a/whatsapper/CHANGELOG.md +++ b/whatsapper/CHANGELOG.md @@ -1,3 +1,13 @@ + +## 1.0.2 (04-08-2024) +- Update to latest version from baldarn/whatsapper (changelog : https://github.com/baldarn/whatsapper/releases) + +## 1.0.1-2 (05-08-2024) +- Minor bugs fixed + +## 1.0.1 (25-07-2024) +- Update to latest version from baldarn/whatsapper (changelog : https://github.com/baldarn/whatsapper/releases) + ## 1.0.0 (11-07-2024) - Update to version 1.0.0 of baldarn/whatsapper (changelog : https://github.com/baldarn/whatsapper/releases) diff --git a/whatsapper/README.md b/whatsapper/README.md index 91effe66f..0e1d15258 100644 --- a/whatsapper/README.md +++ b/whatsapper/README.md @@ -1,3 +1,4 @@ +## ⚠ Open Issue : [🐛 [Whatsapper] Normal Addon PowerOff trigger Watchdog (opened 2024-08-03)](https://github.com/alexbelgium/hassio-addons/issues/1503) by [@SiriosDev](https://github.com/SiriosDev) # Home assistant add-on: Whatsapper [![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) diff --git a/whatsapper/build.json b/whatsapper/build.json index f59371884..de54c76ec 100644 --- a/whatsapper/build.json +++ b/whatsapper/build.json @@ -1,7 +1,7 @@ { "build_from": { - "aarch64": "baldarn/whatsapper:1.0.0", - "amd64": "baldarn/whatsapper:1.0.0" + "aarch64": "baldarn/whatsapper:1.0.2", + "amd64": "baldarn/whatsapper:1.0.2" }, "codenotary": { "signer": "alexandrep.github@gmail.com" diff --git a/whatsapper/config.json b/whatsapper/config.json index ab62ece47..4f6911610 100644 --- a/whatsapper/config.json +++ b/whatsapper/config.json @@ -6,7 +6,6 @@ "codenotary": "alexandrep.github@gmail.com", 
"description": "Whatsapper for Home Assistant", "image": "ghcr.io/alexbelgium/whatsapper-{arch}", - "init": false, "map": [ "addon_config:rw", "share:rw" @@ -21,5 +20,5 @@ "slug": "whatsapper", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/whatsapper", - "version": "1.0.0" + "version": "1.0.2" } diff --git a/whatsapper/stats.png b/whatsapper/stats.png index 2701e6450..d9566ebd9 100644 Binary files a/whatsapper/stats.png and b/whatsapper/stats.png differ diff --git a/whatsapper/updater.json b/whatsapper/updater.json index bd4a19b82..8c79624b9 100644 --- a/whatsapper/updater.json +++ b/whatsapper/updater.json @@ -1,8 +1,9 @@ { - "last_update": "11-07-2024", + "github_exclude": "2024", + "last_update": "04-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "whatsapper", "source": "github", "upstream_repo": "baldarn/whatsapper", - "upstream_version": "1.0.0" + "upstream_version": "1.0.2" } diff --git a/whoogle/stats.png b/whoogle/stats.png index 7b05100a2..6ccbdc22a 100644 Binary files a/whoogle/stats.png and b/whoogle/stats.png differ diff --git a/xteve/stats.png b/xteve/stats.png index d9bb9da19..b0232eb76 100644 Binary files a/xteve/stats.png and b/xteve/stats.png differ diff --git a/zoneminder/stats.png b/zoneminder/stats.png index 20f3bf12c..34baf6f18 100644 Binary files a/zoneminder/stats.png and b/zoneminder/stats.png differ diff --git a/zzz_archived_bitwarden/CHANGELOG.md b/zzz_archived_bitwarden/CHANGELOG.md index 6369d0d3f..3042119ca 100644 --- a/zzz_archived_bitwarden/CHANGELOG.md +++ b/zzz_archived_bitwarden/CHANGELOG.md @@ -1,4 +1,7 @@ +## 1.31.0 (13-07-2024) +- Update to latest version from dani-garcia/bitwarden_rs (changelog : https://github.com/dani-garcia/bitwarden_rs/releases) + ## 1.30.5 (04-05-2024) - Update to latest version from dani-garcia/bitwarden_rs (changelog : https://github.com/dani-garcia/bitwarden_rs/releases) - Deprecated, please use version from community repository. Just export your vault from this addon, and import it in the official addon. 
diff --git a/zzz_archived_bitwarden/Dockerfile b/zzz_archived_bitwarden/Dockerfile index 882f09b6d..01e081594 100644 --- a/zzz_archived_bitwarden/Dockerfile +++ b/zzz_archived_bitwarden/Dockerfile @@ -16,7 +16,7 @@ ARG BUILD_FROM=ghcr.io/hassio-addons/debian-base/amd64:7.1.0 ARG BUILD_VERSION -ARG BUILD_UPSTREAM="1.30.5" +ARG BUILD_UPSTREAM="1.31.0" FROM "vaultwarden/server:${BUILD_UPSTREAM}" as vaultwarden FROM ${BUILD_FROM} SHELL ["/bin/bash", "-o", "pipefail", "-c"] diff --git a/zzz_archived_bitwarden/config.json b/zzz_archived_bitwarden/config.json index 8ad811371..8ece96eb1 100644 --- a/zzz_archived_bitwarden/config.json +++ b/zzz_archived_bitwarden/config.json @@ -34,6 +34,6 @@ "stage": "deprecated", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/bitwarden", - "version": "1.30.5", + "version": "1.31.0", "webui": "[PROTO:ssl]://[HOST]:[PORT:7277]" } diff --git a/zzz_archived_bitwarden/stats.png b/zzz_archived_bitwarden/stats.png index d46cea660..a4ae492eb 100644 Binary files a/zzz_archived_bitwarden/stats.png and b/zzz_archived_bitwarden/stats.png differ diff --git a/zzz_archived_bitwarden/updater.json b/zzz_archived_bitwarden/updater.json index 74a57a178..3c21c48c6 100644 --- a/zzz_archived_bitwarden/updater.json +++ b/zzz_archived_bitwarden/updater.json @@ -1,9 +1,9 @@ { - "last_update": "04-05-2024", + "last_update": "13-07-2024", "paused": false, "repository": "alexbelgium/hassio-addons", "slug": "bitwarden", "source": "github", "upstream_repo": "dani-garcia/bitwarden_rs", - "upstream_version": "1.30.5" + "upstream_version": "1.31.0" } diff --git a/zzz_archived_code-server/CHANGELOG.md b/zzz_archived_code-server/CHANGELOG.md index e865abcae..28fef1145 100644 --- a/zzz_archived_code-server/CHANGELOG.md +++ b/zzz_archived_code-server/CHANGELOG.md @@ -1,4 +1,10 @@ +## 4.91.1 (20-07-2024) +- Update to latest version from linuxserver/docker-code-server (changelog : https://github.com/linuxserver/docker-code-server/releases) + +## 4.91.0 (13-07-2024) +- Update to latest version from linuxserver/docker-code-server (changelog : https://github.com/linuxserver/docker-code-server/releases) + ## 4.90.3 (22-06-2024) - Update to latest version from linuxserver/docker-code-server (changelog : https://github.com/linuxserver/docker-code-server/releases) diff --git a/zzz_archived_code-server/config.json b/zzz_archived_code-server/config.json index 93f45e53e..40e10b27d 100644 --- a/zzz_archived_code-server/config.json +++ b/zzz_archived_code-server/config.json @@ -102,6 +102,6 @@ "slug": "code-server", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons/tree/master/coder-server", - "version": "4.90.3", + "version": "4.91.1", "webui": "[PROTO:ssl]://[HOST]:[PORT:8443]" } diff --git a/zzz_archived_code-server/stats.png b/zzz_archived_code-server/stats.png index a2f63f8fe..48b42cb9e 100644 Binary files a/zzz_archived_code-server/stats.png and b/zzz_archived_code-server/stats.png differ diff --git a/zzz_archived_code-server/updater.json b/zzz_archived_code-server/updater.json index 619d72f96..29d4139ce 100644 --- a/zzz_archived_code-server/updater.json +++ b/zzz_archived_code-server/updater.json @@ -1,8 +1,8 @@ { - "last_update": "22-06-2024", + "last_update": "20-07-2024", "repository": "alexbelgium/hassio-addons", "slug": "code-server", "source": "github", "upstream_repo": "linuxserver/docker-code-server", - "upstream_version": "4.90.3" + "upstream_version": "4.91.1" } diff --git a/zzz_archived_paperless_ngx/CHANGELOG.md 
b/zzz_archived_paperless_ngx/CHANGELOG.md index cfe66922b..da1589ce9 100644 --- a/zzz_archived_paperless_ngx/CHANGELOG.md +++ b/zzz_archived_paperless_ngx/CHANGELOG.md @@ -1,4 +1,13 @@ +## 2.11.2 (03-08-2024) +- Update to latest version from paperless-ngx/paperless-ngx (changelog : https://github.com/paperless-ngx/paperless-ngx/releases) + +## 2.11.1 (27-07-2024) +- Update to latest version from paperless-ngx/paperless-ngx (changelog : https://github.com/paperless-ngx/paperless-ngx/releases) + +## 2.11.0 (13-07-2024) +- Update to latest version from paperless-ngx/paperless-ngx (changelog : https://github.com/paperless-ngx/paperless-ngx/releases) + ## 2.10.2 (29-06-2024) - Update to latest version from paperless-ngx/paperless-ngx (changelog : https://github.com/paperless-ngx/paperless-ngx/releases) diff --git a/zzz_archived_paperless_ngx/config.json b/zzz_archived_paperless_ngx/config.json index c4d1c5abe..06806f2f4 100644 --- a/zzz_archived_paperless_ngx/config.json +++ b/zzz_archived_paperless_ngx/config.json @@ -134,6 +134,6 @@ "stage": "deprecated", "udev": true, "url": "https://github.com/alexbelgium/hassio-addons", - "version": "2.10.2", + "version": "2.11.2", "webui": "http://[HOST]:[PORT:8000]" } diff --git a/zzz_archived_paperless_ngx/stats.png b/zzz_archived_paperless_ngx/stats.png index a628803ee..53ebacb67 100644 Binary files a/zzz_archived_paperless_ngx/stats.png and b/zzz_archived_paperless_ngx/stats.png differ diff --git a/zzz_archived_paperless_ngx/updater.json b/zzz_archived_paperless_ngx/updater.json index 129b17d45..15e03cae6 100644 --- a/zzz_archived_paperless_ngx/updater.json +++ b/zzz_archived_paperless_ngx/updater.json @@ -1,10 +1,10 @@ { "github_beta": "false", "github_fulltag": "false", - "last_update": "29-06-2024", + "last_update": "03-08-2024", "repository": "alexbelgium/hassio-addons", "slug": "paperless_ng", "source": "github", "upstream_repo": "paperless-ngx/paperless-ngx", - "upstream_version": "2.10.2" + "upstream_version": "2.11.2" } diff --git a/zzz_archived_plex_meta_manager/stats.png b/zzz_archived_plex_meta_manager/stats.png index 6104ccc98..f44b0701d 100644 Binary files a/zzz_archived_plex_meta_manager/stats.png and b/zzz_archived_plex_meta_manager/stats.png differ