update repository references and improve script handling

2025-03-19 20:36:36 +01:00
parent 51b0252b0e
commit 1d90749486
160 changed files with 14361 additions and 18 deletions

9
arpspoof/CHANGELOG.md Normal file

@@ -0,0 +1,9 @@
- Implemented healthcheck
- WARNING: update to Supervisor 2022.11 before installing
- Add codenotary signing
- New standardized logic for Dockerfile build and package installation
## 1.0.0 (07-12-2021)
- Update to latest version from t0mer/Arpspoof-Docker
- Initial release

131
arpspoof/Dockerfile Normal file

@@ -0,0 +1,131 @@
#============================#
# ALEXBELGIUM'S DOCKERFILE #
#============================#
# _.------.
# _.-` ('>.-`"""-.
# '.--'` _'` _ .--.)
# -' '-.-';` `
# ' - _.' ``'--.
# '---` .-'""`
# /`
#=== Home Assistant Addon ===#
#################
# 1 Build Image #
#################
ARG BUILD_FROM
ARG BUILD_VERSION
ARG BUILD_UPSTREAM="1.0.0"
FROM ${BUILD_FROM}
##################
# 2 Modify Image #
##################
# Set S6 wait time
ENV S6_CMD_WAIT_FOR_SERVICES=1 \
S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 \
S6_SERVICES_GRACETIME=0
##################
# 3 Install apps #
##################
# Add rootfs
COPY rootfs/ /
# Uses /bin for compatibility purposes
# hadolint ignore=DL4005
RUN if [ ! -f /bin/sh ] && [ -f /usr/bin/sh ]; then ln -s /usr/bin/sh /bin/sh; fi && \
if [ ! -f /bin/bash ] && [ -f /usr/bin/bash ]; then ln -s /usr/bin/bash /bin/bash; fi
# Modules
ARG MODULES="00-banner.sh 01-custom_script.sh 00-global_var.sh"
# Automatic modules download
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_automodules.sh" "/ha_automodules.sh"
RUN chmod 744 /ha_automodules.sh && /ha_automodules.sh "$MODULES" && rm /ha_automodules.sh
# Manual apps
ENV PACKAGES="jq curl iproute2"
# Automatic apps & bashio
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh"
RUN chmod 744 /ha_autoapps.sh && /ha_autoapps.sh "$PACKAGES" && rm /ha_autoapps.sh
################
# 4 Entrypoint #
################
# Add entrypoint
ENV S6_STAGE2_HOOK=/ha_entrypoint.sh
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh"
# Entrypoint modifications
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh"
RUN chmod 777 /ha_entrypoint.sh /ha_entrypoint_modif.sh && /ha_entrypoint_modif.sh && rm /ha_entrypoint_modif.sh
ENTRYPOINT [ "/usr/bin/env" ]
CMD [ "/ha_entrypoint.sh" ]
############
# 5 Labels #
############
ARG BUILD_ARCH
ARG BUILD_DATE
ARG BUILD_DESCRIPTION
ARG BUILD_NAME
ARG BUILD_REF
ARG BUILD_REPOSITORY
ARG BUILD_VERSION
ENV BUILD_VERSION="${BUILD_VERSION}"
LABEL \
io.hass.name="${BUILD_NAME}" \
io.hass.description="${BUILD_DESCRIPTION}" \
io.hass.arch="${BUILD_ARCH}" \
io.hass.type="addon" \
io.hass.version="${BUILD_VERSION}" \
maintainer="alexbelgium (https://github.com/alexbelgium)" \
org.opencontainers.image.title="${BUILD_NAME}" \
org.opencontainers.image.description="${BUILD_DESCRIPTION}" \
org.opencontainers.image.vendor="Home Assistant Add-ons" \
org.opencontainers.image.authors="alexbelgium (https://github.com/alexbelgium)" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.url="https://github.com/alexbelgium" \
org.opencontainers.image.source="https://github.com/${BUILD_REPOSITORY}" \
org.opencontainers.image.documentation="https://github.com/${BUILD_REPOSITORY}/blob/main/README.md" \
org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.revision="${BUILD_REF}" \
org.opencontainers.image.version="${BUILD_VERSION}"
#################
# 6 Healthcheck #
#################
# Avoid spamming logs
# hadolint ignore=SC2016
RUN \
# Handle Apache configuration
if [ -d /etc/apache2/sites-available ]; then \
for file in /etc/apache2/sites-*/*.conf; do \
sed -i '/<VirtualHost/a \ \n # Match requests with the custom User-Agent "HealthCheck" \n SetEnvIf User-Agent "HealthCheck" dontlog \n # Exclude matching requests from access logs \n CustomLog ${APACHE_LOG_DIR}/access.log combined env=!dontlog' "$file"; \
done; \
fi && \
\
# Handle Nginx configuration
if [ -f /etc/nginx/nginx.conf ]; then \
awk '/http \{/{print; print "map $http_user_agent $loggable {\n default 1;\n \"~*HealthCheck\" 0;\n}\naccess_log /var/log/nginx/access.log combined if=$loggable;"; next}1' /etc/nginx/nginx.conf > /etc/nginx/nginx.conf.new && \
mv /etc/nginx/nginx.conf.new /etc/nginx/nginx.conf; \
fi
ENV HEALTH_PORT="7022" \
HEALTH_URL=""
HEALTHCHECK \
--interval=5s \
--retries=5 \
--start-period=30s \
--timeout=25s \
CMD curl -A "HealthCheck: Docker/1.0" -s -f "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" > /dev/null 2>&1 || exit 1
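A minimal sketch for sanity-checking the log exclusion above, run from a shell inside a running container that serves HTTP through Apache or Nginx; the port, URL and log paths are the defaults assumed by the Dockerfile, not something this add-on guarantees:

```bash
# Send one health-check request and one normal request (sketch; defaults from the Dockerfile above)
curl -s -A "HealthCheck: Docker/1.0" "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" > /dev/null
curl -s -A "Mozilla/5.0" "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" > /dev/null
# Only the second request should appear in the access log
tail -n 5 /var/log/nginx/access.log 2>/dev/null || tail -n 5 /var/log/apache2/access.log
```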

76
arpspoof/README.md Normal file

@@ -0,0 +1,76 @@
# Home Assistant add-on: Arpspoof
[![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium)
[![Donate][paypal-badge]](https://www.paypal.com/donate/?hosted_button_id=DZFULJZTP3UQA)
[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white
[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white
![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json)
![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json)
![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade)
[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml)
[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml)
_Thanks to everyone who has starred my repo! To star it, click on the image below; it will then appear at the top right. Thanks!_
[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers)
![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/arpspoof/stats.png)
## About
[arpspoof](https://github.com/t0mer/Arpspoof-Docker) adds the ability to block the internet connection of local network devices.
This add-on is based on the Docker image https://hub.docker.com/r/techblog/arpspoof-docker
See all information here: https://en.techblog.co.il/2021/03/15/home-assistant-cut-internet-connection-using-arpspoof/ , or in the upstream image documentation: https://github.com/t0mer/Arpspoof-Docker
## Installation
The installation of this add-on is pretty straightforward and no different from installing any other add-on.
1. Add my add-ons repository to your Home Assistant instance (in the Supervisor add-on store, top right, or click the button below if you have configured My Home Assistant)
[![Open your Home Assistant instance and show the add add-on repository dialog with a specific repository URL pre-filled.](https://my.home-assistant.io/badges/supervisor_add_addon_repository.svg)](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
1. Install this add-on.
1. Click the `Save` button to store your configuration.
1. Set the add-on options to your preferences
1. Start the add-on.
1. Check the logs of the add-on to see if everything went well.
1. Open the web UI and adapt the software options
## Configuration
The web UI can be found at <http://homeassistant:PORT>.
```yaml
ROUTER_IP: 127.0.0.1 # Required: router IP
INTERFACE_NAME: name # Interface name; autodetected if left empty
```
## Home-Assistant configuration
Description : [techblog](https://en.techblog.co.il/2021/03/15/home-assistant-cut-internet-connection-using-arpspoof/)
You can use a `command_line` switch to temporarily disable internet access for a device on your network.
```yaml
- platform: command_line
switches:
iphone_internet:
friendly_name: "iPhone internet"
command_off: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/disconnect?ip={iPhoneIP}"
command_on: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/reconnect?ip={iPhoneIP}"
command_state: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/status?ip={iPhoneIP}"
value_template: >
{{ value != "1" }}
```
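If you want to check the endpoints manually before wiring up the switch, a quick test from any machine on your network could look like the sketch below; `{HA-IP}` and `{iPhoneIP}` are the same placeholders as above and must be replaced with real addresses.

```bash
# Sketch using the endpoints documented above; replace the placeholders with real IPs
curl -f "http://{HA-IP}:7022/disconnect?ip={iPhoneIP}"   # cut the device's internet access
curl -f "http://{HA-IP}:7022/status?ip={iPhoneIP}"       # query state (consumed by the value_template above)
curl -f "http://{HA-IP}:7022/reconnect?ip={iPhoneIP}"    # restore internet access
```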
## Support
Create an issue on GitHub.
## Illustration
No illustration

10
arpspoof/build.json Normal file

@@ -0,0 +1,10 @@
{
"build_from": {
"aarch64": "techblog/arpspoof-docker:1.0.0",
"amd64": "techblog/arpspoof-docker:1.0.0",
"armv7": "techblog/arpspoof-docker:1.0.0"
},
"codenotary": {
"signer": "alexandrep.github@gmail.com"
}
}

BIN
arpspoof/icon.png Normal file

Binary file not shown (new image, 28 KiB).

BIN
arpspoof/logo.png Normal file

Binary file not shown (new image, 28 KiB).


@@ -0,0 +1,16 @@
#!/usr/bin/env bashio
# shellcheck shell=bash
set -e
# Avoid unbound variables
set +u
# Autodefine if not defined
if [ -z "$INTERFACE_NAME" ]; then
    # shellcheck disable=SC2155
    export INTERFACE_NAME="$(ip route get 8.8.8.8 | sed -nr 's/.*dev ([^\ ]+).*/\1/p')"
    bashio::log.blue "Autodetection: INTERFACE_NAME=$INTERFACE_NAME"
fi
bashio::log.info "Starting..."
/usr/bin/python3 /opt/arpspoof/arpspoof.py
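For reference, the autodetection line above simply prints the interface carrying the route to 8.8.8.8; run on its own it behaves like this (the output shown is illustrative only):

```bash
# Print the interface used to reach 8.8.8.8 (same command as in the script above)
ip route get 8.8.8.8 | sed -nr 's/.*dev ([^\ ]+).*/\1/p'
# example output (illustrative): eth0
```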

BIN
arpspoof/stats.png Normal file

Binary file not shown (new image, 2.1 KiB).

8
arpspoof/updater.json Normal file

@@ -0,0 +1,8 @@
{
"last_update": "07-12-2021",
"repository": "alexbelgium/hassio-addons",
"slug": "arpspoof",
"source": "github",
"upstream_repo": "t0mer/Arpspoof-Docker",
"upstream_version": "1.0.0"
}

1
beta/CHANGELOG.md Executable file

@@ -0,0 +1 @@
Please reference the [beta commits](https://github.com/jakowenko/double-take/commits/beta) for changes.

1
beta/Dockerfile Normal file

@@ -0,0 +1 @@
FROM jakowenko/double-take:beta

11
beta/README.md Executable file

@@ -0,0 +1,11 @@
[![Double Take](https://badgen.net/github/release/jakowenko/double-take/stable)](https://github.com/jakowenko/double-take) [![Double Take](https://badgen.net/github/stars/jakowenko/double-take)](https://github.com/jakowenko/double-take/stargazers) [![Docker Pulls](https://flat.badgen.net/docker/pulls/jakowenko/double-take)](https://hub.docker.com/r/jakowenko/double-take) [![Discord](https://flat.badgen.net/discord/members/3pumsskdN5?label=Discord)](https://discord.gg/3pumsskdN5)
![amd64][amd64-shield]
# Double Take
Unified UI and API for processing and training images for facial recognition.
[Documentation](https://github.com/jakowenko/double-take/tree/beta#readme)
[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg

35
beta/config.json Executable file

@@ -0,0 +1,35 @@
{
"name": "Double Take (beta)",
"version": "1.13.1",
"url": "https://github.com/jakowenko/double-take",
"panel_icon": "mdi:face-recognition",
"slug": "double-take-beta",
"description": "Unified UI and API for processing and training images for facial recognition",
"arch": ["amd64"],
"startup": "application",
"boot": "auto",
"ingress": true,
"ingress_port": 3000,
"ports": {
"3000/tcp": 3000
},
"ports_description": {
"3000/tcp": "Web interface (not required for Home Assistant ingress)"
},
"map": ["media:rw", "config:rw"],
"environment": {
"HA_ADDON": "true"
},
"options": {
"STORAGE_PATH": "/config/double-take",
"CONFIG_PATH": "/config/double-take",
"SECRETS_PATH": "/config",
"MEDIA_PATH": "/media/double-take"
},
"schema": {
"STORAGE_PATH": "str",
"CONFIG_PATH": "str",
"SECRETS_PATH": "str",
"MEDIA_PATH": "str"
}
}

BIN
beta/icon.png Executable file

Binary file not shown (new image, 6.3 KiB).


@@ -0,0 +1,132 @@
## 0.49.4 (15-03-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.49.3 (01-03-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.49.2 (21-02-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.49.1 (15-02-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.49.0 (25-01-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.48.6 (11-01-2025)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.48.5 (28-12-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.48.4 (21-12-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.48.1 (07-12-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.47.6 (09-11-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.47.5 (02-11-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.47.3 (12-10-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.46.4 (07-09-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.46.3 (24-08-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.46.2 (03-08-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.46.1-2 (23-07-2024)
- Minor bugs fixed
## 0.46.1 (20-07-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.26 (13-07-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.25-2 (08-07-2024)
- Minor bugs fixed
## 0.45.25 (06-07-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.24 (22-06-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.23 (25-05-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.22-2 (21-05-2024)
- Minor bugs fixed
## 0.45.22 (04-05-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.21 (27-04-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.20 (20-04-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.17 (06-04-2024)
- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases)
## 0.45.16 (09-03-2024)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.14 (10-02-2024)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.13 (20-01-2024)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.12 (06-01-2024)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.9 (23-12-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.8.1 (02-12-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.7.3-2 (21-11-2023)
- Minor bugs fixed
## 0.45.7.3 (18-11-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.7 (11-11-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.5 (04-11-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.3-2 (01-11-2023)
- Minor bugs fixed
## 0.45.3 (07-10-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.2 (23-09-2023)
- Update to latest version from linuxserver/docker-changedetection.io
## 0.45.1 (10-09-2023)
- Initial build


@@ -0,0 +1,85 @@
# Home Assistant add-on: changedetection.io
[![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium)
[![Donate][paypal-badge]](https://www.paypal.com/donate/?hosted_button_id=DZFULJZTP3UQA)
![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json)
![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json)
![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade)
[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml)
[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml)
[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white
[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white
_Thanks to everyone who has starred my repo! To star it, click on the image below; it will then appear at the top right. Thanks!_
[![Stargazers repo roster for @alexbelgium/hassio-addons](https://reporoster.com/stars/alexbelgium/hassio-addons)](https://github.com/alexbelgium/hassio-addons/stargazers)
![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/changedetection.io/stats.png)
## About
[Changedetection.io](https://github.com/dgtlmoon/changedetection.io) provides free, open-source web page monitoring, notification and change detection.
This addon is based on the [docker image](https://github.com/linuxserver/docker-changedetection.io) from linuxserver.io.
## Configuration
### Main app
Web UI can be found at `<your-ip>:5000`, also accessible from the add-on page.
#### Sidebar shortcut
You can add a shortcut pointing to your Changedetection.io instance with the following steps:
1. Go to <kbd>⚙ Settings</kbd> > <kbd>Dashboards</kbd>
2. Click <kbd>Add Dashboard</kbd> in the bottom corner
3. Select the <kbd>Webpage</kbd> option, and paste the Web UI URL you got from the add-on page.
4. Fill in the title for the sidebar item, an icon (suggestion: `mdi:vector-difference`), and a **relative URL** for that panel (e.g. `change-detection`). Lastly, confirm it.
### Configurable options
```yaml
PUID: user   # user id to run as
PGID: user   # group id to run as
TZ: Etc/UTC  # specify a timezone to use, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
BASE_URL:    # specify the full URL (including protocol) when running behind a reverse proxy
```
### Connect to browserless Chrome (from @RhysMcW)
In HA, use the File Editor add-on (or Filebrowser) and edit the Changedetection.io config file at `/homeassistant/addons_config/changedetection.io/config.yaml`.
Add the following line to the end of it:
```yaml
PLAYWRIGHT_DRIVER_URL: ws://2937404c-browserless-chrome:3000/chromium?launch={"defaultViewport":{"height":720,"width":1280},"headless":false,"stealth":true}&blockAds=true
```
Remember to also leave a blank line at the end of the file.
The `2937404c-browserless-chrome` hostname is displayed in the UI, on the Browserless Chromium addon page:
![image](https://github.com/user-attachments/assets/a63514f6-027a-4361-a33f-0d8f87461279)
You can also fetch it:
* By using SSH and running `docker exec -i hassio_dns cat "/config/hosts"`
* From the CLI in HA, using arp
* You should also be able to use your HA IP address.
Then restart the Changedetection.io add-on - after that you can use the browser options in Changedetection.io.
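A small sketch of the SSH route mentioned above, assuming you have host-level access with Docker available; the `grep` filter is only a convenience added here:

```bash
# List the add-on hostnames known to the Supervisor DNS and keep the browserless one
docker exec -i hassio_dns cat "/config/hosts" | grep -i browserless
```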
## Installation
The installation of this add-on is pretty straightforward and no different from
installing any other Hass.io add-on.
1. [Add my Hass.io add-ons repository][repository] to your Hass.io instance.
1. Install this add-on.
1. Click the `Save` button to store your configuration.
1. Start the add-on.
1. Check the logs of the add-on to see if everything went well.
1. Carefully configure the add-on to your preferences, see the official documentation for that.
[repository]: https://github.com/alexbelgium/hassio-addons


@@ -0,0 +1,67 @@
#include <tunables/global>
profile addon_db21ed7f_changedetection.io_nas flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
capability,
file,
signal,
mount,
umount,
remount,
network udp,
network tcp,
network dgram,
network stream,
network inet,
network inet6,
network netlink raw,
network unix dgram,
capability setgid,
capability setuid,
capability sys_admin,
capability dac_read_search,
# capability dac_override,
# capability sys_rawio,
# S6-Overlay
/init ix,
/run/{s6,s6-rc*,service}/** ix,
/package/** ix,
/command/** ix,
/run/{,**} rwk,
/dev/tty rw,
/bin/** ix,
/usr/bin/** ix,
/usr/lib/bashio/** ix,
/etc/s6/** rix,
/run/s6/** rix,
/etc/services.d/** rwix,
/etc/cont-init.d/** rwix,
/etc/cont-finish.d/** rwix,
/init rix,
/var/run/** mrwkl,
/var/run/ mrwkl,
/dev/i2c-1 mrwkl,
# Files required
/dev/fuse mrwkl,
/dev/sda1 mrwkl,
/dev/sdb1 mrwkl,
/dev/nvme0 mrwkl,
/dev/nvme1 mrwkl,
/dev/mmcblk0p1 mrwkl,
/dev/ttyUSB0 mrwkl,
/dev/* mrwkl,
/tmp/** mrkwl,
# Data access
/data/** rw,
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read) peer=docker-default,
# docker daemon confinement requires explicit allow rule for signal
signal (receive) set=(kill,term) peer=/usr/bin/docker,
}


@@ -0,0 +1,9 @@
{
"build_from": {
"aarch64": "lscr.io/linuxserver/changedetection.io:arm64v8-latest",
"amd64": "lscr.io/linuxserver/changedetection.io:amd64-latest"
},
"codenotary": {
"signer": "alexandrep.github@gmail.com"
}
}


@@ -0,0 +1,41 @@
{
"arch": [
"aarch64",
"amd64"
],
"codenotary": "alexandrep.github@gmail.com",
"description": "web page monitoring, notification and change detection",
"environment": {
"LC_ALL": "en_US.UTF-8",
"TIMEOUT": "60000"
},
"image": "ghcr.io/alexbelgium/changedetection.io-{arch}",
"init": false,
"map": [
"config:rw"
],
"name": "Changedetection.io",
"options": {
"PGID": 0,
"PUID": 0,
"TIMEOUT": "60000"
},
"ports": {
"5000/tcp": 5000
},
"ports_description": {
"5000/tcp": "Webui"
},
"schema": {
"BASE_URL": "str?",
"PGID": "int",
"PUID": "int",
"TIMEOUT": "int",
"TZ": "str?"
},
"slug": "changedetection.io",
"udev": true,
"url": "https://github.com/alexbelgium/hassio-addons/tree/master/changedetection.io",
"version": "0.49.4",
"webui": "http://[HOST]:[PORT:5000]"
}

BIN
changedetection.io/icon.png Normal file

Binary file not shown (new image, 24 KiB).

BIN
changedetection.io/logo.png Normal file

Binary file not shown (new image, 24 KiB).


@@ -0,0 +1,15 @@
#!/usr/bin/with-contenv bashio
# shellcheck shell=bash
set -e
# Define user
PUID=$(bashio::config "PUID")
PGID=$(bashio::config "PGID")
# Check data location
LOCATION="/config/addons_config/changedetection.io"
# Check structure
mkdir -p "$LOCATION"
chown -R "$PUID":"$PGID" "$LOCATION"
chmod -R 755 "$LOCATION"

Binary file not shown (new image, 2.3 KiB).


@@ -0,0 +1,9 @@
{
"github_fulltag": "false",
"last_update": "15-03-2025",
"repository": "alexbelgium/hassio-addons",
"slug": "changedetection.io",
"source": "github",
"upstream_repo": "linuxserver/docker-changedetection.io",
"upstream_version": "0.49.4"
}

1
compreface/CHANGELOG.md Executable file

@@ -0,0 +1 @@
Please reference the [release notes](https://github.com/exadel-inc/CompreFace/releases) for changes.

6
compreface/Dockerfile Executable file

@@ -0,0 +1,6 @@
FROM exadel/compreface:1.1.0
ENV PGDATA=/data/database
RUN apt-get update && apt-get install jq -y && rm -rf /var/lib/apt/lists/*
COPY postgresql.conf /etc/postgresql/13/main/postgresql.conf
COPY run.sh /
CMD ["/run.sh"]

7
compreface/README.md Executable file

@@ -0,0 +1,7 @@
# Exadel CompreFace
This add-on runs the [single container](https://github.com/exadel-inc/CompreFace/issues/651) version of CompreFace.
CompreFace will be exposed on port 8000 - you can change this in the add-on configuration if another port is required.
[Documentation](https://github.com/exadel-inc/CompreFace#readme)

30
compreface/config.json Executable file

@@ -0,0 +1,30 @@
{
"name": "Exadel CompreFace",
"version": "1.1.0",
"url": "https://github.com/exadel-inc/CompreFace",
"slug": "compreface",
"description": "Exadel CompreFace is a leading free and open-source face recognition system",
"arch": ["amd64"],
"startup": "application",
"boot": "auto",
"ports": {
"80/tcp": 8000
},
"ports_description": {
"80/tcp": "UI/API"
},
"options": {
"POSTGRES_URL": "jdbc:postgresql://localhost:5432/frs",
"POSTGRES_USER": "compreface",
"POSTGRES_PASSWORD": "M7yfTsBscdqvZs49",
"POSTGRES_DB": "frs",
"API_JAVA_OPTS": "-Xmx1g"
},
"schema": {
"POSTGRES_URL": "str",
"POSTGRES_USER": "str",
"POSTGRES_PASSWORD": "str",
"POSTGRES_DB": "str",
"API_JAVA_OPTS": "str"
}
}

BIN
compreface/icon.png Executable file

Binary file not shown (new image, 39 KiB).

785
compreface/postgresql.conf Executable file

@@ -0,0 +1,785 @@
# CompreFace changes:
# 1. Changed `data_directory`, so it always links to `/var/lib/postgresql/data` and does not depend on the postgres version.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units:  B  = bytes            Time units:  us  = microseconds
#                kB = kilobytes                     ms  = milliseconds
#                MB = megabytes                     s   = seconds
#                GB = gigabytes                     min = minutes
#                TB = terabytes                     h   = hours
#                                                   d   = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/data/database' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/13/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/13/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/13-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
max_wal_size = 1GB
min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_incremental_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '13/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
stats_temp_directory = '/var/run/postgresql/13-main.pg_stat_tmp'
# - Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

36
compreface/run.sh Executable file

@@ -0,0 +1,36 @@
#!/bin/bash
#
# Entrypoint
#
# Ensure persistent data is stored in /data/ and then start the stack
set -euo pipefail
start() {
echo "Starting CompreFace" >&2
values=$(cat /data/options.json)
for s in $(echo "$values" | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" ); do
export "${s?}"
done
if [ "$PGDATA" == "/data/database" ] && [ -d /data ]
then
if [ ! -d /data/database ]
then
cp -rp /var/lib/postgresql/data /data/database
fi
fi
chown -R postgres:postgres "$PGDATA"
exec /usr/bin/supervisord
}
if grep -q avx /proc/cpuinfo
then
start
else
echo "AVX not detected" >&2
exit 1
fi
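For clarity, the jq one-liner in `start()` exports every key of `/data/options.json` as an environment variable. Below is a minimal standalone sketch of the same idea; the sample file path and values are illustrative only and values containing spaces would need extra handling:

```bash
#!/bin/bash
# Illustrative only: mimic the options-to-env export used in run.sh above
cat > /tmp/options.json <<'EOF'
{"POSTGRES_USER": "compreface", "POSTGRES_DB": "frs", "API_JAVA_OPTS": "-Xmx1g"}
EOF
values=$(cat /tmp/options.json)
# Each entry becomes KEY=value; note that the loop splits on whitespace
for s in $(echo "$values" | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" ); do
    export "${s?}"
done
echo "$POSTGRES_USER / $POSTGRES_DB / $API_JAVA_OPTS"
```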

1
deepstack-cpu/CHANGELOG.md Executable file

@@ -0,0 +1 @@
Please reference the [release notes](https://github.com/johnolafenwa/DeepStack/releases) for changes.

3
deepstack-cpu/Dockerfile Executable file

@@ -0,0 +1,3 @@
ARG BUILD_FROM
FROM $BUILD_FROM
ENV DATA_DIR=/data/database

7
deepstack-cpu/README.md Executable file

@@ -0,0 +1,7 @@
![amd64][amd64-shield]
# DeepStack (CPU)
[Documentation](https://docs.deepstack.cc)
[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg

5
deepstack-cpu/build.json Executable file

@@ -0,0 +1,5 @@
{
"build_from": {
"amd64": "deepquestai/deepstack:cpu"
}
}

20
deepstack-cpu/config.json Executable file

@@ -0,0 +1,20 @@
{
"name": "DeepStack (CPU)",
"version": "2021.09.1",
"url": "https://github.com/johnolafenwa/DeepStack",
"slug": "deepstack-cpu",
"description": "The World's Leading Cross Platform AI Engine for Edge Devices",
"arch": ["amd64"],
"startup": "application",
"boot": "auto",
"ports": {
"5000/tcp": 5001
},
"ports_description": {
"5000/tcp": "API"
},
"environment": {
"VISION-FACE": "True",
"VISION-DETECTION": "True"
}
}

BIN
deepstack-cpu/icon.png Executable file

Binary file not shown (new image, 133 KiB).

1
deepstack/CHANGELOG.md Executable file

@@ -0,0 +1 @@
Please reference the [release notes](https://github.com/johnolafenwa/DeepStack/releases) for changes.

3
deepstack/Dockerfile Executable file

@@ -0,0 +1,3 @@
ARG BUILD_FROM
FROM $BUILD_FROM
ENV DATA_DIR=/data/database

9
deepstack/README.md Executable file
View File

@@ -0,0 +1,9 @@
![amd64][amd64-shield] ![armv7][armv7-shield] ![aarch64][aarch64-shield]
# DeepStack
[Documentation](https://docs.deepstack.cc)
[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg
[aarch64-shield]: https://img.shields.io/badge/aarch64-yes-green.svg
[armv7-shield]: https://img.shields.io/badge/armv7-yes-green.svg

7
deepstack/build.json Executable file
View File

@@ -0,0 +1,7 @@
{
"build_from": {
"amd64": "deepquestai/deepstack:latest",
"aarch64": "deepquestai/deepstack:arm64",
"armv7": "deepquestai/deepstack:arm64"
}
}

20
deepstack/config.json Executable file
View File

@@ -0,0 +1,20 @@
{
"name": "DeepStack",
"version": "2021.09.1",
"url": "https://github.com/johnolafenwa/DeepStack",
"slug": "deepstack",
"description": "The World's Leading Cross Platform AI Engine for Edge Devices",
"arch": ["amd64", "armv7", "aarch64"],
"startup": "application",
"boot": "auto",
"ports": {
"5000/tcp": 5001
},
"ports_description": {
"5000/tcp": "API"
},
"environment": {
"VISION-FACE": "True",
"VISION-DETECTION": "True"
}
}

BIN
deepstack/icon.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

4
facebox/Dockerfile Executable file
View File

@@ -0,0 +1,4 @@
FROM machinebox/facebox
RUN apt-get install jq -y
COPY run.sh /
ENTRYPOINT ["/run.sh"]

14
facebox/README.md Executable file
View File

@@ -0,0 +1,14 @@
![amd64][amd64-shield]
# Facebox
To use this add-on, create a Veritone Developer account and log in at:
https://machinebox.io/login
Click on the Machine Box navigation link to view your `MB_KEY`.
Enter your `MB_KEY` into the configuration of the add-on.
[Documentation](https://machinebox.io/docs/facebox)
[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg

22
facebox/config.json Executable file
View File

@@ -0,0 +1,22 @@
{
"name": "Facebox",
"version": "1a1358d",
"url": "https://machinebox.io/docs/facebox",
"slug": "facebox",
"description": "Facial recognition with one-shot teaching",
"arch": ["amd64"],
"startup": "application",
"boot": "auto",
"ports": {
"8080/tcp": 8000
},
"ports_description": {
"8080/tcp": "UI/API"
},
"options": {
"MB_KEY": ""
},
"schema": {
"MB_KEY": "str"
}
}

BIN
facebox/icon.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

6
facebox/run.sh Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Export each key/value pair from /data/options.json as an environment variable
values=$(cat /data/options.json)
for s in $(echo "$values" | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" ); do
export "${s?}"
done
/app/facebox

View File

@@ -1,4 +1,31 @@
## 1.23.5 (08-03-2025)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.23.4 (21-02-2025)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.23.3 (08-02-2025)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.23.1 (11-01-2025)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.6 (14-12-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.4 (30-11-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.3 (12-10-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.2 (07-09-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.1 (06-07-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)
## 1.22.0 (01-06-2024)
- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases)

View File

@@ -1,3 +1,15 @@
#============================#
# ALEXBELGIUM'S DOCKERFILE #
#============================#
# _.------.
# _.-` ('>.-`"""-.
# '.--'` _'` _ .--.)
# -' '-.-';` `
# ' - _.' ``'--.
# '---` .-'""`
# /`
#=== Home Assistant Addon ===#
#################
# 1 Build Image #
#################
@@ -23,7 +35,7 @@ USER root
##################
# Add rootfs
COPY rootfs/ /
COPY rootfs /
# Uses /bin for compatibility purposes
# hadolint ignore=DL4005
@@ -34,14 +46,14 @@ RUN if [ ! -f /bin/sh ] && [ -f /usr/bin/sh ]; then ln -s /usr/bin/sh /bin/sh; f
ARG MODULES="00-banner.sh 00-global_var.sh 01-custom_script.sh"
# Automatic modules download
ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_automodules.sh" "/ha_automodules.sh"
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_automodules.sh" "/ha_automodules.sh"
RUN chmod 744 /ha_automodules.sh && /ha_automodules.sh "$MODULES" && rm /ha_automodules.sh
# Manual apps
ENV PACKAGES=""
# Automatic apps & bashio
ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh"
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh"
RUN chmod 744 /ha_autoapps.sh && /ha_autoapps.sh "$PACKAGES" && rm /ha_autoapps.sh
EXPOSE 22 3000
@@ -52,10 +64,10 @@ EXPOSE 22 3000
# Add entrypoint
ENV S6_STAGE2_HOOK=/ha_entrypoint.sh
ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh"
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh"
# Entrypoint modifications
ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh"
ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh"
RUN chmod 777 /ha_entrypoint.sh /ha_entrypoint_modif.sh && /ha_entrypoint_modif.sh && rm /ha_entrypoint_modif.sh
@@ -72,6 +84,7 @@ ARG BUILD_NAME
ARG BUILD_REF
ARG BUILD_REPOSITORY
ARG BUILD_VERSION
ENV BUILD_VERSION="${BUILD_VERSION}"
LABEL \
io.hass.name="${BUILD_NAME}" \
io.hass.description="${BUILD_DESCRIPTION}" \
@@ -95,6 +108,22 @@ LABEL \
# 6 Healthcheck #
#################
# Avoid spamming logs
# hadolint ignore=SC2016
RUN \
# Handle Apache configuration
if [ -d /etc/apache2/sites-available ]; then \
for file in /etc/apache2/sites-*/*.conf; do \
sed -i '/<VirtualHost/a \ \n # Match requests with the custom User-Agent "HealthCheck" \n SetEnvIf User-Agent "HealthCheck" dontlog \n # Exclude matching requests from access logs \n CustomLog ${APACHE_LOG_DIR}/access.log combined env=!dontlog' "$file"; \
done; \
fi && \
\
# Handle Nginx configuration
if [ -f /etc/nginx/nginx.conf ]; then \
awk '/http \{/{print; print "map $http_user_agent $dontlog {\n default 0;\n \"~*HealthCheck\" 1;\n}\naccess_log /var/log/nginx/access.log combined if=$dontlog;"; next}1' /etc/nginx/nginx.conf > /etc/nginx/nginx.conf.new && \
mv /etc/nginx/nginx.conf.new /etc/nginx/nginx.conf; \
fi
ENV HEALTH_PORT="3000" \
HEALTH_URL=""
HEALTHCHECK \
@@ -102,4 +131,4 @@ HEALTHCHECK \
--retries=5 \
--start-period=30s \
--timeout=25s \
CMD curl --fail "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1
CMD curl -A "HealthCheck: Docker/1.0" -s -f "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1

View File

@@ -7,18 +7,18 @@
![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fconfig.json)
![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fconfig.json)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/Mesteriis/hassio-addons-avm/dashboard?utm_source=github.com&utm_medium=referral&utm_content=Mesteriis/hassio-addons-avm&utm_campaign=Badge_Grade)
[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/Mesteriis/hassio-addons-avm/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/Mesteriis/hassio-addons-avm/actions/workflows/weekly-supelinter.yaml)
[![Builder](https://img.shields.io/github/actions/workflow/status/Mesteriis/hassio-addons-avm/onpush_builder.yaml?label=Builder)](https://github.com/Mesteriis/hassio-addons-avm/actions/workflows/onpush_builder.yaml)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade)
[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml)
[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml)
[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white
[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white
_Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. Thanks!_
[![Stargazers repo roster for @Mesteriis/hassio-addons-avm](https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.github/stars2.svg)](https://github.com/Mesteriis/hassio-addons-avm/stargazers)
[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers)
![downloads evolution](https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/gitea/stats.png)
![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/gitea/stats.png)
## About
@@ -53,4 +53,4 @@ comparison to installing any other Hass.io add-on.
1. Go to the webui, where you will initialize the app
1. Restart the addon, to apply any option that should be applied
[repository]: https://github.com/Mesteriis/hassio-addons-avm
[repository]: https://github.com/alexbelgium/hassio-addons

View File

@@ -27,15 +27,19 @@
"/dev/sdf",
"/dev/sdg",
"/dev/nvme",
"/dev/nvme0",
"/dev/nvme0n1",
"/dev/nvme0n1p1",
"/dev/nvme0n1p2",
"/dev/nvme0n1p3",
"/dev/nvme1n1",
"/dev/nvme1n1p1",
"/dev/nvme1n1p2",
"/dev/nvme1n1p3",
"/dev/nvme2n1",
"/dev/nvme2n1p1",
"/dev/nvme2n1p2",
"/dev/nvme3n1p3",
"/dev/nvme2n3p3",
"/dev/mmcblk",
"/dev/fuse",
"/dev/sda1",
@@ -97,7 +101,7 @@
},
"slug": "gitea",
"udev": true,
"url": "https://github.com/Mesteriis/hassio-addons-avm/tree/master/gitea",
"version": "1.22.0",
"url": "https://github.com/alexbelgium/hassio-addons/tree/master/gitea",
"version": "1.23.5",
"webui": "[PROTO:ssl]://[HOST]:[PORT:3000]"
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.8 KiB

After

Width:  |  Height:  |  Size: 2.2 KiB

View File

@@ -1,8 +1,8 @@
{
"last_update": "01-06-2024",
"repository": "Mesteriis/hassio-addons-avm",
"last_update": "08-03-2025",
"repository": "alexbelgium/hassio-addons",
"slug": "gitea",
"source": "github",
"upstream_repo": "go-gitea/gitea",
"upstream_version": "1.22.0"
"upstream_version": "1.23.5"
}

View File

@@ -0,0 +1,44 @@
# Authentication with Google Drive
This document describes how the addon (Home Assistant Google Drive Backup) authenticates with Google Drive and stores your credentials. It's geared toward those who wish to know more detail and is not necessary to take advantage of the full features of the addon. The document is provided in the interest of providing full transparency into how the add-on works. I've tried to describe this as plainly as possible, but it is technical and therefore may not be understandable to everyone. Feedback on its clarity is appreciated.
> This document describes how authentication works if you use the big blue "AUTHENTICATE WITH GOOGLE DRIVE" button in the addon. If you're using [your own Google Drive credentials](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/LOCAL_AUTH.md), then none of this applies.
## Your Credentials and the Needed Permission
To have access to any information in Google Drive, Google's authentication servers must be told that the add-on has the permission. The add-on uses [Google Drive's Rest API (v3)](https://developers.google.com/drive/api/v3/about-sdk) for communication and requests the [drive.file](https://developers.google.com/drive/api/v3/about-auth) permission *scope*. This *scope* means the add-on has access to files and folders that the add-on created, but nothing else. It can't see files you've added to Google Drive through their web interface or anywhere else. Google Drive's Rest API allows the addon to periodically check what backups are uploaded and upload new ones if necessary by making requests over the internet.
## Authentication with Google Services
For reference, Google's documentation for how to authenticate users with the Google Drive REST API is [here](https://developers.google.com/drive/api/v3/about-auth). Authentication is handled through [OAuth 2.0](https://developers.google.com/identity/protocols/OAuth2), which means that the add-on never actually sees your Google username and password, only an opaque [security token](https://en.wikipedia.org/wiki/Access_token) used to verify that the addon has been given permission. More detail is provided about what that token is and where it is stored later in this document.
The way a web-based application would normally authenticate with a Google service (eg Google Drive) looks something like this:
1. User navigates to the app's webpage, eg http://examplegoogleapp.com
2. The app generates a URL to Google's servers (https://accounts.google.com) used to grant the app permission.
3. User navigates there, enters their Google username and password, and confirms the intention to give the app some permission (eg one or more *scopes*).
4. Google redirects the user back to the app's webpage with an access token appended to the URL (eg http://examplegoogleapp.com/authenticate?token=0x12345678)
5. The app stores the access token (0x12345678 in this example), and then passes it back to Google whenever it wishes to access the API on behalf of the user who logged in.
This access token allows the app to act as if it is the user who created it. In the case of this add-on, the permission granted by the drive.file scope allows it to create folders, upload backups, and retrieve the previously created folders. Because the add-on only ever sees the access token (not the username/password), and the access token only grants limited permissions, the add-on doesn't have a way to elevate its permission further to access other information in Google Drive or your Google account.
## Authentication for the Add-on
Google imposes some limitations on how the access token can be generated, and these are important for understanding how the add-on authenticates in practice:
* When the user is redirected to https://accounts.google.com (step 2), the redirect must be from a known public website associated with the app.
* When the user is redirected back to the app after authorization (step 4), the redirect must be a statically addressed and publicly accessible website.
These limitations create a technical problem for the addon: most people's Home Assistant instances aren't publicly accessible, and the address is different for each one. Performing the authentication workflow exactly as described above won't work. To get around this, I (the developer of this addon) set up a website, https://habackup.io, which serves as the known public and statically addressable website that Google redirects from/to. The source code for this server is available within the add-on's GitHub repository.
So when you authenticate the add-on, the workflow looks like this:
1. You start at the add-on's web interface, something like https://homeassistant.local:8123/ingress/hassio_google_drive_backup
2. You click the "Authenticate With Google Drive" button, which takes note of the address of your Home Assistant installation (https://homeassistant.local:8123 in this case) and sends you to https://habackup.io/drive/authorize
3. https://habackup.io immediately generates the Google login URL for you and redirects you to https://accounts.google.com
4. You log in with your Google credentials on Google's domain, and confirm you want to give the add-on permission to see files and folders it creates (the drive.file scope)
5. Google redirects you back to https://habackup.io, along with the access token that will be used for future authentication.
6. https://habackup.io redirects you back to your add-on web-UI (which is kept track of in step 2) along with the access token.
7. The addon (on your local Home Assistant installation) persists the access token and uses it in the future any time it needs to talk to Google Drive.
Notably, your access token isn't persisted at https://habackup.io; it is only passed through back to your local add-on installation. I do this because:
- It ensures your information is only ever stored on your machine, which is reassuring from the user's perspective (eg you).
- If my server (https://habackup.io) ever gets compromised, there isn't any valuable information stored there that compromises you as well.
- This practices a form of [defense-in-depth](https://en.wikipedia.org/wiki/Defense_in_depth_%28computing%29) security, wherein [personal data](https://en.wikipedia.org/wiki/Personal_data) is stored only in the places where it is strictly necessary.
- It keeps the server simpler, since it is a stateless machine that doesn't require a database (eg to store your token).
After your token is generated and stored on your machine, it needs to be *refreshed* periodically with Google Drive. To do this, the addon again asks https://habackup.io, which relays the refresh request to Google Drive.
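To make the refresh round trip concrete, here is a minimal sketch of what such an exchange could look like. The endpoint path and the payload/response fields are assumptions for illustration, not the add-on's actual API; only the shape of the flow (add-on to habackup.io to Google and back to the add-on) mirrors the description above.
```python
# Illustrative sketch only: the relay path and field names below are assumptions,
# not the add-on's real API. Uses only the Python standard library.
import json
import urllib.request

RELAY_URL = "https://habackup.io/drive/refresh"  # hypothetical relay endpoint


def refresh_access_token(refresh_token: str) -> dict:
    """Ask the relay to exchange a stored refresh token for a fresh access token.

    The relay forwards the request to Google's OAuth endpoint and returns the
    result without persisting anything server-side, as described above.
    """
    payload = json.dumps({"refresh_token": refresh_token}).encode()
    request = urllib.request.Request(
        RELAY_URL,
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(request, timeout=30) as response:
        # e.g. {"access_token": "...", "expires_in": 3599}
        return json.load(response)
```
The important property is visible in the sketch: the only secret that ever leaves your machine is the opaque token, never your Google username or password.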

View File

@@ -0,0 +1,123 @@
# 'Snapshot' vs 'Backup'
In August 2021 [the Home Assistant team announced](https://www.home-assistant.io/blog/2021/08/24/supervisor-update/) that 'snapshots' will be called 'backups' moving forward. This addon exposes a binary sensor to indicate if snapshots are stale and another sensor that publishes details about backups. Both of the sensors used 'snapshot' in their names and values, so they had to be changed to match the new language. To prevent breaking any existing automations you might have, the addon will only start using the new names and values when you upgrade if you tell it to.
This can be controlled with the configuration option ```call_backup_snapshot```, which makes the addon use the old sensor names and values when it is true. If you upgraded the addon from a version that used 'snapshot' in its names, this option is added automatically so that existing automations keep working.
Here is a breakdown of what the new and old sensor values mean:
## Old sensor name/values
These will be the sensor values used when ```call_backup_snapshot: True``` or if the addon is below version 0.105.1. The addon sets ```call_backup_snapshot: True``` automatically if you upgrade the addon from an older version.
### Backup Stale Binary Sensor
#### Entity Id:
```yaml
binary_sensor.snapshots_stale
```
#### Possible states:
```yaml
on
off
```
#### Example Attributes:
```yaml
friendly_name: Snapshots Stale
device_class: problem
```
### Backup State Sensor
#### Entity Id:
```yaml
sensor.snapshot_backup
```
#### Possible States:
```yaml
error
waiting
backed_up
```
#### Example Attributes:
```yaml
friendly_name: Snapshots State
last_snapshot: 2021-09-01T20:26:49.100376+00:00
snapshots_in_google_drive: 2
snapshots_in_hassio: 2
snapshots_in_home_assistant: 2
size_in_google_drive: 2.5 GB
size_in_home_assistant: 2.5 GB
snapshots:
- name: Full Snapshot 2021-02-06 11:37:00
date: '2021-02-06T18:37:00.916510+00:00'
state: Backed Up
slug: DFG123
- name: Full Snapshot 2021-02-07 11:00:00
date: '2021-02-07T18:00:00.916510+00:00'
state: Backed Up
slug: DFG124
```
## New Sensor Names/Values
These will be the sensor values used when ```call_backup_snapshot: False``` or if the configuration option is un-set. New installations of the addon will default to this.
### Backup Stale Binary Sensor
#### Entity Id
```yaml
binary_sensor.backups_stale
```
#### Possible States
```yaml
on
off
```
#### Example Attributes:
```yaml
friendly_name: Backups Stale
device_class: problem
```
### Backup State Sensor
#### Entity Id
```yaml
sensor.backup_state
```
#### Possible States
```yaml
error
waiting
backed_up
```
#### Example Attributes:
```yaml
friendly_name: Backup State
last_backup: 2021-09-01T20:26:49.100376+00:00
last_upload: 2021-09-01T20:26:49.100376+00:00
backups_in_google_drive: 2
backups_in_home_assistant: 2
size_in_google_drive: 2.5 GB
size_in_home_assistant: 2.5 GB
backups:
- name: Full Snapshot 2021-02-06 11:37:00
date: '2021-02-06T18:37:00.916510+00:00'
state: Backed Up
slug: DFG123
- name: Full Snapshot 2021-02-07 11:00:00
date: '2021-02-07T18:00:00.916510+00:00'
state: Backed Up
slug: DFG124
```
### What do the values mean?
```binary_sensor.backups_stale``` is "on" when backups are stale and "off" otherwise. Backups are stale when the addon is 6 hours past a scheduled backup and no new backup has been made. This delay is in place to avoid triggering on transient errors (eg internet connectivity problems or one-off problems in Home Assistant).
```sensor.backup_state``` is:
- ```waiting``` when the addon is first booted up or hasn't been connected to Google Drive yet.
- ```error``` immediately after any error is encountered, even transient ones.
- ```backed_up``` when everything is running fine without errors.
Its attributes are:
- ```last_backup``` The UTC ISO-8601 date of the most recent backup in Home Assistant or Google Drive.
- ```last_upload``` The UTC ISO-8601 date of the most recent backup uploaded to Google Drive.
- ```backups_in_google_drive``` The number of backups in Google Drive.
- ```backups_in_home_assistant``` The number of backups in Home Assistant.
- ```size_in_google_drive``` A string representation of the space used by backups in Google Drive.
- ```size_in_home_assistant``` A string representation of the space used by backups in Home Assistant.
- ```backups``` The list of backups in descending order of date. Each backup includes its ```name```, ```date```, ```slug```, and ```state``` (see the sketch after this list). ```state``` can be one of:
- ```Backed Up``` if it's in Home Assistant and Google Drive.
- ```HA Only``` if it's only in Home Assistant.
- ```Drive Only``` if it's only in Google Drive.
- ```Pending``` if the backup was requested but not yet complete.
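If you want to consume these values outside of Home Assistant automations, a minimal sketch using Home Assistant's REST API could look like the following. The base URL and token are placeholders you must replace, and the entity names assume the new (non-snapshot) naming described above.
```python
# Read sensor.backup_state through Home Assistant's REST API (standard library only).
# BASE_URL and TOKEN are placeholders: point them at your instance and a
# long-lived access token created in your Home Assistant profile.
import json
import urllib.request

BASE_URL = "http://homeassistant.local:8123"
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"


def get_state(entity_id: str) -> dict:
    request = urllib.request.Request(
        f"{BASE_URL}/api/states/{entity_id}",
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    with urllib.request.urlopen(request, timeout=10) as response:
        return json.load(response)


if __name__ == "__main__":
    backup_state = get_state("sensor.backup_state")
    print(backup_state["state"])  # waiting, error, or backed_up
    attributes = backup_state["attributes"]
    print(attributes.get("backups_in_google_drive"), "backups in Google Drive")
    for backup in attributes.get("backups", []):
        print(backup["name"], backup["state"])
```
The same call against ```binary_sensor.backups_stale``` returns "on" or "off" in the ```state``` field.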

View File

@@ -0,0 +1,43 @@
## v0.112.1 [2023-11-03]
- Added warnings about using the "Stop Addons" feature. I plan on removing this in the near future. If you'd like to keep the feature around, please give your feedback in [this GitHub issue](https://github.com/sabeechen/hassio-google-drive-backup/issues/940).
- When backups are stuck in the "pending" state, the addon now provides you with the Supervisor logs to help figure out what's wrong.
- Added support for the "exclude Home Assistant database" options for automatic backups
- Added configuration options to limit the speed of uploads to Google Drive
- When Google Drive doesn't have enough space, the addon now explains how much space you're using and how much is left. This was a source of confusion for users.
- When the addon halts because it needs to delete more than one backup, it now tells you which backups will be deleted.
- Fixed a bug when using "stop addons" that prevented it from recognizing addons in the "starting" state.
- The addon's containers are now downloaded from GitHub (previously DockerHub).
- Added another redundant token provider, hosted on Heroku, that the addon uses for its cloud-required component when you aren't using your own Google app credentials.
## v0.111.1 [2023-06-19]
- Support for the new network storage features in Home Assistant. The addon will now create backups in what Home Assistant has configured as its default backup location. This can be overridden in the addon's settings.
- Raised the addon's required permissions to "Admin" in order to access the supervisor's mount API.
- Fixed a CSS error causing toast messages to render partially off screen on small displays.
- Fixed misreporting of some error codes from Google Drive when a partial upload can't be resumed.
## v0.110.4 [2023-04-28]
- Fix a whitespace error causing authorization to fail.
## v0.110.3 [2023-03-24]
- Fix an error causing "Days Between Backups" to be ignored when "Time of Day" for a backup is set.
- Fix a bug causing some timezones to make the addon to fail to start.
## v0.110.2 [2023-03-24]
- Fix a potential cause of SSL errors when communicating with Google Drive
- Fix a bug causing backups to be requested indefinitely if scheduled during DST transitions.
## v0.110.1 [2023-01-09]
- Adds some additional options for donating
- Mitigates SD card corruption by redundantly storing config files needed for addon startup.
- Avoid global throttling of Google Drive API calls by:
- Making sync intervals more spread out and a little random.
- Syncing more selectively when there are modifications to the /backup directory.
- Caching data from Google Drive for short periods during periodic syncing.
- Backing off for a longer time (2 hours) when the addon hits permanent errors.
- Fixes CSS issues that made the logs page hard to use.

View File

@@ -0,0 +1,205 @@
# Home Assistant Add-on: Google Drive Backup
## Installation
To install the add-on, first follow the installation steps from the [README on GitHub](https://github.com/sabeechen/hassio-google-drive-backup#installation).
## Configuration
_Note_: The configuration can be changed easily by starting the add-on and clicking `Settings` in the web UI.
The UI explains what each setting is and you don't need to modify anything before clicking `Start`.
If you would still prefer to modify the settings in yaml, the options are detailed below.
### Add-on configuration example
Don't use this directly; the addon has a lot of configuration options that most users don't need or want:
```yaml
# Keep 10 backups in Home Assistant
max_backups_in_ha: 10
# Keep 10 backups in Google Drive
max_backups_in_google_drive: 10
# Create backups in Home Assistant on network storage
backup_location: my_nfs_share
# Ignore backups the add-on hasn't created
ignore_other_backups: True
# Ignore backups that look like they were created by Home Assistant automatic backup option during upgrades
ignore_upgrade_backups: True
# Automatically delete "ignored" snapshots after this many days
delete_ignored_after_days: 7
# Take a backup every 3 days
days_between_backups: 3
# Create backups at 1:30pm exactly
backup_time_of_day: "13:30"
# Delete backups from Home Assistant immediately after uploading them to Google Drive
delete_after_upload: True
# Manually specify the backup folder used in Google Drive
specify_backup_folder: true
# Use a dark and red theme
background_color: "#242424"
accent_color: "#7D0034"
# Use a password for backup archives. Use "!secret secret_name" to use a password from your secrets file
backup_password: "super_secret"
# Create backup names like 'Full Backup HA 0.92.0'
backup_name: "{type} Backup HA {version_ha}"
# Keep a backup once every day for 3 days and once a week for 4 weeks
generational_days: 3
generational_weeks: 4
# Create partial backups with no folders and no configurator add-on
exclude_folders: "homeassistant,ssl,share,addons/local,media"
exclude_addons: "core_configurator"
# Turn off notifications and staleness sensor
enable_backup_stale_sensor: false
notify_for_stale_backups: false
# Enable server directly on port 1627
expose_extra_server: true
# Allow sending error reports
send_error_reports: true
# Delete backups after they're uploaded to Google Drive
delete_after_upload: true
```
### Option: `max_backups_in_ha` (default: 4)
The number of backups the add-on will allow Home Assistant to store locally before old ones are deleted.
### Option: `max_backups_in_google_drive` (default: 4)
The number of backups the add-on will keep in Google Drive before old ones are deleted. Google Drive gives you 15GB of free storage (at the time of writing) so plan accordingly if you know how big your backups are.
### Option: `backup_location` (default: None)
The place where backups are created in Home Assistant before uploading to Google Drive. Can be "local-disk" or the name of any backup network storage you've configured in Home Assistant. Leave unspecified (the default) to have backups created in whatever Home Assistant uses as the default backup location.
### Option: `ignore_other_backups` (default: False)
Make the addon ignore any backups it didn't directly create. Any backup already uploaded to Google Drive will not be ignored until you delete it from Google Drive.
### Option: `ignore_upgrade_backups` (default: False)
Ignores backups that look like they were automatically created from updating an add-on or Home Assistant itself. This will make the add-on ignore any partial backup that has only one add-on or folder in it.
### Option: `days_between_backups` (default: 3)
How often a new backup should be scheduled, eg `1` for daily and `7` for weekly.
### Option: `backup_time_of_day`
The time of day (local time) that new backups should be created in 24-hour ("HH:MM") format. When not specified backups are created at (roughly) the same time of day as the most recent backup.
### Option: `delete_after_upload` (default: False)
Deletes backups from Home Assistant immediately after uploading them to Google Drive. This is useful if you have very limited space inside Home Assistant since you only need to have available space for a single backup locally.
### Option: `specify_backup_folder` (default: False)
When true, you must select the folder in Google Drive where backups are stored. Once you turn this on, restart the add-on and visit the Web-UI to be prompted to select the backup folder.
### Option: `background_color` and `accent_color`
The background and accent colors for the web UI. You can use this to make the UI fit in with whatever color scheme you use in Home Assistant. When unset, the interface matches Home Assistant's default blue/white style.
### Option: `backup_password`
When set, backups are created with a password. You can use a value from your secrets.yaml by prefixing the password with "!secret". You'll need to remember this password when restoring a backup.
> Example: Use a password for backup archives
>
> ```yaml
> backup_password: "super_secret"
> ```
>
> Example: Use a password from secrets.yaml
>
> ```yaml
> backup_password: "!secret backup_password"
> ```
### Option: `backup_name` (default: "{type} Backup {year}-{month}-{day} {hr24}:{min}:{sec}")
Sets the name for new backups. Variable parameters of the form `{variable_name}` can be used to modify the name to your liking. A list of available variables is available [here](https://github.com/sabeechen/hassio-google-drive-backup#can-i-give-backups-a-different-name).
### Option: `generational_*`
When set, older backups will be kept longer using a [generational backup scheme](https://en.wikipedia.org/wiki/Backup_rotation_scheme). See the [question here](https://github.com/sabeechen/hassio-google-drive-backup#can-i-keep-older-backups-for-longer) for configuration options.
### Option: `exclude_folders`
When set, excludes the comma-separated list of folders by creating a partial backup.
### Option: `exclude_addons`
When set, excludes the comma-separated list of addons by creating a partial backup.
_Note_: Folders and add-ons must be identified by their "slug" name. It is recommended to use the `Settings` dialog within the add-on web UI to configure partial backups since these names are esoteric and hard to find.
### Option: `enable_backup_stale_sensor` (default: True)
When false, the add-on will not publish the [binary_sensor.backups_stale](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) stale sensor.
### Option: `enable_backup_state_sensor` (default: True)
When false, the add-on will not publish the [sensor.backup_state](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) sensor.
### Option: `notify_for_stale_backups` (default: True)
When false, the add-on will not send a [persistent notification](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) in Home Assistant when backups are stale.
---
### UI Server Options
The UI is available through Home Assistant [ingress](https://www.home-assistant.io/blog/2019/04/15/hassio-ingress/).
It can also be exposed through a web server on port `1627`, which you can map to an externally visible port from the add-on `Network` panel. You can configure a few more options to add SSL or require your Home Assistant username/password.
#### Option: `expose_extra_server` (default: False)
Expose the webserver on port `1627`. This is optional, as the add-on is already available with Home Assistant ingress.
#### Option: `require_login` (default: False)
When true, requires your Home Assistant username and password to access the Web UI.
#### Option: `use_ssl` (default: False)
When true, the Web UI exposed by `expose_extra_server` will be served over SSL (HTTPS).
#### Option: `certfile` (default: `/ssl/certfile.pem`)
Required when `use_ssl: True`. The path to your SSL certificate file.
#### Option: `keyfile` (default: `/ssl/keyfile.pem`)
Required when `use_ssl: True`. The path to your SSL key file.
#### Option: `verbose` (default: False)
If true, enable additional debug logging. Useful if you start seeing errors and need to file a bug with me.
#### Option: `send_error_reports` (default: False)
When true, the text of unexpected errors will be sent to a database maintained by the developer. This helps identify problems with new releases and provide better context messages when errors come up.
#### Option: `delete_after_upload` (default: False)
When true, backups are always deleted after they've been uploaded to Google Drive. 'max_backups_in_ha' is ignored when this option is True, since a backup is always deleted from Home Assistant after it gets uploaded to Google Drive. Some find this useful if they only have enough space on their Home Assistant machine for one backup.
## FAQ
Read the [FAQ on GitHub](https://github.com/sabeechen/hassio-google-drive-backup#faq).

View File

@@ -0,0 +1,12 @@
ARG BUILD_FROM
FROM $BUILD_FROM
WORKDIR /app
COPY . /app
RUN chmod +x addon_deps.sh
RUN ./addon_deps.sh
RUN pip3 install .
COPY config.json /usr/local/lib/python3.11/site-packages/config.json
EXPOSE 1627
EXPOSE 8099
ENTRYPOINT ["python3", "-m", "backup"]

View File

@@ -0,0 +1,16 @@
# Use the official lightweight Python image.
# https://hub.docker.com/_/python
FROM python:3.11-buster
# Copy local code to the container image.
ENV APP_HOME /server
WORKDIR $APP_HOME
COPY . ./
COPY config.json /usr/local/lib/python3.11/site-packages/config.json
# Install server python requirements
RUN pip3 install --trusted-host pypi.python.org -r requirements-server.txt
RUN pip3 install .
WORKDIR /
ENTRYPOINT ["python3", "-m", "backup.server"]

View File

@@ -0,0 +1,41 @@
# Generational Backup
Generational backup lets you keep a longer history of backups on daily, weekly, monthly, and yearly cycles. This is in contrast to the "regular" scheme for keeping history backups, which will always just delete the oldest backup when needed. This has the effect of keeping older backups around for a longer time, which is particularly useful if you've made a bad configuration change but didn't notice until several days later.
## Configuration
The generational backup will be used when any one of `generational_days`, `generational_weeks`, `generational_months`, or `generational_years` is greater than zero. All of the available configuration options are given below, but it's much easier to configure them from the Settings dialog accessible from the "Settings" menu at the top of the web UI.
* `generational_days` (int): The number of days to keep
* `generational_weeks` (int): The number of weeks to keep
* `generational_months` (int): The number of months to keep
* `generational_years` (int): The number of years to keep
* `generational_day_of_week` (str): The day of the week when weekly backups will be kept. It can be one of 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' or 'sun'. The default is 'mon'.
* `generational_day_of_month` (int): The day of the month when monthly backups will be kept, from 1 to 31. If a month has less than the configured number of days, the latest day of that month is used.
* `generational_day_of_year` (int): The day of the year that yearly backups are kept, from 1 to 365.
## Some Details to Consider
* Generational backup assumes that a backup is available for every day to work properly, so it's recommended that you set `days_between_backups`=1 if you're using the feature. Otherwise, a backup may not be available to be saved for a given day.
* The backups maintained by generational backup will still never exceed the number you permit to be maintained in Google Drive or Home Assistant. For example, if `max_backups_in_google_drive`=3 and `generational_weeks`=4, then only 3 weeks of backups will be kept in Google Drive.
* Generational backup will only delete older backups when it has to. For example, if you've configured it to keep 5 weekly backups on Monday, you've been running it for a week (so you have 7 backups), and `max_backups_in_google_drive`=7, then your backups on Tuesday, Wednesday, etc won't get deleted yet. They won't get deleted until doing so is necessary to keep older backups around without violating the maximum allowed in Google Drive.
>Note: You can configure the addon to delete backups more aggressively by setting `generational_delete_early`=true. With this, the addon will delete old backups that don't match a daily, weekly, monthly, or yearly configured cycle even if you aren't yet at risk of exceeding `max_backups_in_ha` or `max_backups_in_google_drive`. Careful though! You can accidentally delete all your backups this way if you don't have all your settings configured just the way you want them.
* If more than one backup is created for a day (for example if you create one manually) then only the latest backup from that day will be kept.
## Schedule
Figuring out date math in your head is hard, so it's useful to see a concrete example. Suppose you have the following configuration: two backups for each day, week, month, and year, along with a limit in Google Drive large enough to accommodate them all:
```json
"days_between_backups": 1,
"generational_days": 2,
"generational_weeks": 2,
"generational_months": 2
"generational_years": 2
"max_backups_in_google_drive": 8
```
Imagine you've been running the add-on for 2 years now, diligently making a backup every day with no interruptions. On 19 May 2021, you could expect your list of backups in Google Drive to look like this:
- May 19, 2021 <-- 1st Daily backup
- May 18, 2021 <-- 2nd Daily backup
- May 13, 2021 <-- 1st Weekly backup
- May 06, 2021 <-- 2nd Weekly backup
- May 01, 2021 <-- 1st Monthly backup
- April 01, 2021 <-- 2nd Monthly backup
- January 01, 2021 <-- 1st Yearly backup
- January 01, 2020 <-- 2nd Yearly backup
Note that sometimes a day might overlap more than one schedule. For example, a backup on January 1st could satisfy the constraints for both a yearly and monthly backup. In this case, the add-on will only delete older backups when it *must* to keep from exceeding `max_backups_in_ha` or `max_backups_in_google_drive`. Thus, the most recent backup that would otherwise be deleted will be kept until space is needed somewhere else in the schedule.
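To make the retention idea easier to follow, here is a deliberately simplified Python sketch. It is an illustration under assumptions, not the add-on's actual algorithm (which also handles early deletes, partial histories, and the Home Assistant and Google Drive maximums separately); with Thursday as the weekly day it reproduces the example list above.
```python
# Simplified illustration of generational retention -- not the add-on's real logic.
from datetime import date, timedelta


def generational_keep(backups, days=2, weeks=2, months=2, years=2,
                      day_of_week=3, day_of_month=1, day_of_year=1):
    """Return the backup dates the example schedule would retain (newest first)."""
    backups = sorted(backups, reverse=True)  # newest first
    keep = set(backups[:days])               # daily cycle
    keep |= set([d for d in backups if d.weekday() == day_of_week][:weeks])            # weekly (3 = Thursday)
    keep |= set([d for d in backups if d.day == day_of_month][:months])                # monthly
    keep |= set([d for d in backups if d.timetuple().tm_yday == day_of_year][:years])  # yearly
    return sorted(keep, reverse=True)


if __name__ == "__main__":
    # Two years of daily backups ending 19 May 2021, as in the example above.
    history = [date(2021, 5, 19) - timedelta(days=i) for i in range(365 * 2)]
    for kept in generational_keep(history):
        print(kept)  # prints the 8 retained dates listed above
```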

View File

@@ -0,0 +1,34 @@
# Home Assistant Add-on: Google Drive Backup
A complete and easy way to upload your Home Assistant backups to Google Drive.
## About
Quickly set up a backup strategy without much fuss. It doesn't require much familiarity with Home Assistant, its architecture, or Google Drive. Detailed install instructions are provided below but you can just add the repo, click install and open the Web UI. It will tell you what to do and only takes a few simple clicks.
>This project requires financial support to make the Google Drive integration work, but it is free for you to use. You can join those helping to keep the lights on at:
>
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/bmc-button.svg" width=150 height=40 style="margin: 5px"/>](https://www.buymeacoffee.com/sabeechen)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/paypal-button.svg" width=150 height=40 style="margin: 5px"/>](https://www.paypal.com/paypalme/stephenbeechen)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/patreon-button.svg" width=150 height=40 style="margin: 5px"/>](https://www.patreon.com/bePatron?u=4064183)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/github-sponsors-button.svg" width=150 height=40 style="margin: 5px"/>](https://github.com/sponsors/sabeechen)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/monero-button.svg" width=150 height=40 style="margin: 5px"/>](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/bitcoin-button.svg" width=150 height=40 style="margin: 5px"/>](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md)
>[<img src="https://raw.githubusercontent.com/sabeechen/hassio-google-drive-backup/master/images/ethereum-button.svg" width=150 height=40 style="margin: 5px"/>](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md)
### Features
- Creates backups on a configurable schedule.
- Uploads backups to Drive, even the ones it didn't create.
- Cleans up old backups in Home Assistant and Google Drive, so you don't run out of space.
- Restores from a fresh install or recovers quickly from disaster by uploading your backups directly from Google Drive.
- Integrates with Home Assistant Notifications, and provides sensors you can trigger off of.
- Notifies you when something goes wrong with your backups.
- Super easy installation and configuration.
- Privacy-centric design philosophy.
- Comprehensive documentation.
- _Most certainly_ doesn't mine bitcoin on your home automation server. Definitely no.
See the [README on GitHub](https://github.com/sabeechen/hassio-google-drive-backup) for all the details, or just install the add-on and open the Web UI.
The Web-UI explains everything you have to do.

View File

@@ -0,0 +1,7 @@
#!/bin/bash
apk add python3 fping linux-headers libc-dev libffi-dev python3-dev gcc py3-pip
pip3 install --upgrade pip wheel setuptools
pip3 install --trusted-host pypi.python.org -r requirements-addon.txt
# Remove packages we only needed for installation
apk del linux-headers libc-dev libffi-dev python3-dev gcc

View File

@@ -0,0 +1,17 @@
# How to use:
# cd hassio-google-drive-backup
# gcloud config set project hassio-drive-backup
# gcloud builds submit --config cloudbuild-dev.yaml --substitutions _DOCKERHUB_PASSWORD=<PASSWORD>
steps:
- name: "gcr.io/cloud-builders/docker"
entrypoint: "bash"
args: ["-c", "docker login --username=sabeechen --password=${_DOCKERHUB_PASSWORD}"]
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-f', 'Dockerfile-addon', '-t', 'sabeechen/hassio-google-drive-backup-dev-amd64:${_VERSION}', "--build-arg", "BUILD_FROM=homeassistant/amd64-base", '.' ]
substitutions:
_DOCKERHUB_PASSWORD: "define me" # default value
_VERSION: "dev-testing" # default value
images:
- "sabeechen/hassio-google-drive-backup-dev-amd64:${_VERSION}"

View File

@@ -0,0 +1,22 @@
# How to use:
# gcloud config set project hassio-drive-backup
# gcloud builds submit --config cloudbuild-server.yaml
#steps:
#- name: 'gcr.io/cloud-builders/docker'
# args: [ 'build', '-f', 'Dockerfile-server', '-t', 'gcr.io/$PROJECT_ID/authserver', '.' ]
#images:
#- 'gcr.io/$PROJECT_ID/authserver'
steps:
# Build the container image
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-f', 'Dockerfile-server', '-t', 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}', '.']
# Push the container image to Container Registry
- name: 'gcr.io/cloud-builders/docker'
args: ['push', 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}']
substitutions:
_SERVICE_NAME: "authserver-dev" # default value
_VERSION: "test-deployment" # default value
images:
- 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}'

View File

@@ -0,0 +1,110 @@
{
"name": "Home Assistant Google Drive Backup",
"version": "0.112.1",
"slug": "hassio_google_drive_backup",
"description": "Automatically manage backups between Home Assistant and Google Drive",
"arch": ["armhf", "armv7", "aarch64", "amd64", "i386"],
"url": "https://github.com/sabeechen/hassio-google-drive-backup",
"homeassistant_api": true,
"hassio_api": true,
"hassio_role": "admin",
"auth_api": true,
"ingress": true,
"panel_icon": "mdi:cloud",
"panel_title": "Backups",
"map": ["ssl", "backup:rw", "config"],
"options": {
"max_backups_in_ha": 4,
"max_backups_in_google_drive": 4,
"days_between_backups": 3
},
"schema": {
"max_backups_in_ha": "int(0,)?",
"max_backups_in_google_drive": "int(0,)?",
"days_between_backups": "float(0,)?",
"ignore_other_backups": "bool?",
"ignore_upgrade_backups": "bool?",
"backup_storage": "str?",
"delete_after_upload": "bool?",
"delete_before_new_backup": "bool?",
"verbose": "bool?",
"use_ssl": "bool?",
"certfile": "str?",
"keyfile": "str?",
"require_login": "bool?",
"backup_name": "str?",
"backup_time_of_day": "match(^[0-2]\\d:[0-5]\\d$)?",
"specify_backup_folder": "bool?",
"warn_for_low_space": "bool?",
"watch_backup_directory": "bool?",
"trace_requests": "bool?",
"generational_days": "int(0,)?",
"generational_weeks": "int(0,)?",
"generational_months": "int(0,)?",
"generational_years": "int(0,)?",
"generational_day_of_year": "int(1,365)?",
"generational_day_of_month": "int(1,31)?",
"generational_day_of_week": "list(mon|tue|wed|thu|fri|sat|sun)?",
"generational_delete_early": "bool?",
"notify_for_stale_backups": "bool?",
"enable_backup_stale_sensor": "bool?",
"enable_backup_state_sensor": "bool?",
"send_error_reports": "bool?",
"backup_password": "str?",
"exclude_folders": "str?",
"exclude_addons": "str?",
"exclude_ha_database": "bool?",
"stop_addons": "str?",
"disable_watchdog_when_stopping": "bool?",
"expose_extra_server": "bool?",
"drive_experimental": "bool?",
"drive_ipv4": "match(^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$)?",
"ignore_ipv6_addresses": "bool?",
"confirm_multiple_deletes": "bool?",
"google_drive_timeout_seconds": "float(1,)?",
"alternate_dns_servers": "match(^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})(,[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})*$)?",
"enable_drive_upload": "bool?",
"call_backup_snapshot": "bool?",
"background_color": "match(^(#[0-9ABCDEFabcdef]{6}|)$)?",
"accent_color": "match(^(#[0-9ABCDEFabcdef]{6}|)$)?",
"max_sync_interval_seconds": "float(300,)?",
"default_sync_interval_variation": "float(0,1)?",
"port": "int(0,)?",
"debugger_port": "int(100,)?",
"log_level": "list(DEBUG|TRACE|INFO|WARN|CRITICAL|WARNING)?",
"console_log_level": "list(DEBUG|TRACE|INFO|WARN|CRITICAL|WARNING)?",
"max_backoff_seconds": "int(3600,)?",
"max_snapshots_in_hassio": "int(0,)?",
"max_snapshots_in_google_drive": "int(0,)?",
"days_between_snapshots": "float(0,)?",
"ignore_other_snapshots": "bool?",
"ignore_upgrade_snapshots": "bool?",
"delete_before_new_snapshot": "bool?",
"delete_ignored_after_days": "float(0,)?",
"snapshot_name": "str?",
"snapshot_time_of_day": "match(^[0-2]\\d:[0-5]\\d$)?",
"specify_snapshot_folder": "bool?",
"notify_for_stale_snapshots": "bool?",
"enable_snapshot_stale_sensor": "bool?",
"enable_snapshot_state_sensor": "bool?",
"snapshot_password": "str?",
"maximum_upload_chunk_bytes": "float(262144,)?",
"ha_reporting_interval_seconds": "int(1,)?",
"upload_limit_bytes_per_second": "float(0,)?"
},
"ports": {
"1627/tcp": 1627
},
"ports_description": {
"1627/tcp": "Direct access to the add-on without ingress. Must be enabled in the settings, see 'expose_extra_server'."
},
"image": "ghcr.io/sabeechen/hassio-google-drive-backup-{arch}"
}

View File

@@ -0,0 +1,404 @@
from injector import singleton, inject
import asyncio
from ipaddress import ip_address
from typing import Any, Dict, Union, Optional
import aiohttp
from aiohttp import hdrs, web, ClientSession
from aiohttp.web_exceptions import (
HTTPBadGateway,
HTTPServiceUnavailable,
HTTPUnauthorized,
HTTPNotFound
)
from multidict import CIMultiDict, istr
from backup.logger import getLogger
from .ports import Ports
from .base_server import BaseServer
from .simulated_supervisor import SimulatedSupervisor
ATTR_ADMIN = "admin"
ATTR_ENABLE = "enable"
ATTR_ICON = "icon"
ATTR_PANELS = "panels"
ATTR_SESSION = "session"
ATTR_TITLE = "title"
COOKIE_INGRESS = "ingress_session"
HEADER_TOKEN = "X-Supervisor-Token"
HEADER_TOKEN_OLD = "X-Hassio-Key"
REQUEST_FROM = "HASSIO_FROM"
JSON_RESULT = "result"
JSON_DATA = "data"
JSON_MESSAGE = "message"
RESULT_ERROR = "error"
RESULT_OK = "ok"
_LOGGER = getLogger(__name__)
def api_return_error(message: Optional[str] = None) -> web.Response:
"""Return an API error message."""
return web.json_response(
{JSON_RESULT: RESULT_ERROR, JSON_MESSAGE: message}, status=400
)
def api_return_ok(data: Optional[Dict[str, Any]] = None) -> web.Response:
"""Return an API ok answer."""
return web.json_response({JSON_RESULT: RESULT_OK, JSON_DATA: data or {}})
def api_process(method):
"""Wrap function with true/false calls to rest api."""
async def wrap_api(api, *args, **kwargs):
"""Return API information."""
try:
answer = await method(api, *args, **kwargs)
except Exception as err:
return api_return_error(message=str(err))
if isinstance(answer, dict):
return api_return_ok(data=answer)
if isinstance(answer, web.Response):
return answer
elif isinstance(answer, bool) and not answer:
return api_return_error()
return api_return_ok()
return wrap_api
class Addon():
def __init__(self, ports: Ports, token: str):
self.ports = ports
self.ip_address = "127.0.0.1"
self.ingress_port = ports.ingress
self.token = token
class SysIngress():
def __init__(self, ports: Ports, token: str, cookie_value: str):
self.ports = ports
self.token = token
self.cookie_value = cookie_value
def validate_session(self, session):
return session == self.cookie_value
def get(self, token):
if token == self.token:
return Addon(self.ports, self.token)
return None
class CoreSysAttributes():
def __init__(self, ports: Ports, session: ClientSession, token: str, cookie_value: str):
self.sys_ingress = SysIngress(ports, token, cookie_value)
self.sys_websession = session
@singleton
class APIIngress(CoreSysAttributes, BaseServer):
@inject
def __init__(self, ports: Ports, session: ClientSession, supervisor: SimulatedSupervisor):
self.addon_token = self.generateId(10)
self.cookie_value = self.generateId(10)
super().__init__(ports, session, self.addon_token, self.cookie_value)
self.ports = ports
self.supervisor = supervisor
def routes(self):
return [
web.get("/startingress", self.start_ingress),
web.get("/hassio/ingress/{slug}", self.ingress_panel),
web.view("/api/hassio_ingress/{token}/{path:.*}", self.handler),
]
def start_ingress(self, request: web.Request):
resp = web.Response(status=303)
resp.headers[hdrs.LOCATION] = "/hassio/ingress/" + self.supervisor._addon_slug
resp.set_cookie(name=COOKIE_INGRESS, value=self.cookie_value, expires="Session", domain=request.url.host, path="/api/hassio_ingress/", httponly="false", secure="false")
return resp
def ingress_panel(self, request: web.Request):
slug = request.match_info.get("slug")
if slug != self.supervisor._addon_slug:
raise HTTPNotFound()
body = """
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type">
<meta content="utf-8" http-equiv="encoding">
<title>Simulated Supervisor Ingress Panel</title>
<style type="text/css" >
iframe {{
display: block;
width: 100%;
height: 100%;
border: 0;
}}
</style>
</head>
<body>
<div>
The Web-UI below is loaded through an iframe. <a href='startingress'>Start a new ingress session</a> if you get permission errors.
</div>
<iframe src="api/hassio_ingress/{0}/">
<html>
<head></head>
<body></body>
</html>
</iframe>
</body>
</html>
""".format(self.addon_token)
resp = web.Response(body=body, content_type="text/html")
resp.set_cookie(name=COOKIE_INGRESS, value=self.cookie_value, expires="Session", domain=request.url.host, path="/api/hassio_ingress/", httponly="false", secure="false")
return resp
"""
The class body below here is copied from
https://github.com/home-assistant/supervisor/blob/38b0aea8e2a3b9a9614bb5d94959235a0fae235e/supervisor/api/ingress.py#L35
In order to correctly reproduce the supervisor's kooky ingress proxy behavior.
"""
def _extract_addon(self, request: web.Request) -> Addon:
"""Return addon, throw an exception it it doesn't exist."""
token = request.match_info.get("token")
# Find correct add-on
addon = self.sys_ingress.get(token)
if not addon:
_LOGGER.warning("Ingress for %s not available", token)
raise HTTPServiceUnavailable()
return addon
def _check_ha_access(self, request: web.Request) -> None:
# always allow
pass
def _create_url(self, addon: Addon, path: str) -> str:
"""Create URL to container."""
return f"http://{addon.ip_address}:{addon.ingress_port}/{path}"
@api_process
async def panels(self, request: web.Request) -> Dict[str, Any]:
"""Create a list of panel data."""
addons = {}
for addon in self.sys_ingress.addons:
addons[addon.slug] = {
ATTR_TITLE: addon.panel_title,
ATTR_ICON: addon.panel_icon,
ATTR_ADMIN: addon.panel_admin,
ATTR_ENABLE: addon.ingress_panel,
}
return {ATTR_PANELS: addons}
@api_process
async def create_session(self, request: web.Request) -> Dict[str, Any]:
"""Create a new session."""
self._check_ha_access(request)
session = self.sys_ingress.create_session()
return {ATTR_SESSION: session}
async def handler(
self, request: web.Request
) -> Union[web.Response, web.StreamResponse, web.WebSocketResponse]:
"""Route data to Supervisor ingress service."""
self._check_ha_access(request)
# Check Ingress Session
session = request.cookies.get(COOKIE_INGRESS)
if not self.sys_ingress.validate_session(session):
_LOGGER.warning("No valid ingress session %s", session)
raise HTTPUnauthorized()
# Process requests
addon = self._extract_addon(request)
path = request.match_info.get("path")
try:
# Websocket
if _is_websocket(request):
return await self._handle_websocket(request, addon, path)
# Request
return await self._handle_request(request, addon, path)
except aiohttp.ClientError as err:
_LOGGER.error("Ingress error: %s", err)
raise HTTPBadGateway()
async def _handle_websocket(
self, request: web.Request, addon: Addon, path: str
) -> web.WebSocketResponse:
"""Ingress route for websocket."""
if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers:
req_protocols = [
str(proto.strip())
for proto in request.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
else:
req_protocols = ()
ws_server = web.WebSocketResponse(
protocols=req_protocols, autoclose=False, autoping=False
)
await ws_server.prepare(request)
# Preparing
url = self._create_url(addon, path)
source_header = _init_header(request, addon)
# Support GET query
if request.query_string:
url = f"{url}?{request.query_string}"
# Start proxy
async with self.sys_websession.ws_connect(
url,
headers=source_header,
protocols=req_protocols,
autoclose=False,
autoping=False,
) as ws_client:
# Proxy requests
await asyncio.wait(
[
_websocket_forward(ws_server, ws_client),
_websocket_forward(ws_client, ws_server),
],
return_when=asyncio.FIRST_COMPLETED,
)
return ws_server
async def _handle_request(
self, request: web.Request, addon: Addon, path: str
) -> Union[web.Response, web.StreamResponse]:
"""Ingress route for request."""
url = self._create_url(addon, path)
data = await request.read()
source_header = _init_header(request, addon)
async with self.sys_websession.request(
request.method,
url,
headers=source_header,
params=request.query,
allow_redirects=False,
data=data,
) as result:
headers = _response_header(result)
# Simple request
if (
hdrs.CONTENT_LENGTH in result.headers and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
):
# Return Response
body = await result.read()
return web.Response(
headers=headers,
status=result.status,
content_type=result.content_type,
body=body,
)
# Stream response
response = web.StreamResponse(status=result.status, headers=headers)
response.content_type = result.content_type
try:
await response.prepare(request)
async for data in result.content.iter_chunked(4096):
await response.write(data)
except (
aiohttp.ClientError,
aiohttp.ClientPayloadError,
ConnectionResetError,
) as err:
_LOGGER.error("Stream error with %s: %s", url, err)
return response
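# The proxy recomputes content length/encoding and performs its own websocket
# handshake, so those headers (plus the supervisor auth tokens) are stripped
# from the forwarded request below.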
def _init_header(
request: web.Request, addon: str
) -> Union[CIMultiDict, Dict[str, str]]:
"""Create initial header."""
headers = {}
# filter flags
for name, value in request.headers.items():
if name in (
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_ENCODING,
hdrs.SEC_WEBSOCKET_EXTENSIONS,
hdrs.SEC_WEBSOCKET_PROTOCOL,
hdrs.SEC_WEBSOCKET_VERSION,
hdrs.SEC_WEBSOCKET_KEY,
istr(HEADER_TOKEN),
istr(HEADER_TOKEN_OLD),
):
continue
headers[name] = value
# Update X-Forwarded-For
forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
headers[hdrs.X_FORWARDED_FOR] = f"{forward_for}, {connected_ip!s}"
return headers
def _response_header(response: aiohttp.ClientResponse) -> Dict[str, str]:
"""Create response header."""
headers = {}
for name, value in response.headers.items():
if name in (
hdrs.TRANSFER_ENCODING,
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_TYPE,
hdrs.CONTENT_ENCODING
):
continue
headers[name] = value
return headers
def _is_websocket(request: web.Request) -> bool:
"""Return True if request is a websocket."""
headers = request.headers
if (
"upgrade" in headers.get(hdrs.CONNECTION, "").lower() and headers.get(hdrs.UPGRADE, "").lower() == "websocket"
):
return True
return False
async def _websocket_forward(ws_from, ws_to):
"""Handle websocket message directly."""
try:
async for msg in ws_from:
if msg.type == aiohttp.WSMsgType.TEXT:
await ws_to.send_str(msg.data)
elif msg.type == aiohttp.WSMsgType.BINARY:
await ws_to.send_bytes(msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws_to.ping()
elif msg.type == aiohttp.WSMsgType.PONG:
await ws_to.pong()
elif ws_to.closed:
await ws_to.close(code=ws_to.close_code, message=msg.extra)
except RuntimeError:
_LOGGER.warning("Ingress Websocket runtime error")

View File

@@ -0,0 +1,56 @@
import random
import re
import io
from aiohttp.web import HTTPBadRequest, Request, Response
from typing import Any
rangePattern = re.compile("bytes=\\d+-\\d+")
bytesPattern = re.compile("^bytes \\d+-\\d+/\\d+$")
intPattern = re.compile("\\d+")
class BaseServer:
def generateId(self, length: int = 30) -> str:
random_int = random.randint(0, 1000000)
ret = str(random_int)
return ret + ''.join(map(lambda x: str(x), range(0, length - len(ret))))
def timeToRfc3339String(self, time) -> str:
return time.strftime("%Y-%m-%dT%H:%M:%SZ")
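# Serve either the whole payload or, when the client sends a
# "Range: bytes=start-end" header, a 206 Partial Content slice with a matching
# Content-Range header.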
def serve_bytes(self, request: Request, bytes: bytearray, include_length: bool = True) -> Any:
if "Range" in request.headers:
# Do range request
if not rangePattern.match(request.headers['Range']):
raise HTTPBadRequest()
numbers = intPattern.findall(request.headers['Range'])
start = int(numbers[0])
end = int(numbers[1])
if start < 0:
raise HTTPBadRequest()
if start > end:
raise HTTPBadRequest()
if end > len(bytes) - 1:
raise HTTPBadRequest()
resp = Response(body=bytes[start:end + 1], status=206)
resp.headers['Content-Range'] = "bytes {0}-{1}/{2}".format(
start, end, len(bytes))
if include_length:
resp.headers["Content-length"] = str(len(bytes))
return resp
else:
resp = Response(body=io.BytesIO(bytes))
resp.headers["Content-length"] = str(len(bytes))
return resp
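# Drain the request body chunk by chunk and return it as a single bytearray.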
async def readAll(self, request):
data = bytearray()
content = request.content
while True:
chunk, done = await content.readchunk()
data.extend(chunk)
if len(chunk) == 0:
break
return data

View File

@@ -0,0 +1,3 @@
authorization_host: "https://dev.habackup.io"
token_server_hosts: "https://token1.dev.habackup.io,https://dev.habackup.io"
default_drive_client_id: "795575624694-jcdhoh1jr1ngccfsbi2f44arr4jupl79.apps.googleusercontent.com"

View File

@@ -0,0 +1,27 @@
{
"drive_url": "http://localhost:56153",
"supervisor_url": "http://localhost:56153/",
"hassio_header": "test_header",
"retained_file_path": "hassio-google-drive-backup/dev/data/retained.json",
"data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json",
"backup_directory_path": "hassio-google-drive-backup/dev/backup",
"certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem",
"keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem",
"secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml",
"credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat",
"folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat",
"id_file_path": "hassio-google-drive-backup/dev/data/id.json",
"stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json",
"authorization_host": "http://localhost:56153",
"token_server_hosts": "http://localhost:56153",
"drive_refresh_url": "http://localhost:56153/oauth2/v4/token",
"drive_authorize_url": "http://localhost:56153/o/oauth2/v2/auth",
"drive_device_code_url": "http://localhost:56153/device/code",
"drive_token_url": "http://localhost:56153/token",
"ingress_token_file_path": "hassio-google-drive-backup/dev/data/ingress.dat",
"log_level": "TRACE",
"console_log_level": "TRACE",
"ingress_port": 56152,
"port": 56151,
"cache_warmup_max_seconds": 300
}

View File

@@ -0,0 +1,20 @@
{
"supervisor_url": "http://localhost:56153/",
"authorization_host": "https://dev.habackup.io",
"token_server_hosts": "https://token1.dev.habackup.io,https://dev.habackup.io",
"hassio_header": "test_header",
"data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json",
"retained_file_path": "hassio-google-drive-backup/dev/data/retained.json",
"backup_directory_path": "hassio-google-drive-backup/dev/backup",
"certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem",
"keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem",
"secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml",
"credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat",
"folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat",
"id_file_path": "hassio-google-drive-backup/dev/data/id.json",
"stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json",
"ingress_token_file_path": "hassio-google-drive-backup/dev/data/ingress.dat",
"default_drive_client_id": "795575624694-jcdhoh1jr1ngccfsbi2f44arr4jupl79.apps.googleusercontent.com",
"ingress_port": 56152,
"port": 56151
}

View File

@@ -0,0 +1,17 @@
{
"supervisor_url": "http://localhost:56153/",
"hassio_header": "test_header",
"data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json",
"retained_file_path": "hassio-google-drive-backup/dev/data/retained.json",
"backup_directory_path": "hassio-google-drive-backup/dev/backup",
"certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem",
"keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem",
"secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml",
"credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat",
"folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat",
"ingress_token_file_path": "hassio-google-drive-backup/dev/data/ingress.dat",
"id_file_path": "hassio-google-drive-backup/dev/data/id.json",
"stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json",
"ingress_port": 56155,
"port": 56156
}

View File

@@ -0,0 +1,11 @@
{
"send_error_reports": true,
"max_backups_in_ha": 4,
"max_backups_in_google_drive": 3,
"days_between_backups": 10,
"use_ssl": false,
"backup_name": "{type} Backup {year}-{month}-{day} {hr24}:{min}:{sec}",
"backup_password": "!secret password1",
"drive_experimental": true,
"drive_ipv4": ""
}

View File

@@ -0,0 +1,2 @@
password1: "Test value"
for_unit_tests: "password value"

View File

@@ -0,0 +1,6 @@
#!/bin/bash
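# Builds every add-on architecture locally with the Home Assistant builder
# image; the parent of the current directory is mounted into the builder as /data.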
sudo docker run --rm --privileged \
-v /home/coder/.docker:/root/.docker \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "$(pwd)/..":/data \
homeassistant/amd64-builder --all -t /data

View File

@@ -0,0 +1,19 @@
import subprocess
import os
import json
from os.path import abspath, join
with open(abspath(join(__file__, "..", "..", "config.json"))) as f:
version = json.load(f)["version"]
print("Version will be: " + version)
subprocess.run("docker login", shell=True)
platforms = ["amd64", "armv7", "aarch64", "armhf", "i386"]
os.chdir("hassio-google-drive-backup")
for platform in platforms:
subprocess.run("docker build -f Dockerfile-addon -t sabeechen/hassio-google-drive-backup-{0}:{1} --build-arg BUILD_FROM=homeassistant/{0}-base .".format(platform, version), shell=True)
for platform in platforms:
subprocess.run("docker push sabeechen/hassio-google-drive-backup-{0}:{1}".format(platform, version), shell=True)

View File

@@ -0,0 +1,20 @@
import getpass
import subprocess
import os
import json
from os.path import abspath, join
with open(abspath(join(__file__, "..", "..", "config.json"))) as f:
version = json.load(f)["version"]
try:
p = getpass.getpass("Enter DockerHub Password")
except Exception as error:
print('ERROR', error)
exit()
os.chdir("hassio-google-drive-backup")
print("Setting the appropriate gcloud project...")
subprocess.run("gcloud config set project hassio-drive-backup", shell=True)
print("Building and uploading dev container...")
subprocess.run("gcloud builds submit --config cloudbuild-dev.yaml --substitutions _DOCKERHUB_PASSWORD={0},_VERSION={1}".format(p, version), shell=True)

View File

@@ -0,0 +1,8 @@
import subprocess
import os
os.chdir("hassio-google-drive-backup")
print("Setting the appropriate gcloud project...")
subprocess.run("gcloud config set project hassio-drive-backup-dev", shell=True)
print("Building and uploading server container...")
subprocess.run("gcloud builds submit --config cloudbuild-server.yaml", shell=True)

View File

@@ -0,0 +1,8 @@
import subprocess
import os
os.chdir("hassio-google-drive-backup")
print("Setting the appropriate gcloud project...")
subprocess.run("gcloud config set project hassio-drive-backup", shell=True)
print("Building and uploading server container...")
subprocess.run("gcloud builds submit --config cloudbuild-server.yaml", shell=True)

View File

@@ -0,0 +1,57 @@
import argparse
from google.cloud import firestore
from datetime import datetime, timedelta
DELETE_BATCH_SIZE = 200
STORE_NAME = "error_reports"
def delete_old_data():
# Initialize Firestore
db = firestore.Client()
collection_ref = db.collection(STORE_NAME)
# Define the datetime for one week ago
week_ago = datetime.now() - timedelta(days=7)
# Query to find all documents older than a week
total_deleted = 0
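# Firestore batched writes are capped at 500 operations, so old reports are
# deleted in chunks of DELETE_BATCH_SIZE until none remain.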
while True:
to_delete = 0
batch = db.batch()
docs = collection_ref.where('server_time', '<', week_ago).stream()
for doc in docs:
to_delete += 1
batch.delete(doc.reference)
if to_delete >= DELETE_BATCH_SIZE:
break
if to_delete > 0:
batch.commit()
total_deleted += to_delete
print(f"Deleted {to_delete} documents ({total_deleted} total)")
else:
break
print(f"Success: All documents older than a week deleted ({total_deleted} total)")
def main():
# Create command line argument parser
parser = argparse.ArgumentParser()
# Add purge argument
parser.add_argument("--purge", help="Delete all documents older than a week.", action="store_true")
# Add any other argument you want in future. For example:
# parser.add_argument("--future_arg", help="Perform some future operation.")
args = parser.parse_args()
# Respond to arguments
if args.purge:
confirm = input('Are you sure you want to delete all documents older than a week? (y/n): ')
if confirm.lower() == 'y':
delete_old_data()
else:
print("Abort: No documents were deleted.")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,6 @@
from aiohttp.web import HTTPClientError
class HttpMultiException(HTTPClientError):
def __init__(self, code):
self.status_code = code

View File

@@ -0,0 +1,5 @@
class Ports:
def __init__(self, server, ui, ingress):
self.server = server
self.ui = ui
self.ingress = ingress

View File

@@ -0,0 +1,136 @@
import re
from aiohttp.web import Request, Response
from asyncio import Event
from aiohttp.web_response import json_response
from injector import singleton, inject
from backup.time import Time
from typing import List
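# A UrlMatch describes how the interceptor should answer a matching request:
# return a canned response or error status, block until released, or sleep to
# simulate latency; fail_after/fail_for control how many calls are affected.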
class UrlMatch():
def __init__(self, time: Time, url, fail_after=None, status=None, response=None, wait=False, sleep=None, fail_for=None):
self.time = time
self.url: str = url
self.fail_after: int = fail_after
self.status: int = status
self.wait_event: Event = Event()
self.trigger_event: Event = Event()
self.response: str = ""
self.wait: bool = wait
self.trigger_event.clear()
self.wait_event.clear()
self.sleep = sleep
self.response = response
self.fail_for = fail_for
self.responses = []
self._calls = 0
self.time = time
def addResponse(self, response):
self.responses.append(response)
def stop(self):
self.wait_event.set()
self.trigger_event.set()
def isMatch(self, request):
return re.match(self.url, request.url.path) or re.match(self.url, str(request.url))
async def waitForCall(self):
await self.trigger_event.wait()
def clear(self):
self.wait_event.set()
def callCount(self):
return self._calls
async def _doAction(self, request: Request):
self._calls += 1
if len(self.responses) > 0:
return self.responses.pop(0)
if self.status is not None:
await self._readAll(request)
if self.response:
return json_response(self.response, status=self.status)
else:
return Response(status=self.status)
elif self.wait:
self.trigger_event.set()
await self.wait_event.wait()
elif self.sleep is not None:
await self.time.sleepAsync(self.sleep, early_exit=self.wait_event)
async def called(self, request: Request):
if self.fail_after is None or self.fail_after <= 0:
if self.fail_for is not None and self.fail_for > 0:
self.fail_for -= 1
return await self._doAction(request)
elif self.fail_for is not None:
return None
return await self._doAction(request)
elif self.fail_after is not None:
self.fail_after -= 1
async def _readAll(self, request: Request):
data = bytearray()
content = request.content
while True:
chunk, done = await content.readchunk()
data.extend(chunk)
if len(chunk) == 0:
break
return data
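# RequestInterceptor is consulted by the simulation server's middleware for
# every request; tests register UrlMatch rules to inject failures, delays, and
# waits, and can later assert which URLs were called.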
@singleton
class RequestInterceptor:
@inject
def __init__(self):
self._matchers: List[UrlMatch] = []
self._history = []
self.time = Time()
def stop(self):
for matcher in self._matchers:
matcher.stop()
def setError(self, url, status=None, fail_after=None, fail_for=None, response=None) -> UrlMatch:
matcher = UrlMatch(self.time, url, fail_after, status=status, response=response, fail_for=fail_for)
self._matchers.append(matcher)
return matcher
def clear(self):
self._matchers.clear()
self._history.clear()
def setWaiter(self, url, attempts=None):
matcher = UrlMatch(self.time, url, attempts, wait=True)
self._matchers.append(matcher)
return matcher
def setSleep(self, url, attempts=None, sleep=None, wait_for=None):
matcher = UrlMatch(self.time, url, attempts, sleep=sleep, fail_for=wait_for)
self._matchers.append(matcher)
return matcher
async def checkUrl(self, request):
ret = None
self.record(request)
for match in self._matchers:
if match.isMatch(request):
ret = await match.called(request)
return ret
def record(self, request: Request):
record = str(request.url.path)
if len(request.url.query_string) > 0:
record += "?" + str(request.url.query_string)
self._history.append(record)
def urlWasCalled(self, url) -> bool:
for called_url in self._history:
if url == called_url or re.match(url, called_url):
return True
return False

View File

@@ -0,0 +1,522 @@
import re
from yarl import URL
from datetime import timedelta
from backup.logger import getLogger
from backup.config import Setting, Config
from backup.time import Time
from backup.creds import KEY_CLIENT_SECRET, KEY_CLIENT_ID, KEY_ACCESS_TOKEN, KEY_TOKEN_EXPIRY
from aiohttp.web import (HTTPBadRequest, HTTPNotFound,
HTTPUnauthorized, Request, Response, delete, get,
json_response, patch, post, put, HTTPSeeOther)
from injector import inject, singleton
from .base_server import BaseServer, bytesPattern, intPattern
from .ports import Ports
from typing import Any, Dict
from asyncio import Event
from backup.creds import Creds
logger = getLogger(__name__)
mimeTypeQueryPattern = re.compile("^mimeType='.*'$")
parentsQueryPattern = re.compile("^'.*' in parents$")
resumeBytesPattern = re.compile("^bytes \\*/\\d+$")
URL_MATCH_DRIVE_API = "^.*drive.*$"
URL_MATCH_UPLOAD = "^/upload/drive/v3/files/$"
URL_MATCH_UPLOAD_PROGRESS = "^/upload/drive/v3/files/progress/.*$"
URL_MATCH_CREATE = "^/upload/drive/v3/files/progress/.*$"
URL_MATCH_FILE = "^/drive/v3/files/.*$"
URL_MATCH_DEVICE_CODE = "^/device/code$"
URL_MATCH_TOKEN = "^/token$"
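# SimulatedGoogle implements just enough of Google's OAuth2 endpoints and the
# Drive v3 API (queries, metadata, downloads, resumable uploads, device-code
# flow) for the add-on's integration tests to run against localhost.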
@singleton
class SimulatedGoogle(BaseServer):
@inject
def __init__(self, config: Config, time: Time, ports: Ports):
self._time = time
self.config = config
# auth state
self._custom_drive_client_id = self.generateId(5)
self._custom_drive_client_secret = self.generateId(5)
self._custom_drive_client_expiration = None
self._drive_auth_code = "drive_auth_code"
self._port = ports.server
self._auth_token = ""
self._refresh_token = "test_refresh_token"
self._client_id_hack = None
# Drive item states
self.items = {}
self.lostPermission = []
self.space_available = 5 * 1024 * 1024 * 1024
self.usage = 0
# Upload state information
self._upload_info: Dict[str, Any] = {}
self.chunks = []
self._upload_chunk_wait = Event()
self._upload_chunk_trigger = Event()
self._current_chunk = 1
self._waitOnChunk = 0
self.device_auth_params = {}
self._device_code_accepted = None
def setDriveSpaceAvailable(self, bytes_available):
self.space_available = bytes_available
def generateNewAccessToken(self):
new_token = self.generateId(20)
self._auth_token = new_token
def generateNewRefreshToken(self):
new_token = self.generateId(20)
self._refresh_token = new_token
def expireCreds(self):
self.generateNewAccessToken()
self.generateNewRefreshToken()
def expireRefreshToken(self):
self.generateNewRefreshToken()
def resetDriveAuth(self):
self.expireCreds()
self.config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, self.generateId(5))
self.config.override(Setting.DEFAULT_DRIVE_CLIENT_SECRET, self.generateId(5))
def creds(self):
return Creds(self._time,
id=self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID),
expiration=self._time.now() + timedelta(hours=1),
access_token=self._auth_token,
refresh_token=self._refresh_token)
def routes(self):
return [
put('/upload/drive/v3/files/progress/{id}', self._uploadProgress),
post('/upload/drive/v3/files/', self._upload),
post('/drive/v3/files/', self._create),
get('/drive/v3/files/', self._query),
delete('/drive/v3/files/{id}/', self._delete),
patch('/drive/v3/files/{id}/', self._update),
get('/drive/v3/files/{id}/', self._get),
post('/oauth2/v4/token', self._oauth2Token),
get('/o/oauth2/v2/auth', self._oAuth2Authorize),
get('/drive/customcreds', self._getCustomCred),
get('/drive/v3/about', self._driveAbout),
post('/device/code', self._deviceCode),
get('/device', self._device),
get('/debug/google', self._debug),
post('/token', self._driveToken),
]
async def _debug(self, request: Request):
return json_response({
"custom_drive_client_id": self._custom_drive_client_id,
"custom_drive_client_secret": self._custom_drive_client_secret,
"device_auth_params": self.device_auth_params
})
async def _checkDriveHeaders(self, request: Request):
if request.headers.get("Authorization", "") != "Bearer " + self._auth_token:
raise HTTPUnauthorized()
async def _deviceCode(self, request: Request):
params = await request.post()
client_id = params['client_id']
scope = params['scope']
if client_id != self._custom_drive_client_id or scope != 'https://www.googleapis.com/auth/drive.file':
raise HTTPUnauthorized()
self.device_auth_params = {
'device_code': self.generateId(10),
'expires_in': 60,
'interval': 1,
'user_code': self.generateId(8),
'verification_url': str(URL("http://localhost").with_port(self._port).with_path("device"))
}
self._device_code_accepted = None
return json_response(self.device_auth_params)
async def _device(self, request: Request):
code = request.query.get('code')
if code:
if self.device_auth_params.get('user_code', "dfsdfsdfsdfs") == code:
body = "Accepted"
self._device_code_accepted = True
self.generateNewRefreshToken()
self.generateNewAccessToken()
else:
body = "Wrong code"
else:
body = """
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type">
<meta content="utf-8" http-equiv="encoding">
<title>Simulated Drive Device Authorization</title>
</head>
<body>
<div>
Enter the device code provided below
</div>
<form>
<label for="code">Device Code:</label><br>
<input type="text" value="Device Code" id="code" name="code">
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
resp = Response(body=body, content_type="text/html")
return resp
async def _oAuth2Authorize(self, request: Request):
query = request.query
if query.get('client_id') != self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID) and query.get('client_id') != self._custom_drive_client_id:
raise HTTPUnauthorized()
if query.get('scope') != 'https://www.googleapis.com/auth/drive.file':
raise HTTPUnauthorized()
if query.get('response_type') != 'code':
raise HTTPUnauthorized()
if query.get('include_granted_scopes') != 'true':
raise HTTPUnauthorized()
if query.get('access_type') != 'offline':
raise HTTPUnauthorized()
if 'state' not in query:
raise HTTPUnauthorized()
if 'redirect_uri' not in query:
raise HTTPUnauthorized()
if query.get('prompt') != 'consent':
raise HTTPUnauthorized()
if query.get('redirect_uri') == 'urn:ietf:wg:oauth:2.0:oob':
return json_response({"code": self._drive_auth_code})
url = URL(query.get('redirect_uri')).with_query({'code': self._drive_auth_code, 'state': query.get('state')})
raise HTTPSeeOther(str(url))
async def _getCustomCred(self, request: Request):
return json_response({
"client_id": self._custom_drive_client_id,
"client_secret": self._custom_drive_client_secret
})
async def _driveToken(self, request: Request):
data = await request.post()
if not self._checkClientIdandSecret(data.get('client_id'), data.get('client_secret')):
raise HTTPUnauthorized()
if data.get('grant_type') == 'authorization_code':
if data.get('redirect_uri') not in ["http://localhost:{}/drive/authorize".format(self._port), 'urn:ietf:wg:oauth:2.0:oob']:
raise HTTPUnauthorized()
if data.get('code') != self._drive_auth_code:
raise HTTPUnauthorized()
elif data.get('grant_type') == 'urn:ietf:params:oauth:grant-type:device_code':
if data.get('device_code') != self.device_auth_params['device_code']:
raise HTTPUnauthorized()
if self._device_code_accepted is None:
return json_response({
"error": "authorization_pending",
"error_description": "Precondition Required"
}, status=428)
elif self._device_code_accepted is False:
raise HTTPUnauthorized()
else:
raise HTTPBadRequest()
self.generateNewRefreshToken()
resp = {
'access_token': self._auth_token,
'refresh_token': self._refresh_token,
KEY_CLIENT_ID: data.get('client_id'),
KEY_CLIENT_SECRET: self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET),
KEY_TOKEN_EXPIRY: self.timeToRfc3339String(self._time.now()),
}
if self._custom_drive_client_expiration is not None:
resp[KEY_TOKEN_EXPIRY] = self.timeToRfc3339String(self._custom_drive_client_expiration)
return json_response(resp)
def _checkClientIdandSecret(self, client_id: str, client_secret: str) -> bool:
if self._custom_drive_client_id == client_id and self._custom_drive_client_secret == client_secret:
return True
if client_id == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID) and client_secret == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET):
return True
if self._client_id_hack is not None:
if client_id == self._client_id_hack and client_secret == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET):
return True
return False
async def _oauth2Token(self, request: Request):
params = await request.post()
if not self._checkClientIdandSecret(params['client_id'], params['client_secret']):
raise HTTPUnauthorized()
if params['refresh_token'] != self._refresh_token:
raise HTTPUnauthorized()
if params['grant_type'] == 'refresh_token':
self.generateNewAccessToken()
return json_response({
'access_token': self._auth_token,
'expires_in': 3600,
'token_type': 'doesn\'t matter'
})
elif params['grant_type'] == 'urn:ietf:params:oauth:grant-type:device_code':
if params['device_code'] != self.device_auth_params['device_code']:
raise HTTPUnauthorized()
if not self._device_code_accepted:
return json_response({
"error": "authorization_pending",
"error_description": "Precondition Required"
}, status=428)
return json_response({
'access_token': self._auth_token,
'expires_in': 3600,
'token_type': 'doesn\'t matter'
})
else:
raise HTTPUnauthorized()
def filter_fields(self, item: Dict[str, Any], fields) -> Dict[str, Any]:
ret = {}
for field in fields:
if field in item:
ret[field] = item[field]
return ret
def parseFields(self, source: str):
fields = []
for field in source.split(","):
if field.startswith("files("):
fields.append(field[6:])
elif field.endswith(")"):
fields.append(field[:-1])
else:
fields.append(field)
return fields
def formatItem(self, base, id):
caps = base.get('capabilities', {})
if 'capabilities' not in base:
base['capabilities'] = caps
if 'canAddChildren' not in caps:
caps['canAddChildren'] = True
if 'canListChildren' not in caps:
caps['canListChildren'] = True
if 'canDeleteChildren' not in caps:
caps['canDeleteChildren'] = True
if 'canTrashChildren' not in caps:
caps['canTrashChildren'] = True
if 'canTrash' not in caps:
caps['canTrash'] = True
if 'canDelete' not in caps:
caps['canDelete'] = True
for parent in base.get("parents", []):
parent_item = self.items[parent]
# This simulates a very simple shared-drive permissions structure
if parent_item.get("driveId", None) is not None:
base["driveId"] = parent_item["driveId"]
base["capabilities"] = parent_item["capabilities"]
base['trashed'] = False
base['id'] = id
base['modifiedTime'] = self.timeToRfc3339String(self._time.now())
return base
async def _get(self, request: Request):
id = request.match_info.get('id')
await self._checkDriveHeaders(request)
if id not in self.items:
raise HTTPNotFound()
if id in self.lostPermission:
return Response(
status=403,
content_type="application/json",
text='{"error": {"errors": [{"reason": "forbidden"}]}}')
request_type = request.query.get("alt", "metadata")
if request_type == "media":
# return bytes
item = self.items[id]
if 'bytes' not in item:
raise HTTPBadRequest()
return self.serve_bytes(request, item['bytes'], include_length=False)
else:
fields = request.query.get("fields", "id").split(",")
return json_response(self.filter_fields(self.items[id], fields))
async def _update(self, request: Request):
id = request.match_info.get('id')
await self._checkDriveHeaders(request)
if id not in self.items:
raise HTTPNotFound()
update = await request.json()
for key in update:
if key in self.items[id] and isinstance(self.items[id][key], dict):
self.items[id][key].update(update[key])
else:
self.items[id][key] = update[key]
return Response()
async def _driveAbout(self, request: Request):
return json_response({
'storageQuota': {
'usage': self.usage,
'limit': self.space_available
},
'user': {
'emailAddress': "testing@no.where"
}
})
async def _delete(self, request: Request):
id = request.match_info.get('id')
await self._checkDriveHeaders(request)
if id not in self.items:
raise HTTPNotFound()
del self.items[id]
return Response()
async def _query(self, request: Request):
await self._checkDriveHeaders(request)
query: str = request.query.get("q", "")
fields = self.parseFields(request.query.get('fields', 'id'))
if mimeTypeQueryPattern.match(query):
ret = []
mimeType = query[len("mimeType='"):-1]
for item in self.items.values():
if item.get('mimeType', '') == mimeType:
ret.append(self.filter_fields(item, fields))
return json_response({'files': ret})
elif parentsQueryPattern.match(query):
ret = []
parent = query[1:-len("' in parents")]
if parent not in self.items:
raise HTTPNotFound()
if parent in self.lostPermission:
return Response(
status=403,
content_type="application/json",
text='{"error": {"errors": [{"reason": "forbidden"}]}}')
for item in self.items.values():
if parent in item.get('parents', []):
ret.append(self.filter_fields(item, fields))
return json_response({'files': ret})
elif len(query) == 0:
ret = []
for item in self.items.values():
ret.append(self.filter_fields(item, fields))
return json_response({'files': ret})
else:
raise HTTPBadRequest
async def _create(self, request: Request):
await self._checkDriveHeaders(request)
item = self.formatItem(await request.json(), self.generateId(30))
self.items[item['id']] = item
return json_response({'id': item['id']})
async def _upload(self, request: Request):
logger.info("Drive start upload request")
await self._checkDriveHeaders(request)
if request.query.get('uploadType') != 'resumable':
raise HTTPBadRequest()
mimeType = request.headers.get('X-Upload-Content-Type', None)
if mimeType is None:
raise HTTPBadRequest()
size = int(request.headers.get('X-Upload-Content-Length', -1))
if size < 0:
raise HTTPBadRequest()
total_size = 0
for item in self.items.values():
total_size += item.get('size', 0)
total_size += size
if total_size > self.space_available:
return json_response({
"error": {
"errors": [
{"reason": "storageQuotaExceeded"}
]
}
}, status=400)
metadata = await request.json()
id = self.generateId()
# Validate parents
if 'parents' in metadata:
for parent in metadata['parents']:
if parent not in self.items:
raise HTTPNotFound()
if parent in self.lostPermission:
return Response(status=403, content_type="application/json", text='{"error": {"errors": [{"reason": "forbidden"}]}}')
self._upload_info['size'] = size
self._upload_info['mime'] = mimeType
self._upload_info['item'] = self.formatItem(metadata, id)
self._upload_info['id'] = id
self._upload_info['next_start'] = 0
metadata['bytes'] = bytearray()
metadata['size'] = size
resp = Response()
resp.headers['Location'] = "http://localhost:" + \
str(self._port) + "/upload/drive/v3/files/progress/" + id
return resp
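# Handles the resumable-upload protocol: each PUT carries a
# "Content-Range: bytes start-end/total" header, the server answers 308 with
# the confirmed range until the final chunk, then returns the created item id.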
async def _uploadProgress(self, request: Request):
if self._waitOnChunk > 0:
if self._current_chunk == self._waitOnChunk:
self._upload_chunk_trigger.set()
await self._upload_chunk_wait.wait()
else:
self._current_chunk += 1
id = request.match_info.get('id')
await self._checkDriveHeaders(request)
if self._upload_info.get('id', "") != id:
raise HTTPBadRequest()
chunk_size = int(request.headers['Content-Length'])
info = request.headers['Content-Range']
if resumeBytesPattern.match(info):
resp = Response(status=308)
if self._upload_info['next_start'] != 0:
resp.headers['Range'] = "bytes=0-{0}".format(self._upload_info['next_start'] - 1)
return resp
if not bytesPattern.match(info):
raise HTTPBadRequest()
numbers = intPattern.findall(info)
start = int(numbers[0])
end = int(numbers[1])
total = int(numbers[2])
if total != self._upload_info['size']:
raise HTTPBadRequest()
if start != self._upload_info['next_start']:
raise HTTPBadRequest()
if not (end == total - 1 or chunk_size % (256 * 1024) == 0):
raise HTTPBadRequest()
if end > total - 1:
raise HTTPBadRequest()
# get the chunk
received_bytes = await self.readAll(request)
# validate the chunk
if len(received_bytes) != chunk_size:
raise HTTPBadRequest()
if len(received_bytes) != end - start + 1:
raise HTTPBadRequest()
self._upload_info['item']['bytes'].extend(received_bytes)
if len(self._upload_info['item']['bytes']) != end + 1:
raise HTTPBadRequest()
self.usage += len(received_bytes)
self.chunks.append(len(received_bytes))
if end == total - 1:
# upload is complete, so create the item
completed = self.formatItem(self._upload_info['item'], self._upload_info['id'])
self.items[completed['id']] = completed
return json_response({"id": completed['id']})
else:
# Return an incomplete response
# For some reason, the tests like to stop right here
resp = Response(status=308)
self._upload_info['next_start'] = end + 1
resp.headers['Range'] = "bytes=0-{0}".format(end)
return resp

View File

@@ -0,0 +1,459 @@
import asyncio
from asyncio.tasks import sleep
from datetime import timedelta
import random
import string
import io
from backup.config import Config, Version
from backup.time import Time
from aiohttp.web import (HTTPBadRequest, HTTPNotFound,
HTTPUnauthorized, Request, Response, get,
json_response, post, delete, FileResponse)
from injector import inject, singleton
from .base_server import BaseServer
from .ports import Ports
from typing import Any, Dict
from tests.helpers import all_addons, createBackupTar, parseBackupInfo
URL_MATCH_BACKUP_FULL = "^/backups/new/full$"
URL_MATCH_BACKUP_DELETE = "^/backups/.*$"
URL_MATCH_BACKUP_DOWNLOAD = "^/backups/.*/download$"
URL_MATCH_MISC_INFO = "^/info$"
URL_MATCH_CORE_API = "^/core/api.*$"
URL_MATCH_START_ADDON = "^/addons/.*/start$"
URL_MATCH_STOP_ADDON = "^/addons/.*/stop$"
URL_MATCH_ADDON_INFO = "^/addons/.*/info$"
URL_MATCH_SELF_OPTIONS = "^/addons/self/options$"
URL_MATCH_SNAPSHOT = "^/snapshots.*$"
URL_MATCH_BACKUPS = "^/backups.*$"
URL_MATCH_MOUNT = "^/mounts*$"
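# SimulatedSupervisor mimics the Home Assistant Supervisor REST API (backups,
# add-ons, mounts, auth, core services) so the add-on can be exercised end to
# end without a real Home Assistant installation.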
@singleton
class SimulatedSupervisor(BaseServer):
@inject
def __init__(self, config: Config, ports: Ports, time: Time):
self._config = config
self._time = time
self._ports = ports
self._auth_token = "test_header"
self._backups: Dict[str, Any] = {}
self._backup_data: Dict[str, bytearray] = {}
self._backup_lock = asyncio.Lock()
self._backup_inner_lock = asyncio.Lock()
self._entities = {}
self._events = []
self._attributes = {}
self._notification = None
self._min_backup_size = 1024 * 1024 * 5
self._max_backup_size = 1024 * 1024 * 5
self._addon_slug = "self_slug"
self._options = self.defaultOptions()
self._username = "user"
self._password = "pass"
self._addons = all_addons.copy()
self._super_version = Version(2023, 7)
self._mounts = {
'default_backup_mount': None,
'mounts': [
{
"name": "my_media_share",
"usage": "media",
"type": "cifs",
"server": "server.local",
"share": "media",
"state": "active"
},
{
"name": "my_backup_share",
"usage": "backup",
"type": "nfs",
"server": "server.local",
"share": "media",
"state": "active"
}
]
}
self.installAddon(self._addon_slug, "Home Assistant Google drive Backup")
self.installAddon("42", "The answer")
self.installAddon("sgadg", "sdgsagsdgsggsd")
def defaultOptions(self):
return {
"max_backups_in_ha": 4,
"max_backups_in_google_drive": 4,
"days_between_backups": 3
}
def routes(self):
return [
post('/addons/{slug}/options', self._updateOptions),
post("/core/api/services/persistent_notification/dismiss", self._dismissNotification),
post("/core/api/services/persistent_notification/create", self._createNotification),
post("/core/api/events/{name}", self._haEventUpdate),
post("/core/api/states/{entity}", self._haStateUpdate),
post('/auth', self._authenticate),
get('/auth', self._authenticate),
get('/info', self._miscInfo),
get('/addons/self/info', self._selfInfo),
get('/addons', self._allAddons),
get('/addons/{slug}/info', self._addonInfo),
post('/addons/{slug}/start', self._startAddon),
post('/addons/{slug}/stop', self._stopAddon),
get('/addons/{slug}/logo', self._logoAddon),
get('/addons/{slug}/icon', self._logoAddon),
get('/core/info', self._coreInfo),
get('/supervisor/info', self._supervisorInfo),
get('/supervisor/logs', self._supervisorLogs),
get('/core/logs', self._coreLogs),
get('/debug/insert/backup', self._debug_insert_backup),
get('/debug/info', self._debugInfo),
post("/debug/mounts", self._setMounts),
get('/backups', self._getBackups),
get('/mounts', self._getMounts),
delete('/backups/{slug}', self._deletebackup),
post('/backups/new/upload', self._uploadbackup),
post('/backups/new/partial', self._newbackup),
post('/backups/new/full', self._newbackup),
get('/backups/new/full', self._newbackup),
get('/backups/{slug}/download', self._backupDownload),
get('/backups/{slug}/info', self._backupDetail),
get('/debug/backups/lock', self._lock_backups),
# TODO: remove once the api path is fully deprecated
get('/snapshots', self._getSnapshots),
post('/snapshots/{slug}/remove', self._deletebackup),
post('/snapshots/new/upload', self._uploadbackup),
post('/snapshots/new/partial', self._newbackup),
post('/snapshots/new/full', self._newbackup),
get('/snapshots/new/full', self._newbackup),
get('/snapshots/{slug}/download', self._backupDownload),
get('/snapshots/{slug}/info', self._backupDetail),
]
def getEvents(self):
return self._events.copy()
def getEntity(self, entity):
return self._entities.get(entity)
def clearEntities(self):
self._entities = {}
def addon(self, slug):
for addon in self._addons:
if addon["slug"] == slug:
return addon
return None
def getAttributes(self, attribute):
return self._attributes.get(attribute)
def getNotification(self):
return self._notification
def _formatErrorResponse(self, error: str) -> Response:
return json_response({'result': error})
def _formatDataResponse(self, data: Any) -> Response:
return json_response({'result': 'ok', 'data': data})
async def toggleBlockBackup(self):
if self._backup_lock.locked():
self._backup_lock.release()
else:
await self._backup_lock.acquire()
async def _verifyHeader(self, request) -> None:
if request.headers.get("Authorization", None) == "Bearer " + self._auth_token:
return
if request.headers.get("X-Supervisor-Token", None) == self._auth_token:
return
raise HTTPUnauthorized()
async def _getSnapshots(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse({'snapshots': list(self._backups.values())})
async def _getBackups(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse({'backups': list(self._backups.values())})
async def _getMounts(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse(self._mounts)
async def _setMounts(self, request: Request):
self._mounts = await request.json()
return self._formatDataResponse({})
async def _stopAddon(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
for addon in self._addons:
if addon.get("slug", "") == slug:
if addon.get("state") == "started":
addon["state"] = "stopped"
return self._formatDataResponse({})
raise HTTPBadRequest()
async def _logoAddon(self, request: Request):
await self._verifyHeader(request)
return FileResponse('hassio-google-drive-backup/backup/static/images/logo.png')
async def _startAddon(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
for addon in self._addons:
if addon.get("slug", "") == slug:
if addon.get("state") != "started":
addon["state"] = "started"
return self._formatDataResponse({})
raise HTTPBadRequest()
async def _addonInfo(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
for addon in self._addons:
if addon.get("slug", "") == slug:
return self._formatDataResponse({
'boot': addon.get("boot"),
'watchdog': addon.get("watchdog"),
'state': addon.get("state"),
})
raise HTTPBadRequest()
async def _supervisorInfo(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse(
{
'version': str(self._super_version)
}
)
async def _allAddons(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse(
{
"addons": list(self._addons).copy()
}
)
async def _supervisorLogs(self, request: Request):
await self._verifyHeader(request)
return Response(body=self.generate_random_text(20, 10, 20))
def generate_random_text(self, line_count, min_words=5, max_words=10):
lines = []
log_levels = ["WARN", "WARNING", "INFO", "ERROR", "DEBUG"]
for _ in range(line_count):
level = random.choice(log_levels)
word_count = random.randint(min_words, max_words)
words = [random.choice(string.ascii_lowercase) for _ in range(word_count)]
line = level + " " + ' '.join(''.join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(3, 10))) for _ in words)
lines.append(line)
return '\n'.join(lines)
async def _coreLogs(self, request: Request):
await self._verifyHeader(request)
return Response(body="Core Log line 1\nCore Log Line 2")
async def _coreInfo(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse(
{
"version": "1.3.3.7",
"last_version": "1.3.3.8",
"machine": "VS Dev",
"ip_address": "127.0.0.1",
"arch": "x86",
"image": "image",
"custom": "false",
"boot": "true",
"port": self._ports.server,
"ssl": "false",
"watchdog": "what is this",
"wait_boot": "so many arguments"
}
)
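# Creates a fake backup tarball and registers it, honoring the global backup
# lock and an optional 'wait' delay so tests can simulate slow or blocked
# backups.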
async def _internalNewBackup(self, request: Request, input_json, date=None, verify_header=True) -> str:
async with self._backup_lock:
async with self._backup_inner_lock:
if 'wait' in input_json:
await sleep(input_json['wait'])
if verify_header:
await self._verifyHeader(request)
slug = self.generateId(8)
password = input_json.get('password', None)
data = createBackupTar(
slug,
input_json.get('name', "Default name"),
date=date or self._time.now(),
padSize=int(random.uniform(self._min_backup_size, self._max_backup_size)),
included_folders=input_json.get('folders', None),
included_addons=input_json.get('addons', None),
password=password)
backup_info = parseBackupInfo(data)
self._backups[slug] = backup_info
self._backup_data[slug] = bytearray(data.getbuffer())
return slug
async def createBackup(self, input_json, date=None):
return await self._internalNewBackup(None, input_json, date=date, verify_header=False)
async def _newbackup(self, request: Request):
if self._backup_lock.locked():
raise HTTPBadRequest()
input_json = await request.json()
task = asyncio.shield(asyncio.create_task(self._internalNewBackup(request, input_json)))
return self._formatDataResponse({"slug": await task})
async def _lock_backups(self, request: Request):
await self._backup_lock.acquire()
return self._formatDataResponse({"message": "locked"})
async def _uploadbackup(self, request: Request):
await self._verifyHeader(request)
try:
reader = await request.multipart()
contents = await reader.next()
received_bytes = bytearray()
while True:
chunk = await contents.read_chunk()
if not chunk:
break
received_bytes.extend(chunk)
info = parseBackupInfo(io.BytesIO(received_bytes))
self._backups[info['slug']] = info
self._backup_data[info['slug']] = received_bytes
return self._formatDataResponse({"slug": info['slug']})
except Exception as e:
print(str(e))
return self._formatErrorResponse("Bad backup")
async def _deletebackup(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
if slug not in self._backups:
raise HTTPNotFound()
del self._backups[slug]
del self._backup_data[slug]
return self._formatDataResponse("deleted")
async def _backupDetail(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
if slug not in self._backups:
raise HTTPNotFound()
return self._formatDataResponse(self._backups[slug])
async def _backupDownload(self, request: Request):
await self._verifyHeader(request)
slug = request.match_info.get('slug')
if slug not in self._backup_data:
raise HTTPNotFound()
return self.serve_bytes(request, self._backup_data[slug])
async def _selfInfo(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse({
"webui": "http://some/address",
'ingress_url': "fill me in later",
"slug": self._addon_slug,
"options": self._options
})
async def _debugInfo(self, request: Request):
return self._formatDataResponse({
"config": {
" webui": "http://some/address",
'ingress_url': "fill me in later",
"slug": self._addon_slug,
"options": self._options
}
})
async def _miscInfo(self, request: Request):
await self._verifyHeader(request)
return self._formatDataResponse({
"supervisor": "super version",
"homeassistant": "ha version",
"hassos": "hassos version",
"hostname": "hostname",
"machine": "machine",
"arch": "Arch",
"supported_arch": "supported arch",
"channel": "channel"
})
def installAddon(self, slug, name, version="v1.0", boot=True, started=True):
self._addons.append({
"name": 'Name for ' + name,
"slug": slug,
"description": slug + " description",
"version": version,
"watchdog": False,
"boot": "auto" if boot else "manual",
"logo": True,
"ingress_entry": "/api/hassio_ingress/" + slug,
"state": "started" if started else "stopped"
})
async def _authenticate(self, request: Request):
await self._verifyHeader(request)
input_json = await request.json()
if input_json.get("username") != self._username or input_json.get("password") != self._password:
raise HTTPBadRequest()
return self._formatDataResponse({})
async def _updateOptions(self, request: Request):
slug = request.match_info.get('slug')
if slug == "self":
await self._verifyHeader(request)
self._options = (await request.json())['options'].copy()
else:
self.addon(slug).update(await request.json())
return self._formatDataResponse({})
async def _haStateUpdate(self, request: Request):
await self._verifyHeader(request)
entity = request.match_info.get('entity')
json = await request.json()
self._entities[entity] = json['state']
self._attributes[entity] = json['attributes']
return Response()
async def _haEventUpdate(self, request: Request):
await self._verifyHeader(request)
name = request.match_info.get('name')
self._events.append((name, await request.json()))
return Response()
async def _createNotification(self, request: Request):
await self._verifyHeader(request)
notification = await request.json()
print("Created notification with: {}".format(notification))
self._notification = notification.copy()
return Response()
async def _dismissNotification(self, request: Request):
await self._verifyHeader(request)
print("Dismissed notification with: {}".format(await request.json()))
self._notification = None
return Response()
async def _debug_insert_backup(self, request: Request) -> Response:
days_back = int(request.query.get("days"))
date = self._time.now() - timedelta(days=days_back)
name = date.strftime("Full Backup %Y-%m-%d %H:%M-%S")
wait = int(request.query.get("wait", 0))
slug = await self._internalNewBackup(request, {'name': name, 'wait': wait}, date=date, verify_header=False)
return self._formatDataResponse({'slug': slug})

View File

@@ -0,0 +1,165 @@
import re
from typing import Dict
from yarl import URL
import aiohttp
from aiohttp.web import (Application,
HTTPException,
Request, Response, get,
json_response, middleware, post, HTTPSeeOther)
from aiohttp.client import ClientSession
from injector import inject, singleton, Injector, provider
from backup.time import Time
from backup.logger import getLogger
from backup.server import Server
from tests.faketime import FakeTime
from backup.module import BaseModule
from backup.config import Config, Setting
from .http_exception import HttpMultiException
from .simulated_google import SimulatedGoogle
from .base_server import BaseServer
from .ports import Ports
from .request_interceptor import RequestInterceptor
from .simulated_supervisor import SimulatedSupervisor
from .apiingress import APIIngress
import aiorun
logger = getLogger(__name__)
mimeTypeQueryPattern = re.compile("^mimeType='.*'$")
parentsQueryPattern = re.compile("^'.*' in parents$")
bytesPattern = re.compile("^bytes \\d+-\\d+/\\d+$")
resumeBytesPattern = re.compile("^bytes \\*/\\d+$")
intPattern = re.compile("\\d+")
rangePattern = re.compile("bytes=\\d+-\\d+")
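# SimulationServer combines the simulated Google, Supervisor, and ingress
# endpoints into one aiohttp application, with an interceptor middleware that
# lets tests inject errors before any handler runs.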
@singleton
class SimulationServer(BaseServer):
@inject
def __init__(self, ports: Ports, time: Time, session: ClientSession, authserver: Server, config: Config, google: SimulatedGoogle, supervisor: SimulatedSupervisor, api_ingress: APIIngress, interceptor: RequestInterceptor):
self.interceptor = interceptor
self.google = google
self.supervisor = supervisor
self.config = config
self.id_counter = 0
self.files: Dict[str, bytearray] = {}
self._port = ports.server
self._time: FakeTime = time
self.urls = []
self.relative = True
self._authserver = authserver
self._api_ingress = api_ingress
def wasUrlRequested(self, pattern):
for url in self.urls:
if pattern in url:
return True
return False
def blockBackups(self):
self.block_backups = True
def unBlockBackups(self):
self.block_backups = False
async def uploadfile(self, request: Request):
name: str = str(request.query.get("name", "test"))
self.files[name] = await self.readAll(request)
return Response(text="")
async def readFile(self, request: Request):
return self.serve_bytes(request, self.files[request.query.get("name", "test")])
async def slugRedirect(self, request: Request):
raise HTTPSeeOther("https://localhost:" + str(self.config.get(Setting.INGRESS_PORT)))
@middleware
async def error_middleware(self, request: Request, handler):
self.urls.append(str(request.url))
resp = await self.interceptor.checkUrl(request)
if resp is not None:
return resp
try:
resp = await handler(request)
return resp
except Exception as ex:
await self.readAll(request)
if isinstance(ex, HttpMultiException):
return Response(status=ex.status_code)
elif isinstance(ex, HTTPException):
raise
else:
logger.printException(ex)
return json_response(str(ex), status=500)
def createApp(self):
app = Application(middlewares=[self.error_middleware])
app.add_routes(self.routes())
self._authserver.buildApp(app)
return app
async def start(self, port):
self.runner = aiohttp.web.AppRunner(self.createApp())
await self.runner.setup()
site = aiohttp.web.TCPSite(self.runner, "0.0.0.0", port=port)
await site.start()
async def stop(self):
self.interceptor.stop()
await self.runner.shutdown()
await self.runner.cleanup()
def routes(self):
return [
get('/readfile', self.readFile),
post('/uploadfile', self.uploadfile),
get('/ingress/self_slug', self.slugRedirect),
get('/debug/config', self.debug_config)
] + self.google.routes() + self.supervisor.routes() + self._api_ingress.routes()
async def debug_config(self, request: Request):
return json_response(self.supervisor._options)
class SimServerModule(BaseModule):
def __init__(self, base_url: URL):
super().__init__(override_dns=False)
self._base_url = base_url
@provider
@singleton
def getConfig(self) -> Config:
return Config.withOverrides({
Setting.DRIVE_AUTHORIZE_URL: str(self._base_url.with_path("o/oauth2/v2/auth")),
Setting.AUTHORIZATION_HOST: str(self._base_url),
Setting.TOKEN_SERVER_HOSTS: str(self._base_url),
Setting.DRIVE_TOKEN_URL: str(self._base_url.with_path("token")),
Setting.DRIVE_DEVICE_CODE_URL: str(self._base_url.with_path("device/code")),
Setting.DRIVE_REFRESH_URL: str(self._base_url.with_path("oauth2/v4/token")),
Setting.INGRESS_PORT: 56152
})
@provider
@singleton
def getPorts(self) -> Ports:
return Ports(56153, 56151, 56152)
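# Entry point for running the simulation server standalone during development;
# it listens on port 56153 and serves both the fake Google and Supervisor APIs.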
async def main():
port = 56153
base = URL("http://localhost").with_port(port)
injector = Injector(SimServerModule(base))
server = injector.get(SimulationServer)
# start the server
runner = aiohttp.web.AppRunner(server.createApp())
await runner.setup()
site = aiohttp.web.TCPSite(runner, "0.0.0.0", port=port)
await site.start()
print("Server started on port " + str(port))
print("Open a browser at http://localhost:" + str(port))
if __name__ == '__main__':
aiorun.run(main())

View File

@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC5TCCAc2gAwIBAgIJAN+M1w1AVtigMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
BAMMCWxvY2FsaG9zdDAeFw0xOTAzMjYwMzI2MDJaFw0xOTA0MjUwMzI2MDJaMBQx
EjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBANAa2QE9uHexG6b/ggk7muXB4AhEcpPU+eqGmp4kFx/cKTYe+rPfui4FbARa
QyajXrVRMukEs0wZpUJ11LeGOmuTJ1Cu6mKtk4ub35ZrTfY0W0YdTW0ASYifDNQZ
pt4S0HAcY9A6wlorADxqDkqBt3cSuXdDaR6wFhc4x2kN7xMcKgX5Exv6AS04ksLm
fu0JNSvY1PcLQOA8bFc8tm4eEQcF51xBJBchCcXwpsr5OXt33govGcgxEPLZIueO
nmzzbF0jWBzBhwmjGGnEVsHnxgTG59QshFuB2xf6uWuZolLaPg32b2CV4gomFbn1
7j4JMFTlxw80OkWILLR6pMr1gy0CAwEAAaM6MDgwFAYDVR0RBA0wC4IJbG9jYWxo
b3N0MAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0B
AQsFAAOCAQEAeK7VMbYO1lQmQcNIG/X42sS5Dm/YFSKgXG0VNMwjEa0xOPS54a6P
a3n7Lb6cVgwSstCSkQa0/Paqy/OvoJlvvgSrV8ZkqwU7100d7gohrReMAhWbRRDK
GkiJDUUQLAT8DXLRry2r5zRDaHX8OzzQuF8dPbFVkjXv9EMpBISY0hmodQFxBmiK
hxiYQWDcNQOTLwRk/x/b61AFLSXduonWM3r+29e8ej7LEHh9UJeLFF7S0+8t+7W4
F8j8rGWFjYa2KCUFgTOWSg1cUnKYqFaakcMQAlfcXCzuDOso/gwuVFeZZ1hY7gEQ
OHJt0Tu+PWE4CQ3118AIajj2pxTuEHc6Ow==
-----END CERTIFICATE-----

View File

@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIDAjCCAeoCAQAwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDTzETMBEGA1UE
BwwKU291dGggUGFyazEYMBYGA1UECgwPVW5pdCBUZXN0cyBJbmMuMR4wHAYDVQQL
DBVUZXN0aW5nIERlcHQuIEkgZ3Vlc3MxEjAQBgNVBAMMCWxvY2FsaG9zdDEjMCEG
CSqGSIb3DQEJARYUc3RlcGhlbkBiZWVjaGVucy5jb20wggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDCu0+68ol5a9ShDmeg41INbwR0QdG0khlzA54Yhu3t
yhEYv7H1XE5JKwSENc1YkBTMlnmbEySW+YMpRXy6R/GoCaNU2wnz6UCdkJQQf6l+
xIAkaRB+tj7uPpz65olC6tx5CFD+je/A6ZrHzAoEhiKTsQhI5uxexnl191BIQvcj
u7qKaN+TXmvKGlixPrYp4T30EWMDsbONyNjcZr/C4Xs1SzicfscDKt8qiINP8Fgd
tBDxyPIa4deYVKHG/1le9L1ccPFy1+wSQQG3d4YED7h94ajc5chmjMkJnTTYlRKL
XwMZxcsqX9ngHhPvoB5ZahGOLtjyYpxrvduY4kQ8XSaxAgMBAAGgGjAYBgkqhkiG
9w0BCQcxCwwJY2hhbGxlbmdlMA0GCSqGSIb3DQEBCwUAA4IBAQCT+ZSEvz9mJhMA
v71WWd+QjTyT4+9SItLVK3EAcpPbbJWayCuD+mKCGQr5plixC3w+tjy4coIG8lUo
pCX8sXi7TKMVKw6LYvBJeaRRAJ2+exeAQWJvGtRBBohXzm2+SxJ5Zp5+XEY7L3o8
Apk++px7kLQTSRZxFAQ/irL/cUrp5Sn33ago+bzGA2AGryrqfBbe/nCwlCGF6cV2
2w9oqY38tPeHQK9+MLOWDE0mBZvu+ab1mpTR7hxFVaVIKOBf8BifSVc4qJ8CDS+l
N4vEnxHIGdTXVp6yjpWN86qidjbLBqS6ZvY1dw6uFuXWSZP7gRixJi4/NUCf0NSO
yd+jFL0b
-----END CERTIFICATE REQUEST-----

View File

@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC8DCCAdigAwIBAgIUUOqXw4hsjBcEzJwlO1o9TYw+f+wwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIwMDIwMzA4MDYyNVoXDTIwMDMw
NDA4MDYyNVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAwrtPuvKJeWvUoQ5noONSDW8EdEHRtJIZcwOeGIbt7coR
GL+x9VxOSSsEhDXNWJAUzJZ5mxMklvmDKUV8ukfxqAmjVNsJ8+lAnZCUEH+pfsSA
JGkQfrY+7j6c+uaJQurceQhQ/o3vwOmax8wKBIYik7EISObsXsZ5dfdQSEL3I7u6
imjfk15ryhpYsT62KeE99BFjA7GzjcjY3Ga/wuF7NUs4nH7HAyrfKoiDT/BYHbQQ
8cjyGuHXmFShxv9ZXvS9XHDxctfsEkEBt3eGBA+4feGo3OXIZozJCZ002JUSi18D
GcXLKl/Z4B4T76AeWWoRji7Y8mKca73bmOJEPF0msQIDAQABozowODAUBgNVHREE
DTALgglsb2NhbGhvc3QwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMB
MA0GCSqGSIb3DQEBCwUAA4IBAQBsZ29ZHTO6yNGPKWpxfOG38Z+mk6eh6TpbIVze
b7L2cFr/ONEFyz9hnS3kf23S9VsoX0AMdqYZbGmUT/4+d9+Q8hRXv7W3zenUk4KY
SkMfvB3J27w2l9Zx7oYfonBC7SSbfYrCBHgZwsINzdP5aC2q6eFTOadIdcF2bxf9
FU/4aUyOeCkHAtYkVyxM3F33Qmf7ym7OZYKLn4SrPLFRSYiWRd8w+ww75uinnS5W
bG96OojPYzIZu8rb3b5ISR2BMWP0JVQRdmV+8TG1ekaA6EB5gAven55OxCmIUAJm
UEOLPRtVvJN0SE1S6jZBXBHler7IRDKpxATXbdFBK01s4rDz
-----END CERTIFICATE-----

View File

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDCu0+68ol5a9Sh
Dmeg41INbwR0QdG0khlzA54Yhu3tyhEYv7H1XE5JKwSENc1YkBTMlnmbEySW+YMp
RXy6R/GoCaNU2wnz6UCdkJQQf6l+xIAkaRB+tj7uPpz65olC6tx5CFD+je/A6ZrH
zAoEhiKTsQhI5uxexnl191BIQvcju7qKaN+TXmvKGlixPrYp4T30EWMDsbONyNjc
Zr/C4Xs1SzicfscDKt8qiINP8FgdtBDxyPIa4deYVKHG/1le9L1ccPFy1+wSQQG3
d4YED7h94ajc5chmjMkJnTTYlRKLXwMZxcsqX9ngHhPvoB5ZahGOLtjyYpxrvduY
4kQ8XSaxAgMBAAECggEAJ1rt0S2FRSnazjX4EZb/lUFzl/9ZX3ILfKglgnV6jo1B
CUxsrdba54SvI/0vpA9ydKqQpxumUHDa5jNp8sfpefmArfyatVXVvkJi+jaizcDu
2Oz27XTtoP68gSSoZwLKThe1Ls0GwGk1491DxQhK4qhrsTgiW0EneQTjj8cg5XKH
/2l0WDslZDwW8XkJ1iqGi/OPs/X4SHggzX3xEFS2SpDK0e6GovyTfijpaql3MLMR
jnEeF69hUKKN7ADxhWvQ8d5C0CICYUzryGScVUs5312Zl83iOoeaixxfh6UaNOmE
jjdM6Hc7VbYEcfQTdZXyIPrzcz+Tc0DSDW+QsktLMQKBgQDn7j/oCNqLhxa1XnA8
HgQqUUTav/OWlWpieTmcyZ2LkRRw9MJTnP1FIfIvOXplWFSpbSSArAEzsjpjRt0n
2+7VxwN3qNirNGAk3PZiRXXHq7sE3z39PhLPthpNisYTDTIx8fcYK032uEPHsSSj
i13yKeYqeGOmfnu0nrlmZ9+ThQKBgQDW8MnvhqjMxZDdVdxZKlY/8ihnubVBlp59
s2SFIrWD1/QcKawCzagJHe/YR865k3ti7XIBghmKwLSMa6ENdTxTSSLHbBXlXJtH
tlWFgfVb8eDi7zo9178W8TrWEB7dSC2F6qMN17wOKWRkyo/c4cYBiAUaNQ1inJjk
ACOvHesAPQKBgHXEttKd3EtJNzC1WYxNOZQ7XBkvqwLlr/V81NJWVhdOffC1eA95
AeoeyJlOOGZJqgO2Ffj4XkvfzmIm05mvxeDrg0k5hXu5xrAxOzK/ToUrIHXi3dk/
sdGjCEwjkVyPMNPHp86v/pCvFEvMGWyqEfQrbmJWa1NZmnsmtcHYMOD5AoGAD1AW
Qt9IFVaZ7HraeOvAO0wIPuOHG0Ycwn3OUoHXhq4S8RKy83wtVYDxfmoXOzdbmf+q
mJrpMO5rrnlYfvn0M0bJmIWFxdJkKaa+zwUkMsm3qNM8Rf2h2oOTGn8Jg+BJhfni
ZfERr7yZL2kS+LyI+8DyBBz1eCoJ5mxwHmC2Rk0CgYBcrhxANSpikw07XLRFcvk9
m79qiEThhmiBf1WVZdtWNi9hR+zs6mWrTk8N8jfLzNLLNMPdAAybF8feeMTa9xpS
zXF9Gqlayzx/+wyPts7ocrdJKikDVdVZauoxG+mNE87VcVEx87ZiboirQVoKSsxe
OmwKminJ/E4GHJCY7RLQAw==
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDQGtkBPbh3sRum
/4IJO5rlweAIRHKT1PnqhpqeJBcf3Ck2Hvqz37ouBWwEWkMmo161UTLpBLNMGaVC
ddS3hjprkydQrupirZOLm9+Wa032NFtGHU1tAEmInwzUGabeEtBwHGPQOsJaKwA8
ag5Kgbd3Erl3Q2kesBYXOMdpDe8THCoF+RMb+gEtOJLC5n7tCTUr2NT3C0DgPGxX
PLZuHhEHBedcQSQXIQnF8KbK+Tl7d94KLxnIMRDy2SLnjp5s82xdI1gcwYcJoxhp
xFbB58YExufULIRbgdsX+rlrmaJS2j4N9m9gleIKJhW59e4+CTBU5ccPNDpFiCy0
eqTK9YMtAgMBAAECggEADlvr4UQK+GdGCy3SIST1uSi5dpiSd1TYsa/79zFyTwZ3
6X4VuleTlx1UqLA5te7L2CL0KlPiszuJxZ4vwUIHwehzbAPFtG1ZouZsdQqOZJCU
Q7A96Wl9qWmgDvp+IxCVRUcQNAv54RLaf1CqD8YHjLXEClCibjWkMJIAYGVPu7ez
44sbXenPi+4OfI5IHhhBm+RmXv6QpP/A4OyIg/X35NoIp+z+J/aajFsb6AMvFejU
kMCj23PUv4MGA0zrc09UDzM/d7qwCeOMCW0QqKidbkZ+UtY3lsSj7b0l50TTEYsf
2sB/xjkUVHg9sJc8ieuf8LaHedvmiQPfECjZU9VhmQKBgQDx0h359EJSvil/iQ4o
OrsmxMz40mi/9pwznF0SUuRyKOsmJsSx7zL3rVFo/YLHOE5Ju4PSDm1OL4drUE0z
2l/0S6tlN4teHU6x969Xqm2vpwKP3jFXpD0zEi4QRGXgqtY1sVFO4ZIKfTa3KKMu
wqNmAB1KczvIkU71ClzqaVUULwKBgQDcTqI1SkwmIGP4PnGbLQTRI8pmw4xx/d7X
bpgAeCegSwfCy94nX7TdDYujhxa1rp3ya5YSnkTTN7oGCXIsZkLjmfFmjiIh3uEk
YX0obydQvVUfnPTPXQP3QhZG2dQtFdUUJOsu1bJKC7a/jcLGqbJzeBUg/Sb0/gXP
KCPCCr5bYwKBgHrbVX94KXoAQvUYnKizrgG0Wq7Pt4hPsmxGNMLqekXFpDJt3+DG
tg4/b+z3X0n3wU6UhhRiYAYo/5P16EM/3yAukZWK8rOOED06qUrQu4lSQGr3Z/ou
5yjbQ6vgFCJgqRP+UmDRGXFazEGh08Yd/QYFaNw6T1VG/eZgrXQqr57hAoGBALcb
qFiQm0ApNc4T4IrwXQuTKtxE9guczUXTxwTE2XKySg4PMmMZehMs+f39/tMdAmyG
HWL2JxBDRhtUaJAcosXXorvxsM7kF88MNGGSGWRTKVgwNY3QqsYtKKTU0jRy6/pl
QRBZT2mZ2NfXdKd4TjkI+s7DekiwhZWLsETMdzEvAoGARDyJNOpPPm/VpDgV08uU
P1yPOT6j8qhQ2dN1mEab0NeyY6HGriUg8y6HJ81Obt4YyVPlEplDJe8TkphWNsby
B93FpH56WF4g8ivKD4oC2JghlWf4c0MgxiWyoNvlHSM7Dmq2UfPDyV+1UhnNH1ty
CUMs7Fjk4BeJbrYmJf3VxYU=
-----END PRIVATE KEY-----

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,19 @@
google-api-python-client
google-auth-httplib2
google-auth-oauthlib
oauth2client
python-dateutil
watchdog
pyyaml
dnspython
aiorun
aiohttp
aiodns
injector
aiofiles
aiofile
colorlog
aiohttp-jinja2
aioping
pytz
tzlocal

View File

@@ -0,0 +1,20 @@
aiodns
aiofiles
aiofile
aiohttp
aiorun
colorlog
dnspython
google-cloud-logging
google-cloud-firestore
injector
oauth2client
ptvsd
python-dateutil
pyyaml
watchdog
aiohttp-jinja2
firebase-admin
pytz
tzlocal
aioping

View File

@@ -0,0 +1,8 @@
from setuptools import setup, find_packages
setup(
name="hgdb",
packages=find_packages(),
package_data={
'backup': ['static/*', 'static/*/*', 'static/*/*/*']
}
)

View File

Some files were not shown because too many files have changed in this diff.