diff --git a/arpspoof/CHANGELOG.md b/arpspoof/CHANGELOG.md new file mode 100644 index 0000000..ebf5f62 --- /dev/null +++ b/arpspoof/CHANGELOG.md @@ -0,0 +1,9 @@ +- Implemented healthcheck +- WARNING : update to supervisor 2022.11 before installing +- Add codenotary sign +- New standardized logic for Dockerfile build and packages installation + +## 1.0.0 (07-12-2021) + +- Update to latest version from t0mer/Arpspoof-Docker +- Initial release diff --git a/arpspoof/Dockerfile b/arpspoof/Dockerfile new file mode 100644 index 0000000..f390510 --- /dev/null +++ b/arpspoof/Dockerfile @@ -0,0 +1,131 @@ +#============================# +# ALEXBELGIUM'S DOCKERFILE # +#============================# +# _.------. +# _.-` ('>.-`"""-. +# '.--'` _'` _ .--.) +# -' '-.-';` ` +# ' - _.' ``'--. +# '---` .-'""` +# /` +#=== Home Assistant Addon ===# + +################# +# 1 Build Image # +################# + +ARG BUILD_FROM +ARG BUILD_VERSION +ARG BUILD_UPSTREAM="1.0.0" +FROM ${BUILD_FROM} + +################## +# 2 Modify Image # +################## + +# Set S6 wait time +ENV S6_CMD_WAIT_FOR_SERVICES=1 \ + S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 \ + S6_SERVICES_GRACETIME=0 + +################## +# 3 Install apps # +################## + +# Add rootfs +COPY rootfs/ / + +# Uses /bin for compatibility purposes +# hadolint ignore=DL4005 +RUN if [ ! -f /bin/sh ] && [ -f /usr/bin/sh ]; then ln -s /usr/bin/sh /bin/sh; fi && \ + if [ ! -f /bin/bash ] && [ -f /usr/bin/bash ]; then ln -s /usr/bin/bash /bin/bash; fi + +# Modules +ARG MODULES="00-banner.sh 01-custom_script.sh 00-global_var.sh" + +# Automatic modules download +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_automodules.sh" "/ha_automodules.sh" +RUN chmod 744 /ha_automodules.sh && /ha_automodules.sh "$MODULES" && rm /ha_automodules.sh + +# Manual apps +ENV PACKAGES="jq curl iproute2" + +# Automatic apps & bashio +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh" +RUN chmod 744 /ha_autoapps.sh && /ha_autoapps.sh "$PACKAGES" && rm /ha_autoapps.sh + +################ +# 4 Entrypoint # +################ + +# Add entrypoint +ENV S6_STAGE2_HOOK=/ha_entrypoint.sh +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh" + +# Entrypoint modifications +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh" +RUN chmod 777 /ha_entrypoint.sh /ha_entrypoint_modif.sh && /ha_entrypoint_modif.sh && rm /ha_entrypoint_modif.sh + + +ENTRYPOINT [ "/usr/bin/env" ] +CMD [ "/ha_entrypoint.sh" ] + +############ +# 5 Labels # +############ + +ARG BUILD_ARCH +ARG BUILD_DATE +ARG BUILD_DESCRIPTION +ARG BUILD_NAME +ARG BUILD_REF +ARG BUILD_REPOSITORY +ARG BUILD_VERSION +ENV BUILD_VERSION="${BUILD_VERSION}" +LABEL \ + io.hass.name="${BUILD_NAME}" \ + io.hass.description="${BUILD_DESCRIPTION}" \ + io.hass.arch="${BUILD_ARCH}" \ + io.hass.type="addon" \ + io.hass.version=${BUILD_VERSION} \ + maintainer="alexbelgium (https://github.com/alexbelgium)" \ + org.opencontainers.image.title="${BUILD_NAME}" \ + org.opencontainers.image.description="${BUILD_DESCRIPTION}" \ + org.opencontainers.image.vendor="Home Assistant Add-ons" \ + org.opencontainers.image.authors="alexbelgium (https://github.com/alexbelgium)" \ + org.opencontainers.image.licenses="MIT" \ + org.opencontainers.image.url="https://github.com/alexbelgium" \ + 
org.opencontainers.image.source="https://github.com/${BUILD_REPOSITORY}" \ + org.opencontainers.image.documentation="https://github.com/${BUILD_REPOSITORY}/blob/main/README.md" \ + org.opencontainers.image.created=${BUILD_DATE} \ + org.opencontainers.image.revision=${BUILD_REF} \ + org.opencontainers.image.version=${BUILD_VERSION} + +################# +# 6 Healthcheck # +################# + +# Avoid spamming logs +# hadolint ignore=SC2016 +RUN \ + # Handle Apache configuration + if [ -d /etc/apache2/sites-available ]; then \ + for file in /etc/apache2/sites-*/*.conf; do \ + sed -i '/ /etc/nginx/nginx.conf.new && \ + mv /etc/nginx/nginx.conf.new /etc/nginx/nginx.conf; \ + fi + +ENV HEALTH_PORT="7022" \ + HEALTH_URL="" +HEALTHCHECK \ + --interval=5s \ + --retries=5 \ + --start-period=30s \ + --timeout=25s \ + CMD curl -A "HealthCheck: Docker/1.0" -s -f "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1 diff --git a/arpspoof/README.md b/arpspoof/README.md new file mode 100644 index 0000000..daa3fb6 --- /dev/null +++ b/arpspoof/README.md @@ -0,0 +1,76 @@ +# Home assistant add-on: Arpspoof + +[![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) +[![Donate][paypal-badge]](https://www.paypal.com/donate/?hosted_button_id=DZFULJZTP3UQA) + +[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white +[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white + +![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json) +![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json) +![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Farpspoof%2Fconfig.json) + +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade) +[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml) +[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml) + +_Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. 
Thanks!_ + +[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers) + +![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/arpspoof/stats.png) + +## About + +[arpspoof](https://github.com/t0mer/Arpspoof-Docker) adds the ability to block the internet connection of local network devices. +This add-on is based on the docker image https://hub.docker.com/r/techblog/arpspoof-docker + +See more information here: https://en.techblog.co.il/2021/03/15/home-assistant-cut-internet-connection-using-arpspoof/, or in the upstream image documentation: https://github.com/t0mer/Arpspoof-Docker + +## Installation + +The installation of this add-on is pretty straightforward and no different from installing any other add-on. + +1. Add my add-ons repository to your Home Assistant instance (in the Supervisor add-on store, top right, or click the button below if you have configured My Home Assistant) + [![Open your Home Assistant instance and show the add add-on repository dialog with a specific repository URL pre-filled.](https://my.home-assistant.io/badges/supervisor_add_addon_repository.svg)](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons) +1. Install this add-on. +1. Click the `Save` button to store your configuration. +1. Set the add-on options to your preferences. +1. Start the add-on. +1. Check the logs of the add-on to see if everything went well. +1. Open the web UI and adapt the software options. + +## Configuration + +The web UI can be found at `<your-HA-IP>:7022`. + +```yaml +ROUTER_IP: 127.0.0.1 # Required: router IP +INTERFACE_NAME: name # Required: interface name. Autofilled if empty. +``` + +## Home-Assistant configuration + +Description: [techblog](https://en.techblog.co.il/2021/03/15/home-assistant-cut-internet-connection-using-arpspoof/) + +You can use a `command_line` switch to temporarily disable an internet device in your network. 
+ +```yaml +- platform: command_line + switches: + iphone_internet: + friendly_name: "iPhone internet" + command_off: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/disconnect?ip={iPhoneIP}" + command_on: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/reconnect?ip={iPhoneIP}" + command_state: "/usr/bin/curl -f -X GET http://{HA-IP}:7022/status?ip={iPhoneIP}" + value_template: > + {{ value != "1" }} +``` + +## Support + +Create an issue on GitHub. + +## Illustration + +No illustration diff --git a/arpspoof/build.json b/arpspoof/build.json new file mode 100644 index 0000000..2521c23 --- /dev/null +++ b/arpspoof/build.json @@ -0,0 +1,10 @@ +{ + "build_from": { + "aarch64": "techblog/arpspoof-docker:1.0.0", + "amd64": "techblog/arpspoof-docker:1.0.0", + "armv7": "techblog/arpspoof-docker:1.0.0" + }, + "codenotary": { + "signer": "alexandrep.github@gmail.com" + } +} diff --git a/arpspoof/icon.png b/arpspoof/icon.png new file mode 100644 index 0000000..31ca24c Binary files /dev/null and b/arpspoof/icon.png differ diff --git a/arpspoof/logo.png b/arpspoof/logo.png new file mode 100644 index 0000000..31ca24c Binary files /dev/null and b/arpspoof/logo.png differ diff --git a/arpspoof/rootfs/etc/cont-init.d/99-run.sh b/arpspoof/rootfs/etc/cont-init.d/99-run.sh new file mode 100755 index 0000000..9e05420 --- /dev/null +++ b/arpspoof/rootfs/etc/cont-init.d/99-run.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bashio +# shellcheck shell=bash +set -e + +# Avoid unbound variables +set +u + +# Autodetect the interface if not defined by the user +if [ -z "$INTERFACE_NAME" ]; then + # shellcheck disable=SC2155 + export INTERFACE_NAME="$(ip route get 8.8.8.8 | sed -nr 's/.*dev ([^\ ]+).*/\1/p')" + bashio::log.blue "Autodetection: INTERFACE_NAME=$INTERFACE_NAME" +fi + +bashio::log.info "Starting..." +/usr/bin/python3 /opt/arpspoof/arpspoof.py diff --git a/arpspoof/stats.png b/arpspoof/stats.png new file mode 100644 index 0000000..af50c2f Binary files /dev/null and b/arpspoof/stats.png differ diff --git a/arpspoof/updater.json b/arpspoof/updater.json new file mode 100644 index 0000000..a7ad1a9 --- /dev/null +++ b/arpspoof/updater.json @@ -0,0 +1,8 @@ +{ + "last_update": "07-12-2021", + "repository": "alexbelgium/hassio-addons", + "slug": "arpspoof", + "source": "github", + "upstream_repo": "t0mer/Arpspoof-Docker", + "upstream_version": "1.0.0" +} diff --git a/beta/CHANGELOG.md b/beta/CHANGELOG.md new file mode 100755 index 0000000..fce77a5 --- /dev/null +++ b/beta/CHANGELOG.md @@ -0,0 +1 @@ +Please reference the [beta commits](https://github.com/jakowenko/double-take/commits/beta) for changes. diff --git a/beta/Dockerfile b/beta/Dockerfile new file mode 100644 index 0000000..1b8b6cd --- /dev/null +++ b/beta/Dockerfile @@ -0,0 +1 @@ +FROM jakowenko/double-take:beta \ No newline at end of file diff --git a/beta/README.md b/beta/README.md new file mode 100755 index 0000000..167055b --- /dev/null +++ b/beta/README.md @@ -0,0 +1,11 @@ +[![Double Take](https://badgen.net/github/release/jakowenko/double-take/stable)](https://github.com/jakowenko/double-take) [![Double Take](https://badgen.net/github/stars/jakowenko/double-take)](https://github.com/jakowenko/double-take/stargazers) [![Docker Pulls](https://flat.badgen.net/docker/pulls/jakowenko/double-take)](https://hub.docker.com/r/jakowenko/double-take) [![Discord](https://flat.badgen.net/discord/members/3pumsskdN5?label=Discord)](https://discord.gg/3pumsskdN5) + +![amd64][amd64-shield] + +# Double Take + +Unified UI and API for processing and training images for facial recognition. 
+ +[Documentation](https://github.com/jakowenko/double-take/tree/beta#readme) + +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg diff --git a/beta/config.json b/beta/config.json new file mode 100755 index 0000000..79175a1 --- /dev/null +++ b/beta/config.json @@ -0,0 +1,35 @@ +{ + "name": "Double Take (beta)", + "version": "1.13.1", + "url": "https://github.com/jakowenko/double-take", + "panel_icon": "mdi:face-recognition", + "slug": "double-take-beta", + "description": "Unified UI and API for processing and training images for facial recognition", + "arch": ["amd64"], + "startup": "application", + "boot": "auto", + "ingress": true, + "ingress_port": 3000, + "ports": { + "3000/tcp": 3000 + }, + "ports_description": { + "3000/tcp": "Web interface (not required for Home Assistant ingress)" + }, + "map": ["media:rw", "config:rw"], + "environment": { + "HA_ADDON": "true" + }, + "options": { + "STORAGE_PATH": "/config/double-take", + "CONFIG_PATH": "/config/double-take", + "SECRETS_PATH": "/config", + "MEDIA_PATH": "/media/double-take" + }, + "schema": { + "STORAGE_PATH": "str", + "CONFIG_PATH": "str", + "SECRETS_PATH": "str", + "MEDIA_PATH": "str" + } +} diff --git a/beta/icon.png b/beta/icon.png new file mode 100755 index 0000000..ed7663c Binary files /dev/null and b/beta/icon.png differ diff --git a/changedetection.io/CHANGELOG.md b/changedetection.io/CHANGELOG.md new file mode 100644 index 0000000..008ff49 --- /dev/null +++ b/changedetection.io/CHANGELOG.md @@ -0,0 +1,132 @@ + +## 0.49.4 (15-03-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.49.3 (01-03-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.49.2 (21-02-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.49.1 (15-02-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.49.0 (25-01-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.48.6 (11-01-2025) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.48.5 (28-12-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.48.4 (21-12-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.48.1 (07-12-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.47.6 (09-11-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.47.5 (02-11-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.47.3 (12-10-2024) +- Update to latest version from 
linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.46.4 (07-09-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.46.3 (24-08-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.46.2 (03-08-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) +## 0.46.1-2 (23-07-2024) +- Minor bugs fixed + +## 0.46.1 (20-07-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.26 (13-07-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) +## 0.45.25-2 (08-07-2024) +- Minor bugs fixed + +## 0.45.25 (06-07-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.24 (22-06-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.23 (25-05-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) +## 0.45.22-2 (21-05-2024) +- Minor bugs fixed + +## 0.45.22 (04-05-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.21 (27-04-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.20 (20-04-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.17 (06-04-2024) +- Update to latest version from linuxserver/docker-changedetection.io (changelog : https://github.com/linuxserver/docker-changedetection.io/releases) + +## 0.45.16 (09-03-2024) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.14 (10-02-2024) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.13 (20-01-2024) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.12 (06-01-2024) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.9 (23-12-2023) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.8.1 (02-12-2023) + +- Update to latest version from linuxserver/docker-changedetection.io +## 0.45.7.3-2 (21-11-2023) + +- Minor bugs fixed + +## 0.45.7.3 (18-11-2023) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.7 (11-11-2023) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.5 (04-11-2023) + +- Update to latest version from linuxserver/docker-changedetection.io +## 0.45.3-2 (01-11-2023) + +- Minor bugs fixed + +## 0.45.3 (07-10-2023) + +- Update to latest version from linuxserver/docker-changedetection.io + +## 0.45.2 (23-09-2023) + +- Update to latest version from 
linuxserver/docker-changedetection.io + +## 0.45.1 (10-09-2023) + +- Initial build diff --git a/changedetection.io/README.md b/changedetection.io/README.md new file mode 100644 index 0000000..adf0f64 --- /dev/null +++ b/changedetection.io/README.md @@ -0,0 +1,85 @@ +# Home assistant add-on: changedetection.io + +[![Donate][donation-badge]](https://www.buymeacoffee.com/alexbelgium) +[![Donate][paypal-badge]](https://www.paypal.com/donate/?hosted_button_id=DZFULJZTP3UQA) + +![Version](https://img.shields.io/badge/dynamic/json?label=Version&query=%24.version&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json) +![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json) +![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fchangedetection.io%2Fconfig.json) + +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade) +[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml) +[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml) + +[donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white +[paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white + +_Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. Thanks!_ + +[![Stargazers repo roster for @alexbelgium/hassio-addons](https://reporoster.com/stars/alexbelgium/hassio-addons)](https://github.com/alexbelgium/hassio-addons/stargazers) + +![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/changedetection.io/stats.png) + +## About + +[Changedetection.io](https://github.com/dgtlmoon/changedetection.io) provides free, open-source web page monitoring, notification and change detection. + +This addon is based on the [docker image](https://github.com/linuxserver/docker-changedetection.io) from linuxserver.io. + +## Configuration + +### Main app + +Web UI can be found at `:5000`, also accessible from the add-on page. + +#### Sidebar shortcut + +You can add a shortcut pointing to your Changedetection.io instance with the following steps: +1. Go to ⚙ Settings > Dashboards +2. Click ➕ Add Dashboard at the bottom corner +3. Select the Webpage option, and paste the Web UI URL you got from the add-on page. +4. Fill in the title for the sidebar item, an icon (suggestion: `mdi:vector-difference`), and a **relative URL** for that panel (e.g. `change-detection`). Lastly, confirm it. 
+ +### Configurable options + +```yaml +PUID: user +PGID: user +TZ: Etc/UTC # specify a timezone to use, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List +BASE_URL: # specify the full URL (including protocol) when running behind a reverse proxy +``` + +### Connect to browserless Chrome (from @RhysMcW) + +In HA, use the File Editor add-on (or Filebrowser) and edit the Changedetection.io config file at `/homeassistant/addons_config/changedetection.io/config.yaml`. + +Add the following line to the end of it: +```yaml +PLAYWRIGHT_DRIVER_URL: ws://2937404c-browserless-chrome:3000/chromium?launch={"defaultViewport":{"height":720,"width":1280},"headless":false,"stealth":true}&blockAds=true +``` + +Remember to also add a blank line at the end of the file, according to YAML requirements. + +The `2937404c-browserless-chrome` hostname is displayed in the UI, on the Browserless Chromium add-on page: +![image](https://github.com/user-attachments/assets/a63514f6-027a-4361-a33f-0d8f87461279) + +You can also fetch it: +* By using SSH and running `docker exec -i hassio_dns cat "/config/hosts"` +* From the CLI in HA, using `arp` +* You should also be able to use your HA IP address. + +Then restart the Changedetection.io add-on; after that you can use the browser options in Changedetection.io. + +## Installation + +The installation of this add-on is pretty straightforward and no different +from installing any other Hass.io add-on. + +1. [Add my Hass.io add-ons repository][repository] to your Hass.io instance. +1. Install this add-on. +1. Click the `Save` button to store your configuration. +1. Start the add-on. +1. Check the logs of the add-on to see if everything went well. +1. Carefully configure the add-on to your preferences; see the official documentation for that. 
+ +[repository]: https://github.com/alexbelgium/hassio-addons diff --git a/changedetection.io/apparmor.txt b/changedetection.io/apparmor.txt new file mode 100644 index 0000000..9de4432 --- /dev/null +++ b/changedetection.io/apparmor.txt @@ -0,0 +1,67 @@ +#include + +profile addon_db21ed7f_changedetection.io_nas flags=(attach_disconnected,mediate_deleted) { + #include + + capability, + file, + signal, + mount, + umount, + remount, + network udp, + network tcp, + network dgram, + network stream, + network inet, + network inet6, + network netlink raw, + network unix dgram, + + capability setgid, + capability setuid, + capability sys_admin, + capability dac_read_search, + # capability dac_override, + # capability sys_rawio, + +# S6-Overlay + /init ix, + /run/{s6,s6-rc*,service}/** ix, + /package/** ix, + /command/** ix, + /run/{,**} rwk, + /dev/tty rw, + /bin/** ix, + /usr/bin/** ix, + /usr/lib/bashio/** ix, + /etc/s6/** rix, + /run/s6/** rix, + /etc/services.d/** rwix, + /etc/cont-init.d/** rwix, + /etc/cont-finish.d/** rwix, + /init rix, + /var/run/** mrwkl, + /var/run/ mrwkl, + /dev/i2c-1 mrwkl, + # Files required + /dev/fuse mrwkl, + /dev/sda1 mrwkl, + /dev/sdb1 mrwkl, + /dev/nvme0 mrwkl, + /dev/nvme1 mrwkl, + /dev/mmcblk0p1 mrwkl, + /dev/ttyUSB0 mrwkl, + /dev/* mrwkl, + /tmp/** mrkwl, + + # Data access + /data/** rw, + + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer=docker-default, + + # docker daemon confinement requires explict allow rule for signal + signal (receive) set=(kill,term) peer=/usr/bin/docker, + +} diff --git a/changedetection.io/build.json b/changedetection.io/build.json new file mode 100644 index 0000000..13fca3e --- /dev/null +++ b/changedetection.io/build.json @@ -0,0 +1,9 @@ +{ + "build_from": { + "aarch64": "lscr.io/linuxserver/changedetection.io:arm64v8-latest", + "amd64": "lscr.io/linuxserver/changedetection.io:amd64-latest" + }, + "codenotary": { + "signer": "alexandrep.github@gmail.com" + } +} diff --git a/changedetection.io/config.json b/changedetection.io/config.json new file mode 100644 index 0000000..0285e15 --- /dev/null +++ b/changedetection.io/config.json @@ -0,0 +1,41 @@ +{ + "arch": [ + "aarch64", + "amd64" + ], + "codenotary": "alexandrep.github@gmail.com", + "description": "web page monitoring, notification and change detection", + "environment": { + "LC_ALL": "en_US.UTF-8", + "TIMEOUT": "60000" + }, + "image": "ghcr.io/alexbelgium/changedetection.io-{arch}", + "init": false, + "map": [ + "config:rw" + ], + "name": "Changedetection.io", + "options": { + "PGID": 0, + "PUID": 0, + "TIMEOUT": "60000" + }, + "ports": { + "5000/tcp": 5000 + }, + "ports_description": { + "5000/tcp": "Webui" + }, + "schema": { + "BASE_URL": "str?", + "PGID": "int", + "PUID": "int", + "TIMEOUT": "int", + "TZ": "str?" 
+ }, + "slug": "changedetection.io", + "udev": true, + "url": "https://github.com/alexbelgium/hassio-addons/tree/master/changedetection.io", + "version": "0.49.4", + "webui": "http://[HOST]:[PORT:5000]" +} diff --git a/changedetection.io/icon.png b/changedetection.io/icon.png new file mode 100644 index 0000000..4f43c7a Binary files /dev/null and b/changedetection.io/icon.png differ diff --git a/changedetection.io/logo.png b/changedetection.io/logo.png new file mode 100644 index 0000000..4f43c7a Binary files /dev/null and b/changedetection.io/logo.png differ diff --git a/changedetection.io/rootfs/blank b/changedetection.io/rootfs/blank new file mode 100644 index 0000000..e69de29 diff --git a/changedetection.io/rootfs/etc/cont-init.d/21-folders.sh b/changedetection.io/rootfs/etc/cont-init.d/21-folders.sh new file mode 100755 index 0000000..4ed99bf --- /dev/null +++ b/changedetection.io/rootfs/etc/cont-init.d/21-folders.sh @@ -0,0 +1,15 @@ +#!/usr/bin/with-contenv bashio +# shellcheck shell=bash +set -e + +# Define user +PUID=$(bashio::config "PUID") +PGID=$(bashio::config "PGID") + +# Check data location +LOCATION="/config/addons_config/changedetection.io" + +# Check structure +mkdir -p "$LOCATION" +chown -R "$PUID":"$PGID" "$LOCATION" +chmod -R 755 "$LOCATION" diff --git a/changedetection.io/stats.png b/changedetection.io/stats.png new file mode 100644 index 0000000..9e0a2ef Binary files /dev/null and b/changedetection.io/stats.png differ diff --git a/changedetection.io/updater.json b/changedetection.io/updater.json new file mode 100644 index 0000000..02e0914 --- /dev/null +++ b/changedetection.io/updater.json @@ -0,0 +1,9 @@ +{ + "github_fulltag": "false", + "last_update": "15-03-2025", + "repository": "alexbelgium/hassio-addons", + "slug": "changedetection.io", + "source": "github", + "upstream_repo": "linuxserver/docker-changedetection.io", + "upstream_version": "0.49.4" +} diff --git a/compreface/CHANGELOG.md b/compreface/CHANGELOG.md new file mode 100755 index 0000000..d78215d --- /dev/null +++ b/compreface/CHANGELOG.md @@ -0,0 +1 @@ +Please reference the [release notes](https://github.com/exadel-inc/CompreFace/releases) for changes. diff --git a/compreface/Dockerfile b/compreface/Dockerfile new file mode 100755 index 0000000..14ae3f2 --- /dev/null +++ b/compreface/Dockerfile @@ -0,0 +1,6 @@ +FROM exadel/compreface:1.1.0 +ENV PGDATA=/data/database +RUN apt-get update && apt-get install jq -y && rm -rf /var/lib/apt/lists/* +COPY postgresql.conf /etc/postgresql/13/main/postgresql.conf +COPY run.sh / +CMD ["/run.sh"] diff --git a/compreface/README.md b/compreface/README.md new file mode 100755 index 0000000..5e8e16a --- /dev/null +++ b/compreface/README.md @@ -0,0 +1,7 @@ +# Exadel CompreFace + +This add-on runs the [single container](https://github.com/exadel-inc/CompreFace/issues/651) version of CompreFace. + +CompreFace will be exposed on port 8000 - you can change this in the add-on configuration if another port is required. 
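+
+As a quick smoke test once the add-on is running, you can send an image to the recognition endpoint of the CompreFace REST API. This is only a sketch: the host, API key and image path below are placeholders, and the key is the one generated for a recognition service in the CompreFace UI.
+
+```bash
+# Hypothetical check against the add-on on its default host port (8000)
+# Replace the host, API key and image path with your own values
+curl -X POST "http://homeassistant.local:8000/api/v1/recognition/recognize" \
+  -H "x-api-key: <your-recognition-service-api-key>" \
+  -F "file=@/path/to/face.jpg"
+```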
+ +[Documentation](https://github.com/exadel-inc/CompreFace#readme) diff --git a/compreface/config.json b/compreface/config.json new file mode 100755 index 0000000..eb5c4af --- /dev/null +++ b/compreface/config.json @@ -0,0 +1,30 @@ +{ + "name": "Exadel CompreFace", + "version": "1.1.0", + "url": "https://github.com/exadel-inc/CompreFace", + "slug": "compreface", + "description": "Exadel CompreFace is a leading free and open-source face recognition system", + "arch": ["amd64"], + "startup": "application", + "boot": "auto", + "ports": { + "80/tcp": 8000 + }, + "ports_description": { + "80/tcp": "UI/API" + }, + "options": { + "POSTGRES_URL": "jdbc:postgresql://localhost:5432/frs", + "POSTGRES_USER": "compreface", + "POSTGRES_PASSWORD": "M7yfTsBscdqvZs49", + "POSTGRES_DB": "frs", + "API_JAVA_OPTS": "-Xmx1g" + }, + "schema": { + "POSTGRES_URL": "str", + "POSTGRES_USER": "str", + "POSTGRES_PASSWORD": "str", + "POSTGRES_DB": "str", + "API_JAVA_OPTS": "str" + } +} diff --git a/compreface/icon.png b/compreface/icon.png new file mode 100755 index 0000000..a73b9fe Binary files /dev/null and b/compreface/icon.png differ diff --git a/compreface/postgresql.conf b/compreface/postgresql.conf new file mode 100755 index 0000000..cc57ec7 --- /dev/null +++ b/compreface/postgresql.conf @@ -0,0 +1,785 @@ +# CompreFace changes: +# 1. Changed `data_directory`, so it will always link to `/var/lib/postgresql/data` and do not depend on postgres version. + +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +data_directory = '/data/database' # use data in another directory + # (change requires restart) +hba_file = '/etc/postgresql/13/main/pg_hba.conf' # host-based authentication file + # (change requires restart) +ident_file = '/etc/postgresql/13/main/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+external_pid_file = '/var/run/postgresql/13-main.pid' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +#listen_addresses = 'localhost' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = md5 # md5 or scram-sha-256 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +ssl = on +#ssl_ca_file = '' +ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' +#ssl_crl_file = '' +ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#parallel_leader_participation = on +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +max_wal_size = 1GB +min_wal_size = 80MB 
+#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' + # (change requires restart) +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. 
+ +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_seqscan = on +#enable_sort = on +#enable_incremental_sort = on +#enable_tidscan = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_parallel_hash = on +#enable_partition_pruning = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off +#jit = on # 
allow JIT compilation +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%m [%p] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # 
%d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Etc/UTC' + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +cluster_name = '13/main' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +stats_temp_directory = '/var/run/postgresql/13-main.pg_stat_tmp' + + +# - Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#default_table_access_method = 'heap' +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples + # before index cleanup, 0 always performs + # index cleanup +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'Etc/UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'C.UTF-8' # locale for system error message + # strings +lc_monetary = 'C.UTF-8' # locale for monetary formatting +lc_numeric = 'C.UTF-8' # locale for number formatting +lc_time = 'C.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#shared_preload_libraries = '' # (change requires restart) +#local_preload_libraries = '' +#session_preload_libraries = '' +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#extension_destdir = '' # prepend path when loading extensions + # and shared objects (added by Debian) + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +include_dir = 'conf.d' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/compreface/run.sh b/compreface/run.sh new file mode 100755 index 0000000..3619571 --- /dev/null +++ b/compreface/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# +# Entrypoint +# +# Ensure persistent data is stored in /data/ and then start the stack + +set -euo pipefail + +start() { + echo "Starting CompreFace" >&2 + values=$(cat /data/options.json) + for s in $(echo "$values" | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" ); do + export "${s?}" + done + + if [ "$PGDATA" == "/data/database" ] && [ -d /data ] + then + if [ ! -d /data/database ] + then + cp -rp /var/lib/postgresql/data /data/database + fi + fi + + chown -R postgres:postgres "$PGDATA" + + exec /usr/bin/supervisord +} + + +if grep -q avx /proc/cpuinfo +then + start +else + echo "AVX not detected" >&2 + exit 1 +fi diff --git a/deepstack-cpu/CHANGELOG.md b/deepstack-cpu/CHANGELOG.md new file mode 100755 index 0000000..70e4859 --- /dev/null +++ b/deepstack-cpu/CHANGELOG.md @@ -0,0 +1 @@ +Please reference the [release notes](https://github.com/johnolafenwa/DeepStack/releases) for changes. diff --git a/deepstack-cpu/Dockerfile b/deepstack-cpu/Dockerfile new file mode 100755 index 0000000..eed8dd8 --- /dev/null +++ b/deepstack-cpu/Dockerfile @@ -0,0 +1,3 @@ +ARG BUILD_FROM +FROM $BUILD_FROM +ENV DATA_DIR=/data/database \ No newline at end of file diff --git a/deepstack-cpu/README.md b/deepstack-cpu/README.md new file mode 100755 index 0000000..8d1420c --- /dev/null +++ b/deepstack-cpu/README.md @@ -0,0 +1,7 @@ +![amd64]amd64-shield + +# DeepStack (CPU) + +[Documentation](https://docs.deepstack.cc) + +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg diff --git a/deepstack-cpu/build.json b/deepstack-cpu/build.json new file mode 100755 index 0000000..8db40b9 --- /dev/null +++ b/deepstack-cpu/build.json @@ -0,0 +1,5 @@ +{ + "build_from": { + "amd64": "deepquestai/deepstack:cpu" + } +} \ No newline at end of file diff --git a/deepstack-cpu/config.json b/deepstack-cpu/config.json new file mode 100755 index 0000000..4b06a00 --- /dev/null +++ b/deepstack-cpu/config.json @@ -0,0 +1,20 @@ +{ + "name": "DeepStack (CPU)", + "version": "2021.09.1", + "url": "https://github.com/johnolafenwa/DeepStack", + "slug": "deepstack-cpu", + "description": "The World's Leading Cross Platform AI Engine for Edge Devices", + "arch": ["amd64"], + "startup": "application", + "boot": "auto", + "ports": { + "5000/tcp": 5001 + }, + "ports_description": { + "5000/tcp": "API" + }, + "environment": { + "VISION-FACE": "True", + "VISION-DETECTION": "True" + } +} diff --git a/deepstack-cpu/icon.png b/deepstack-cpu/icon.png new file mode 100755 index 0000000..051c6f1 Binary files /dev/null and b/deepstack-cpu/icon.png differ diff --git a/deepstack/CHANGELOG.md b/deepstack/CHANGELOG.md new file mode 100755 index 0000000..70e4859 --- /dev/null +++ b/deepstack/CHANGELOG.md @@ -0,0 +1 @@ +Please reference the [release notes](https://github.com/johnolafenwa/DeepStack/releases) for changes. 
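For context, the DeepStack add-on configuration above maps the container's port 5000 to host port 5001 and enables the face and object detection endpoints. A minimal client sketch against the documented `/v1/vision/detection` endpoint is shown below; the host name, port and image path are assumptions for illustration, not values shipped with the add-on.

```python
# Hedged sketch: object detection against a running DeepStack add-on.
# Assumes the default port mapping (container 5000 -> host 5001) and a
# hypothetical local image "test.jpg".
import requests

DEEPSTACK_URL = "http://homeassistant.local:5001/v1/vision/detection"

with open("test.jpg", "rb") as image:
    response = requests.post(DEEPSTACK_URL, files={"image": image}, timeout=30)

response.raise_for_status()
for prediction in response.json().get("predictions", []):
    # Each prediction carries a label, a confidence score and a bounding box.
    print(prediction["label"], prediction["confidence"])
```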
diff --git a/deepstack/Dockerfile b/deepstack/Dockerfile new file mode 100755 index 0000000..eed8dd8 --- /dev/null +++ b/deepstack/Dockerfile @@ -0,0 +1,3 @@ +ARG BUILD_FROM +FROM $BUILD_FROM +ENV DATA_DIR=/data/database \ No newline at end of file diff --git a/deepstack/README.md b/deepstack/README.md new file mode 100755 index 0000000..53d53a9 --- /dev/null +++ b/deepstack/README.md @@ -0,0 +1,9 @@ +![amd64][amd64-shield] ![armv7][armv7-shield] ![aarch64][aarch64-shield] + +# DeepStack + +[Documentation](https://docs.deepstack.cc) + +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg +[aarch64-shield]: https://img.shields.io/badge/aarch64-yes-green.svg +[armv7-shield]: https://img.shields.io/badge/armv7-yes-green.svg diff --git a/deepstack/build.json b/deepstack/build.json new file mode 100755 index 0000000..0228d2a --- /dev/null +++ b/deepstack/build.json @@ -0,0 +1,7 @@ +{ + "build_from": { + "amd64": "deepquestai/deepstack:latest", + "aarch64": "deepquestai/deepstack:arm64", + "armv7": "deepquestai/deepstack:arm64" + } +} diff --git a/deepstack/config.json b/deepstack/config.json new file mode 100755 index 0000000..6933752 --- /dev/null +++ b/deepstack/config.json @@ -0,0 +1,20 @@ +{ + "name": "DeepStack", + "version": "2021.09.1", + "url": "https://github.com/johnolafenwa/DeepStack", + "slug": "deepstack", + "description": "The World's Leading Cross Platform AI Engine for Edge Devices", + "arch": ["amd64", "armv7", "aarch64"], + "startup": "application", + "boot": "auto", + "ports": { + "5000/tcp": 5001 + }, + "ports_description": { + "5000/tcp": "API" + }, + "environment": { + "VISION-FACE": "True", + "VISION-DETECTION": "True" + } +} diff --git a/deepstack/icon.png b/deepstack/icon.png new file mode 100755 index 0000000..051c6f1 Binary files /dev/null and b/deepstack/icon.png differ diff --git a/facebox/Dockerfile b/facebox/Dockerfile new file mode 100755 index 0000000..2dcd5d4 --- /dev/null +++ b/facebox/Dockerfile @@ -0,0 +1,4 @@ +FROM machinebox/facebox +RUN apt-get install jq -y +COPY run.sh / +ENTRYPOINT ["/run.sh"] \ No newline at end of file diff --git a/facebox/README.md b/facebox/README.md new file mode 100755 index 0000000..6ca84c6 --- /dev/null +++ b/facebox/README.md @@ -0,0 +1,14 @@ +![amd64][amd64-shield] + +# Facebox + +To use this add-on create a Veritone Developer account and login at: +https://machinebox.io/login + +Click on the Machine Box navigation link to view your `MB_KEY`. + +Enter your `MB_KEY` into the configuration of the add-on. 
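As an illustration of how a client could use the add-on once `MB_KEY` is set, here is a hedged sketch against the `/facebox/check` endpoint described in the Machine Box documentation linked below. The host name, host port (8000, per the port mapping in this add-on's `config.json`) and image path are assumptions.

```python
# Hedged sketch: face recognition check against a running Facebox add-on.
# Field names follow the Machine Box docs; the URL and image are placeholders.
import requests

FACEBOX_URL = "http://homeassistant.local:8000/facebox/check"

with open("people.jpg", "rb") as image:
    response = requests.post(FACEBOX_URL, files={"file": image}, timeout=30)

response.raise_for_status()
for face in response.json().get("faces", []):
    # Each detected face reports whether it matched a previously taught identity.
    print(face.get("name"), face.get("matched"), face.get("confidence"))
```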
+ +[Documentation](https://machinebox.io/docs/facebox) + +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg diff --git a/facebox/config.json b/facebox/config.json new file mode 100755 index 0000000..d242b67 --- /dev/null +++ b/facebox/config.json @@ -0,0 +1,22 @@ +{ + "name": "Facebox", + "version": "1a1358d", + "url": "https://machinebox.io/docs/facebox", + "slug": "facebox", + "description": "Facial recognition with one-shot teaching", + "arch": ["amd64"], + "startup": "application", + "boot": "auto", + "ports": { + "8080/tcp": 8000 + }, + "ports_description": { + "8080/tcp": "UI/API" + }, + "options": { + "MB_KEY": "" + }, + "schema": { + "MB_KEY": "str" + } +} diff --git a/facebox/icon.png b/facebox/icon.png new file mode 100755 index 0000000..abbe0a2 Binary files /dev/null and b/facebox/icon.png differ diff --git a/facebox/run.sh b/facebox/run.sh new file mode 100755 index 0000000..514b03c --- /dev/null +++ b/facebox/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +values=`cat /data/options.json` +for s in $(echo $values | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" ); do + export $s +done +/app/facebox \ No newline at end of file diff --git a/gitea/CHANGELOG.md b/gitea/CHANGELOG.md index 49f3665..553ea28 100644 --- a/gitea/CHANGELOG.md +++ b/gitea/CHANGELOG.md @@ -1,4 +1,31 @@ +## 1.23.5 (08-03-2025) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.23.4 (21-02-2025) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.23.3 (08-02-2025) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.23.1 (11-01-2025) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.22.6 (14-12-2024) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.22.4 (30-11-2024) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.22.3 (12-10-2024) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.22.2 (07-09-2024) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + +## 1.22.1 (06-07-2024) +- Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) + ## 1.22.0 (01-06-2024) - Update to latest version from go-gitea/gitea (changelog : https://github.com/go-gitea/gitea/releases) diff --git a/gitea/Dockerfile b/gitea/Dockerfile index 0ec50c3..580ac6e 100644 --- a/gitea/Dockerfile +++ b/gitea/Dockerfile @@ -1,3 +1,15 @@ +#============================# +# ALEXBELGIUM'S DOCKERFILE # +#============================# +# _.------. +# _.-` ('>.-`"""-. +# '.--'` _'` _ .--.) +# -' '-.-';` ` +# ' - _.' ``'--. +# '---` .-'""` +# /` +#=== Home Assistant Addon ===# + ################# # 1 Build Image # ################# @@ -23,7 +35,7 @@ USER root ################## # Add rootfs -COPY rootfs/ / +COPY rootfs / # Uses /bin for compatibility purposes # hadolint ignore=DL4005 @@ -34,14 +46,14 @@ RUN if [ ! 
-f /bin/sh ] && [ -f /usr/bin/sh ]; then ln -s /usr/bin/sh /bin/sh; f ARG MODULES="00-banner.sh 00-global_var.sh 01-custom_script.sh" # Automatic modules download -ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_automodules.sh" "/ha_automodules.sh" +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_automodules.sh" "/ha_automodules.sh" RUN chmod 744 /ha_automodules.sh && /ha_automodules.sh "$MODULES" && rm /ha_automodules.sh # Manual apps ENV PACKAGES="" # Automatic apps & bashio -ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh" +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_autoapps.sh" "/ha_autoapps.sh" RUN chmod 744 /ha_autoapps.sh && /ha_autoapps.sh "$PACKAGES" && rm /ha_autoapps.sh EXPOSE 22 3000 @@ -52,10 +64,10 @@ EXPOSE 22 3000 # Add entrypoint ENV S6_STAGE2_HOOK=/ha_entrypoint.sh -ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh" +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint.sh" "/ha_entrypoint.sh" # Entrypoint modifications -ADD "https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh" +ADD "https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.templates/ha_entrypoint_modif.sh" "/ha_entrypoint_modif.sh" RUN chmod 777 /ha_entrypoint.sh /ha_entrypoint_modif.sh && /ha_entrypoint_modif.sh && rm /ha_entrypoint_modif.sh @@ -72,6 +84,7 @@ ARG BUILD_NAME ARG BUILD_REF ARG BUILD_REPOSITORY ARG BUILD_VERSION +ENV BUILD_VERSION="${BUILD_VERSION}" LABEL \ io.hass.name="${BUILD_NAME}" \ io.hass.description="${BUILD_DESCRIPTION}" \ @@ -95,6 +108,22 @@ LABEL \ # 6 Healthcheck # ################# +# Avoid spamming logs +# hadolint ignore=SC2016 +RUN \ + # Handle Apache configuration + if [ -d /etc/apache2/sites-available ]; then \ + for file in /etc/apache2/sites-*/*.conf; do \ + sed -i '/ /etc/nginx/nginx.conf.new && \ + mv /etc/nginx/nginx.conf.new /etc/nginx/nginx.conf; \ + fi + ENV HEALTH_PORT="3000" \ HEALTH_URL="" HEALTHCHECK \ @@ -102,4 +131,4 @@ HEALTHCHECK \ --retries=5 \ --start-period=30s \ --timeout=25s \ - CMD curl --fail "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1 + CMD curl -A "HealthCheck: Docker/1.0" -s -f "http://127.0.0.1:${HEALTH_PORT}${HEALTH_URL}" &>/dev/null || exit 1 diff --git a/gitea/README.md b/gitea/README.md index 09f1b86..07710f4 100644 --- a/gitea/README.md +++ b/gitea/README.md @@ -7,18 +7,18 @@ ![Ingress](https://img.shields.io/badge/dynamic/json?label=Ingress&query=%24.ingress&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fconfig.json) ![Arch](https://img.shields.io/badge/dynamic/json?color=success&label=Arch&query=%24.arch&url=https%3A%2F%2Fraw.githubusercontent.com%2Falexbelgium%2Fhassio-addons%2Fmaster%2Fgitea%2Fconfig.json) -[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/Mesteriis/hassio-addons-avm/dashboard?utm_source=github.com&utm_medium=referral&utm_content=Mesteriis/hassio-addons-avm&utm_campaign=Badge_Grade) -[![GitHub 
Super-Linter](https://img.shields.io/github/actions/workflow/status/Mesteriis/hassio-addons-avm/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/Mesteriis/hassio-addons-avm/actions/workflows/weekly-supelinter.yaml) -[![Builder](https://img.shields.io/github/actions/workflow/status/Mesteriis/hassio-addons-avm/onpush_builder.yaml?label=Builder)](https://github.com/Mesteriis/hassio-addons-avm/actions/workflows/onpush_builder.yaml) +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/9c6cf10bdbba45ecb202d7f579b5be0e)](https://www.codacy.com/gh/alexbelgium/hassio-addons/dashboard?utm_source=github.com&utm_medium=referral&utm_content=alexbelgium/hassio-addons&utm_campaign=Badge_Grade) +[![GitHub Super-Linter](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/weekly-supelinter.yaml?label=Lint%20code%20base)](https://github.com/alexbelgium/hassio-addons/actions/workflows/weekly-supelinter.yaml) +[![Builder](https://img.shields.io/github/actions/workflow/status/alexbelgium/hassio-addons/onpush_builder.yaml?label=Builder)](https://github.com/alexbelgium/hassio-addons/actions/workflows/onpush_builder.yaml) [donation-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20(no%20paypal)-%23d32f2f?logo=buy-me-a-coffee&style=flat&logoColor=white [paypal-badge]: https://img.shields.io/badge/Buy%20me%20a%20coffee%20with%20Paypal-0070BA?logo=paypal&style=flat&logoColor=white _Thanks to everyone having starred my repo! To star it click on the image below, then it will be on top right. Thanks!_ -[![Stargazers repo roster for @Mesteriis/hassio-addons-avm](https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/.github/stars2.svg)](https://github.com/Mesteriis/hassio-addons-avm/stargazers) +[![Stargazers repo roster for @alexbelgium/hassio-addons](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/.github/stars2.svg)](https://github.com/alexbelgium/hassio-addons/stargazers) -![downloads evolution](https://raw.githubusercontent.com/Mesteriis/hassio-addons-avm/master/gitea/stats.png) +![downloads evolution](https://raw.githubusercontent.com/alexbelgium/hassio-addons/master/gitea/stats.png) ## About @@ -53,4 +53,4 @@ comparison to installing any other Hass.io add-on. 1. Go to the webui, where you will initialize the app 1. 
Restart the addon, to apply any option that should be applied -[repository]: https://github.com/Mesteriis/hassio-addons-avm +[repository]: https://github.com/alexbelgium/hassio-addons diff --git a/gitea/config.json b/gitea/config.json index 150ca9a..e7ad9cf 100644 --- a/gitea/config.json +++ b/gitea/config.json @@ -27,15 +27,19 @@ "/dev/sdf", "/dev/sdg", "/dev/nvme", + "/dev/nvme0", + "/dev/nvme0n1", "/dev/nvme0n1p1", "/dev/nvme0n1p2", "/dev/nvme0n1p3", + "/dev/nvme1n1", "/dev/nvme1n1p1", "/dev/nvme1n1p2", "/dev/nvme1n1p3", + "/dev/nvme2n1", "/dev/nvme2n1p1", "/dev/nvme2n1p2", - "/dev/nvme3n1p3", + "/dev/nvme2n3p3", "/dev/mmcblk", "/dev/fuse", "/dev/sda1", @@ -97,7 +101,7 @@ }, "slug": "gitea", "udev": true, - "url": "https://github.com/Mesteriis/hassio-addons-avm/tree/master/gitea", - "version": "1.22.0", + "url": "https://github.com/alexbelgium/hassio-addons/tree/master/gitea", + "version": "1.23.5", "webui": "[PROTO:ssl]://[HOST]:[PORT:3000]" } diff --git a/gitea/stats.png b/gitea/stats.png index c0a4e00..5364872 100644 Binary files a/gitea/stats.png and b/gitea/stats.png differ diff --git a/gitea/updater.json b/gitea/updater.json index d3d9660..60d9e5c 100644 --- a/gitea/updater.json +++ b/gitea/updater.json @@ -1,8 +1,8 @@ { - "last_update": "01-06-2024", - "repository": "Mesteriis/hassio-addons-avm", + "last_update": "08-03-2025", + "repository": "alexbelgium/hassio-addons", "slug": "gitea", "source": "github", "upstream_repo": "go-gitea/gitea", - "upstream_version": "1.22.0" + "upstream_version": "1.23.5" } diff --git a/hassio-google-drive-backup/AUTHENTICATION.md b/hassio-google-drive-backup/AUTHENTICATION.md new file mode 100644 index 0000000..098c3be --- /dev/null +++ b/hassio-google-drive-backup/AUTHENTICATION.md @@ -0,0 +1,44 @@ +# Authentication with Google Drive +This document describes how the addon (Home Assistant Google Drive Backup) authenticates with Google Drive and stores your credentials. It's geared toward those who wish to know more detail and is not necessary to take advantage of the full features of the addon. The document is provided in the interest of providing full transparency into how the add-on works. I've tried to describe this as plainly as possible, but it is technical and therefore may not be understandable to everyone. Feedback on its clarity is appreciated. + + > This document describes how authentication works if you use the big blue "AUTHENTICATE WITH GOOGLE DRIVE" button in the addon. If you're using [your own Google Drive credentials](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/LOCAL_AUTH.md), then none of this applies. + +## Your Credentials and the Needed Permission +To have access to any information in Google Drive, Google's authentication servers must be told that the add-on has the permission. The add-on uses [Google Drive's Rest API (v3)](https://developers.google.com/drive/api/v3/about-sdk) for communication and requests the [drive.file](https://developers.google.com/drive/api/v3/about-auth) permission *scope*. This *scope* means the add-on has access to files and folders that the add-on created, but nothing else. It can't see files you've added to Google Drive through their web interface or anywhere else. Google Drive's Rest API allows the addon to periodically check what backups are uploaded and upload new ones if necessary by making requests over the internet. 
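To make the scope concrete, the sketch below shows the kind of request a drive.file token permits: a plain Google Drive v3 `files.list` call. This is an illustration rather than the add-on's actual code, and the token value is a placeholder; under the drive.file scope such a call only returns files and folders the add-on itself created.

```python
# Hedged sketch: list add-on-created files with a drive.file-scoped token.
# The access token is a placeholder obtained through the OAuth flow
# described in the sections that follow.
import requests

ACCESS_TOKEN = "ya29.placeholder-token"

response = requests.get(
    "https://www.googleapis.com/drive/v3/files",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    params={"fields": "files(id, name, size)"},
    timeout=30,
)
response.raise_for_status()
for item in response.json().get("files", []):
    print(item["id"], item["name"])
```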
+ +## Authentication with Google Services +For reference, Google's documentation for how to authenticate users with the Google Drive REST API is [here](https://developers.google.com/drive/api/v3/about-auth). Authentication is handled through [OAuth 2.0](https://developers.google.com/identity/protocols/OAuth2), which means that the add-on never actually sees your Google username and password, only an opaque [security token](https://en.wikipedia.org/wiki/Access_token) used to verify that the addon has been given permission. More detail is provided about what that token is and where it is stored later in this document. + +The way a web-based application would normally authenticate with a Google service (eg Google Drive) looks something like this: +1. User navigates to the app's webpage, eg http://examplegoogleapp.com +2. The app generates a URL to Google's servers (https://accounts.google.com) used to grant the app permission. +3. User navigates there, enters their Google username and password, and confirms the intention to give the app some permission (eg one or more *scopes*). +4. Google redirects the user back to the app's webpage with an access token appended to the URL (eg http://examplegoogleapp.com/authenticate?token=0x12345678) +5. The app stores the access token (0x12345678 in this example), and then passes it back to Google whenever it wishes to make access the API on behalf of the user who logged in. + +This access token allows the app to act as if it is the user who created it. In the case of this add-on, the permission granted by the drive.file scope allows it to create folders, upload backups, and retrieve the previously created folders. Because the add-on only ever sees the access token (not the username/password), and the access token only grants limited permissions, the add-on doesn't have a way to elevate its permission further to access other information in Google Drive or your Google account. + +## Authentication for the Add-on + +Google puts some limitations on how the access token must be generated that will be important for understanding how the add-on authenticates in reality: +* When the user is redirected to https://accounts.google.com (step 2), the redirect must be from a known public website associated with the app. +* When the user is redirected back to the app after authorization (step 4), the redirect must be a statically addressed and publicly accessible website. + +These limitations make a technical problem for the addon because most people's Home Assistant instances aren't publicly accessible and the address is different for each one. Performing the authentication workflow exactly as described above won't work. To get around this, I (the developer of this addon) set up a website, https://habackup.io, which serves as the known public and statically addressable website that Google redirects from/to. The source code for this server is available within the add-on's GitHub repository. + +So when you authenticate the add-on, the workflow looks like this: +1. You start at the add-on's web interface, something like https://homeassistant.local:8123/ingress/hassio_google_drive_backup +2. You click the "Authenticate With Google Drive" button, which takes note of the address of your Home Assistant installation (https://homeassistant.local:8123 in this case) and sends you to https://habackup.io/drive/authorize +3. https://habackup.io immediately generates the Google login URL for you and redirects you to https://accounts.google.com +4. 
You log in with your Google credentials on Google's domain, and confirm you want to give the add-on permission to see files and folders it creates (the drive.file scope) +5. Google redirects you back to https://habackup.io, along with the access token that will be used for future authentication. +6. https://habackup.io redirects you back to your add-on web-UI (which is kept track of in step 2) along with the access token. +7. The addon (on your local Home Assistant installation) persists the access token and uses it in the future any time it needs to talk to Google Drive. + +Notably, your access token isn't persisted at https://habackup.io, it is only passed through back to your local add-on installation. I do this because: +- It ensures your information is only ever stored on your machine, which is reassuring from the user's perspective (eg you). +- If my server (https://habackup.io) ever gets compromised, there isn't any valuable information stored there that compromises you as well. +- This is practicing a form of [defense-in-depth](https://en.wikipedia.org/wiki/Defense_in_depth_%28computing%29) security, where-in [personal data](https://en.wikipedia.org/wiki/Personal_data) is only stored in the places where it is strictly critical. +- It makes the server more simple since it is a stateless machine that doesn't require a database (eg to store your token). + +After your token is generated and stored on your machine, it needs to be *refreshed* periodically with Google Drive. To do this, the addon will again ask https://habackup.io who will relay the request with Google Drive. diff --git a/hassio-google-drive-backup/BACKUP_AND_SNAPSHOT.md b/hassio-google-drive-backup/BACKUP_AND_SNAPSHOT.md new file mode 100644 index 0000000..0a82ab5 --- /dev/null +++ b/hassio-google-drive-backup/BACKUP_AND_SNAPSHOT.md @@ -0,0 +1,123 @@ +# 'Snapshot' vs 'Backup' +In August 2021 [the Home Assistant team announced](https://www.home-assistant.io/blog/2021/08/24/supervisor-update/) that 'snapshots' will be called 'backups' moving forward. This addon exposes a binary sensor to indicate if snapshots are stale and a another sensor that publishes details about backups. Both of the sensors used 'snapshot' in their names and values, so they had to be changed to match the new language. To prevent breaking any existing automations you might have, the addon will only start using the new names and values when you upgrade if you tell it to. + +This can be controlled by using the configuration option ```call_backup_snapshot```, which will use the old names and values for sensors when it is true. If you updated the addon from a version that used to use 'snapshot' in it names, this option will be automatically added when you update to make sure it doesn't break any existing automations. + +Here is a breakdown of what the new and old sensor values mean: + +## Old sensor name/values +These will be the sensor values used when ```call_backup_snapshot: True``` or if the addon is below version 0.105.1. The addon sets ```call_backup_snapshot: True``` automatically if you upgrade the addon from an older version. 
+### Backup Stale Binary Sensor +#### Entity Id: +```yaml +binary_sensor.snapshots_stale +``` +#### Possible states: +```yaml +on +off +``` +#### Example Attributes: +```yaml +friendly_name: Snapshots Stale +device_class: problem +``` +### Backup State Sensor +#### Entity Id: +```yaml +sensor.snapshot_backup +``` +#### Possible States: +```yaml +error +waiting +backed_up +``` +#### Example Attributes: +```yaml +friendly_name: Snapshots State +last_snapshot: 2021-09-01T20:26:49.100376+00:00 +snapshots_in_google_drive: 2 +snapshots_in_hassio: 2 +snapshots_in_home_assistant: 2 +size_in_google_drive: 2.5 GB +size_in_home_assistant: 2.5 GB +snapshots: +- name: Full Snapshot 2021-02-06 11:37:00 + date: '2021-02-06T18:37:00.916510+00:00' + state: Backed Up + slug: DFG123 +- name: Full Snapshot 2021-02-07 11:00:00 + date: '2021-02-07T18:00:00.916510+00:00' + state: Backed Up + slug: DFG124 +``` + +## New Sensor Names/Values +These will be the sensor values used when ```call_backup_snapshot: False``` or if the configuration option is un-set. New installations of the addon will default to this. +### Backup Stale Binary Sensor +#### Entity Id +```yaml +binary_sensor.backups_stale +``` +#### Possible States +```yaml +on +off +``` +#### Example Attributes: +```yaml +friendly_name: Backups Stale +device_class: problem +``` +### Backup State Sensor +#### Entity Id +```yaml +sensor.backup_state +``` +#### Possible States +```yaml +error +waiting +backed_up +``` +#### Example Attributes: +```yaml +friendly_name: Backup State +last_backup: 2021-09-01T20:26:49.100376+00:00 +last_upload: 2021-09-01T20:26:49.100376+00:00 +backups_in_google_drive: 2 +backups_in_home_assistant: 2 +size_in_google_drive: 2.5 GB +size_in_home_assistant: 2.5 GB +backups: +- name: Full Snapshot 2021-02-06 11:37:00 + date: '2021-02-06T18:37:00.916510+00:00 + state: Backed Up + slug: DFG123 +- name: Full Snapshot 2021-02-07 11:00:00 + date: '2021-02-07T18:00:00.916510+00:00' + state: Backed Up + slug: DFG124 +``` + +### What do the values mean? +```binary_sensor.backups_stale``` is "on" when backups are stale and "off"" otherwise. Backups are stale when the addon is 6 hours past a scheduled backup and no new backup has been made. This delay is in place to avoid triggerring on transient errors (eg internet connectivity problems or one-off problems in Home Assistant). + +```sensor.backup_state``` is: +- ```waiting``` when the addon is first booted up or hasn't been connected to Google Drive yet. +- ```error``` immediately after any error is encountered, even transient ones. +- ```backed_up``` when everything is running fine without errors. + +It's attributes are: +- ```last_backup``` The UTC ISO-8601 date of the most recent backup in Home Assistant or Google Drive. +- ```last_upload``` The UTC ISO-8601 date of the most recent backup uploaded to Google Drive. +- ```backups_in_google_drive``` The number of backups in Google Drive. +- ```backups_in_home_assistant``` The number of backups in Home Assistant. +- ```size_in_google_drive``` A string representation of the space used by backups in Google Drive. +- ```size_in_home_assistant``` A string representation of the space used by backups in Home Assistant. +- ```backups``` The list of each snapshot in decending order of date. Each snapshot includes its ```name```, ```date```, ```slug```, and ```state```. ```state``` can be one of: + - ```Backed Up``` if its in Home Assistant and Google Drive. + - ```HA Only``` if its only in Home Assistant. + - ```Drive Only``` if its only in Google Drive. 
+ - ```Pending``` if the snapshot was requested but not yet complete. diff --git a/hassio-google-drive-backup/CHANGELOG.md b/hassio-google-drive-backup/CHANGELOG.md new file mode 100644 index 0000000..6ff6ef2 --- /dev/null +++ b/hassio-google-drive-backup/CHANGELOG.md @@ -0,0 +1,43 @@ +## v0.112.1 [2023-11-03] + +- Added warnings about using the "Stop Addons" feature. I plan on removing this in the near future. If you'd like to keep the feature around, please give your feedback in [this GitHub issue](https://github.com/sabeechen/hassio-google-drive-backup/issues/940). +- When backups are stuck in the "pending" state, the addon now provides you with the Supervisor logs to help figure out whats wrong. +- Added support for the "exclude Home Assistant database" options for automatic backups +- Added configuration options to limit the speed of uploads to Google Drive +- When Google Drive doesn't have enough space, the addon now explains how much space you're using and how much is left. This was a source of confusion for users. +- When the addon halts because it needs to delete more than one backup, it now tells you which backups will be deleted. +- Fixed a bug when using "stop addons" that prevented it from recognizing addons in the "starting" state. +- The addon's containers are now donwloaded from Github (previously was DockerHub) +- Added another redundant token provider, hosted on heroku, that the addon uses for its cloud-required component when you aren't using your own google app credentials. + +## v0.111.1 [2023-06-19] + +- Support for the new network storage features in Home Assistant. The addon will now create backups in what Home Assistant has configured as its default backup location. This can be overridden in the addon's settings. +- Raised the addon's required permissions to "Admin" in order to access the supervisor's mount API. +- Fixed a CSS error causing toast messages to render partially off screen on small displays. +- Fixed misreporting of some error codes from Google Drive when a partial upload can't be resumed. + +## v0.110.4 [2023-04-28] + +- Fix a whitespace error causing authorization to fail. + +## v0.110.3 [2023-03-24] + +- Fix an error causing "Days Between Backups" to be ignored when "Time of Day" for a backup is set. +- Fix a bug causing some timezones to make the addon to fail to start. + +## v0.110.2 [2023-03-24] + +- Fix a potential cause of SSL errors when communicating with Google Drive +- Fix a bug causing backups to be requested indefinitely if scheduled during DST transitions. + +## v0.110.1 [2023-01-09] + +- Adds some additional options for donating +- Mitgigates SD card corruption by redundantly storing config files needed for addon startup. +- Avoid global throttling of Google Drive API calls by: + - Making sync intervals more spread out and a little random. + - Syncing more selectively when there are modifications to the /backup directory. + - Caching data from Google Drive for short periods during periodic syncing. + - Backing off for a longer time (2 hours) when the addon hits permanent errors. +- Fixes CSS issues that made the logs page hard to use. 
diff --git a/hassio-google-drive-backup/DOCS.md b/hassio-google-drive-backup/DOCS.md new file mode 100644 index 0000000..521e351 --- /dev/null +++ b/hassio-google-drive-backup/DOCS.md @@ -0,0 +1,205 @@ +# Home Assistant Add-on: Google Assistant SDK + +## Installation + +To install the add-on, first follow the installation steps from the [README on GitHub](https://github.com/sabeechen/hassio-google-drive-backup#installation). + +## Configuration + +_Note_: The configuration can be changed easily by starting the add-on and clicking `Settings` in the web UI. +The UI explains what each setting is and you don't need to modify anything before clicking `Start`. +If you would still prefer to modify the settings in yaml, the options are detailed below. + +### Add-on configuration example +Don't use this directly, the addon has a lot of configuration options that most users don't need or want: + +```yaml +# Keep 10 backups in Home Assistant +max_backups_in_ha: 10 + +# Keep 10 backups in Google Drive +max_backups_in_google_drive: 10 + +# Create backups in Home Assistant on network storage +backup_location: my_nfs_share + +# Ignore backups the add-on hasn't created +ignore_other_backups: True + +# Ignore backups that look like they were created by Home Assistant automatic backup option during upgrades +ignore_upgrade_backups: True + +# Automatically delete "ignored" snapshots after this many days +delete_ignored_after_days: 7 + +# Take a backup every 3 days +days_between_backups: 3 + +# Create backups at 1:30pm exactly +backup_time_of_day: "13:30" + +# Delete backups from Home Assistant immediately after uploading them to Google Drive +delete_after_upload: True + +# Manually specify the backup folder used in Google Drive +specify_backup_folder: true + +# Use a dark and red theme +background_color: "#242424" +accent_color: "#7D0034" + +# Use a password for backup archives. Use "!secret secret_name" to use a password form your secrets file +backup_password: "super_secret" + +# Create backup names like 'Full Backup HA 0.92.0' +backup_name: "{type} Backup HA {version_ha}" + +# Keep a backup once every day for 3 days and once a week for 4 weeks +generational_days: 3 +generational_weeks: 4 + +# Create partial backups with no folders and no configurator add-on +exclude_folders: "homeassistant,ssl,share,addons/local,media" +exclude_addons: "core_configurator" + +# Turn off notifications and staleness sensor +enable_backup_stale_sensor: false +notify_for_stale_backups: false + +# Enable server directly on port 1627 +expose_extra_server: true + +# Allow sending error reports +send_error_reports: true + +# Delete backups after they're uploaded to Google Drive +delete_after_upload: true +``` + +### Option: `max_backups_in_ha` (default: 4) + +The number of backups the add-on will allow Home Assistant to store locally before old ones are deleted. + +### Option: `max_backups_in_google_drive` (default: 4) + +The number of backups the add-on will keep in Google Drive before old ones are deleted. Google Drive gives you 15GB of free storage (at the time of writing) so plan accordingly if you know how big your backups are. + +### Option: `backup_location` (default: None) +The place where backups are created in Home Assistant before uploading to Google Drive. Can be "local-disk" or the name of any backup network storage you've configured in Home Assistant. Leave unspecified (the default) to have backups created in whatever Home Assistant uses as the default backup location. 
+ +### Option: `ignore_other_backups` (default: False) +Make the addon ignore any backups it didn't directly create. Any backup already uploaded to Google Drive will not be ignored until you delete it from Google Drive. + +### Option: `ignore_upgrade_backups` (default: False) +Ignores backups that look like they were automatically created from updating an add-on or Home Assistant itself. This will make the add-on ignore any partial backup that has only one add-on or folder in it. + +### Option: `days_between_backups` (default: 3) + +How often a new backup should be scheduled, eg `1` for daily and `7` for weekly. + +### Option: `backup_time_of_day` + +The time of day (local time) that new backups should be created in 24-hour ("HH:MM") format. When not specified backups are created at (roughly) the same time of day as the most recent backup. + + +### Options: `delete_after_upload` (default: False) + +Deletes backups from Home Assistant immediately after uploading them to Google Drive. This is useful if you have very limited space inside Home Assistant since you only need to have available space for a single backup locally. + +### Option: `specify_backup_folder` (default: False) + +When true, you must select the folder in Google Drive where backups are stored. Once you turn this on, restart the add-on and visit the Web-UI to be prompted to select the backup folder. + +### Option: `background_color` and `accent_color` + +The background and accent colors for the web UI. You can use this to make the UI fit in with whatever color scheme you use in Home Assistant. When unset, the interface matches Home Assistant's default blue/white style. + +### Option: `backup_password` + +When set, backups are created with a password. You can use a value from your secrets.yaml by prefixing the password with "!secret". You'll need to remember this password when restoring a backup. + +> Example: Use a password for backup archives +> +> ```yaml +> backup_password: "super_secret" +> ``` +> +> Example: Use a password from secrets.yaml +> +> ```yaml +> backup_password: "!secret backup_password" +> ``` + +### Option: `backup_name` (default: "{type} Backup {year}-{month}-{day} {hr24}:{min}:{sec}") + +Sets the name for new backups. Variable parameters of the form `{variable_name}` can be used to modify the name to your liking. A list of available variables is available [here](https://github.com/sabeechen/hassio-google-drive-backup#can-i-give-backups-a-different-name). + +### Option: `generational_*` + +When set, older backups will be kept longer using a [generational backup scheme](https://en.wikipedia.org/wiki/Backup_rotation_scheme). See the [question here](https://github.com/sabeechen/hassio-google-drive-backup#can-i-keep-older-backups-for-longer) for configuration options. + +### Option: `exclude_folders` + +When set, excludes the comma-separated list of folders by creating a partial backup. + +### Option: `exclude_addons` + +When set, excludes the comma-separated list of addons by creating a partial backup. + +_Note_: Folders and add-ons must be identified by their "slug" name. It is recommended to use the `Settings` dialog within the add-on web UI to configure partial backups since these names are esoteric and hard to find. + +### Option: `enable_backup_stale_sensor` (default: True) + +When false, the add-on will not publish the [binary_sensor.backups_stale](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) stale sensor. 
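If you want to consume this sensor outside of Home Assistant automations, a hedged sketch using Home Assistant's REST API could look like the following; the base URL and long-lived access token are assumptions, not values provided by the add-on.

```python
# Hedged sketch: poll the stale-backup sensor via Home Assistant's REST API.
# The base URL and long-lived access token below are placeholders.
import requests

HA_URL = "http://homeassistant.local:8123"
TOKEN = "placeholder-long-lived-access-token"

response = requests.get(
    f"{HA_URL}/api/states/binary_sensor.backups_stale",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=10,
)
response.raise_for_status()
if response.json()["state"] == "on":
    # "on" means the add-on is more than 6 hours past a scheduled backup.
    print("Backups are stale, check the add-on's web UI or logs")
```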
+ +### Option: `enable_backup_state_sensor` (default: True) + +When false, the add-on will not publish the [sensor.backup_state](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) sensor. + +### Option: `notify_for_stale_backups` (default: True) + +When false, the add-on will send a [persistent notification](https://github.com/sabeechen/hassio-google-drive-backup#how-will-i-know-this-will-be-there-when-i-need-it) in Home Assistant when backups are stale. + +--- + +### UI Server Options + +The UI is available through Home Assistant [ingress](https://www.home-assistant.io/blog/2019/04/15/hassio-ingress/). + +It can also be exposed through a web server on port `1627`, which you can map to an externally visible port from the add-on `Network` panel. You can configure a few more options to add SSL or require your Home Assistant username/password. + +#### Option: `expose_extra_server` (default: False) + +Expose the webserver on port `1627`. This is optional, as the add-on is already available with Home Assistant ingress. + +#### Option: `require_login` (default: False) + +When true, requires your home assistant username and password to access the Web UI. + +#### Option: `use_ssl` (default: False) + +When true, the Web UI exposed by `expose_extra_server` will be served over SSL (HTTPS). + +#### Option: `certfile` (default: `/ssl/certfile.pem`) + +Required when `use_ssl: True`. The path to your SSL key file + +#### Option: `keyfile` (default: `/ssl/keyfile.pem`) + +Required when `use_ssl: True`. The path to your SSL cert file. + +#### Option: `verbose` (default: False) + +If true, enable additional debug logging. Useful if you start seeing errors and need to file a bug with me. + +#### Option: `send_error_reports` (default: False) + +When true, the text of unexpected errors will be sent to a database maintained by the developer. This helps identify problems with new releases and provide better context messages when errors come up. + +#### Option: `delete_after_upload` (default: False) + +When true, backups are always deleted after they've been uploaded to Google Drive. 'max_backups_in_ha' is ignored when this option is True, since a backup is always deleted from Home Assistant after it gets uploaded to Google Drive. Some find this useful if they only have enough space on their Home Assistant machine for one backup. + +## FAQ + +Read the [FAQ on GitHub](https://github.com/sabeechen/hassio-google-drive-backup#faq). diff --git a/hassio-google-drive-backup/Dockerfile b/hassio-google-drive-backup/Dockerfile new file mode 100644 index 0000000..7311e8e --- /dev/null +++ b/hassio-google-drive-backup/Dockerfile @@ -0,0 +1,12 @@ +ARG BUILD_FROM +FROM $BUILD_FROM +WORKDIR /app +COPY . /app +RUN chmod +x addon_deps.sh +RUN ./addon_deps.sh +RUN pip3 install . +COPY config.json /usr/local/lib/python3.11/site-packages/config.json + +EXPOSE 1627 +EXPOSE 8099 +ENTRYPOINT ["python3", "-m", "backup"] \ No newline at end of file diff --git a/hassio-google-drive-backup/Dockerfile-server b/hassio-google-drive-backup/Dockerfile-server new file mode 100644 index 0000000..fe79696 --- /dev/null +++ b/hassio-google-drive-backup/Dockerfile-server @@ -0,0 +1,16 @@ +# Use the official lightweight Python image. +# https://hub.docker.com/_/python +FROM python:3.11-buster + +# Copy local code to the container image. +ENV APP_HOME /server +WORKDIR $APP_HOME +COPY . 
./ +COPY config.json /usr/local/lib/python3.11/site-packages/config.json + +# Install server python requirements +RUN pip3 install --trusted-host pypi.python.org -r requirements-server.txt +RUN pip3 install . + +WORKDIR / +ENTRYPOINT ["python3", "-m", "backup.server"] \ No newline at end of file diff --git a/hassio-google-drive-backup/GENERATIONAL_BACKUP.md b/hassio-google-drive-backup/GENERATIONAL_BACKUP.md new file mode 100644 index 0000000..770ee53 --- /dev/null +++ b/hassio-google-drive-backup/GENERATIONAL_BACKUP.md @@ -0,0 +1,41 @@ +# Generational Backup +Generational backup lets you keep a longer history of backups on daily, weekly, monthly, and yearly cycles. This is in contrast to the "regular" scheme for keeping history backups, which will always just delete the oldest backup when needed. This has the effect of keeping older backups around for a longer time, which is particularly useful if you've made a bad configuration change but didn't notice until several days later. + +## Configuration +The generational backup will be used when any one of `generational_days`, `generational_weeks`, `generational_months`, or `generational_years` is greater than zero. All of the available configuration options are given below, but utes much easier to configure from the Settings dialog accessible from the "Settings" menu at the top of the web UI. +* `generational_days` (int): The number of days to keep +* `generational_weeks` (int): The number of weeks to keep +* `generational_months` (int): The number of months to keep +* `generational_years` (int): The number of years to keep +* `generational_day_of_week` (str): The day of the week when weekly backups will be kept. It can be one of 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' or 'sun'. The default is 'mon'. +* `generational_day_of_month` (int): The day of the month when monthly backups will be kept, from 1 to 31. If a month has less than the configured number of days, the latest day of that month is used. +* `generational_day_of_year` (int): The day of the year that yearly backups are kept, from 1 to 365. + +## Some Details to Consider +* Generational backup assumes that a backup is available for every day to work properly, so it's recommended that you set `days_between_backups`=1 if you're using the feature. Otherwise, a backup may not be available to be saved for a given day. +* The backups maintained by generational backup will still never exceed the number you permit to be maintained in Google Drive or Home Assistant. For example, if `max_backups_in_google_drive`=3 and `generational_weeks`=4, then only 3 weeks of backups will be kept in Google Drive. +* Generational backup will only delete older backups when it has to. For example, if you've configured it to keep 5 weekly backups on Monday, you've been running it for a week (so you have 7 backups), and `max_backups_in_google_drive`=7, then your backups on Tuesday, Wednesday, etc won't get deleted yet. They won't get deleted until doing so is necessary to keep older backups around without violating the maximum allowed in Google Drive. +>Note: You can configure the addon to delete backups more aggressively by setting `generational_delete_early`=true. With this, the addon will delete old backups that don't match a daily, weekly, monthly, or yearly configured cycle even if you aren't yet at risk of exceeding `max_backups_in_ha` or `max_backups_in_google_drive`. Careful though! 
You can accidentally delete all your backups this way if you don't have all your settings configured just the way you want them. +* If more than one backup is created for a day (for example if you create one manually) then only the latest backup from that day will be kept. + +## Schedule +Figuring out date math in your head is hard, so it's useful to see a concrete example. Consider you have the following configuration. Two backups for each day, week, month, and year along with a limit in Google drive large enough to accommodate them all: +```json +"days_between_backups": 1, +"generational_days": 2, +"generational_weeks": 2, +"generational_months": 2 +"generational_years": 2 +"max_backups_in_google_drive": 8 +``` +Imagine you've been running the add-on for 2 years now, diligently making a backup every day with no interruptions. On 19 May 2021, you could expect your list of backups in Google Drive to look like this: +- May 19, 2021 <-- 1st Daily backup +- May 18, 2021 <-- 2nd Daily backup +- May 13, 2021 <-- 1st Weekly backup +- May 06, 2021 <-- 2nd Weekly backup +- May 01, 2021 <-- 1st Monthly backup +- April 01, 2021 <-- 2nd Monthly backup +- January 01, 2021 <-- 1st Yearly backup +- January 01, 2020 <-- 2nd Yearly backup + +Note that sometimes a day might overlap more than one schedule. For example, a backup on January 1st could satisfy the constraints for both a yearly and monthly backup. In this case, the add-on will only delete older backups when it *must* to keep from exceeding `max_backups_in_ha` or `max_backups_in_google_drive`. Thus, the most recent backup that would otherwise be deleted will be kept until space is needed somewhere else in the schedule. diff --git a/hassio-google-drive-backup/README.md b/hassio-google-drive-backup/README.md new file mode 100644 index 0000000..94489ae --- /dev/null +++ b/hassio-google-drive-backup/README.md @@ -0,0 +1,34 @@ +# Home Assistant Add-on: Google Drive Backup + +A complete and easy way to upload your Home Assistant backups to Google Drive. + +## About + +Quickly set up a backup strategy without much fuss. It doesn't require much familiarity with Home Assistant, its architecture, or Google Drive. Detailed install instructions are provided below but you can just add the repo, click install and open the Web UI. It will tell you what to do and only takes a few simple clicks. + +>This project requires financial support to make the Google Drive integration work, but it is free for you to use. You can join those helping to keep the lights on at: +> +>[](https://www.buymeacoffee.com/sabeechen) +>[](https://www.paypal.com/paypalme/stephenbeechen) +>[](https://www.patreon.com/bePatron?u=4064183) +>[](https://github.com/sponsors/sabeechen) +>[](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md) +>[](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md) +>[](https://github.com/sabeechen/hassio-google-drive-backup/blob/master/donate-crypto.md) + + +### Features + +- Creates backups on a configurable schedule. +- Uploads backups to Drive, even the ones it didn't create. +- Clean up old backups in Home Assistant and Google Drive, so you don't run out of space. +- Restore from a fresh install or recover quickly from disaster by uploading your backups directly from Google Drive. +- Integrates with Home Assistant Notifications, and provides sensors you can trigger off of. +- Notifies you when something goes wrong with your backups. +- Super easy installation and configuration. 
+- Privacy-centric design philosophy. +- Comprehensive documentation. +- _Most certainly_ doesn't mine bitcoin on your home automation server. Definitely no. + +See the [README on GitHub](https://github.com/sabeechen/hassio-google-drive-backup) for all the details, or just install the add-on and open the Web UI. +The Web-UI explains everything you have to do. diff --git a/hassio-google-drive-backup/addon_deps.sh b/hassio-google-drive-backup/addon_deps.sh new file mode 100644 index 0000000..2b781f6 --- /dev/null +++ b/hassio-google-drive-backup/addon_deps.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +apk add python3 fping linux-headers libc-dev libffi-dev python3-dev gcc py3-pip +pip3 install --upgrade pip wheel setuptools +pip3 install --trusted-host pypi.python.org -r requirements-addon.txt +# Remove packages we only needed for installation +apk del linux-headers libc-dev libffi-dev python3-dev gcc \ No newline at end of file diff --git a/hassio-google-drive-backup/cloudbuild-dev.yaml b/hassio-google-drive-backup/cloudbuild-dev.yaml new file mode 100644 index 0000000..bc1bebb --- /dev/null +++ b/hassio-google-drive-backup/cloudbuild-dev.yaml @@ -0,0 +1,17 @@ +# How to use: +# cd hassio-google-drive-backup +# gcloud config set project hassio-drive-backup +# gcloud builds submit --config cloudbuild-dev.yaml --substitutions _DOCKERHUB_PASSWORD= + +steps: +- name: "gcr.io/cloud-builders/docker" + entrypoint: "bash" + args: ["-c", "docker login --username=sabeechen --password=${_DOCKERHUB_PASSWORD}"] +- name: 'gcr.io/cloud-builders/docker' + args: [ 'build', '-f', 'Dockerfile-addon', '-t', 'sabeechen/hassio-google-drive-backup-dev-amd64:${_VERSION}', "--build-arg", "BUILD_FROM=homeassistant/amd64-base", '.' ] +substitutions: + _DOCKERHUB_PASSWORD: "define me" # default value + _VERSION: "dev-testing" # default value +images: +- "sabeechen/hassio-google-drive-backup-dev-amd64:${_VERSION}" + \ No newline at end of file diff --git a/hassio-google-drive-backup/cloudbuild-server.yaml b/hassio-google-drive-backup/cloudbuild-server.yaml new file mode 100644 index 0000000..b0432e9 --- /dev/null +++ b/hassio-google-drive-backup/cloudbuild-server.yaml @@ -0,0 +1,22 @@ +# How to use: +# gcloud config set project hassio-drive-backup +# gcloud builds submit --config cloudbuild-server.yaml + +#steps: +#- name: 'gcr.io/cloud-builders/docker' +# args: [ 'build', '-f', 'Dockerfile-server', '-t', 'gcr.io/$PROJECT_ID/authserver', '.' 
] +#images: +#- 'gcr.io/$PROJECT_ID/authserver' + +steps: +# Build the container image +- name: 'gcr.io/cloud-builders/docker' + args: ['build', '-f', 'Dockerfile-server', '-t', 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}', '.'] +# Push the container image to Container Registry +- name: 'gcr.io/cloud-builders/docker' + args: ['push', 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}'] +substitutions: + _SERVICE_NAME: "authserver-dev" # default value + _VERSION: "test-deployment" # default value +images: +- 'gcr.io/$PROJECT_ID/${_SERVICE_NAME}:${_VERSION}' \ No newline at end of file diff --git a/hassio-google-drive-backup/config.json b/hassio-google-drive-backup/config.json new file mode 100644 index 0000000..11415b4 --- /dev/null +++ b/hassio-google-drive-backup/config.json @@ -0,0 +1,110 @@ +{ + "name": "Home Assistant Google Drive Backup", + "version": "0.112.1", + "slug": "hassio_google_drive_backup", + "description": "Automatically manage backups between Home Assistant and Google Drive", + "arch": ["armhf", "armv7", "aarch64", "amd64", "i386"], + "url": "https://github.com/sabeechen/hassio-google-drive-backup", + "homeassistant_api": true, + "hassio_api": true, + "hassio_role": "admin", + "auth_api": true, + "ingress": true, + "panel_icon": "mdi:cloud", + "panel_title": "Backups", + "map": ["ssl", "backup:rw", "config"], + "options": { + "max_backups_in_ha": 4, + "max_backups_in_google_drive": 4, + "days_between_backups": 3 + }, + "schema": { + "max_backups_in_ha": "int(0,)?", + "max_backups_in_google_drive": "int(0,)?", + "days_between_backups": "float(0,)?", + "ignore_other_backups": "bool?", + "ignore_upgrade_backups": "bool?", + "backup_storage": "str?", + + "delete_after_upload": "bool?", + "delete_before_new_backup": "bool?", + "verbose": "bool?", + "use_ssl": "bool?", + "certfile": "str?", + "keyfile": "str?", + "require_login": "bool?", + + "backup_name": "str?", + "backup_time_of_day": "match(^[0-2]\\d:[0-5]\\d$)?", + "specify_backup_folder": "bool?", + "warn_for_low_space": "bool?", + "watch_backup_directory": "bool?", + "trace_requests": "bool?", + + "generational_days": "int(0,)?", + "generational_weeks": "int(0,)?", + "generational_months": "int(0,)?", + "generational_years": "int(0,)?", + "generational_day_of_year": "int(1,365)?", + "generational_day_of_month": "int(1,31)?", + "generational_day_of_week": "list(mon|tue|wed|thu|fri|sat|sun)?", + "generational_delete_early": "bool?", + + "notify_for_stale_backups": "bool?", + "enable_backup_stale_sensor": "bool?", + "enable_backup_state_sensor": "bool?", + "send_error_reports": "bool?", + "backup_password": "str?", + "exclude_folders": "str?", + "exclude_addons": "str?", + "exclude_ha_database": "bool?", + "stop_addons": "str?", + "disable_watchdog_when_stopping": "bool?", + "expose_extra_server": "bool?", + "drive_experimental": "bool?", + "drive_ipv4": "match(^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$)?", + "ignore_ipv6_addresses": "bool?", + "confirm_multiple_deletes": "bool?", + "google_drive_timeout_seconds": "float(1,)?", + "alternate_dns_servers": "match(^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})(,[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})*$)?", + "enable_drive_upload": "bool?", + "call_backup_snapshot": "bool?", + + "background_color": "match(^(#[0-9ABCDEFabcdef]{6}|)$)?", + "accent_color": "match(^(#[0-9ABCDEFabcdef]{6}|)$)?", + + "max_sync_interval_seconds": "float(300,)?", + "default_sync_interval_variation": "float(0,1)?", + "port": "int(0,)?", + "debugger_port": "int(100,)?", + 
"log_level": "list(DEBUG|TRACE|INFO|WARN|CRITICAL|WARNING)?", + "console_log_level": "list(DEBUG|TRACE|INFO|WARN|CRITICAL|WARNING)?", + "max_backoff_seconds": "int(3600,)?", + + "max_snapshots_in_hassio": "int(0,)?", + "max_snapshots_in_google_drive": "int(0,)?", + "days_between_snapshots": "float(0,)?", + "ignore_other_snapshots": "bool?", + "ignore_upgrade_snapshots": "bool?", + "delete_before_new_snapshot": "bool?", + "delete_ignored_after_days": "float(0,)?", + "snapshot_name": "str?", + "snapshot_time_of_day": "match(^[0-2]\\d:[0-5]\\d$)?", + "specify_snapshot_folder": "bool?", + "notify_for_stale_snapshots": "bool?", + "enable_snapshot_stale_sensor": "bool?", + "enable_snapshot_state_sensor": "bool?", + "snapshot_password": "str?", + "maximum_upload_chunk_bytes": "float(262144,)?", + "ha_reporting_interval_seconds": "int(1,)?", + + "upload_limit_bytes_per_second": "float(0,)?" + }, + "ports": { + "1627/tcp": 1627 + }, + "ports_description": { + "1627/tcp": "Direct access to the add-on without ingress. Must be enabled in the settings, see 'expose_extra_server'." + }, + "image": "ghcr.io/sabeechen/hassio-google-drive-backup-{arch}" +} diff --git a/hassio-google-drive-backup/dev/__init__.py b/hassio-google-drive-backup/dev/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hassio-google-drive-backup/dev/apiingress.py b/hassio-google-drive-backup/dev/apiingress.py new file mode 100644 index 0000000..bbf2e85 --- /dev/null +++ b/hassio-google-drive-backup/dev/apiingress.py @@ -0,0 +1,404 @@ +from injector import singleton, inject +import asyncio +from ipaddress import ip_address +from typing import Any, Dict, Union, Optional + +import aiohttp +from aiohttp import hdrs, web, ClientSession +from aiohttp.web_exceptions import ( + HTTPBadGateway, + HTTPServiceUnavailable, + HTTPUnauthorized, + HTTPNotFound +) +from multidict import CIMultiDict, istr + +from backup.logger import getLogger +from .ports import Ports +from .base_server import BaseServer +from .simulated_supervisor import SimulatedSupervisor + +ATTR_ADMIN = "admin" +ATTR_ENABLE = "enable" +ATTR_ICON = "icon" +ATTR_PANELS = "panels" +ATTR_SESSION = "session" +ATTR_TITLE = "title" +COOKIE_INGRESS = "ingress_session" +HEADER_TOKEN = "X-Supervisor-Token" +HEADER_TOKEN_OLD = "X-Hassio-Key" +REQUEST_FROM = "HASSIO_FROM" +JSON_RESULT = "result" +JSON_DATA = "data" +JSON_MESSAGE = "message" +RESULT_ERROR = "error" +RESULT_OK = "ok" + +_LOGGER = getLogger(__name__) + + +def api_return_error(message: Optional[str] = None) -> web.Response: + """Return an API error message.""" + return web.json_response( + {JSON_RESULT: RESULT_ERROR, JSON_MESSAGE: message}, status=400 + ) + + +def api_return_ok(data: Optional[Dict[str, Any]] = None) -> web.Response: + """Return an API ok answer.""" + return web.json_response({JSON_RESULT: RESULT_OK, JSON_DATA: data or {}}) + + +def api_process(method): + """Wrap function with true/false calls to rest api.""" + + async def wrap_api(api, *args, **kwargs): + """Return API information.""" + try: + answer = await method(api, *args, **kwargs) + except Exception as err: + return api_return_error(message=str(err)) + + if isinstance(answer, dict): + return api_return_ok(data=answer) + if isinstance(answer, web.Response): + return answer + elif isinstance(answer, bool) and not answer: + return api_return_error() + return api_return_ok() + + return wrap_api + + +class Addon(): + def __init__(self, ports: Ports, token: str): + self.ports = ports + self.ip_address = "127.0.0.1" + self.ingress_port = 
ports.ingress + self.token = token + + +class SysIngress(): + def __init__(self, ports: Ports, token: str, cookie_value: str): + self.ports = ports + self.token = token + self.cookie_value = cookie_value + + def validate_session(self, session): + return session == self.cookie_value + + def get(self, token): + if token == self.token: + return Addon(self.ports, self.token) + return None + + +class CoreSysAttributes(): + def __init__(self, ports: Ports, session: ClientSession, token: str, cookie_value: str): + self.sys_ingress = SysIngress(ports, token, cookie_value) + self.sys_websession = session + + +@singleton +class APIIngress(CoreSysAttributes, BaseServer): + @inject + def __init__(self, ports: Ports, session: ClientSession, supervisor: SimulatedSupervisor): + self.addon_token = self.generateId(10) + self.cookie_value = self.generateId(10) + super().__init__(ports, session, self.addon_token, self.cookie_value) + self.ports = ports + self.supervisor = supervisor + + def routes(self): + return [ + web.get("/startingress", self.start_ingress), + web.get("/hassio/ingress/{slug}", self.ingress_panel), + web.view("/api/hassio_ingress/{token}/{path:.*}", self.handler), + ] + + def start_ingress(self, request: web.Request): + resp = web.Response(status=303) + resp.headers[hdrs.LOCATION] = "/hassio/ingress/" + self.supervisor._addon_slug + resp.set_cookie(name=COOKIE_INGRESS, value=self.cookie_value, expires="Session", domain=request.url.host, path="/api/hassio_ingress/", httponly="false", secure="false") + return resp + + def ingress_panel(self, request: web.Request): + slug = request.match_info.get("slug") + if slug != self.supervisor._addon_slug: + raise HTTPNotFound() + body = """ + + + + + Simulated Supervisor Ingress Panel + + + +
+ The Web-UI below is loaded through an iframe. Start a new ingress session if you get permission errors. +
+ + + + """.format(self.addon_token) + resp = web.Response(body=body, content_type="text/html") + resp.set_cookie(name=COOKIE_INGRESS, value=self.cookie_value, expires="Session", domain=request.url.host, path="/api/hassio_ingress/", httponly="false", secure="false") + return resp + + """ + The class body below here is copied from + https://github.com/home-assistant/supervisor/blob/38b0aea8e2a3b9a9614bb5d94959235a0fae235e/supervisor/api/ingress.py#L35 + In order to correctly reproduce the supervisor's kooky ingress proxy behavior. + """ + + def _extract_addon(self, request: web.Request) -> Addon: + """Return addon, throw an exception it it doesn't exist.""" + token = request.match_info.get("token") + + # Find correct add-on + addon = self.sys_ingress.get(token) + if not addon: + _LOGGER.warning("Ingress for %s not available", token) + raise HTTPServiceUnavailable() + + return addon + + def _check_ha_access(self, request: web.Request) -> None: + # always allow + pass + + def _create_url(self, addon: Addon, path: str) -> str: + """Create URL to container.""" + return f"http://{addon.ip_address}:{addon.ingress_port}/{path}" + + @api_process + async def panels(self, request: web.Request) -> Dict[str, Any]: + """Create a list of panel data.""" + addons = {} + for addon in self.sys_ingress.addons: + addons[addon.slug] = { + ATTR_TITLE: addon.panel_title, + ATTR_ICON: addon.panel_icon, + ATTR_ADMIN: addon.panel_admin, + ATTR_ENABLE: addon.ingress_panel, + } + + return {ATTR_PANELS: addons} + + @api_process + async def create_session(self, request: web.Request) -> Dict[str, Any]: + """Create a new session.""" + self._check_ha_access(request) + + session = self.sys_ingress.create_session() + return {ATTR_SESSION: session} + + async def handler( + self, request: web.Request + ) -> Union[web.Response, web.StreamResponse, web.WebSocketResponse]: + """Route data to Supervisor ingress service.""" + self._check_ha_access(request) + + # Check Ingress Session + session = request.cookies.get(COOKIE_INGRESS) + if not self.sys_ingress.validate_session(session): + _LOGGER.warning("No valid ingress session %s", session) + raise HTTPUnauthorized() + + # Process requests + addon = self._extract_addon(request) + path = request.match_info.get("path") + try: + # Websocket + if _is_websocket(request): + return await self._handle_websocket(request, addon, path) + + # Request + return await self._handle_request(request, addon, path) + + except aiohttp.ClientError as err: + _LOGGER.error("Ingress error: %s", err) + + raise HTTPBadGateway() + + async def _handle_websocket( + self, request: web.Request, addon: Addon, path: str + ) -> web.WebSocketResponse: + """Ingress route for websocket.""" + if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers: + req_protocols = [ + str(proto.strip()) + for proto in request.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") + ] + else: + req_protocols = () + + ws_server = web.WebSocketResponse( + protocols=req_protocols, autoclose=False, autoping=False + ) + await ws_server.prepare(request) + + # Preparing + url = self._create_url(addon, path) + source_header = _init_header(request, addon) + + # Support GET query + if request.query_string: + url = f"{url}?{request.query_string}" + + # Start proxy + async with self.sys_websession.ws_connect( + url, + headers=source_header, + protocols=req_protocols, + autoclose=False, + autoping=False, + ) as ws_client: + # Proxy requests + await asyncio.wait( + [ + _websocket_forward(ws_server, ws_client), + _websocket_forward(ws_client, ws_server), + ], + 
return_when=asyncio.FIRST_COMPLETED, + ) + + return ws_server + + async def _handle_request( + self, request: web.Request, addon: Addon, path: str + ) -> Union[web.Response, web.StreamResponse]: + """Ingress route for request.""" + url = self._create_url(addon, path) + data = await request.read() + source_header = _init_header(request, addon) + + async with self.sys_websession.request( + request.method, + url, + headers=source_header, + params=request.query, + allow_redirects=False, + data=data, + ) as result: + headers = _response_header(result) + + # Simple request + if ( + hdrs.CONTENT_LENGTH in result.headers and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000 + ): + # Return Response + body = await result.read() + + return web.Response( + headers=headers, + status=result.status, + content_type=result.content_type, + body=body, + ) + + # Stream response + response = web.StreamResponse(status=result.status, headers=headers) + response.content_type = result.content_type + + try: + await response.prepare(request) + async for data in result.content.iter_chunked(4096): + await response.write(data) + + except ( + aiohttp.ClientError, + aiohttp.ClientPayloadError, + ConnectionResetError, + ) as err: + _LOGGER.error("Stream error with %s: %s", url, err) + + return response + + +def _init_header( + request: web.Request, addon: str +) -> Union[CIMultiDict, Dict[str, str]]: + """Create initial header.""" + headers = {} + + # filter flags + for name, value in request.headers.items(): + if name in ( + hdrs.CONTENT_LENGTH, + hdrs.CONTENT_ENCODING, + hdrs.SEC_WEBSOCKET_EXTENSIONS, + hdrs.SEC_WEBSOCKET_PROTOCOL, + hdrs.SEC_WEBSOCKET_VERSION, + hdrs.SEC_WEBSOCKET_KEY, + istr(HEADER_TOKEN), + istr(HEADER_TOKEN_OLD), + ): + continue + headers[name] = value + + # Update X-Forwarded-For + forward_for = request.headers.get(hdrs.X_FORWARDED_FOR) + connected_ip = ip_address(request.transport.get_extra_info("peername")[0]) + headers[hdrs.X_FORWARDED_FOR] = f"{forward_for}, {connected_ip!s}" + + return headers + + +def _response_header(response: aiohttp.ClientResponse) -> Dict[str, str]: + """Create response header.""" + headers = {} + + for name, value in response.headers.items(): + if name in ( + hdrs.TRANSFER_ENCODING, + hdrs.CONTENT_LENGTH, + hdrs.CONTENT_TYPE, + hdrs.CONTENT_ENCODING + ): + continue + headers[name] = value + + return headers + + +def _is_websocket(request: web.Request) -> bool: + """Return True if request is a websocket.""" + headers = request.headers + + if ( + "upgrade" in headers.get(hdrs.CONNECTION, "").lower() and headers.get(hdrs.UPGRADE, "").lower() == "websocket" + ): + return True + return False + + +async def _websocket_forward(ws_from, ws_to): + """Handle websocket message directly.""" + try: + async for msg in ws_from: + if msg.type == aiohttp.WSMsgType.TEXT: + await ws_to.send_str(msg.data) + elif msg.type == aiohttp.WSMsgType.BINARY: + await ws_to.send_bytes(msg.data) + elif msg.type == aiohttp.WSMsgType.PING: + await ws_to.ping() + elif msg.type == aiohttp.WSMsgType.PONG: + await ws_to.pong() + elif ws_to.closed: + await ws_to.close(code=ws_to.close_code, message=msg.extra) + except RuntimeError: + _LOGGER.warning("Ingress Websocket runtime error") diff --git a/hassio-google-drive-backup/dev/base_server.py b/hassio-google-drive-backup/dev/base_server.py new file mode 100644 index 0000000..8c80085 --- /dev/null +++ b/hassio-google-drive-backup/dev/base_server.py @@ -0,0 +1,56 @@ +import random +import re +import io +from aiohttp.web import HTTPBadRequest, Request, 
Response +from typing import Any + +rangePattern = re.compile("bytes=\\d+-\\d+") +bytesPattern = re.compile("^bytes \\d+-\\d+/\\d+$") +intPattern = re.compile("\\d+") + + +class BaseServer: + def generateId(self, length: int = 30) -> str: + random_int = random.randint(0, 1000000) + ret = str(random_int) + return ret + ''.join(map(lambda x: str(x), range(0, length - len(ret)))) + + def timeToRfc3339String(self, time) -> str: + return time.strftime("%Y-%m-%dT%H:%M:%SZ") + + def serve_bytes(self, request: Request, bytes: bytearray, include_length: bool = True) -> Any: + if "Range" in request.headers: + # Do range request + if not rangePattern.match(request.headers['Range']): + raise HTTPBadRequest() + + numbers = intPattern.findall(request.headers['Range']) + start = int(numbers[0]) + end = int(numbers[1]) + + if start < 0: + raise HTTPBadRequest() + if start > end: + raise HTTPBadRequest() + if end > len(bytes) - 1: + raise HTTPBadRequest() + resp = Response(body=bytes[start:end + 1], status=206) + resp.headers['Content-Range'] = "bytes {0}-{1}/{2}".format( + start, end, len(bytes)) + if include_length: + resp.headers["Content-length"] = str(len(bytes)) + return resp + else: + resp = Response(body=io.BytesIO(bytes)) + resp.headers["Content-length"] = str(len(bytes)) + return resp + + async def readAll(self, request): + data = bytearray() + content = request.content + while True: + chunk, done = await content.readchunk() + data.extend(chunk) + if len(chunk) == 0: + break + return data diff --git a/hassio-google-drive-backup/dev/data/dev_addon_config.yaml b/hassio-google-drive-backup/dev/data/dev_addon_config.yaml new file mode 100644 index 0000000..661b974 --- /dev/null +++ b/hassio-google-drive-backup/dev/data/dev_addon_config.yaml @@ -0,0 +1,3 @@ +authorization_host: "https://dev.habackup.io" +token_server_hosts: "https://token1.dev.habackup.io,https://dev.habackup.io" +default_drive_client_id: "795575624694-jcdhoh1jr1ngccfsbi2f44arr4jupl79.apps.googleusercontent.com" \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/data/dev_options.json b/hassio-google-drive-backup/dev/data/dev_options.json new file mode 100644 index 0000000..ef34abc --- /dev/null +++ b/hassio-google-drive-backup/dev/data/dev_options.json @@ -0,0 +1,27 @@ +{ + "drive_url": "http://localhost:56153", + "supervisor_url": "http://localhost:56153/", + "hassio_header": "test_header", + "retained_file_path": "hassio-google-drive-backup/dev/data/retained.json", + "data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json", + "backup_directory_path": "hassio-google-drive-backup/dev/backup", + "certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem", + "keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem", + "secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml", + "credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat", + "folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat", + "id_file_path": "hassio-google-drive-backup/dev/data/id.json", + "stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json", + "authorization_host": "http://localhost:56153", + "token_server_hosts": "http://localhost:56153", + "drive_refresh_url": "http://localhost:56153/oauth2/v4/token", + "drive_authorize_url": "http://localhost:56153/o/oauth2/v2/auth", + "drive_device_code_url": "http://localhost:56153/device/code", + "drive_token_url": "http://localhost:56153/token", + "ingress_token_file_path": 
"hassio-google-drive-backup/dev/data/ingress.dat", + "log_level": "TRACE", + "console_log_level": "TRACE", + "ingress_port": 56152, + "port": 56151, + "cache_warmup_max_seconds": 300 +} \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/data/drive_dev_options.json b/hassio-google-drive-backup/dev/data/drive_dev_options.json new file mode 100644 index 0000000..6e5efba --- /dev/null +++ b/hassio-google-drive-backup/dev/data/drive_dev_options.json @@ -0,0 +1,20 @@ +{ + "supervisor_url": "http://localhost:56153/", + "authorization_host": "https://dev.habackup.io", + "token_server_hosts": "https://token1.dev.habackup.io,https://dev.habackup.io", + "hassio_header": "test_header", + "data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json", + "retained_file_path": "hassio-google-drive-backup/dev/data/retained.json", + "backup_directory_path": "hassio-google-drive-backup/dev/backup", + "certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem", + "keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem", + "secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml", + "credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat", + "folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat", + "id_file_path": "hassio-google-drive-backup/dev/data/id.json", + "stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json", + "ingress_token_file_path": "hassio-google-drive-backup/dev/data/ingress.dat", + "default_drive_client_id": "795575624694-jcdhoh1jr1ngccfsbi2f44arr4jupl79.apps.googleusercontent.com", + "ingress_port": 56152, + "port": 56151 +} \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/data/drive_options.json b/hassio-google-drive-backup/dev/data/drive_options.json new file mode 100644 index 0000000..48c9545 --- /dev/null +++ b/hassio-google-drive-backup/dev/data/drive_options.json @@ -0,0 +1,17 @@ +{ + "supervisor_url": "http://localhost:56153/", + "hassio_header": "test_header", + "data_cache_file_path": "hassio-google-drive-backup/dev/data/data_cache.json", + "retained_file_path": "hassio-google-drive-backup/dev/data/retained.json", + "backup_directory_path": "hassio-google-drive-backup/dev/backup", + "certfile": "hassio-google-drive-backup/dev/ssl/fullchain.pem", + "keyfile": "hassio-google-drive-backup/dev/ssl/privkey.pem", + "secrets_file_path": "hassio-google-drive-backup/dev/data/secrets.yaml", + "credentials_file_path": "hassio-google-drive-backup/dev/data/credentials.dat", + "folder_file_path": "hassio-google-drive-backup/dev/data/folder.dat", + "ingress_token_file_path": "hassio-google-drive-backup/dev/data/ingress.dat", + "id_file_path": "hassio-google-drive-backup/dev/data/id.json", + "stop_addon_state_path": "hassio-google-drive-backup/dev/data/stop_addon_state.json", + "ingress_port": 56155, + "port": 56156 +} \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/data/options.json b/hassio-google-drive-backup/dev/data/options.json new file mode 100644 index 0000000..5773758 --- /dev/null +++ b/hassio-google-drive-backup/dev/data/options.json @@ -0,0 +1,11 @@ +{ + "send_error_reports": true, + "max_backups_in_ha": 4, + "max_backups_in_google_drive": 3, + "days_between_backups": 10, + "use_ssl": false, + "backup_name": "{type} Backup {year}-{month}-{day} {hr24}:{min}:{sec}", + "backup_password": "!secret password1", + "drive_experimental": true, + "drive_ipv4": "" +} \ No newline at end of file diff --git 
a/hassio-google-drive-backup/dev/data/secrets.yaml b/hassio-google-drive-backup/dev/data/secrets.yaml new file mode 100644 index 0000000..ffb471e --- /dev/null +++ b/hassio-google-drive-backup/dev/data/secrets.yaml @@ -0,0 +1,2 @@ +password1: "Test value" +for_unit_tests: "password value" \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/deploy.sh b/hassio-google-drive-backup/dev/deploy.sh new file mode 100755 index 0000000..0d47688 --- /dev/null +++ b/hassio-google-drive-backup/dev/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/bash +sudo docker run --rm --privileged \ + -v /home/coder/.docker:/root/.docker \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v ..:/data \ + homeassistant/amd64-builder --all -t /data \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/deploy_addon.py b/hassio-google-drive-backup/dev/deploy_addon.py new file mode 100644 index 0000000..af065d5 --- /dev/null +++ b/hassio-google-drive-backup/dev/deploy_addon.py @@ -0,0 +1,19 @@ +import subprocess +import os +import json +from os.path import abspath, join + +with open(abspath(join(__file__, "..", "..", "config.json"))) as f: + version = json.load(f)["version"] +print("Version will be: " + version) +subprocess.run("docker login", shell=True) + + +platforms = ["amd64", "armv7", "aarch64", "armhf", "i386"] + +os.chdir("hassio-google-drive-backup") +for platform in platforms: + subprocess.run("docker build -f Dockerfile-addon -t sabeechen/hassio-google-drive-backup-{0}:{1} --build-arg BUILD_FROM=homeassistant/{0}-base .".format(platform, version), shell=True) + +for platform in platforms: + subprocess.run("docker push sabeechen/hassio-google-drive-backup-{0}:{1}".format(platform, version), shell=True) diff --git a/hassio-google-drive-backup/dev/deploy_dev_addon.py b/hassio-google-drive-backup/dev/deploy_dev_addon.py new file mode 100644 index 0000000..badce61 --- /dev/null +++ b/hassio-google-drive-backup/dev/deploy_dev_addon.py @@ -0,0 +1,20 @@ +import getpass +import subprocess +import os +import json +from os.path import abspath, join + +with open(abspath(join(__file__, "..", "..", "config.json"))) as f: + version = json.load(f)["version"] + +try: + p = getpass.getpass("Enter DockerHub Password") +except Exception as error: + print('ERROR', error) + exit() + +os.chdir("hassio-google-drive-backup") +print("Setting the appropriate gcloud project...") +subprocess.run("gcloud config set project hassio-drive-backup", shell=True) +print("Building and uploading dev container...") +subprocess.run("gcloud builds submit --config cloudbuild-dev.yaml --substitutions _DOCKERHUB_PASSWORD={0},_VERSION={1}".format(p, version), shell=True) diff --git a/hassio-google-drive-backup/dev/deploy_dev_server.py b/hassio-google-drive-backup/dev/deploy_dev_server.py new file mode 100644 index 0000000..cd03597 --- /dev/null +++ b/hassio-google-drive-backup/dev/deploy_dev_server.py @@ -0,0 +1,8 @@ +import subprocess +import os + +os.chdir("hassio-google-drive-backup") +print("Setting the appropriate gcloud project...") +subprocess.run("gcloud config set project hassio-drive-backup-dev", shell=True) +print("Building and uploading server container...") +subprocess.run("gcloud builds submit --config cloudbuild-server.yaml", shell=True) diff --git a/hassio-google-drive-backup/dev/deploy_server.py b/hassio-google-drive-backup/dev/deploy_server.py new file mode 100644 index 0000000..936f938 --- /dev/null +++ b/hassio-google-drive-backup/dev/deploy_server.py @@ -0,0 +1,8 @@ +import subprocess +import os + 
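+# Point gcloud at the "hassio-drive-backup" project, then submit the Cloud Build
+# defined in cloudbuild-server.yaml to build and upload the server container.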
+os.chdir("hassio-google-drive-backup") +print("Setting the appropriate gcloud project...") +subprocess.run("gcloud config set project hassio-drive-backup", shell=True) +print("Building and uploading server container...") +subprocess.run("gcloud builds submit --config cloudbuild-server.yaml", shell=True) diff --git a/hassio-google-drive-backup/dev/error_tools.py b/hassio-google-drive-backup/dev/error_tools.py new file mode 100644 index 0000000..3da939c --- /dev/null +++ b/hassio-google-drive-backup/dev/error_tools.py @@ -0,0 +1,57 @@ +import argparse +from google.cloud import firestore +from datetime import datetime, timedelta +DELETE_BATCH_SIZE = 200 +STORE_NAME = "error_reports" + + +def delete_old_data(): + # Initialize Firestore + db = firestore.Client() + collection_ref = db.collection(STORE_NAME) + + # Define the datetime for one week ago + week_ago = datetime.now() - timedelta(days=7) + + # Query to find all documents older than a week + total_deleted = 0 + while True: + to_delete = 0 + batch = db.batch() + docs = collection_ref.where('server_time', '<', week_ago).stream() + for doc in docs: + to_delete += 1 + batch.delete(doc.reference) + if to_delete >= DELETE_BATCH_SIZE: + break + if to_delete > 0: + batch.commit() + total_deleted += to_delete + print(f"Deleted {to_delete} documents ({total_deleted} total)") + else: + break + print(f"Success: All documents older than a week deleted ({total_deleted} total)") + + +def main(): + # Create command line argument parser + parser = argparse.ArgumentParser() + + # Add purge argument + parser.add_argument("--purge", help="Delete all documents older than a week.", action="store_true") + + # Add any other argument you want in future. For example: + # parser.add_argument("--future_arg", help="Perform some future operation.") + + args = parser.parse_args() + + # Respond to arguments + if args.purge: + confirm = input('Are you sure you want to delete all documents older than a week? 
(y/n): ') + if confirm.lower() == 'y': + delete_old_data() + else: + print("Abort: No documents were deleted.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/hassio-google-drive-backup/dev/http_exception.py b/hassio-google-drive-backup/dev/http_exception.py new file mode 100644 index 0000000..6a03e19 --- /dev/null +++ b/hassio-google-drive-backup/dev/http_exception.py @@ -0,0 +1,6 @@ +from aiohttp.web import HTTPClientError + + +class HttpMultiException(HTTPClientError): + def __init__(self, code): + self.status_code = code diff --git a/hassio-google-drive-backup/dev/ports.py b/hassio-google-drive-backup/dev/ports.py new file mode 100644 index 0000000..c0ebae7 --- /dev/null +++ b/hassio-google-drive-backup/dev/ports.py @@ -0,0 +1,5 @@ +class Ports: + def __init__(self, server, ui, ingress): + self.server = server + self.ui = ui + self.ingress = ingress diff --git a/hassio-google-drive-backup/dev/request_interceptor.py b/hassio-google-drive-backup/dev/request_interceptor.py new file mode 100644 index 0000000..92ae178 --- /dev/null +++ b/hassio-google-drive-backup/dev/request_interceptor.py @@ -0,0 +1,136 @@ +import re +from aiohttp.web import Request, Response +from asyncio import Event +from aiohttp.web_response import json_response +from injector import singleton, inject +from backup.time import Time +from typing import List + + +class UrlMatch(): + def __init__(self, time: Time, url, fail_after=None, status=None, response=None, wait=False, sleep=None, fail_for=None): + self.time = time + self.url: str = url + self.fail_after: int = fail_after + self.status: int = status + self.wait_event: Event = Event() + self.trigger_event: Event = Event() + self.response: str = "" + self.wait: bool = wait + self.trigger_event.clear() + self.wait_event.clear() + self.sleep = sleep + self.response = response + self.fail_for = fail_for + self.responses = [] + self._calls = 0 + self.time = time + + def addResponse(self, response): + self.responses.append(response) + + def stop(self): + self.wait_event.set() + self.trigger_event.set() + + def isMatch(self, request): + return re.match(self.url, request.url.path) or re.match(self.url, str(request.url)) + + async def waitForCall(self): + await self.trigger_event.wait() + + def clear(self): + self.wait_event.set() + + def callCount(self): + return self._calls + + async def _doAction(self, request: Request): + self._calls += 1 + if len(self.responses) > 0: + return self.responses.pop(0) + if self.status is not None: + await self._readAll(request) + if self.response: + return json_response(self.response, status=self.status) + else: + return Response(status=self.status) + elif self.wait: + self.trigger_event.set() + await self.wait_event.wait() + elif self.sleep is not None: + await self.time.sleepAsync(self.sleep, early_exit=self.wait_event) + + async def called(self, request: Request): + if self.fail_after is None or self.fail_after <= 0: + if self.fail_for is not None and self.fail_for > 0: + self.fail_for -= 1 + return await self._doAction(request) + elif self.fail_for is not None: + return None + + return await self._doAction(request) + elif self.fail_after is not None: + self.fail_after -= 1 + + async def _readAll(self, request: Request): + data = bytearray() + content = request.content + while True: + chunk, done = await content.readchunk() + data.extend(chunk) + if len(chunk) == 0: + break + return data + + +@singleton +class RequestInterceptor: + @inject + def __init__(self): + self._matchers: List[UrlMatch] = [] + 
self._history = [] + self.time = Time() + + def stop(self): + for matcher in self._matchers: + matcher.stop() + + def setError(self, url, status=None, fail_after=None, fail_for=None, response=None) -> UrlMatch: + matcher = UrlMatch(self.time, url, fail_after, status=status, response=response, fail_for=fail_for) + self._matchers.append(matcher) + return matcher + + def clear(self): + self._matchers.clear() + self._history.clear() + + def setWaiter(self, url, attempts=None): + matcher = UrlMatch(self.time, url, attempts, wait=True) + self._matchers.append(matcher) + return matcher + + def setSleep(self, url, attempts=None, sleep=None, wait_for=None): + matcher = UrlMatch(self.time, url, attempts, sleep=sleep, fail_for=wait_for) + self._matchers.append(matcher) + return matcher + + async def checkUrl(self, request): + ret = None + self.record(request) + for match in self._matchers: + if match.isMatch(request): + ret = await match.called(request) + return ret + + def record(self, request: Request): + record = str(request.url.path) + if len(request.url.query_string) > 0: + record += "?" + str(request.url.query_string) + self._history.append(record) + + def urlWasCalled(self, url) -> bool: + for called_url in self._history: + if url == called_url or re.match(url, called_url): + return True + return False diff --git a/hassio-google-drive-backup/dev/simulated_google.py b/hassio-google-drive-backup/dev/simulated_google.py new file mode 100644 index 0000000..651b328 --- /dev/null +++ b/hassio-google-drive-backup/dev/simulated_google.py @@ -0,0 +1,522 @@ +import re + +from yarl import URL +from datetime import timedelta +from backup.logger import getLogger +from backup.config import Setting, Config +from backup.time import Time +from backup.creds import KEY_CLIENT_SECRET, KEY_CLIENT_ID, KEY_ACCESS_TOKEN, KEY_TOKEN_EXPIRY +from aiohttp.web import (HTTPBadRequest, HTTPNotFound, + HTTPUnauthorized, Request, Response, delete, get, + json_response, patch, post, put, HTTPSeeOther) +from injector import inject, singleton +from .base_server import BaseServer, bytesPattern, intPattern +from .ports import Ports +from typing import Any, Dict +from asyncio import Event +from backup.creds import Creds + +logger = getLogger(__name__) + +mimeTypeQueryPattern = re.compile("^mimeType='.*'$") +parentsQueryPattern = re.compile("^'.*' in parents$") +resumeBytesPattern = re.compile("^bytes \\*/\\d+$") + +URL_MATCH_DRIVE_API = "^.*drive.*$" +URL_MATCH_UPLOAD = "^/upload/drive/v3/files/$" +URL_MATCH_UPLOAD_PROGRESS = "^/upload/drive/v3/files/progress/.*$" +URL_MATCH_CREATE = "^/upload/drive/v3/files/progress/.*$" +URL_MATCH_FILE = "^/drive/v3/files/.*$" +URL_MATCH_DEVICE_CODE = "^/device/code$" +URL_MATCH_TOKEN = "^/token$" + + +@singleton +class SimulatedGoogle(BaseServer): + @inject + def __init__(self, config: Config, time: Time, ports: Ports): + self._time = time + self.config = config + + # auth state + self._custom_drive_client_id = self.generateId(5) + self._custom_drive_client_secret = self.generateId(5) + self._custom_drive_client_expiration = None + self._drive_auth_code = "drive_auth_code" + self._port = ports.server + self._auth_token = "" + self._refresh_token = "test_refresh_token" + self._client_id_hack = None + + # Drive item states + self.items = {} + self.lostPermission = [] + self.space_available = 5 * 1024 * 1024 * 1024 + self.usage = 0 + + # Upload state information + self._upload_info: Dict[str, Any] = {} + self.chunks = [] + self._upload_chunk_wait = Event() + self._upload_chunk_trigger = Event() + 
self._current_chunk = 1 + self._waitOnChunk = 0 + self.device_auth_params = {} + self._device_code_accepted = None + + def setDriveSpaceAvailable(self, bytes_available): + self.space_available = bytes_available + + def generateNewAccessToken(self): + new_token = self.generateId(20) + self._auth_token = new_token + + def generateNewRefreshToken(self): + new_token = self.generateId(20) + self._refresh_token = new_token + + def expireCreds(self): + self.generateNewAccessToken() + self.generateNewRefreshToken() + + def expireRefreshToken(self): + self.generateNewRefreshToken() + + def resetDriveAuth(self): + self.expireCreds() + self.config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, self.generateId(5)) + self.config.override(Setting.DEFAULT_DRIVE_CLIENT_SECRET, self.generateId(5)) + + def creds(self): + return Creds(self._time, + id=self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID), + expiration=self._time.now() + timedelta(hours=1), + access_token=self._auth_token, + refresh_token=self._refresh_token) + + def routes(self): + return [ + put('/upload/drive/v3/files/progress/{id}', self._uploadProgress), + post('/upload/drive/v3/files/', self._upload), + post('/drive/v3/files/', self._create), + get('/drive/v3/files/', self._query), + delete('/drive/v3/files/{id}/', self._delete), + patch('/drive/v3/files/{id}/', self._update), + get('/drive/v3/files/{id}/', self._get), + post('/oauth2/v4/token', self._oauth2Token), + get('/o/oauth2/v2/auth', self._oAuth2Authorize), + get('/drive/customcreds', self._getCustomCred), + get('/drive/v3/about', self._driveAbout), + post('/device/code', self._deviceCode), + get('/device', self._device), + get('/debug/google', self._debug), + post('/token', self._driveToken), + ] + + async def _debug(self, request: Request): + return json_response({ + "custom_drive_client_id": self._custom_drive_client_id, + "custom_drive_client_secret": self._custom_drive_client_secret, + "device_auth_params": self.device_auth_params + }) + + async def _checkDriveHeaders(self, request: Request): + if request.headers.get("Authorization", "") != "Bearer " + self._auth_token: + raise HTTPUnauthorized() + + async def _deviceCode(self, request: Request): + params = await request.post() + client_id = params['client_id'] + scope = params['scope'] + if client_id != self._custom_drive_client_id or scope != 'https://www.googleapis.com/auth/drive.file': + raise HTTPUnauthorized() + + self.device_auth_params = { + 'device_code': self.generateId(10), + 'expires_in': 60, + 'interval': 1, + 'user_code': self.generateId(8), + 'verification_url': str(URL("http://localhost").with_port(self._port).with_path("device")) + } + self._device_code_accepted = None + return json_response(self.device_auth_params) + + async def _device(self, request: Request): + code = request.query.get('code') + if code: + if self.device_auth_params.get('user_code', "dfsdfsdfsdfs") == code: + body = "Accepted" + self._device_code_accepted = True + self.generateNewRefreshToken() + self.generateNewAccessToken() + else: + body = "Wrong code" + else: + body = """ + + + + + Simulated Drive Device Authorization + + +
+ Enter the device code provided below +
+
+
+ + +
+ + + """ + resp = Response(body=body, content_type="text/html") + return resp + + async def _oAuth2Authorize(self, request: Request): + query = request.query + if query.get('client_id') != self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID) and query.get('client_id') != self._custom_drive_client_id: + raise HTTPUnauthorized() + if query.get('scope') != 'https://www.googleapis.com/auth/drive.file': + raise HTTPUnauthorized() + if query.get('response_type') != 'code': + raise HTTPUnauthorized() + if query.get('include_granted_scopes') != 'true': + raise HTTPUnauthorized() + if query.get('access_type') != 'offline': + raise HTTPUnauthorized() + if 'state' not in query: + raise HTTPUnauthorized() + if 'redirect_uri' not in query: + raise HTTPUnauthorized() + if query.get('prompt') != 'consent': + raise HTTPUnauthorized() + if query.get('redirect_uri') == 'urn:ietf:wg:oauth:2.0:oob': + return json_response({"code": self._drive_auth_code}) + url = URL(query.get('redirect_uri')).with_query({'code': self._drive_auth_code, 'state': query.get('state')}) + raise HTTPSeeOther(str(url)) + + async def _getCustomCred(self, request: Request): + return json_response({ + "client_id": self._custom_drive_client_id, + "client_secret": self._custom_drive_client_secret + }) + + async def _driveToken(self, request: Request): + data = await request.post() + if not self._checkClientIdandSecret(data.get('client_id'), data.get('client_secret')): + raise HTTPUnauthorized() + if data.get('grant_type') == 'authorization_code': + if data.get('redirect_uri') not in ["http://localhost:{}/drive/authorize".format(self._port), 'urn:ietf:wg:oauth:2.0:oob']: + raise HTTPUnauthorized() + if data.get('code') != self._drive_auth_code: + raise HTTPUnauthorized() + elif data.get('grant_type') == 'urn:ietf:params:oauth:grant-type:device_code': + if data.get('device_code') != self.device_auth_params['device_code']: + raise HTTPUnauthorized() + if self._device_code_accepted is None: + return json_response({ + "error": "authorization_pending", + "error_description": "Precondition Required" + }, status=428) + elif self._device_code_accepted is False: + raise HTTPUnauthorized() + else: + raise HTTPBadRequest() + self.generateNewRefreshToken() + resp = { + 'access_token': self._auth_token, + 'refresh_token': self._refresh_token, + KEY_CLIENT_ID: data.get('client_id'), + KEY_CLIENT_SECRET: self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET), + KEY_TOKEN_EXPIRY: self.timeToRfc3339String(self._time.now()), + } + if self._custom_drive_client_expiration is not None: + resp[KEY_TOKEN_EXPIRY] = self.timeToRfc3339String(self._custom_drive_client_expiration) + return json_response(resp) + + def _checkClientIdandSecret(self, client_id: str, client_secret: str) -> bool: + if self._custom_drive_client_id == client_id and self._custom_drive_client_secret == client_secret: + return True + if client_id == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_ID) == client_id and client_secret == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET): + return True + + if self._client_id_hack is not None: + if client_id == self._client_id_hack and client_secret == self.config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET): + return True + return False + + async def _oauth2Token(self, request: Request): + params = await request.post() + if not self._checkClientIdandSecret(params['client_id'], params['client_secret']): + raise HTTPUnauthorized() + if params['refresh_token'] != self._refresh_token: + raise HTTPUnauthorized() + if params['grant_type'] == 'refresh_token': + 
self.generateNewAccessToken() + return json_response({ + 'access_token': self._auth_token, + 'expires_in': 3600, + 'token_type': 'doesn\'t matter' + }) + elif params['grant_type'] == 'urn:ietf:params:oauth:grant-type:device_code': + if params['device_code'] != self.device_auth_params['device_code']: + raise HTTPUnauthorized() + if not self._device_code_accepted: + return json_response({ + "error": "authorization_pending", + "error_description": "Precondition Required" + }, status=428) + return json_response({ + 'access_token': self._auth_token, + 'expires_in': 3600, + 'token_type': 'doesn\'t matter' + }) + else: + raise HTTPUnauthorized() + + def filter_fields(self, item: Dict[str, Any], fields) -> Dict[str, Any]: + ret = {} + for field in fields: + if field in item: + ret[field] = item[field] + return ret + + def parseFields(self, source: str): + fields = [] + for field in source.split(","): + if field.startswith("files("): + fields.append(field[6:]) + elif field.endswith(")"): + fields.append(field[:-1]) + else: + fields.append(field) + return fields + + def formatItem(self, base, id): + caps = base.get('capabilites', {}) + if 'capabilities' not in base: + base['capabilities'] = caps + if 'canAddChildren' not in caps: + caps['canAddChildren'] = True + if 'canListChildren' not in caps: + caps['canListChildren'] = True + if 'canDeleteChildren' not in caps: + caps['canDeleteChildren'] = True + if 'canTrashChildren' not in caps: + caps['canTrashChildren'] = True + if 'canTrash' not in caps: + caps['canTrash'] = True + if 'canDelete' not in caps: + caps['canDelete'] = True + + for parent in base.get("parents", []): + parent_item = self.items[parent] + # This simulates a very simply shared drive permissions structure + if parent_item.get("driveId", None) is not None: + base["driveId"] = parent_item["driveId"] + base["capabilities"] = parent_item["capabilities"] + base['trashed'] = False + base['id'] = id + base['modifiedTime'] = self.timeToRfc3339String(self._time.now()) + return base + + async def _get(self, request: Request): + id = request.match_info.get('id') + await self._checkDriveHeaders(request) + if id not in self.items: + raise HTTPNotFound() + if id in self.lostPermission: + return Response( + status=403, + content_type="application/json", + text='{"error": {"errors": [{"reason": "forbidden"}]}}') + request_type = request.query.get("alt", "metadata") + if request_type == "media": + # return bytes + item = self.items[id] + if 'bytes' not in item: + raise HTTPBadRequest() + return self.serve_bytes(request, item['bytes'], include_length=False) + else: + fields = request.query.get("fields", "id").split(",") + return json_response(self.filter_fields(self.items[id], fields)) + + async def _update(self, request: Request): + id = request.match_info.get('id') + await self._checkDriveHeaders(request) + if id not in self.items: + return HTTPNotFound + update = await request.json() + for key in update: + if key in self.items[id] and isinstance(self.items[id][key], dict): + self.items[id][key].update(update[key]) + else: + self.items[id][key] = update[key] + return Response() + + async def _driveAbout(self, request: Request): + return json_response({ + 'storageQuota': { + 'usage': self.usage, + 'limit': self.space_available + }, + 'user': { + 'emailAddress': "testing@no.where" + } + }) + + async def _delete(self, request: Request): + id = request.match_info.get('id') + await self._checkDriveHeaders(request) + if id not in self.items: + raise HTTPNotFound() + del self.items[id] + return 
Response() + + async def _query(self, request: Request): + await self._checkDriveHeaders(request) + query: str = request.query.get("q", "") + fields = self.parseFields(request.query.get('fields', 'id')) + if mimeTypeQueryPattern.match(query): + ret = [] + mimeType = query[len("mimeType='"):-1] + for item in self.items.values(): + if item.get('mimeType', '') == mimeType: + ret.append(self.filter_fields(item, fields)) + return json_response({'files': ret}) + elif parentsQueryPattern.match(query): + ret = [] + parent = query[1:-len("' in parents")] + if parent not in self.items: + raise HTTPNotFound() + if parent in self.lostPermission: + return Response( + status=403, + content_type="application/json", + text='{"error": {"errors": [{"reason": "forbidden"}]}}') + for item in self.items.values(): + if parent in item.get('parents', []): + ret.append(self.filter_fields(item, fields)) + return json_response({'files': ret}) + elif len(query) == 0: + ret = [] + for item in self.items.values(): + ret.append(self.filter_fields(item, fields)) + return json_response({'files': ret}) + else: + raise HTTPBadRequest + + async def _create(self, request: Request): + await self._checkDriveHeaders(request) + item = self.formatItem(await request.json(), self.generateId(30)) + self.items[item['id']] = item + return json_response({'id': item['id']}) + + async def _upload(self, request: Request): + logger.info("Drive start upload request") + await self._checkDriveHeaders(request) + if request.query.get('uploadType') != 'resumable': + raise HTTPBadRequest() + mimeType = request.headers.get('X-Upload-Content-Type', None) + if mimeType is None: + raise HTTPBadRequest() + size = int(request.headers.get('X-Upload-Content-Length', -1)) + if size < 0: + raise HTTPBadRequest() + total_size = 0 + for item in self.items.values(): + total_size += item.get('size', 0) + total_size += size + if total_size > self.space_available: + return json_response({ + "error": { + "errors": [ + {"reason": "storageQuotaExceeded"} + ] + } + }, status=400) + metadata = await request.json() + id = self.generateId() + + # Validate parents + if 'parents' in metadata: + for parent in metadata['parents']: + if parent not in self.items: + raise HTTPNotFound() + if parent in self.lostPermission: + return Response(status=403, content_type="application/json", text='{"error": {"errors": [{"reason": "forbidden"}]}}') + self._upload_info['size'] = size + self._upload_info['mime'] = mimeType + self._upload_info['item'] = self.formatItem(metadata, id) + self._upload_info['id'] = id + self._upload_info['next_start'] = 0 + metadata['bytes'] = bytearray() + metadata['size'] = size + resp = Response() + resp.headers['Location'] = "http://localhost:" + \ + str(self._port) + "/upload/drive/v3/files/progress/" + id + return resp + + async def _uploadProgress(self, request: Request): + if self._waitOnChunk > 0: + if self._current_chunk == self._waitOnChunk: + self._upload_chunk_trigger.set() + await self._upload_chunk_wait.wait() + else: + self._current_chunk += 1 + id = request.match_info.get('id') + await self._checkDriveHeaders(request) + if self._upload_info.get('id', "") != id: + raise HTTPBadRequest() + chunk_size = int(request.headers['Content-Length']) + info = request.headers['Content-Range'] + if resumeBytesPattern.match(info): + resp = Response(status=308) + if self._upload_info['next_start'] != 0: + resp.headers['Range'] = "bytes=0-{0}".format(self._upload_info['next_start'] - 1) + return resp + if not bytesPattern.match(info): + raise HTTPBadRequest() 
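+        # The Content-Range header has the form "bytes <start>-<end>/<total>". The three
+        # integers are extracted below and validated against the upload state: the total must
+        # match the size declared when the upload started, the chunk must begin where the
+        # previous one ended, and every chunk except the last must be a multiple of 256 KiB,
+        # mirroring Google Drive's resumable upload requirements.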
+ numbers = intPattern.findall(info) + start = int(numbers[0]) + end = int(numbers[1]) + total = int(numbers[2]) + if total != self._upload_info['size']: + raise HTTPBadRequest() + if start != self._upload_info['next_start']: + raise HTTPBadRequest() + if not (end == total - 1 or chunk_size % (256 * 1024) == 0): + raise HTTPBadRequest() + if end > total - 1: + raise HTTPBadRequest() + + # get the chunk + received_bytes = await self.readAll(request) + + # validate the chunk + if len(received_bytes) != chunk_size: + raise HTTPBadRequest() + + if len(received_bytes) != end - start + 1: + raise HTTPBadRequest() + + self._upload_info['item']['bytes'].extend(received_bytes) + + if len(self._upload_info['item']['bytes']) != end + 1: + raise HTTPBadRequest() + self.usage += len(received_bytes) + self.chunks.append(len(received_bytes)) + if end == total - 1: + # upload is complete, so create the item + completed = self.formatItem(self._upload_info['item'], self._upload_info['id']) + self.items[completed['id']] = completed + return json_response({"id": completed['id']}) + else: + # Return an incomplete response + # For some reason, the tests like to stop right here + resp = Response(status=308) + self._upload_info['next_start'] = end + 1 + resp.headers['Range'] = "bytes=0-{0}".format(end) + return resp diff --git a/hassio-google-drive-backup/dev/simulated_supervisor.py b/hassio-google-drive-backup/dev/simulated_supervisor.py new file mode 100644 index 0000000..23f56cc --- /dev/null +++ b/hassio-google-drive-backup/dev/simulated_supervisor.py @@ -0,0 +1,459 @@ +import asyncio +from asyncio.tasks import sleep +from datetime import timedelta +import random +import string +import io + +from backup.config import Config, Version +from backup.time import Time +from aiohttp.web import (HTTPBadRequest, HTTPNotFound, + HTTPUnauthorized, Request, Response, get, + json_response, post, delete, FileResponse) +from injector import inject, singleton +from .base_server import BaseServer +from .ports import Ports +from typing import Any, Dict +from tests.helpers import all_addons, createBackupTar, parseBackupInfo + +URL_MATCH_BACKUP_FULL = "^/backups/new/full$" +URL_MATCH_BACKUP_DELETE = "^/backups/.*$" +URL_MATCH_BACKUP_DOWNLOAD = "^/backups/.*/download$" +URL_MATCH_MISC_INFO = "^/info$" +URL_MATCH_CORE_API = "^/core/api.*$" +URL_MATCH_START_ADDON = "^/addons/.*/start$" +URL_MATCH_STOP_ADDON = "^/addons/.*/stop$" +URL_MATCH_ADDON_INFO = "^/addons/.*/info$" +URL_MATCH_SELF_OPTIONS = "^/addons/self/options$" + +URL_MATCH_SNAPSHOT = "^/snapshots.*$" +URL_MATCH_BACKUPS = "^/backups.*$" +URL_MATCH_MOUNT = "^/mounts*$" + + +@singleton +class SimulatedSupervisor(BaseServer): + @inject + def __init__(self, config: Config, ports: Ports, time: Time): + self._config = config + self._time = time + self._ports = ports + self._auth_token = "test_header" + self._backups: Dict[str, Any] = {} + self._backup_data: Dict[str, bytearray] = {} + self._backup_lock = asyncio.Lock() + self._backup_inner_lock = asyncio.Lock() + self._entities = {} + self._events = [] + self._attributes = {} + self._notification = None + self._min_backup_size = 1024 * 1024 * 5 + self._max_backup_size = 1024 * 1024 * 5 + self._addon_slug = "self_slug" + self._options = self.defaultOptions() + self._username = "user" + self._password = "pass" + self._addons = all_addons.copy() + self._super_version = Version(2023, 7) + self._mounts = { + 'default_backup_mount': None, + 'mounts': [ + { + "name": "my_media_share", + "usage": "media", + "type": "cifs", + "server": 
"server.local", + "share": "media", + "state": "active" + }, + { + "name": "my_backup_share", + "usage": "backup", + "type": "nfs", + "server": "server.local", + "share": "media", + "state": "active" + } + ] + } + + self.installAddon(self._addon_slug, "Home Assistant Google drive Backup") + self.installAddon("42", "The answer") + self.installAddon("sgadg", "sdgsagsdgsggsd") + + def defaultOptions(self): + return { + "max_backups_in_ha": 4, + "max_backups_in_google_drive": 4, + "days_between_backups": 3 + } + + def routes(self): + return [ + post('/addons/{slug}/options', self._updateOptions), + post("/core/api/services/persistent_notification/dismiss", self._dismissNotification), + post("/core/api/services/persistent_notification/create", self._createNotification), + post("/core/api/events/{name}", self._haEventUpdate), + post("/core/api/states/{entity}", self._haStateUpdate), + post('/auth', self._authenticate), + get('/auth', self._authenticate), + get('/info', self._miscInfo), + get('/addons/self/info', self._selfInfo), + get('/addons', self._allAddons), + get('/addons/{slug}/info', self._addonInfo), + + post('/addons/{slug}/start', self._startAddon), + post('/addons/{slug}/stop', self._stopAddon), + get('/addons/{slug}/logo', self._logoAddon), + get('/addons/{slug}/icon', self._logoAddon), + + get('/core/info', self._coreInfo), + get('/supervisor/info', self._supervisorInfo), + get('/supervisor/logs', self._supervisorLogs), + get('/core/logs', self._coreLogs), + get('/debug/insert/backup', self._debug_insert_backup), + get('/debug/info', self._debugInfo), + post("/debug/mounts", self._setMounts), + + get('/backups', self._getBackups), + get('/mounts', self._getMounts), + delete('/backups/{slug}', self._deletebackup), + post('/backups/new/upload', self._uploadbackup), + post('/backups/new/partial', self._newbackup), + post('/backups/new/full', self._newbackup), + get('/backups/new/full', self._newbackup), + get('/backups/{slug}/download', self._backupDownload), + get('/backups/{slug}/info', self._backupDetail), + get('/debug/backups/lock', self._lock_backups), + + # TODO: remove once the api path is fully deprecated + get('/snapshots', self._getSnapshots), + post('/snapshots/{slug}/remove', self._deletebackup), + post('/snapshots/new/upload', self._uploadbackup), + post('/snapshots/new/partial', self._newbackup), + post('/snapshots/new/full', self._newbackup), + get('/snapshots/new/full', self._newbackup), + get('/snapshots/{slug}/download', self._backupDownload), + get('/snapshots/{slug}/info', self._backupDetail), + ] + + def getEvents(self): + return self._events.copy() + + def getEntity(self, entity): + return self._entities.get(entity) + + def clearEntities(self): + self._entities = {} + + def addon(self, slug): + for addon in self._addons: + if addon["slug"] == slug: + return addon + return None + + def getAttributes(self, attribute): + return self._attributes.get(attribute) + + def getNotification(self): + return self._notification + + def _formatErrorResponse(self, error: str) -> str: + return json_response({'result': error}) + + def _formatDataResponse(self, data: Any) -> Response: + return json_response({'result': 'ok', 'data': data}) + + async def toggleBlockBackup(self): + if self._backup_lock.locked(): + self._backup_lock.release() + else: + await self._backup_lock.acquire() + + async def _verifyHeader(self, request) -> bool: + if request.headers.get("Authorization", None) == "Bearer " + self._auth_token: + return + if request.headers.get("X-Supervisor-Token", None) == 
self._auth_token: + return + raise HTTPUnauthorized() + + async def _getSnapshots(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse({'snapshots': list(self._backups.values())}) + + async def _getBackups(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse({'backups': list(self._backups.values())}) + + async def _getMounts(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse(self._mounts) + + async def _setMounts(self, request: Request): + self._mounts = await request.json() + return self._formatDataResponse({}) + + async def _stopAddon(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + for addon in self._addons: + if addon.get("slug", "") == slug: + if addon.get("state") == "started": + addon["state"] = "stopped" + return self._formatDataResponse({}) + raise HTTPBadRequest() + + async def _logoAddon(self, request: Request): + await self._verifyHeader(request) + return FileResponse('hassio-google-drive-backup/backup/static/images/logo.png') + + async def _startAddon(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + for addon in self._addons: + if addon.get("slug", "") == slug: + if addon.get("state") != "started": + addon["state"] = "started" + return self._formatDataResponse({}) + raise HTTPBadRequest() + + async def _addonInfo(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + for addon in self._addons: + if addon.get("slug", "") == slug: + return self._formatDataResponse({ + 'boot': addon.get("boot"), + 'watchdog': addon.get("watchdog"), + 'state': addon.get("state"), + }) + raise HTTPBadRequest() + + async def _supervisorInfo(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse( + { + 'version': str(self._super_version) + } + ) + + async def _allAddons(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse( + { + "addons": list(self._addons).copy() + } + ) + + async def _supervisorLogs(self, request: Request): + await self._verifyHeader(request) + return Response(body=self.generate_random_text(20, 10, 20)) + + def generate_random_text(self, line_count, min_words=5, max_words=10): + lines = [] + log_levels = ["WARN", "WARNING", "INFO", "ERROR", "DEBUG"] + for _ in range(line_count): + level = random.choice(log_levels) + word_count = random.randint(min_words, max_words) + words = [random.choice(string.ascii_lowercase) for _ in range(word_count)] + line = level + " " + ' '.join(''.join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(3, 10))) for _ in words) + lines.append(line) + return '\n'.join(lines) + + async def _coreLogs(self, request: Request): + await self._verifyHeader(request) + return Response(body="Core Log line 1\nCore Log Line 2") + + async def _coreInfo(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse( + { + "version": "1.3.3.7", + "last_version": "1.3.3.8", + "machine": "VS Dev", + "ip_address": "127.0.0.1", + "arch": "x86", + "image": "image", + "custom": "false", + "boot": "true", + "port": self._ports.server, + "ssl": "false", + "watchdog": "what is this", + "wait_boot": "so many arguments" + } + ) + + async def _internalNewBackup(self, request: Request, input_json, date=None, verify_header=True) -> str: + async with self._backup_lock: + 
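+            # Two locks guard backup creation: tests hold the outer _backup_lock (via
+            # toggleBlockBackup or the /debug/backups/lock route) to simulate the supervisor
+            # being busy, while the inner lock serializes the simulated backup work itself.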
async with self._backup_inner_lock: + if 'wait' in input_json: + await sleep(input_json['wait']) + if verify_header: + await self._verifyHeader(request) + slug = self.generateId(8) + password = input_json.get('password', None) + data = createBackupTar( + slug, + input_json.get('name', "Default name"), + date=date or self._time.now(), + padSize=int(random.uniform(self._min_backup_size, self._max_backup_size)), + included_folders=input_json.get('folders', None), + included_addons=input_json.get('addons', None), + password=password) + backup_info = parseBackupInfo(data) + self._backups[slug] = backup_info + self._backup_data[slug] = bytearray(data.getbuffer()) + return slug + + async def createBackup(self, input_json, date=None): + return await self._internalNewBackup(None, input_json, date=date, verify_header=False) + + async def _newbackup(self, request: Request): + if self._backup_lock.locked(): + raise HTTPBadRequest() + input_json = await request.json() + task = asyncio.shield(asyncio.create_task(self._internalNewBackup(request, input_json))) + return self._formatDataResponse({"slug": await task}) + + async def _lock_backups(self, request: Request): + await self._backup_lock.acquire() + return self._formatDataResponse({"message": "locked"}) + + async def _uploadbackup(self, request: Request): + await self._verifyHeader(request) + try: + reader = await request.multipart() + contents = await reader.next() + received_bytes = bytearray() + while True: + chunk = await contents.read_chunk() + if not chunk: + break + received_bytes.extend(chunk) + info = parseBackupInfo(io.BytesIO(received_bytes)) + self._backups[info['slug']] = info + self._backup_data[info['slug']] = received_bytes + return self._formatDataResponse({"slug": info['slug']}) + except Exception as e: + print(str(e)) + return self._formatErrorResponse("Bad backup") + + async def _deletebackup(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + if slug not in self._backups: + raise HTTPNotFound() + del self._backups[slug] + del self._backup_data[slug] + return self._formatDataResponse("deleted") + + async def _backupDetail(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + if slug not in self._backups: + raise HTTPNotFound() + return self._formatDataResponse(self._backups[slug]) + + async def _backupDownload(self, request: Request): + await self._verifyHeader(request) + slug = request.match_info.get('slug') + if slug not in self._backup_data: + raise HTTPNotFound() + return self.serve_bytes(request, self._backup_data[slug]) + + async def _selfInfo(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse({ + "webui": "http://some/address", + 'ingress_url': "fill me in later", + "slug": self._addon_slug, + "options": self._options + }) + + async def _debugInfo(self, request: Request): + return self._formatDataResponse({ + "config": { + " webui": "http://some/address", + 'ingress_url': "fill me in later", + "slug": self._addon_slug, + "options": self._options + } + }) + + async def _miscInfo(self, request: Request): + await self._verifyHeader(request) + return self._formatDataResponse({ + "supervisor": "super version", + "homeassistant": "ha version", + "hassos": "hassos version", + "hostname": "hostname", + "machine": "machine", + "arch": "Arch", + "supported_arch": "supported arch", + "channel": "channel" + }) + + def installAddon(self, slug, name, version="v1.0", boot=True, started=True): + 
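+        # Register a fake add-on record so the simulated supervisor's /addons,
+        # /addons/{slug}/info, start and stop endpoints have entries to work with.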
self._addons.append({ + "name": 'Name for ' + name, + "slug": slug, + "description": slug + " description", + "version": version, + "watchdog": False, + "boot": "auto" if boot else "manual", + "logo": True, + "ingress_entry": "/api/hassio_ingress/" + slug, + "state": "started" if started else "stopped" + }) + + async def _authenticate(self, request: Request): + await self._verifyHeader(request) + input_json = await request.json() + if input_json.get("username") != self._username or input_json.get("password") != self._password: + raise HTTPBadRequest() + return self._formatDataResponse({}) + + async def _updateOptions(self, request: Request): + slug = request.match_info.get('slug') + + if slug == "self": + await self._verifyHeader(request) + self._options = (await request.json())['options'].copy() + else: + self.addon(slug).update(await request.json()) + return self._formatDataResponse({}) + + async def _haStateUpdate(self, request: Request): + await self._verifyHeader(request) + entity = request.match_info.get('entity') + json = await request.json() + self._entities[entity] = json['state'] + self._attributes[entity] = json['attributes'] + return Response() + + async def _haEventUpdate(self, request: Request): + await self._verifyHeader(request) + name = request.match_info.get('name') + self._events.append((name, await request.json())) + return Response() + + async def _createNotification(self, request: Request): + await self._verifyHeader(request) + notification = await request.json() + print("Created notification with: {}".format(notification)) + self._notification = notification.copy() + return Response() + + async def _dismissNotification(self, request: Request): + await self._verifyHeader(request) + print("Dismissed notification with: {}".format(await request.json())) + self._notification = None + return Response() + + async def _debug_insert_backup(self, request: Request) -> Response: + days_back = int(request.query.get("days")) + date = self._time.now() - timedelta(days=days_back) + name = date.strftime("Full Backup %Y-%m-%d %H:%M-%S") + wait = int(request.query.get("wait", 0)) + slug = await self._internalNewBackup(request, {'name': name, 'wait': wait}, date=date, verify_header=False) + return self._formatDataResponse({'slug': slug}) diff --git a/hassio-google-drive-backup/dev/simulationserver.py b/hassio-google-drive-backup/dev/simulationserver.py new file mode 100644 index 0000000..cfc555d --- /dev/null +++ b/hassio-google-drive-backup/dev/simulationserver.py @@ -0,0 +1,165 @@ +import re +from typing import Dict +from yarl import URL +import aiohttp +from aiohttp.web import (Application, + HTTPException, + Request, Response, get, + json_response, middleware, post, HTTPSeeOther) +from aiohttp.client import ClientSession +from injector import inject, singleton, Injector, provider + +from backup.time import Time +from backup.logger import getLogger +from backup.server import Server +from tests.faketime import FakeTime +from backup.module import BaseModule +from backup.config import Config, Setting +from .http_exception import HttpMultiException +from .simulated_google import SimulatedGoogle +from .base_server import BaseServer +from .ports import Ports +from .request_interceptor import RequestInterceptor +from .simulated_supervisor import SimulatedSupervisor +from .apiingress import APIIngress +import aiorun + +logger = getLogger(__name__) + +mimeTypeQueryPattern = re.compile("^mimeType='.*'$") +parentsQueryPattern = re.compile("^'.*' in parents$") +bytesPattern = re.compile("^bytes 
\\d+-\\d+/\\d+$") +resumeBytesPattern = re.compile("^bytes \\*/\\d+$") +intPattern = re.compile("\\d+") +rangePattern = re.compile("bytes=\\d+-\\d+") + + +@singleton +class SimulationServer(BaseServer): + @inject + def __init__(self, ports: Ports, time: Time, session: ClientSession, authserver: Server, config: Config, google: SimulatedGoogle, supervisor: SimulatedSupervisor, api_ingress: APIIngress, interceptor: RequestInterceptor): + self.interceptor = interceptor + self.google = google + self.supervisor = supervisor + self.config = config + self.id_counter = 0 + self.files: Dict[str, bytearray] = {} + self._port = ports.server + self._time: FakeTime = time + self.urls = [] + self.relative = True + self._authserver = authserver + self._api_ingress = api_ingress + + def wasUrlRequested(self, pattern): + for url in self.urls: + if pattern in url: + return True + return False + + def blockBackups(self): + self.block_backups = True + + def unBlockBackups(self): + self.block_backups = False + + async def uploadfile(self, request: Request): + name: str = str(request.query.get("name", "test")) + self.files[name] = await self.readAll(request) + return Response(text="") + + async def readFile(self, request: Request): + return self.serve_bytes(request, self.files[request.query.get("name", "test")]) + + async def slugRedirect(self, request: Request): + raise HTTPSeeOther("https://localhost:" + str(self.config.get(Setting.INGRESS_PORT))) + + @middleware + async def error_middleware(self, request: Request, handler): + self.urls.append(str(request.url)) + resp = await self.interceptor.checkUrl(request) + if resp is not None: + return resp + try: + resp = await handler(request) + return resp + except Exception as ex: + await self.readAll(request) + if isinstance(ex, HttpMultiException): + return Response(status=ex.status_code) + elif isinstance(ex, HTTPException): + raise + else: + logger.printException(ex) + return json_response(str(ex), status=500) + + def createApp(self): + app = Application(middlewares=[self.error_middleware]) + app.add_routes(self.routes()) + self._authserver.buildApp(app) + return app + + async def start(self, port): + self.runner = aiohttp.web.AppRunner(self.createApp()) + await self.runner.setup() + site = aiohttp.web.TCPSite(self.runner, "0.0.0.0", port=port) + await site.start() + + async def stop(self): + self.interceptor.stop() + await self.runner.shutdown() + await self.runner.cleanup() + + def routes(self): + return [ + get('/readfile', self.readFile), + post('/uploadfile', self.uploadfile), + get('/ingress/self_slug', self.slugRedirect), + get('/debug/config', self.debug_config) + ] + self.google.routes() + self.supervisor.routes() + self._api_ingress.routes() + + async def debug_config(self, request: Request): + return json_response(self.supervisor._options) + + +class SimServerModule(BaseModule): + def __init__(self, base_url: URL): + super().__init__(override_dns=False) + self._base_url = base_url + + @provider + @singleton + def getConfig(self) -> Config: + return Config.withOverrides({ + Setting.DRIVE_AUTHORIZE_URL: str(self._base_url.with_path("o/oauth2/v2/auth")), + Setting.AUTHORIZATION_HOST: str(self._base_url), + Setting.TOKEN_SERVER_HOSTS: str(self._base_url), + Setting.DRIVE_TOKEN_URL: str(self._base_url.with_path("token")), + Setting.DRIVE_DEVICE_CODE_URL: str(self._base_url.with_path("device/code")), + Setting.DRIVE_REFRESH_URL: str(self._base_url.with_path("oauth2/v4/token")), + Setting.INGRESS_PORT: 56152 + }) + + @provider + @singleton + def 
getPorts(self) -> Ports: + return Ports(56153, 56151, 56152) + + +async def main(): + port = 56153 + base = URL("http://localhost").with_port(port) + injector = Injector(SimServerModule(base)) + server = injector.get(SimulationServer) + + # start the server + runner = aiohttp.web.AppRunner(server.createApp()) + await runner.setup() + site = aiohttp.web.TCPSite(runner, "0.0.0.0", port=port) + await site.start() + print("Server started on port " + str(port)) + print("Open a browser at http://localhost:" + str(port)) + + +if __name__ == '__main__': + aiorun.run(main()) diff --git a/hassio-google-drive-backup/dev/ssl/fullchain.pem b/hassio-google-drive-backup/dev/ssl/fullchain.pem new file mode 100644 index 0000000..a83c71f --- /dev/null +++ b/hassio-google-drive-backup/dev/ssl/fullchain.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC5TCCAc2gAwIBAgIJAN+M1w1AVtigMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0xOTAzMjYwMzI2MDJaFw0xOTA0MjUwMzI2MDJaMBQx +EjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBANAa2QE9uHexG6b/ggk7muXB4AhEcpPU+eqGmp4kFx/cKTYe+rPfui4FbARa +QyajXrVRMukEs0wZpUJ11LeGOmuTJ1Cu6mKtk4ub35ZrTfY0W0YdTW0ASYifDNQZ +pt4S0HAcY9A6wlorADxqDkqBt3cSuXdDaR6wFhc4x2kN7xMcKgX5Exv6AS04ksLm +fu0JNSvY1PcLQOA8bFc8tm4eEQcF51xBJBchCcXwpsr5OXt33govGcgxEPLZIueO +nmzzbF0jWBzBhwmjGGnEVsHnxgTG59QshFuB2xf6uWuZolLaPg32b2CV4gomFbn1 +7j4JMFTlxw80OkWILLR6pMr1gy0CAwEAAaM6MDgwFAYDVR0RBA0wC4IJbG9jYWxo +b3N0MAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0B +AQsFAAOCAQEAeK7VMbYO1lQmQcNIG/X42sS5Dm/YFSKgXG0VNMwjEa0xOPS54a6P +a3n7Lb6cVgwSstCSkQa0/Paqy/OvoJlvvgSrV8ZkqwU7100d7gohrReMAhWbRRDK +GkiJDUUQLAT8DXLRry2r5zRDaHX8OzzQuF8dPbFVkjXv9EMpBISY0hmodQFxBmiK +hxiYQWDcNQOTLwRk/x/b61AFLSXduonWM3r+29e8ej7LEHh9UJeLFF7S0+8t+7W4 +F8j8rGWFjYa2KCUFgTOWSg1cUnKYqFaakcMQAlfcXCzuDOso/gwuVFeZZ1hY7gEQ +OHJt0Tu+PWE4CQ3118AIajj2pxTuEHc6Ow== +-----END CERTIFICATE----- diff --git a/hassio-google-drive-backup/dev/ssl/localhost-ca-bundle.csr b/hassio-google-drive-backup/dev/ssl/localhost-ca-bundle.csr new file mode 100644 index 0000000..72aa58e --- /dev/null +++ b/hassio-google-drive-backup/dev/ssl/localhost-ca-bundle.csr @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDAjCCAeoCAQAwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDTzETMBEGA1UE +BwwKU291dGggUGFyazEYMBYGA1UECgwPVW5pdCBUZXN0cyBJbmMuMR4wHAYDVQQL +DBVUZXN0aW5nIERlcHQuIEkgZ3Vlc3MxEjAQBgNVBAMMCWxvY2FsaG9zdDEjMCEG +CSqGSIb3DQEJARYUc3RlcGhlbkBiZWVjaGVucy5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDCu0+68ol5a9ShDmeg41INbwR0QdG0khlzA54Yhu3t +yhEYv7H1XE5JKwSENc1YkBTMlnmbEySW+YMpRXy6R/GoCaNU2wnz6UCdkJQQf6l+ +xIAkaRB+tj7uPpz65olC6tx5CFD+je/A6ZrHzAoEhiKTsQhI5uxexnl191BIQvcj +u7qKaN+TXmvKGlixPrYp4T30EWMDsbONyNjcZr/C4Xs1SzicfscDKt8qiINP8Fgd +tBDxyPIa4deYVKHG/1le9L1ccPFy1+wSQQG3d4YED7h94ajc5chmjMkJnTTYlRKL +XwMZxcsqX9ngHhPvoB5ZahGOLtjyYpxrvduY4kQ8XSaxAgMBAAGgGjAYBgkqhkiG +9w0BCQcxCwwJY2hhbGxlbmdlMA0GCSqGSIb3DQEBCwUAA4IBAQCT+ZSEvz9mJhMA +v71WWd+QjTyT4+9SItLVK3EAcpPbbJWayCuD+mKCGQr5plixC3w+tjy4coIG8lUo +pCX8sXi7TKMVKw6LYvBJeaRRAJ2+exeAQWJvGtRBBohXzm2+SxJ5Zp5+XEY7L3o8 +Apk++px7kLQTSRZxFAQ/irL/cUrp5Sn33ago+bzGA2AGryrqfBbe/nCwlCGF6cV2 +2w9oqY38tPeHQK9+MLOWDE0mBZvu+ab1mpTR7hxFVaVIKOBf8BifSVc4qJ8CDS+l +N4vEnxHIGdTXVp6yjpWN86qidjbLBqS6ZvY1dw6uFuXWSZP7gRixJi4/NUCf0NSO +yd+jFL0b +-----END CERTIFICATE REQUEST----- diff --git a/hassio-google-drive-backup/dev/ssl/localhost.crt b/hassio-google-drive-backup/dev/ssl/localhost.crt new file mode 100644 index 0000000..74e65de --- /dev/null +++ b/hassio-google-drive-backup/dev/ssl/localhost.crt @@ -0,0 +1,18 @@ 
+-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIUUOqXw4hsjBcEzJwlO1o9TYw+f+wwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIwMDIwMzA4MDYyNVoXDTIwMDMw +NDA4MDYyNVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAwrtPuvKJeWvUoQ5noONSDW8EdEHRtJIZcwOeGIbt7coR +GL+x9VxOSSsEhDXNWJAUzJZ5mxMklvmDKUV8ukfxqAmjVNsJ8+lAnZCUEH+pfsSA +JGkQfrY+7j6c+uaJQurceQhQ/o3vwOmax8wKBIYik7EISObsXsZ5dfdQSEL3I7u6 +imjfk15ryhpYsT62KeE99BFjA7GzjcjY3Ga/wuF7NUs4nH7HAyrfKoiDT/BYHbQQ +8cjyGuHXmFShxv9ZXvS9XHDxctfsEkEBt3eGBA+4feGo3OXIZozJCZ002JUSi18D +GcXLKl/Z4B4T76AeWWoRji7Y8mKca73bmOJEPF0msQIDAQABozowODAUBgNVHREE +DTALgglsb2NhbGhvc3QwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMB +MA0GCSqGSIb3DQEBCwUAA4IBAQBsZ29ZHTO6yNGPKWpxfOG38Z+mk6eh6TpbIVze +b7L2cFr/ONEFyz9hnS3kf23S9VsoX0AMdqYZbGmUT/4+d9+Q8hRXv7W3zenUk4KY +SkMfvB3J27w2l9Zx7oYfonBC7SSbfYrCBHgZwsINzdP5aC2q6eFTOadIdcF2bxf9 +FU/4aUyOeCkHAtYkVyxM3F33Qmf7ym7OZYKLn4SrPLFRSYiWRd8w+ww75uinnS5W +bG96OojPYzIZu8rb3b5ISR2BMWP0JVQRdmV+8TG1ekaA6EB5gAven55OxCmIUAJm +UEOLPRtVvJN0SE1S6jZBXBHler7IRDKpxATXbdFBK01s4rDz +-----END CERTIFICATE----- diff --git a/hassio-google-drive-backup/dev/ssl/localhost.key b/hassio-google-drive-backup/dev/ssl/localhost.key new file mode 100644 index 0000000..9e78356 --- /dev/null +++ b/hassio-google-drive-backup/dev/ssl/localhost.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDCu0+68ol5a9Sh +Dmeg41INbwR0QdG0khlzA54Yhu3tyhEYv7H1XE5JKwSENc1YkBTMlnmbEySW+YMp +RXy6R/GoCaNU2wnz6UCdkJQQf6l+xIAkaRB+tj7uPpz65olC6tx5CFD+je/A6ZrH +zAoEhiKTsQhI5uxexnl191BIQvcju7qKaN+TXmvKGlixPrYp4T30EWMDsbONyNjc +Zr/C4Xs1SzicfscDKt8qiINP8FgdtBDxyPIa4deYVKHG/1le9L1ccPFy1+wSQQG3 +d4YED7h94ajc5chmjMkJnTTYlRKLXwMZxcsqX9ngHhPvoB5ZahGOLtjyYpxrvduY +4kQ8XSaxAgMBAAECggEAJ1rt0S2FRSnazjX4EZb/lUFzl/9ZX3ILfKglgnV6jo1B +CUxsrdba54SvI/0vpA9ydKqQpxumUHDa5jNp8sfpefmArfyatVXVvkJi+jaizcDu +2Oz27XTtoP68gSSoZwLKThe1Ls0GwGk1491DxQhK4qhrsTgiW0EneQTjj8cg5XKH +/2l0WDslZDwW8XkJ1iqGi/OPs/X4SHggzX3xEFS2SpDK0e6GovyTfijpaql3MLMR +jnEeF69hUKKN7ADxhWvQ8d5C0CICYUzryGScVUs5312Zl83iOoeaixxfh6UaNOmE +jjdM6Hc7VbYEcfQTdZXyIPrzcz+Tc0DSDW+QsktLMQKBgQDn7j/oCNqLhxa1XnA8 +HgQqUUTav/OWlWpieTmcyZ2LkRRw9MJTnP1FIfIvOXplWFSpbSSArAEzsjpjRt0n +2+7VxwN3qNirNGAk3PZiRXXHq7sE3z39PhLPthpNisYTDTIx8fcYK032uEPHsSSj +i13yKeYqeGOmfnu0nrlmZ9+ThQKBgQDW8MnvhqjMxZDdVdxZKlY/8ihnubVBlp59 +s2SFIrWD1/QcKawCzagJHe/YR865k3ti7XIBghmKwLSMa6ENdTxTSSLHbBXlXJtH +tlWFgfVb8eDi7zo9178W8TrWEB7dSC2F6qMN17wOKWRkyo/c4cYBiAUaNQ1inJjk +ACOvHesAPQKBgHXEttKd3EtJNzC1WYxNOZQ7XBkvqwLlr/V81NJWVhdOffC1eA95 +AeoeyJlOOGZJqgO2Ffj4XkvfzmIm05mvxeDrg0k5hXu5xrAxOzK/ToUrIHXi3dk/ +sdGjCEwjkVyPMNPHp86v/pCvFEvMGWyqEfQrbmJWa1NZmnsmtcHYMOD5AoGAD1AW +Qt9IFVaZ7HraeOvAO0wIPuOHG0Ycwn3OUoHXhq4S8RKy83wtVYDxfmoXOzdbmf+q +mJrpMO5rrnlYfvn0M0bJmIWFxdJkKaa+zwUkMsm3qNM8Rf2h2oOTGn8Jg+BJhfni +ZfERr7yZL2kS+LyI+8DyBBz1eCoJ5mxwHmC2Rk0CgYBcrhxANSpikw07XLRFcvk9 +m79qiEThhmiBf1WVZdtWNi9hR+zs6mWrTk8N8jfLzNLLNMPdAAybF8feeMTa9xpS +zXF9Gqlayzx/+wyPts7ocrdJKikDVdVZauoxG+mNE87VcVEx87ZiboirQVoKSsxe +OmwKminJ/E4GHJCY7RLQAw== +-----END PRIVATE KEY----- diff --git a/hassio-google-drive-backup/dev/ssl/privkey.pem b/hassio-google-drive-backup/dev/ssl/privkey.pem new file mode 100644 index 0000000..a10446b --- /dev/null +++ b/hassio-google-drive-backup/dev/ssl/privkey.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDQGtkBPbh3sRum +/4IJO5rlweAIRHKT1PnqhpqeJBcf3Ck2Hvqz37ouBWwEWkMmo161UTLpBLNMGaVC +ddS3hjprkydQrupirZOLm9+Wa032NFtGHU1tAEmInwzUGabeEtBwHGPQOsJaKwA8 
+ag5Kgbd3Erl3Q2kesBYXOMdpDe8THCoF+RMb+gEtOJLC5n7tCTUr2NT3C0DgPGxX +PLZuHhEHBedcQSQXIQnF8KbK+Tl7d94KLxnIMRDy2SLnjp5s82xdI1gcwYcJoxhp +xFbB58YExufULIRbgdsX+rlrmaJS2j4N9m9gleIKJhW59e4+CTBU5ccPNDpFiCy0 +eqTK9YMtAgMBAAECggEADlvr4UQK+GdGCy3SIST1uSi5dpiSd1TYsa/79zFyTwZ3 +6X4VuleTlx1UqLA5te7L2CL0KlPiszuJxZ4vwUIHwehzbAPFtG1ZouZsdQqOZJCU +Q7A96Wl9qWmgDvp+IxCVRUcQNAv54RLaf1CqD8YHjLXEClCibjWkMJIAYGVPu7ez +44sbXenPi+4OfI5IHhhBm+RmXv6QpP/A4OyIg/X35NoIp+z+J/aajFsb6AMvFejU +kMCj23PUv4MGA0zrc09UDzM/d7qwCeOMCW0QqKidbkZ+UtY3lsSj7b0l50TTEYsf +2sB/xjkUVHg9sJc8ieuf8LaHedvmiQPfECjZU9VhmQKBgQDx0h359EJSvil/iQ4o +OrsmxMz40mi/9pwznF0SUuRyKOsmJsSx7zL3rVFo/YLHOE5Ju4PSDm1OL4drUE0z +2l/0S6tlN4teHU6x969Xqm2vpwKP3jFXpD0zEi4QRGXgqtY1sVFO4ZIKfTa3KKMu +wqNmAB1KczvIkU71ClzqaVUULwKBgQDcTqI1SkwmIGP4PnGbLQTRI8pmw4xx/d7X +bpgAeCegSwfCy94nX7TdDYujhxa1rp3ya5YSnkTTN7oGCXIsZkLjmfFmjiIh3uEk +YX0obydQvVUfnPTPXQP3QhZG2dQtFdUUJOsu1bJKC7a/jcLGqbJzeBUg/Sb0/gXP +KCPCCr5bYwKBgHrbVX94KXoAQvUYnKizrgG0Wq7Pt4hPsmxGNMLqekXFpDJt3+DG +tg4/b+z3X0n3wU6UhhRiYAYo/5P16EM/3yAukZWK8rOOED06qUrQu4lSQGr3Z/ou +5yjbQ6vgFCJgqRP+UmDRGXFazEGh08Yd/QYFaNw6T1VG/eZgrXQqr57hAoGBALcb +qFiQm0ApNc4T4IrwXQuTKtxE9guczUXTxwTE2XKySg4PMmMZehMs+f39/tMdAmyG +HWL2JxBDRhtUaJAcosXXorvxsM7kF88MNGGSGWRTKVgwNY3QqsYtKKTU0jRy6/pl +QRBZT2mZ2NfXdKd4TjkI+s7DekiwhZWLsETMdzEvAoGARDyJNOpPPm/VpDgV08uU +P1yPOT6j8qhQ2dN1mEab0NeyY6HGriUg8y6HJ81Obt4YyVPlEplDJe8TkphWNsby +B93FpH56WF4g8ivKD4oC2JghlWf4c0MgxiWyoNvlHSM7Dmq2UfPDyV+1UhnNH1ty +CUMs7Fjk4BeJbrYmJf3VxYU= +-----END PRIVATE KEY----- diff --git a/hassio-google-drive-backup/icon.png b/hassio-google-drive-backup/icon.png new file mode 100644 index 0000000..107f2b9 Binary files /dev/null and b/hassio-google-drive-backup/icon.png differ diff --git a/hassio-google-drive-backup/logo.png b/hassio-google-drive-backup/logo.png new file mode 100644 index 0000000..9df76d4 Binary files /dev/null and b/hassio-google-drive-backup/logo.png differ diff --git a/hassio-google-drive-backup/requirements-addon.txt b/hassio-google-drive-backup/requirements-addon.txt new file mode 100644 index 0000000..ba9643a --- /dev/null +++ b/hassio-google-drive-backup/requirements-addon.txt @@ -0,0 +1,19 @@ +google-api-python-client +google-auth-httplib2 +google-auth-oauthlib +oauth2client +python-dateutil +watchdog +pyyaml +dnspython +aiorun +aiohttp +aiodns +injector +aiofiles +aiofile +colorlog +aiohttp-jinja2 +aioping +pytz +tzlocal diff --git a/hassio-google-drive-backup/requirements-server.txt b/hassio-google-drive-backup/requirements-server.txt new file mode 100644 index 0000000..2104e54 --- /dev/null +++ b/hassio-google-drive-backup/requirements-server.txt @@ -0,0 +1,20 @@ +aiodns +aiofiles +aiofile +aiohttp +aiorun +colorlog +dnspython +google-cloud-logging +google-cloud-firestore +injector +oauth2client +ptvsd +python-dateutil +pyyaml +watchdog +aiohttp-jinja2 +firebase-admin +pytz +tzlocal +aioping diff --git a/hassio-google-drive-backup/setup.py b/hassio-google-drive-backup/setup.py new file mode 100644 index 0000000..64497b3 --- /dev/null +++ b/hassio-google-drive-backup/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup, find_packages +setup( + name="hgdb", + packages=find_packages(), + package_data={ + 'backup': ['static/*', 'static/*/*', 'static/*/*/*'] + } +) diff --git a/hassio-google-drive-backup/snapshot.json b/hassio-google-drive-backup/snapshot.json new file mode 100644 index 0000000..e69de29 diff --git a/hassio-google-drive-backup/tests/__init__.py b/hassio-google-drive-backup/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/hassio-google-drive-backup/tests/conftest.py b/hassio-google-drive-backup/tests/conftest.py new file mode 100644 index 0000000..f124c0c --- /dev/null +++ b/hassio-google-drive-backup/tests/conftest.py @@ -0,0 +1,427 @@ +import json +import logging +import os +import tempfile +import asyncio +import platform +import aiohttp +from yarl import URL + +import pytest +from aiohttp import ClientSession +from injector import (ClassAssistedBuilder, Injector, Module, inject, provider, +                      singleton) + +from backup.config import Config, Setting +from backup.model import Coordinator +from dev.simulationserver import SimulationServer +from backup.drive import DriveRequests, DriveSource, FolderFinder, AuthCodeQuery +from backup.util import GlobalInfo, Estimator, Resolver, DataCache +from backup.ha import HaRequests, HaSource, HaUpdater +from backup.logger import reset +from backup.model import DummyBackup, DestinationPrecache, Model +from backup.time import Time +from backup.module import BaseModule +from backup.debugworker import DebugWorker +from backup.creds import Creds, DriveRequester +from backup.server import ErrorStore +from backup.ha import AddonStopper +from backup.ui import UiServer +from backup.watcher import Watcher +from .faketime import FakeTime +from .helpers import Uploader, createBackupTar +from dev.ports import Ports +from dev.simulated_google import SimulatedGoogle +from dev.request_interceptor import RequestInterceptor +from dev.simulated_supervisor import SimulatedSupervisor + + +@singleton +class FsFaker(): +    @inject +    def __init__(self): +        self.bytes_free = 1024 * 1024 * 1024 +        self.bytes_total = 1024 * 1024 * 1024 +        self.old_method = None + +    def start(self): +        if platform.system() != "Windows": +            self.old_method = os.statvfs +            os.statvfs = self._hijack + +    def stop(self): +        if platform.system() != "Windows": +            os.statvfs = self.old_method + +    def _hijack(self, path): +        return os.statvfs_result((0, 1, int(self.bytes_total), int(self.bytes_free), int(self.bytes_free), 0, 0, 0, 0, 255)) + +    def setFreeBytes(self, bytes_free, bytes_total=1): +        self.bytes_free = bytes_free +        self.bytes_total = bytes_total +        if self.bytes_free > self.bytes_total: +            self.bytes_total = self.bytes_free + + +class ReaderHelper: +    def __init__(self, session, ui_port, ingress_port): +        self.session = session +        self.ui_port = ui_port +        self.ingress_port = ingress_port +        self.timeout = aiohttp.ClientTimeout(total=20) + +    def getUrl(self, ingress=True, ssl=False): +        if ssl: +            protocol = "https" +        else: +            protocol = "http" +        if ingress: +            return protocol + "://localhost:" + str(self.ingress_port) + "/" +        else: +            return protocol + "://localhost:" + str(self.ui_port) + "/" + +    async def getjson(self, path, status=200, json=None, auth=None, ingress=True, ssl=False, sslcontext=None): +        async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, ssl=sslcontext, timeout=self.timeout) as resp: +            assert resp.status == status +            return await resp.json() + +    async def get(self, path, status=200, json=None, auth=None, ingress=True, ssl=False): +        async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, timeout=self.timeout) as resp: +            if resp.status != status: +                import logging +                logging.getLogger().error(await resp.text()) +            assert resp.status == status +            return await resp.text() + +    async def postjson(self, path, status=200, json=None, ingress=True): +        async with self.session.post(self.getUrl(ingress) + path, json=json, timeout=self.timeout) as resp: +            assert resp.status == 
status + return await resp.json() + + async def assertError(self, path, error_type="generic_error", status=500, ingress=True, json=None): + logging.getLogger().info("Requesting " + path) + data = await self.getjson(path, status=status, ingress=ingress, json=json) + assert data['error_type'] == error_type + + +# This module should onyl ever have bindings that can also be satisfied by MainModule +class TestModule(Module): + def __init__(self, config: Config, ports: Ports): + self.ports = ports + self.config = config + + @provider + @singleton + def getDriveCreds(self, time: Time) -> Creds: + return Creds(time, "test_client_id", time.now(), "test_access_token", "test_refresh_token", "test_client_secret") + + @provider + @singleton + def getTime(self) -> Time: + return FakeTime() + + @provider + @singleton + def getPorts(self) -> Ports: + return self.ports + + @provider + @singleton + def getConfig(self) -> Config: + return self.config + + +@pytest.fixture +def event_loop(): + if platform.system() == "Windows": + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + return asyncio.new_event_loop() + + +@pytest.fixture +async def generate_config(server_url: URL, ports, cleandir): + return Config.withOverrides({ + Setting.DRIVE_URL: str(server_url), + Setting.SUPERVISOR_URL: str(server_url) + "/", + Setting.AUTHORIZATION_HOST: str(server_url), + Setting.TOKEN_SERVER_HOSTS: str(server_url), + Setting.DRIVE_REFRESH_URL: str(server_url.with_path("/oauth2/v4/token")), + Setting.DRIVE_AUTHORIZE_URL: str(server_url.with_path("/o/oauth2/v2/auth")), + Setting.DRIVE_TOKEN_URL: str(server_url.with_path("/token")), + Setting.DRIVE_DEVICE_CODE_URL: str(server_url.with_path("/device/code")), + Setting.SUPERVISOR_TOKEN: "test_header", + Setting.SECRETS_FILE_PATH: "secrets.yaml", + Setting.CREDENTIALS_FILE_PATH: "credentials.dat", + Setting.FOLDER_FILE_PATH: "folder.dat", + Setting.RETAINED_FILE_PATH: "retained.json", + Setting.ID_FILE_PATH: "id.json", + Setting.DATA_CACHE_FILE_PATH: "data_cache.json", + Setting.STOP_ADDON_STATE_PATH: "stop_addon.json", + Setting.INGRESS_TOKEN_FILE_PATH: "ingress.dat", + Setting.DEFAULT_DRIVE_CLIENT_ID: "test_client_id", + Setting.DEFAULT_DRIVE_CLIENT_SECRET: "test_client_secret", + Setting.BACKUP_DIRECTORY_PATH: os.path.join(cleandir, "backups"), + Setting.PORT: ports.ui, + Setting.INGRESS_PORT: ports.ingress, + Setting.BACKUP_STARTUP_DELAY_MINUTES: 0, + Setting.PING_TIMEOUT: 0.1, + }) + + +@pytest.fixture +async def injector(cleandir, ports, generate_config): + drive_creds = Creds(FakeTime(), "test_client_id", None, "test_access_token", "test_refresh_token") + + os.mkdir(os.path.join(cleandir, "backups")) + with open(os.path.join(cleandir, "secrets.yaml"), "w") as f: + f.write("for_unit_tests: \"password value\"\n") + + with open(os.path.join(cleandir, "credentials.dat"), "w") as f: + f.write(json.dumps(drive_creds.serialize())) + + return Injector([BaseModule(), TestModule(generate_config, ports)]) + + +@pytest.fixture +async def ui_server(injector, server): + os.mkdir("static") + server = injector.get(UiServer) + await server.run() + yield server + await server.shutdown() + + +@pytest.fixture +def reader(server, ui_server, session, ui_port, ingress_port): + return ReaderHelper(session, ui_port, ingress_port) + + +@pytest.fixture +async def uploader(injector: Injector, server_url): + return injector.get(ClassAssistedBuilder[Uploader]).build(host=str(server_url)) + + +@pytest.fixture +async def google(injector: Injector): + return 
injector.get(SimulatedGoogle) + + +@pytest.fixture +async def interceptor(injector: Injector): + return injector.get(RequestInterceptor) + + +@pytest.fixture +async def supervisor(injector: Injector, server, session): + return injector.get(SimulatedSupervisor) + + +@pytest.fixture +async def addon_stopper(injector: Injector): + return injector.get(AddonStopper) + + +@pytest.fixture +async def server(injector, port, drive_creds: Creds, session): + server = injector.get(SimulationServer) + + # start the server + logging.getLogger().info("Starting SimulationServer on port " + str(port)) + await server.start(port) + yield server + await server.stop() + + +@pytest.fixture +async def data_cache(injector): + return injector.get(DataCache) + + +@pytest.fixture +async def session(injector): + async with injector.get(ClientSession) as session: + yield session + + +@pytest.fixture +async def precache(injector): + return injector.get(DestinationPrecache) + + +@pytest.fixture +async def backup(coord, source, dest): + await coord.sync() + assert len(coord.backups()) == 1 + return coord.backups()[0] + + +@pytest.fixture +async def fs(injector): + faker = injector.get(FsFaker) + faker.start() + yield faker + faker.stop() + + +@pytest.fixture +async def estimator(injector, fs): + return injector.get(Estimator) + + +@pytest.fixture +async def device_code(injector): + return injector.get(AuthCodeQuery) + + +@pytest.fixture +async def error_store(injector): + return injector.get(ErrorStore) + + +@pytest.fixture +async def model(injector): + return injector.get(Model) + + +@pytest.fixture +async def global_info(injector): + return injector.get(GlobalInfo) + + +@pytest.fixture +async def server_url(port): + return URL("http://localhost:").with_port(port) + + +@pytest.fixture +async def ports(unused_tcp_port_factory): + return Ports(unused_tcp_port_factory(), unused_tcp_port_factory(), unused_tcp_port_factory()) + + +@pytest.fixture +async def port(ports: Ports): + return ports.server + + +@pytest.fixture +async def ui_url(ports: Ports): + return URL("http://localhost").with_port(ports.ingress) + + +@pytest.fixture +async def ui_port(ports: Ports): + return ports.ui + + +@pytest.fixture +async def ingress_port(ports: Ports): + return ports.ingress + + +@pytest.fixture +async def coord(injector): + return injector.get(Coordinator) + + +@pytest.fixture() +async def updater(injector): + return injector.get(HaUpdater) + + +@pytest.fixture() +async def cleandir(): + newpath = tempfile.mkdtemp() + os.chdir(newpath) + return newpath + + +@pytest.fixture +async def time(injector): + reset() + return injector.get(Time) + + +@pytest.fixture +async def config(injector): + return injector.get(Config) + + +@pytest.fixture +async def drive_creds(injector): + return injector.get(Creds) + + +@pytest.fixture +async def drive(injector, server, session): + return injector.get(DriveSource) + + +@pytest.fixture +async def ha(injector, server, session): + return injector.get(HaSource) + + +@pytest.fixture +async def ha_requests(injector, server): + return injector.get(HaRequests) + + +@pytest.fixture +async def drive_requests(injector, server): + return injector.get(DriveRequests) + + +@pytest.fixture +async def drive_requester(injector, server): + return injector.get(DriveRequester) + + +@pytest.fixture(autouse=True) +def verify_closed_responses(drive_requester: DriveRequester): + yield "unused" + for resp in drive_requester.all_resposnes: + assert resp.closed + + +@pytest.fixture +async def resolver(injector): + return 
injector.get(Resolver) + + +@pytest.fixture +async def client_identifier(injector): + return injector.get(Config).clientIdentifier() + + +@pytest.fixture +async def debug_worker(injector): + return injector.get(DebugWorker) + + +@pytest.fixture() +async def folder_finder(injector): + return injector.get(FolderFinder) + + +@pytest.fixture() +async def watcher(injector): + watcher = injector.get(Watcher) + yield watcher + await watcher.stop() + + +class BackupHelper(): + def __init__(self, uploader, time): + self.time = time + self.uploader = uploader + + async def createFile(self, size=1024 * 1024 * 2, slug="testslug", name="Test Name"): + from_backup: DummyBackup = DummyBackup( + name, self.time.toUtc(self.time.local(1985, 12, 6)), "fake source", slug) + data = await self.uploader.upload(createBackupTar(slug, name, self.time.now(), size)) + return from_backup, data + + +@pytest.fixture +def backup_helper(uploader, time): + return BackupHelper(uploader, time) diff --git a/hassio-google-drive-backup/tests/drive/__init__.py b/hassio-google-drive-backup/tests/drive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hassio-google-drive-backup/tests/drive/test_driverequests.py b/hassio-google-drive-backup/tests/drive/test_driverequests.py new file mode 100644 index 0000000..aca3638 --- /dev/null +++ b/hassio-google-drive-backup/tests/drive/test_driverequests.py @@ -0,0 +1,71 @@ +import os +import json +from time import sleep + +import pytest +import asyncio +from yarl import URL +from aiohttp.client_exceptions import ClientResponseError +from backup.config import Config, Setting +from dev.simulationserver import SimulationServer +from dev.simulated_google import SimulatedGoogle, URL_MATCH_UPLOAD_PROGRESS, URL_MATCH_FILE +from dev.request_interceptor import RequestInterceptor +from backup.drive import DriveSource, FolderFinder, DriveRequests, RETRY_SESSION_ATTEMPTS, UPLOAD_SESSION_EXPIRATION_DURATION, URL_START_UPLOAD +from backup.drive.driverequests import (BASE_CHUNK_SIZE, CHUNK_UPLOAD_TARGET_SECONDS) +from backup.drive.drivesource import FOLDER_MIME_TYPE +from backup.exceptions import (BackupFolderInaccessible, BackupFolderMissingError, + DriveQuotaExceeded, ExistingBackupFolderError, + GoogleCantConnect, GoogleCredentialsExpired, + GoogleInternalError, GoogleUnexpectedError, + GoogleSessionError, GoogleTimeoutError, CredRefreshMyError, CredRefreshGoogleError) +from backup.creds import Creds +from backup.model import DriveBackup, DummyBackup +from ..faketime import FakeTime +from ..helpers import compareStreams, createBackupTar + + +class BackupHelper(): + def __init__(self, uploader, time): + self.time = time + self.uploader = uploader + + async def createFile(self, size=1024 * 1024 * 2, slug="testslug", name="Test Name", note=None): + from_backup: DummyBackup = DummyBackup( + name, self.time.toUtc(self.time.local(1985, 12, 6)), "fake source", slug, note=note, size=size) + data = await self.uploader.upload(createBackupTar(slug, name, self.time.now(), size)) + return from_backup, data + + +@pytest.mark.asyncio +async def test_minimum_chunk_size(drive_requests: DriveRequests, time: FakeTime, backup_helper: BackupHelper, config: Config): + config.override(Setting.UPLOAD_LIMIT_BYTES_PER_SECOND, BASE_CHUNK_SIZE) + from_backup, data = await backup_helper.createFile(BASE_CHUNK_SIZE * 10) + async with data: + async for progress in drive_requests.create(data, {}, "unused"): + assert time.sleeps[-1] == 1 + assert len(time.sleeps) == 11 + + +@pytest.mark.asyncio +async def 
test_lower_chunk_size(drive_requests: DriveRequests, time: FakeTime, backup_helper: BackupHelper, config: Config): + config.override(Setting.UPLOAD_LIMIT_BYTES_PER_SECOND, BASE_CHUNK_SIZE / 2) + from_backup, data = await backup_helper.createFile(BASE_CHUNK_SIZE * 10) + + # It should still upload in 256 kb chunks, just with more delay + async with data: + async for progress in drive_requests.create(data, {}, "unused"): + assert time.sleeps[-1] == 2 + assert len(time.sleeps) == 11 + + +@pytest.mark.asyncio +async def test_higher_speed_limit(drive_requests: DriveRequests, time: FakeTime, backup_helper: BackupHelper, config: Config): + config.override(Setting.UPLOAD_LIMIT_BYTES_PER_SECOND, BASE_CHUNK_SIZE * 2) + from_backup, data = await backup_helper.createFile(BASE_CHUNK_SIZE * 10) + + # It should still upload in 256 kb chunks, just with more delay + async with data: + async for progress in drive_requests.create(data, {}, "unused"): + assert time.sleeps[-1] == 0.5 + assert len(time.sleeps) == 11 + diff --git a/hassio-google-drive-backup/tests/faketime.py b/hassio-google-drive-backup/tests/faketime.py new file mode 100644 index 0000000..69c6d55 --- /dev/null +++ b/hassio-google-drive-backup/tests/faketime.py @@ -0,0 +1,54 @@ +import asyncio +from datetime import datetime, timedelta +from backup.time import Time +from pytz import timezone + + +class FakeTime(Time): + def __init__(self, now: datetime = None): + super().__init__(local_tz=timezone('EST')) + if now: + self._now = now + else: + self._now = self.toUtc( + datetime(1985, 12, 6, 0, 0, 0, tzinfo=timezone('EST'))) + self._start = self._now + self.sleeps = [] + + def setTimeZone(self, tz): + if isinstance(tz, str): + self.local_tz = timezone(tz) + else: + self.local_tz = tz + + def monotonic(self): + return (self._now - self._start).total_seconds() + + def setNow(self, now: datetime): + self._now = now + return self + + def advanceDay(self, days=1): + return self.advance(days=1) + + def advance(self, days=0, hours=0, minutes=0, seconds=0, duration=None): + self._now = self._now + \ + timedelta(days=days, hours=hours, seconds=seconds, minutes=minutes) + if duration is not None: + self._now = self._now + duration + return self + + def now(self) -> datetime: + return self._now + + def nowLocal(self) -> datetime: + return self.toLocal(self._now) + + async def sleepAsync(self, seconds: float, _exit_early: asyncio.Event = None): + self.sleeps.append(seconds) + self._now = self._now + timedelta(seconds=seconds) + # allow the task to be interrupted if such a thing is requested. + await asyncio.sleep(0) + + def clearSleeps(self): + self.sleeps = [] diff --git a/hassio-google-drive-backup/tests/helpers.py b/hassio-google-drive-backup/tests/helpers.py new file mode 100644 index 0000000..68a4232 --- /dev/null +++ b/hassio-google-drive-backup/tests/helpers.py @@ -0,0 +1,219 @@ +import json +import tarfile +import pytest +import platform +import os +from datetime import datetime +from io import BytesIO, IOBase + +from aiohttp import ClientSession +from injector import inject, singleton + +from backup.util import AsyncHttpGetter +from backup.model import SimulatedSource +from backup.time import Time +from backup.config import CreateOptions + +all_folders = [ + "share", + "ssl", + "addons/local" +] +all_addons = [ + { + "name": "Sexy Robots", + "slug": "sexy_robots", + "description": "The robots you already know, but sexier. 
See what they don't want you to see.", + "version": "0.69", + "size": 1, + "logo": True, + "state": "started" + }, + { + "name": "Particle Accelerator", + "slug": "particla_accel", + "description": "What CAN'T you do with Home Assistant?", + "version": "0.5", + "size": 500.3, + "logo": True, + "state": "started" + }, + { + "name": "Empty Addon", + "slug": "addon_empty", + "description": "Explore the meaning of the universe by contemplating whats missing.", + "version": "0.-1", + "size": 1024 * 1024 * 1024 * 21.2, + "logo": False, + "state": "started" + } +] + + +def skipForWindows(): + if platform.system() == "Windows": + pytest.skip("This test can't be run in windows environments") + + +def skipForRoot(): + if os.getuid() == 0: + pytest.skip("This test can't be run as root") + + +def createBackupTar(slug: str, name: str, date: datetime, padSize: int, included_folders=None, included_addons=None, password=None) -> BytesIO: + backup_type = "full" + haVersion = None + if included_folders is not None: + folders = [] + for folder in included_folders: + if folder == "homeassistant": + haVersion = "0.92.2" + else: + folders.append(folder) + else: + folders = all_folders.copy() + haVersion = "0.92.2" + + if included_addons is not None: + backup_type = "partial" + addons = [] + for addon in all_addons: + if addon['slug'] in included_addons: + addons.append(addon) + else: + addons = all_addons.copy() + + backup_info = { + "slug": slug, + "name": name, + "date": date.isoformat(), + "type": backup_type, + "protected": password is not None, + "homeassistant": haVersion, + "folders": folders, + "addons": addons, + "repositories": [ + "https://github.com/hassio-addons/repository" + ] + } + stream = BytesIO() + tar = tarfile.open(fileobj=stream, mode="w") + add(tar, "backup.json", BytesIO(json.dumps(backup_info).encode())) + add(tar, "padding.dat", getTestStream(padSize)) + tar.close() + stream.seek(0) + stream.size = lambda: len(stream.getbuffer()) + return stream + + +def add(tar, name, stream): + info = tarfile.TarInfo(name) + info.size = len(stream.getbuffer()) + stream.seek(0) + tar.addfile(info, stream) + + +def parseBackupInfo(stream: BytesIO): + with tarfile.open(fileobj=stream, mode="r") as tar: + info = tar.getmember("backup.json") + with tar.extractfile(info) as f: + backup_data = json.load(f) + backup_data['size'] = float( + round(len(stream.getbuffer()) / 1024.0 / 1024.0, 2)) + backup_data['version'] = 'dev' + return backup_data + + +def getTestStream(size: int): + """ + Produces a stream of repeating prime sequences to avoid accidental repetition + """ + arr = bytearray() + while True: + for prime in [4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937]: + for x in range(prime): + if len(arr) < size: + arr.append(x % 255) + else: + break + if len(arr) >= size: + break + if len(arr) >= size: + break + return BytesIO(arr) + + +async def compareStreams(left, right): + await left.setup() + await right.setup() + while True: + from_left = await left.read(1024 * 1024) + from_right = await right.read(1024 * 1024) + if len(from_left.getbuffer()) == 0: + assert len(from_right.getbuffer()) == 0 + break + if from_left.getbuffer() != from_right.getbuffer(): + print("break!") + assert from_left.getbuffer() == from_right.getbuffer() + + +class IntentionalFailure(Exception): + pass + + +class HelperTestSource(SimulatedSource): + def __init__(self, name, is_destination=False): + super().__init__(name, is_destination=is_destination) + 
self.allow_create = True + self.allow_save = True + self.queries = 0 + + def reset(self): + self.saved = [] + self.deleted = [] + self.created = [] + self.queries = 0 + + @property + def query_count(self): + return self.queries + + async def get(self): + self.queries += 1 + return await super().get() + + def assertThat(self, created=0, deleted=0, saved=0, current=0): + assert len(self.saved) == saved + assert len(self.deleted) == deleted + assert len(self.created) == created + assert len(self.current) == current + return self + + def assertUnchanged(self): + self.assertThat(current=len(self.current)) + return self + + async def create(self, options: CreateOptions): + if not self.allow_create: + raise IntentionalFailure() + return await super().create(options) + + async def save(self, backup, bytes: IOBase = None): + if not self.allow_save: + raise IntentionalFailure() + return await super().save(backup, bytes=bytes) + + +@singleton +class Uploader(): + @inject + def __init__(self, host, session: ClientSession, time: Time): + self.host = host + self.session = session + self.time = time + + async def upload(self, data) -> AsyncHttpGetter: + async with await self.session.post(self.host + "/uploadfile", data=data) as resp: + resp.raise_for_status() + source = AsyncHttpGetter(self.host + "/readfile", {}, self.session, time=self.time) + return source diff --git a/hassio-google-drive-backup/tests/test_addon_stopper.py b/hassio-google-drive-backup/tests/test_addon_stopper.py new file mode 100644 index 0000000..6d468d7 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_addon_stopper.py @@ -0,0 +1,355 @@ +import json +import pytest +import os + +from stat import S_IREAD +from backup.config import Config, Setting +from backup.ha import AddonStopper +from backup.exceptions import SupervisorFileSystemError +from .faketime import FakeTime +from dev.simulated_supervisor import SimulatedSupervisor, URL_MATCH_START_ADDON, URL_MATCH_STOP_ADDON, URL_MATCH_ADDON_INFO +from dev.request_interceptor import RequestInterceptor +from .helpers import skipForRoot + + +def getSaved(config: Config): + with open(config.get(Setting.STOP_ADDON_STATE_PATH)) as f: + data = json.load(f) + return set(data["start"]), set(data["watchdog"]) + + +def save(config: Config, to_start, to_watchdog_enable): + with open(config.get(Setting.STOP_ADDON_STATE_PATH), "w") as f: + json.dump({"start": list(to_start), "watchdog": list(to_watchdog_enable)}, f) + +@pytest.mark.asyncio +async def test_no_stop_config(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug = "test_slug_1" + supervisor.installAddon(slug, "Test decription") + addon_stopper.allowRun() + addon_stopper.isBackingUp(False) + assert supervisor.addon(slug)["state"] == "started" + await addon_stopper.stopAddons("ignore") + assert supervisor.addon(slug)["state"] == "started" + await addon_stopper.check() + await addon_stopper.startAddons() + assert supervisor.addon(slug)["state"] == "started" + + +@pytest.mark.asyncio +async def test_load_addons_on_boot(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + slug2 = "test_slug_2" + supervisor.installAddon(slug2, "Test decription") + slug3 = "test_slug_3" + supervisor.installAddon(slug3, "Test decription") + + config.override(Setting.STOP_ADDONS, slug1) + + save(config, {slug3}, {slug2}) + + await addon_stopper.start(False) + assert addon_stopper.must_start == {slug3} + assert 
addon_stopper.must_enable_watchdog == {slug2} + + addon_stopper.allowRun() + assert addon_stopper.must_start == {slug1, slug3} + assert addon_stopper.must_enable_watchdog == {slug2} + + +@pytest.mark.asyncio +async def test_do_nothing_while_backing_up(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + slug2 = "test_slug_2" + supervisor.installAddon(slug2, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1, slug2])) + + await addon_stopper.start(False) + addon_stopper.allowRun() + addon_stopper.isBackingUp(True) + assert addon_stopper.must_start == {slug1, slug2} + + await addon_stopper.check() + + assert not interceptor.urlWasCalled(URL_MATCH_START_ADDON) + assert not interceptor.urlWasCalled(URL_MATCH_STOP_ADDON) + + +@pytest.mark.asyncio +async def test_start_and_stop(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + + await addon_stopper.stopAddons("ignore") + + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.check() + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.startAddons() + assert supervisor.addon(slug1)["state"] == "started" + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_start_and_stop_error(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + + await addon_stopper.stopAddons("ignore") + + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.check() + assert supervisor.addon(slug1)["state"] == "stopped" + supervisor.addon(slug1)["state"] = "error" + assert supervisor.addon(slug1)["state"] == "error" + await addon_stopper.startAddons() + assert supervisor.addon(slug1)["state"] == "started" + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_stop_failure(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, slug1) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + interceptor.setError(URL_MATCH_STOP_ADDON, 400) + + await addon_stopper.stopAddons("ignore") + assert interceptor.urlWasCalled(URL_MATCH_STOP_ADDON) + assert getSaved(config) == (set(), set()) + assert supervisor.addon(slug1)["state"] == "started" + await addon_stopper.check() + await addon_stopper.startAddons() + assert supervisor.addon(slug1)["state"] == "started" + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_start_failure(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor, time: FakeTime) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + 
config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + + await addon_stopper.stopAddons("ignore") + + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + assert supervisor.addon(slug1)["state"] == "stopped" + interceptor.setError(URL_MATCH_START_ADDON, 400) + await addon_stopper.startAddons() + assert getSaved(config) == (set(), set()) + assert interceptor.urlWasCalled(URL_MATCH_START_ADDON) + assert supervisor.addon(slug1)["state"] == "stopped" + + +@pytest.mark.asyncio +async def test_delayed_start(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor, time: FakeTime) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + await addon_stopper.stopAddons("ignore") + assert supervisor.addon(slug1)["state"] == "stopped" + assert getSaved(config) == ({slug1}, set()) + + # start the addon again, which simluates the supervisor's tendency to report an addon as started right after stopping it. + supervisor.addon(slug1)["state"] = "started" + await addon_stopper.check() + await addon_stopper.startAddons() + assert getSaved(config) == ({slug1}, set()) + + time.advance(seconds=30) + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + + time.advance(seconds=30) + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + + time.advance(seconds=30) + supervisor.addon(slug1)["state"] = "stopped" + await addon_stopper.check() + assert supervisor.addon(slug1)["state"] == "started" + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_delayed_start_give_up(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor, time: FakeTime) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + await addon_stopper.stopAddons("ignore") + assert supervisor.addon(slug1)["state"] == "stopped" + assert getSaved(config) == ({slug1}, set()) + + # start the addon again, which simluates the supervisor's tendency to report an addon as started right after stopping it. + supervisor.addon(slug1)["state"] = "started" + await addon_stopper.check() + await addon_stopper.startAddons() + assert getSaved(config) == ({slug1}, set()) + + time.advance(seconds=30) + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + + time.advance(seconds=30) + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + + # Should clear saved state after this, since it stops checking after 2 minutes. 
+ time.advance(seconds=100) + await addon_stopper.check() + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_disable_watchdog(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + supervisor.addon(slug1)["watchdog"] = True + + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + + await addon_stopper.stopAddons("ignore") + + assert supervisor.addon(slug1)["state"] == "stopped" + assert supervisor.addon(slug1)["watchdog"] is False + await addon_stopper.check() + assert supervisor.addon(slug1)["state"] == "stopped" + assert supervisor.addon(slug1)["watchdog"] is False + await addon_stopper.startAddons() + assert supervisor.addon(slug1)["state"] == "started" + assert supervisor.addon(slug1)["watchdog"] is True + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_enable_watchdog_on_reboot(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, time: FakeTime) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + supervisor.addon(slug1)["watchdog"] = False + save(config, set(), {slug1}) + + await addon_stopper.start(False) + addon_stopper.allowRun() + assert addon_stopper.must_enable_watchdog == {slug1} + + time.advance(minutes=5) + await addon_stopper.check() + assert supervisor.addon(slug1)["watchdog"] is True + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_enable_watchdog_waits_for_start(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + supervisor.addon(slug1)["watchdog"] = False + save(config, {slug1}, {slug1}) + + await addon_stopper.start(False) + addon_stopper.allowRun() + assert addon_stopper.must_enable_watchdog == {slug1} + + await addon_stopper.check() + assert getSaved(config) == ({slug1}, {slug1}) + + supervisor.addon(slug1)["state"] = "stopped" + await addon_stopper.check() + assert supervisor.addon(slug1)["state"] == "started" + assert supervisor.addon(slug1)["watchdog"] is True + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_get_info_failure_on_stop(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor) -> None: + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, slug1) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + interceptor.setError(URL_MATCH_ADDON_INFO, 400) + + await addon_stopper.stopAddons("ignore") + assert interceptor.urlWasCalled(URL_MATCH_ADDON_INFO) + assert getSaved(config) == (set(), set()) + assert supervisor.addon(slug1)["state"] == "started" + await addon_stopper.check() + await addon_stopper.startAddons() + assert supervisor.addon(slug1)["state"] == "started" + assert getSaved(config) == (set(), set()) + + +@pytest.mark.asyncio +async def test_get_info_failure_on_start(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor) -> None: + slug1 = "test_slug_1" + 
supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + + await addon_stopper.stopAddons("ignore") + + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + assert supervisor.addon(slug1)["state"] == "stopped" + interceptor.setError(URL_MATCH_ADDON_INFO, 400) + await addon_stopper.startAddons() + assert getSaved(config) == (set(), set()) + assert interceptor.urlWasCalled(URL_MATCH_ADDON_INFO) + assert supervisor.addon(slug1)["state"] == "stopped" + + +@pytest.mark.asyncio +async def test_read_only_fs(supervisor: SimulatedSupervisor, addon_stopper: AddonStopper, config: Config, interceptor: RequestInterceptor) -> None: + # This test can't be run as the root user, since no file is read-only to root. + skipForRoot() + + # Stop an addon + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test decription") + config.override(Setting.STOP_ADDONS, ",".join([slug1])) + addon_stopper.allowRun() + addon_stopper.must_start = set() + assert supervisor.addon(slug1)["state"] == "started" + await addon_stopper.stopAddons("ignore") + assert supervisor.addon(slug1)["state"] == "stopped" + await addon_stopper.check() + assert getSaved(config) == ({slug1}, set()) + + # make the state file unmodifiable + os.chmod(config.get(Setting.STOP_ADDON_STATE_PATH), S_IREAD) + + # verify we raise a known error when trying to save. + with pytest.raises(SupervisorFileSystemError): + await addon_stopper.startAddons() diff --git a/hassio-google-drive-backup/tests/test_asynchttpgetter.py b/hassio-google-drive-backup/tests/test_asynchttpgetter.py new file mode 100644 index 0000000..dd8cd40 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_asynchttpgetter.py @@ -0,0 +1,117 @@ +from datetime import timedelta +import pytest +from aiohttp import ClientSession +from aiohttp.web import StreamResponse +from .conftest import Uploader +from backup.exceptions import LogicError +from dev.request_interceptor import RequestInterceptor +from .conftest import FakeTime + + +@pytest.mark.asyncio +async def test_basics(uploader: Uploader, server, session: ClientSession): + getter = await uploader.upload(bytearray([0, 1, 2, 3, 4, 5, 6, 7])) + await getter.setup() + assert (await getter.read(1)).read() == bytearray([0]) + assert (await getter.read(2)).read() == bytearray([1, 2]) + assert (await getter.read(3)).read() == bytearray([3, 4, 5]) + assert (await getter.read(3)).read() == bytearray([6, 7]) + assert (await getter.read(3)).read() == bytearray([]) + assert (await getter.read(3)).read() == bytearray([]) + + getter.position(2) + assert (await getter.read(2)).read() == bytearray([2, 3]) + assert (await getter.read(3)).read() == bytearray([4, 5, 6]) + + getter.position(2) + assert (await getter.read(2)).read() == bytearray([2, 3]) + + getter.position(2) + assert (await getter.read(2)).read() == bytearray([2, 3]) + assert (await getter.read(100)).read() == bytearray([4, 5, 6, 7]) + assert (await getter.read(3)).read() == bytearray([]) + assert (await getter.read(3)).read() == bytearray([]) + +@pytest.mark.asyncio +async def test_position_error(uploader: Uploader, server): + getter = await uploader.upload(bytearray([0, 1, 2, 3, 4, 5, 6, 7])) + await getter.setup() + assert (await getter.read(1)).read() == bytearray([0]) + + with pytest.raises(LogicError): + await getter.setup() + + 
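The assertions above pin down the contract of the uploader's AsyncHttpGetter: read(n) returns a buffer of at most n bytes from the current offset, position(p) rewinds to an absolute offset, and an empty buffer marks the end of the payload. The stand-in below is only a minimal in-memory sketch of that same contract; the class and its names are hypothetical illustrations, not the add-on's implementation.

from io import BytesIO


class InMemoryGetter:
    """Hypothetical stand-in mirroring the seekable-read contract tested above."""

    def __init__(self, data: bytes):
        self._data = data
        self._pos = 0

    def position(self, pos: int) -> None:
        # Later reads resume from this absolute offset.
        self._pos = pos

    async def read(self, count: int) -> BytesIO:
        # Return up to `count` bytes from the current offset; an empty buffer
        # signals that the end of the payload has been reached.
        chunk = self._data[self._pos:self._pos + count]
        self._pos += len(chunk)
        return BytesIO(chunk)

    def progress(self) -> int:
        # Percentage of the payload consumed so far.
        if not self._data:
            return 100
        return int(self._pos / len(self._data) * 100)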
+@pytest.mark.asyncio +async def test_no_content_length(uploader: Uploader, server, interceptor: RequestInterceptor): + getter = await uploader.upload(bytearray([0, 1, 2, 3, 4, 5, 6, 7])) + intercept = interceptor.setError("/readfile") + intercept.addResponse(StreamResponse(headers={})) + with pytest.raises(LogicError) as e: + await getter.setup() + assert e.value.message() == "Content size must be provided if the webserver doesn't provide it" + + +@pytest.mark.asyncio +async def test_no_setup_error(uploader: Uploader, server): + getter = await uploader.upload(bytearray([0, 1, 2, 3, 4, 5, 6, 7])) + with pytest.raises(LogicError): + await getter.read(1) + + +@pytest.mark.asyncio +async def test_progress(uploader: Uploader, server): + getter = await uploader.upload(bytearray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])) + await getter.setup() + assert getter.progress() == 0 + assert (await getter.read(1)).read() == bytearray([0]) + assert getter.progress() == 10 + assert (await getter.read(2)).read() == bytearray([1, 2]) + assert getter.progress() == 30 + assert (await getter.read(7)).read() == bytearray([3, 4, 5, 6, 7, 8, 9]) + assert getter.progress() == 100 + assert str.format("{0}", getter) == "100" + + +@pytest.mark.asyncio +async def test_speed(uploader: Uploader, server, time: FakeTime): + getter = await uploader.upload(bytearray(x for x in range(0, 100))) + assert getter.startTime() == time.now() + await getter.setup() + assert getter.speed(period=timedelta(seconds=10)) is None + time.advance(seconds=1) + await getter.read(1) + assert getter.speed(period=timedelta(seconds=10)) == 1 + + time.advance(seconds=1) + await getter.read(1) + assert getter.speed(period=timedelta(seconds=10)) == 1 + assert getter.speed(period=timedelta(seconds=1)) == 1 + assert getter.speed(period=timedelta(seconds=1.5)) == 1 + assert getter.speed(period=timedelta(seconds=0.5)) == 1 + + time.advance(seconds=1) + assert getter.speed(period=timedelta(seconds=10)) == 1 + assert getter.speed(period=timedelta(seconds=1)) == 1 + assert getter.speed(period=timedelta(seconds=1.5)) == 1 + time.advance(seconds=0.5) + assert getter.speed(period=timedelta(seconds=1)) == 0.5 + time.advance(seconds=0.5) + assert getter.speed(period=timedelta(seconds=1)) == 0 + + # Now 4 seconds have passed, and we've transferred 4 bytes + await getter.read(2) + assert getter.speed(period=timedelta(seconds=4)) == 1 + assert getter.speed(period=timedelta(seconds=10)) == 1 + + time.advance(seconds=10) + await getter.read(10) + assert getter.speed(period=timedelta(seconds=10)) == 1 + + time.advance(seconds=10) + await getter.read(20) + assert getter.speed(period=timedelta(seconds=10)) == 2 + time.advance(seconds=10) + assert getter.speed(period=timedelta(seconds=10)) == 2 + time.advance(seconds=5) + assert getter.speed(period=timedelta(seconds=10)) == 1 diff --git a/hassio-google-drive-backup/tests/test_authcodequery.py b/hassio-google-drive-backup/tests/test_authcodequery.py new file mode 100644 index 0000000..f04eda7 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_authcodequery.py @@ -0,0 +1,104 @@ +import pytest + +from backup.drive import AuthCodeQuery +from backup.exceptions import LogicError, GoogleCredGenerateError, ProtocolError +from dev.request_interceptor import RequestInterceptor +from dev.simulated_google import URL_MATCH_TOKEN, SimulatedGoogle, URL_MATCH_DEVICE_CODE +from aiohttp.web_response import json_response +from backup.config import Config, Setting + + +@pytest.mark.asyncio +async def test_invalid_sequence(device_code: 
AuthCodeQuery, interceptor: RequestInterceptor) -> None: + with pytest.raises(LogicError): + await device_code.waitForPermission() + + +@pytest.mark.asyncio +async def test_success(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + google._device_code_accepted = True + assert await device_code.waitForPermission() is not None + + +@pytest.mark.asyncio +async def test_google_failure_on_request(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + interceptor.setError(URL_MATCH_DEVICE_CODE, 458) + with pytest.raises(GoogleCredGenerateError) as error: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + assert error.value.message() == "Google responded with error status HTTP 458. Please verify your credentials are set up correctly." + + +@pytest.mark.asyncio +async def test_failure_on_http_unknown(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + interceptor.setError(URL_MATCH_TOKEN, 500) + + with pytest.raises(GoogleCredGenerateError) as error: + await device_code.waitForPermission() + assert error.value.message() == "Failed unexpectedly while trying to reach Google. See the add-on logs for details." + + +@pytest.mark.asyncio +async def test_success_after_wait(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + match = interceptor.setError(URL_MATCH_TOKEN) + match.addResponse(json_response(data={'error': "slow_down"}, status=403)) + + google._device_code_accepted = True + await device_code.waitForPermission() + + assert match.callCount() == 2 + + +@pytest.mark.asyncio +async def test_success_after_428(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + match = interceptor.setError(URL_MATCH_TOKEN) + match.addResponse(json_response(data={}, status=428)) + match.addResponse(json_response(data={}, status=428)) + match.addResponse(json_response(data={}, status=428)) + match.addResponse(json_response(data={}, status=428)) + match.addResponse(json_response(data={}, status=428)) + + google._device_code_accepted = True + await device_code.waitForPermission() + + assert match.callCount() == 6 + + +@pytest.mark.asyncio +async def test_permission_failure(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + match = interceptor.setError(URL_MATCH_TOKEN) + match.addResponse(json_response(data={}, status=403)) + + google._device_code_accepted = False + with pytest.raises(GoogleCredGenerateError) as error: + await device_code.waitForPermission() + assert error.value.message() == "Google refused the request to connect your account, either because you rejected it or they were set up incorrectly." 
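The tests in this file exercise the OAuth device-authorization flow: requestCredentials obtains a device code, and waitForPermission then polls the token endpoint, retrying on slow_down and "keep waiting" responses until the user approves or rejects the request. The loop below is a generic sketch of that polling pattern under standard device-flow assumptions (field names, retry statuses, and the function name are illustrative); it is not the add-on's AuthCodeQuery implementation.

import asyncio

import aiohttp


async def poll_for_token(session: aiohttp.ClientSession, token_url: str,
                         client_id: str, client_secret: str, device_code: str,
                         interval: float = 5.0, max_attempts: int = 60) -> dict:
    # Poll the token endpoint until the user approves or rejects the device code.
    data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "device_code": device_code,
        "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
    }
    for _ in range(max_attempts):
        async with session.post(token_url, data=data) as resp:
            status = resp.status
            body = await resp.json()
        if status == 200:
            # Approved: the body carries the access and refresh tokens.
            return body
        if status == 428 or body.get("error") in ("authorization_pending", "slow_down"):
            # Not approved yet; wait (longer on slow_down) and poll again.
            await asyncio.sleep(interval * (2 if body.get("error") == "slow_down" else 1))
            continue
        # access_denied or any other error: the user rejected the request or
        # the client credentials are misconfigured.
        raise RuntimeError("Device authorization failed: {}".format(body))
    raise TimeoutError("Gave up waiting for device-code approval")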
+ + +@pytest.mark.asyncio +async def test_json_parse_failure(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + interceptor.setError(URL_MATCH_TOKEN, 200) + + with pytest.raises(ProtocolError): + await device_code.waitForPermission() + + +@pytest.mark.asyncio +async def test_repeated_failure(device_code: AuthCodeQuery, interceptor: RequestInterceptor, google: SimulatedGoogle, server, config: Config) -> None: + await device_code.requestCredentials(google._custom_drive_client_id, google._custom_drive_client_secret) + + config.override(Setting.DRIVE_TOKEN_URL, "http://go.nowhere") + with pytest.raises(GoogleCredGenerateError) as error: + await device_code.waitForPermission() + error.value.message() == "Failed unexpectedly too many times while attempting to reach Google. See the logs for details." diff --git a/hassio-google-drive-backup/tests/test_backoff.py b/hassio-google-drive-backup/tests/test_backoff.py new file mode 100644 index 0000000..d643985 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_backoff.py @@ -0,0 +1,104 @@ +from pytest import fixture, raises + +from backup.util import Backoff + + +@fixture +def error(): + return Exception() + + +def test_defaults(error): + backoff = Backoff() + assert backoff.backoff(error) == 2 + assert backoff.backoff(error) == 4 + assert backoff.backoff(error) == 8 + assert backoff.backoff(error) == 16 + assert backoff.backoff(error) == 32 + assert backoff.backoff(error) == 64 + assert backoff.backoff(error) == 128 + assert backoff.backoff(error) == 256 + assert backoff.backoff(error) == 512 + assert backoff.backoff(error) == 1024 + assert backoff.backoff(error) == 2048 + + for x in range(10000): + assert backoff.backoff(error) == 3600 + + +def test_max(error): + backoff = Backoff(max=500) + assert backoff.backoff(error) == 2 + assert backoff.backoff(error) == 4 + assert backoff.backoff(error) == 8 + assert backoff.backoff(error) == 16 + assert backoff.backoff(error) == 32 + assert backoff.backoff(error) == 64 + assert backoff.backoff(error) == 128 + assert backoff.backoff(error) == 256 + + for x in range(10000): + assert backoff.backoff(error) == 500 + + +def test_initial(error): + backoff = Backoff(initial=0) + assert backoff.backoff(error) == 0 + assert backoff.backoff(error) == 2 + assert backoff.backoff(error) == 4 + assert backoff.backoff(error) == 8 + assert backoff.backoff(error) == 16 + assert backoff.backoff(error) == 32 + assert backoff.backoff(error) == 64 + assert backoff.backoff(error) == 128 + assert backoff.backoff(error) == 256 + assert backoff.backoff(error) == 512 + assert backoff.backoff(error) == 1024 + assert backoff.backoff(error) == 2048 + + for x in range(10000): + assert backoff.backoff(error) == 3600 + + +def test_attempts(error): + backoff = Backoff(attempts=5) + assert backoff.backoff(error) == 2 + assert backoff.backoff(error) == 4 + assert backoff.backoff(error) == 8 + assert backoff.backoff(error) == 16 + assert backoff.backoff(error) == 32 + + for x in range(5): + with raises(type(error)): + backoff.backoff(error) + + +def test_start(error): + backoff = Backoff(base=10) + assert backoff.backoff(error) == 10 + assert backoff.backoff(error) == 20 + assert backoff.backoff(error) == 40 + assert backoff.backoff(error) == 80 + + +def test_realistic(error): + backoff = Backoff(base=5, initial=0, exp=1.5, attempts=5) + assert backoff.backoff(error) == 0 + 
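+ # With initial=0 the first attempt costs nothing, then delays grow from base=5 by a factor of exp=1.5 until the attempt limit is exhausted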
assert backoff.backoff(error) == 5 + assert backoff.backoff(error) == 5 * 1.5 + assert backoff.backoff(error) == 5 * (1.5**2) + assert backoff.backoff(error) == 5 * (1.5**3) + for x in range(5): + with raises(type(error)): + backoff.backoff(error) + + +def test_maxOut(error): + backoff = Backoff(base=10, max=100) + assert backoff.backoff(error) == 10 + assert backoff.backoff(error) == 20 + backoff.maxOut() + assert backoff.backoff(error) == 100 + assert backoff.backoff(error) == 100 + backoff.reset() + assert backoff.backoff(error) == 10 diff --git a/hassio-google-drive-backup/tests/test_bytesizeasstring.py b/hassio-google-drive-backup/tests/test_bytesizeasstring.py new file mode 100644 index 0000000..affafe6 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_bytesizeasstring.py @@ -0,0 +1,129 @@ +from backup.config import BytesizeAsStringValidator +from backup.exceptions import InvalidConfigurationValue +import pytest + + +def test_minimum(): + parser = BytesizeAsStringValidator("test", minimum=10) + assert parser.validate("11 bytes") == 11 + assert parser.validate(11) == 11 + with pytest.raises(InvalidConfigurationValue): + parser.validate("9 bytes") + + +def test_maximum(): + parser = BytesizeAsStringValidator("test", maximum=10) + assert parser.validate("9 bytes") == 9 + assert parser.validate(9) == 9 + with pytest.raises(InvalidConfigurationValue): + parser.validate("11 bytes") + assert parser.formatForUi(9) == "9 B" + + +def test_ui_format(): + parser = BytesizeAsStringValidator("test") + assert parser.formatForUi(25) == "25 B" + assert parser.formatForUi(25 * 1024) == "25 KB" + assert parser.formatForUi(25 * 1024 * 1024) == "25 MB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024) == "25 GB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024 * 1024) == "25 TB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024 * 1024 * 1024) == "25 PB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) == "25 EB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) == "25 ZB" + assert parser.formatForUi(25 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) == "25 YB" + assert parser.formatForUi(2000 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) == "2000 YB" + + assert parser.formatForUi(2.5 * 1024 * 1024) == "2.5 MB" + assert parser.formatForUi(2.534525 * 1024 * 1024) == "2.534525 MB" + assert parser.formatForUi(98743.1234 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) == "98743.1234 YB" + + assert parser.formatForUi(None) == "" + assert parser.formatForUi("") == "" + assert parser.formatForUi(0) == "" + + +def test_numbers(): + parser = BytesizeAsStringValidator("test") + parser.validate(1.2) == 1 + parser.validate(1024.9) == 1024 + parser.validate(1024) == 1024 + + +def test_parsing(): + parser = BytesizeAsStringValidator("test") + assert parser.validate("1 B") == 1 + assert parser.validate("1 b") == 1 + assert parser.validate("1 bytes") == 1 + assert parser.validate("1 byte") == 1 + assert parser.validate("") is None + assert parser.validate(" ") is None + assert parser.validate(" 5. 
bytes ") == 5 + assert parser.validate("10b") == 10 + + assert parser.validate("1 KB") == 1024 + assert parser.validate("1 k") == 1024 + assert parser.validate("1 kb") == 1024 + assert parser.validate("1 kilobytes") == 1024 + assert parser.validate("1 kibibytes") == 1024 + assert parser.validate("1 kibi") == 1024 + assert parser.validate("2.5 KB") == 1024 * 2.5 + assert parser.validate("10k") == 10 * 1024 + + assert parser.validate("1 MB") == 1024 * 1024 + assert parser.validate("1 m") == 1024 * 1024 + assert parser.validate("1 mb") == 1024 * 1024 + assert parser.validate("1 megs") == 1024 * 1024 + assert parser.validate("1 mega") == 1024 * 1024 + assert parser.validate("1 megabytes") == 1024 * 1024 + assert parser.validate("1 mebibytes") == 1024 * 1024 + assert parser.validate("10m") == 10 * 1024 * 1024 + + assert parser.validate("1 GB") == 1024 * 1024 * 1024 + assert parser.validate("1 g") == 1024 * 1024 * 1024 + assert parser.validate("1 gb") == 1024 * 1024 * 1024 + assert parser.validate("1 gigs") == 1024 * 1024 * 1024 + assert parser.validate("1 gig") == 1024 * 1024 * 1024 + assert parser.validate("1 giga") == 1024 * 1024 * 1024 + assert parser.validate("1 gigabytes") == 1024 * 1024 * 1024 + assert parser.validate("1 gibibytes") == 1024 * 1024 * 1024 + assert parser.validate("10G") == 10 * 1024 * 1024 * 1024 + + assert parser.validate("1 TB") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 t") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 tb") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 tera") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 tebi") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 terabytes") == 1024 * 1024 * 1024 * 1024 + assert parser.validate("10T") == 10 * 1024 * 1024 * 1024 * 1024 + + assert parser.validate("1 PB") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 p") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 pb") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 peta") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 pebi") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 petabytes") == 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("10P") == 10 * 1024 * 1024 * 1024 * 1024 * 1024 + + assert parser.validate("1 EB") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 e") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 eb") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 exa") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 exbi") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 exabytes") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("10E") == 10 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + + assert parser.validate("1 ZB") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 z") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 zb") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 zetta") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 zebi") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 zettabytes") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("10Z") == 10 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + + assert parser.validate("1 YB") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 y") 
== 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 yb") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 yotta") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 yobi") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("1 yottabytes") == 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + assert parser.validate("10Y") == 10 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 diff --git a/hassio-google-drive-backup/tests/test_config.py b/hassio-google-drive-backup/tests/test_config.py new file mode 100644 index 0000000..840f114 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_config.py @@ -0,0 +1,307 @@ +import os +from pytest import raises + +from backup.model import GenConfig +from backup.config import Config, Setting +from backup.exceptions import InvalidConfigurationValue + + +def test_validate_empty(): + config = Config() + assert config.validate({}) == defaultAnd() + + +def test_validate_int(): + assert Config().validate({'max_backups_in_ha': 5}) == defaultAnd( + {Setting.MAX_BACKUPS_IN_HA: 5}) + assert Config().validate({'max_backups_in_ha': 5.0}) == defaultAnd( + {Setting.MAX_BACKUPS_IN_HA: 5}) + assert Config().validate({'max_backups_in_ha': "5"}) == defaultAnd( + {Setting.MAX_BACKUPS_IN_HA: 5}) + + with raises(InvalidConfigurationValue): + Config().validate({'max_backups_in_ha': -2}) + + +def test_validate_float(): + setting = Setting.DAYS_BETWEEN_BACKUPS + assert Config().validate({setting: 5}) == defaultAnd({setting: 5}) + assert Config().validate({setting.key(): 5}) == defaultAnd({setting: 5}) + assert Config().validate({setting: 5.0}) == defaultAnd({setting: 5}) + assert Config().validate({setting: "5"}) == defaultAnd({setting: 5}) + + with raises(InvalidConfigurationValue): + Config().validate({'days_between_backups': -1}) + + +def test_validate_bool(): + setting = Setting.SEND_ERROR_REPORTS + assert Config().validate({setting: True}) == defaultAnd({setting: True}) + assert Config().validate({setting: False}) == defaultAnd({setting: False}) + assert Config().validate({setting: "true"}) == defaultAnd({setting: True}) + assert Config().validate({setting: "false"}) == defaultAnd({setting: False}) + assert Config().validate({setting: "1"}) == defaultAnd({setting: True}) + assert Config().validate({setting: "0"}) == defaultAnd({setting: False}) + assert Config().validate({setting: "yes"}) == defaultAnd({setting: True}) + assert Config().validate({setting: "no"}) == defaultAnd({setting: False}) + assert Config().validate({setting: "on"}) == defaultAnd({setting: True}) + assert Config().validate({setting: "off"}) == defaultAnd({setting: False}) + + +def test_validate_string(): + assert Config().validate({Setting.BACKUP_NAME: True}) == defaultAnd({Setting.BACKUP_NAME: "True"}) + assert Config().validate({Setting.BACKUP_NAME: False}) == defaultAnd({Setting.BACKUP_NAME: "False"}) + assert Config().validate({Setting.BACKUP_NAME: "true"}) == defaultAnd({Setting.BACKUP_NAME: "true"}) + assert Config().validate({Setting.BACKUP_NAME: "false"}) == defaultAnd({Setting.BACKUP_NAME: "false"}) + assert Config().validate({Setting.BACKUP_NAME: "1"}) == defaultAnd({Setting.BACKUP_NAME: "1"}) + assert Config().validate({Setting.BACKUP_NAME: "0"}) == defaultAnd({Setting.BACKUP_NAME: "0"}) + assert Config().validate({Setting.BACKUP_NAME: "yes"}) == defaultAnd({Setting.BACKUP_NAME: "yes"}) + assert Config().validate({Setting.BACKUP_NAME: "no"}) == 
defaultAnd({Setting.BACKUP_NAME: "no"}) + + +def test_validate_url(): + assert Config().validate({Setting.SUPERVISOR_URL: True}) == defaultAnd( + {Setting.SUPERVISOR_URL: "True"}) + assert Config().validate({Setting.SUPERVISOR_URL: False}) == defaultAnd( + {Setting.SUPERVISOR_URL: "False"}) + assert Config().validate({Setting.SUPERVISOR_URL: "true"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "true"}) + assert Config().validate({Setting.SUPERVISOR_URL: "false"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "false"}) + assert Config().validate({Setting.SUPERVISOR_URL: "1"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "1"}) + assert Config().validate({Setting.SUPERVISOR_URL: "0"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "0"}) + assert Config().validate({Setting.SUPERVISOR_URL: "yes"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "yes"}) + assert Config().validate({Setting.SUPERVISOR_URL: "no"}) == defaultAnd( + {Setting.SUPERVISOR_URL: "no"}) + + +def test_validate_regex(): + assert Config().validate({Setting.DRIVE_IPV4: "192.168.1.1"}) == defaultAnd( + {Setting.DRIVE_IPV4: "192.168.1.1"}) + with raises(InvalidConfigurationValue): + Config().validate({Setting.DRIVE_IPV4: -1}) + with raises(InvalidConfigurationValue): + Config().validate({Setting.DRIVE_IPV4: "192.168.1"}) + + +def test_remove_ssl(): + assert Config().validate({Setting.USE_SSL: True}) == defaultAnd({Setting.USE_SSL: True}) + assert Config().validate({Setting.USE_SSL: False}) == defaultAnd() + assert Config().validate({ + Setting.USE_SSL: False, + Setting.CERTFILE: "removed", + Setting.KEYFILE: 'removed' + }) == defaultAnd() + assert Config().validate({ + Setting.USE_SSL: True, + Setting.CERTFILE: "kept", + Setting.KEYFILE: 'kept' + }) == defaultAnd({ + Setting.USE_SSL: True, + Setting.CERTFILE: "kept", + Setting.KEYFILE: 'kept' + }) + + +def test_send_error_reports(): + assert Config().validate({Setting.SEND_ERROR_REPORTS: False}) == defaultAnd( + {Setting.SEND_ERROR_REPORTS: False}) + assert Config().validate({Setting.SEND_ERROR_REPORTS: True}) == defaultAnd( + {Setting.SEND_ERROR_REPORTS: True}) + assert Config().validate( + {Setting.SEND_ERROR_REPORTS: None}) == defaultAnd() + + +def test_unrecognized_values_filter(): + assert Config().validate({'blah': "bloo"}) == defaultAnd() + + +def test_removes_defaults(): + assert Config().validate( + {Setting.BACKUP_TIME_OF_DAY: ""}) == defaultAnd() + + +def defaultAnd(config={}): + ret = { + Setting.DAYS_BETWEEN_BACKUPS: 3, + Setting.MAX_BACKUPS_IN_HA: 4, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE: 4 + } + ret.update(config) + return (ret, False) + + +def test_GenerationalConfig() -> None: + assert Config().getGenerationalConfig() is None + + assert Config().override(Setting.GENERATIONAL_DAYS, 5).getGenerationalConfig() == GenConfig(days=5) + assert Config().override(Setting.GENERATIONAL_WEEKS, 3).getGenerationalConfig() == GenConfig(days=1, weeks=3) + assert Config().override(Setting.GENERATIONAL_MONTHS, 3).getGenerationalConfig() == GenConfig(days=1, months=3) + assert Config().override(Setting.GENERATIONAL_YEARS, 3).getGenerationalConfig() == GenConfig(days=1, years=3) + assert Config().override(Setting.GENERATIONAL_DELETE_EARLY, True).override( + Setting.GENERATIONAL_DAYS, 2).getGenerationalConfig() == GenConfig(days=2, aggressive=True) + assert Config().override(Setting.GENERATIONAL_DAYS, 1).override( + Setting.GENERATIONAL_DAY_OF_YEAR, 3).getGenerationalConfig() == GenConfig(days=1, day_of_year=3) + assert Config().override(Setting.GENERATIONAL_DAYS, 1).override( + 
Setting.GENERATIONAL_DAY_OF_MONTH, 3).getGenerationalConfig() == GenConfig(days=1, day_of_month=3) + assert Config().override(Setting.GENERATIONAL_DAYS, 1).override( + Setting.GENERATIONAL_DAY_OF_WEEK, "tue").getGenerationalConfig() == GenConfig(days=1, day_of_week="tue") + + assert Config().override(Setting.GENERATIONAL_DAY_OF_MONTH, 3).override(Setting.GENERATIONAL_DAY_OF_WEEK, "tue").override(Setting.GENERATIONAL_DAY_OF_YEAR, "4").getGenerationalConfig() is None + + +def test_from_environment(): + assert Config.fromEnvironment().get(Setting.PORT) != 1000 + + os.environ["PORT"] = str(1000) + assert Config.fromEnvironment().get(Setting.PORT) == 1000 + + del os.environ["PORT"] + assert Config.fromEnvironment().get(Setting.PORT) != 1000 + + os.environ["port"] = str(1000) + assert Config.fromEnvironment().get(Setting.PORT) == 1000 + + +def test_config_upgrade(): + # Test specifying one value + config = Config() + config.update({Setting.DEPRECTAED_BACKUP_TIME_OF_DAY: "00:01"}) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.BACKUP_TIME_OF_DAY: "00:01", + Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + # Test specifying multiple values + config = Config() + config.update({ + Setting.DEPRECTAED_MAX_BACKUPS_IN_GOOGLE_DRIVE: 21, + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: 20, + Setting.DEPRECATED_BACKUP_PASSWORD: "boop" + }) + assert config.getAllConfig() == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: 20, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE: 21, + Setting.BACKUP_PASSWORD: "boop", + Setting.CALL_BACKUP_SNAPSHOT: True + })[0] + assert config.mustSaveUpgradeChanges() + + # test specifying value that don't get upgraded + config = Config() + config.update({Setting.EXCLUDE_ADDONS: "test"}) + assert config.getAllConfig() == defaultAnd({ + Setting.EXCLUDE_ADDONS: "test" + })[0] + assert not config.mustSaveUpgradeChanges() + + # Test specifying both + config = Config() + config.update({ + Setting.DEPRECTAED_BACKUP_TIME_OF_DAY: "00:01", + Setting.EXCLUDE_ADDONS: "test" + }) + assert config.getAllConfig() == defaultAnd({ + Setting.BACKUP_TIME_OF_DAY: "00:01", + Setting.EXCLUDE_ADDONS: "test", + Setting.CALL_BACKUP_SNAPSHOT: True + })[0] + assert config.mustSaveUpgradeChanges() + + +def test_overwrite_on_upgrade(): + config = Config() + config.update({ + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: 5, + Setting.MAX_BACKUPS_IN_HA: 6 + }) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: 6, + Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + config = Config() + config.update({ + Setting.MAX_BACKUPS_IN_HA: 6, + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: 5 + }) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: 6, + Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + config = Config() + config.update({ + Setting.MAX_BACKUPS_IN_HA: 6, + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: 4 + }) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: 6, + Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + +def test_overwrite_on_upgrade_default_value(): + # Test specifying one value + config = Config() + config.update({ + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default() + 1, + Setting.MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default() + }) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default() + 1, + 
Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + config = Config() + config.update({ + Setting.MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default(), + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default() + 1 + }) + assert (config.getAllConfig(), False) == defaultAnd({ + Setting.MAX_BACKUPS_IN_HA: Setting.MAX_BACKUPS_IN_HA.default() + 1, + Setting.CALL_BACKUP_SNAPSHOT: True + }) + assert config.mustSaveUpgradeChanges() + + +def test_empty_colors(): + # Test specifying one value + config = Config() + config.update({Setting.BACKGROUND_COLOR: "", Setting.ACCENT_COLOR: ""}) + assert config.get(Setting.BACKGROUND_COLOR) == Setting.BACKGROUND_COLOR.default() + assert config.get(Setting.ACCENT_COLOR) == Setting.ACCENT_COLOR.default() + + +def test_ignore_upgrades_default(): + # Test specifying one value + config = Config() + assert config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + config.useLegacyIgnoredBehavior(True) + assert not config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + config.useLegacyIgnoredBehavior(False) + assert config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + +def getGenConfig(update): + base = { + "days": 1, + "weeks": 0, + "months": 0, + "years": 0, + "day_of_week": "mon", + "day_of_year": 1, + "day_of_month": 1 + } + base.update(update) + return base diff --git a/hassio-google-drive-backup/tests/test_coordinator.py b/hassio-google-drive-backup/tests/test_coordinator.py new file mode 100644 index 0000000..0dca529 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_coordinator.py @@ -0,0 +1,552 @@ +import asyncio +from datetime import timedelta + +import pytest +from pytest import raises + +from backup.config import Config, Setting, CreateOptions +from backup.exceptions import LogicError, LowSpaceError, NoBackup, PleaseWait, UserCancelledError +from backup.util import GlobalInfo, DataCache +from backup.model import Coordinator, Model, Backup, DestinationPrecache +from .conftest import FsFaker +from .faketime import FakeTime +from .helpers import HelperTestSource, skipForWindows + + +@pytest.fixture +def source(): + return HelperTestSource("Source") + + +@pytest.fixture +def dest(): + return HelperTestSource("Dest") + + +@pytest.fixture +def simple_config(): + config = Config() + config.override(Setting.BACKUP_STARTUP_DELAY_MINUTES, 0) + return config + + +@pytest.fixture +def model(source, dest, time, simple_config, global_info, estimator, data_cache: DataCache): + return Model(simple_config, time, source, dest, global_info, estimator, data_cache) + + +@pytest.fixture +def coord(model, time, simple_config, global_info, estimator): + return Coordinator(model, time, simple_config, global_info, estimator) + + +@pytest.fixture +def precache(coord, time, dest, simple_config): + return DestinationPrecache(coord, time, dest, simple_config) + + +@pytest.mark.asyncio +async def test_enabled(coord: Coordinator, dest, time): + dest.setEnabled(True) + assert coord.enabled() + dest.setEnabled(False) + assert not coord.enabled() + + +@pytest.mark.asyncio +async def test_sync(coord: Coordinator, global_info: GlobalInfo, time: FakeTime): + await coord.sync() + assert global_info._syncs == 1 + assert global_info._successes == 1 + assert global_info._last_sync_start == time.now() + assert len(coord.backups()) == 1 + + +@pytest.mark.asyncio +async def test_blocking(coord: Coordinator): + # This just makes sure the wait thread is blocked while we do stuff + event_start = asyncio.Event() + event_end = asyncio.Event() + 
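+ # Hold the coordinator's soft lock in a background task; every other operation should raise PleaseWait until event_end is set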
asyncio.create_task(coord._withSoftLock(lambda: sleepHelper(event_start, event_end))) + await event_start.wait() + + # Make sure PleaseWait gets called on these + with raises(PleaseWait): + await coord.delete(None, None) + with raises(PleaseWait): + await coord.sync() + with raises(PleaseWait): + await coord.uploadBackups(None) + with raises(PleaseWait): + await coord.startBackup(None) + event_end.set() + + +async def sleepHelper(event_start: asyncio.Event, event_end: asyncio.Event): + event_start.set() + await event_end.wait() + + +@pytest.mark.asyncio +async def test_new_backup(coord: Coordinator, time: FakeTime, source, dest): + await coord.startBackup(CreateOptions(time.now(), "Test Name")) + backups = coord.backups() + assert len(backups) == 1 + assert backups[0].name() == "Test Name" + assert backups[0].getSource(source.name()) is not None + assert backups[0].getSource(dest.name()) is None + + +@pytest.mark.asyncio +async def test_sync_error(coord: Coordinator, global_info: GlobalInfo, time: FakeTime, model): + error = Exception("BOOM") + old_sync = model.sync + model.sync = lambda s: doRaise(error) + await coord.sync() + assert global_info._last_error is error + assert global_info._last_failure_time == time.now() + assert global_info._successes == 0 + model.sync = old_sync + await coord.sync() + assert global_info._last_error is None + assert global_info._successes == 1 + assert global_info._last_success == time.now() + await coord.sync() + + +def doRaise(error): + raise error + + +@pytest.mark.asyncio +async def test_delete(coord: Coordinator, backup, source, dest): + assert backup.getSource(source.name()) is not None + assert backup.getSource(dest.name()) is not None + await coord.delete([source.name()], backup.slug()) + assert len(coord.backups()) == 1 + assert backup.getSource(source.name()) is None + assert backup.getSource(dest.name()) is not None + await coord.delete([dest.name()], backup.slug()) + assert backup.getSource(source.name()) is None + assert backup.getSource(dest.name()) is None + assert backup.isDeleted() + assert len(coord.backups()) == 0 + + await coord.sync() + assert len(coord.backups()) == 1 + await coord.delete([source.name(), dest.name()], coord.backups()[0].slug()) + assert len(coord.backups()) == 0 + + +@pytest.mark.asyncio +async def test_delete_errors(coord: Coordinator, source, dest, backup): + with raises(NoBackup): + await coord.delete([source.name()], "badslug") + bad_source = HelperTestSource("bad") + with raises(NoBackup): + await coord.delete([bad_source.name()], backup.slug()) + + +@pytest.mark.asyncio +async def test_retain(coord: Coordinator, source, dest, backup): + assert not backup.getSource(source.name()).retained() + assert not backup.getSource(dest.name()).retained() + await coord.retain({ + source.name(): True, + dest.name(): True + }, backup.slug()) + assert backup.getSource(source.name()).retained() + assert backup.getSource(dest.name()).retained() + + +@pytest.mark.asyncio +async def test_retain_errors(coord: Coordinator, source, dest, backup): + with raises(NoBackup): + await coord.retain({source.name(): True}, "badslug") + bad_source = HelperTestSource("bad") + with raises(NoBackup): + await coord.delete({bad_source.name(): True}, backup.slug()) + + +@pytest.mark.asyncio +async def test_freshness(coord: Coordinator, source: HelperTestSource, dest: HelperTestSource, backup: Backup, time: FakeTime): + source.setMax(2) + dest.setMax(2) + await coord.sync() + assert backup.getPurges() == { + source.name(): False, + dest.name(): False 
+ } + + source.setMax(1) + dest.setMax(1) + await coord.sync() + assert backup.getPurges() == { + source.name(): True, + dest.name(): True + } + + dest.setMax(0) + await coord.sync() + assert backup.getPurges() == { + source.name(): True, + dest.name(): False + } + + source.setMax(0) + await coord.sync() + assert backup.getPurges() == { + source.name(): False, + dest.name(): False + } + + source.setMax(2) + dest.setMax(2) + time.advance(days=7) + await coord.sync() + assert len(coord.backups()) == 2 + assert backup.getPurges() == { + source.name(): True, + dest.name(): True + } + assert coord.backups()[1].getPurges() == { + source.name(): False, + dest.name(): False + } + + # should refresh on delete + source.setMax(1) + dest.setMax(1) + await coord.delete([source.name()], backup.slug()) + assert coord.backups()[0].getPurges() == { + dest.name(): True + } + assert coord.backups()[1].getPurges() == { + source.name(): True, + dest.name(): False + } + + # should update on retain + await coord.retain({dest.name(): True}, backup.slug()) + assert coord.backups()[0].getPurges() == { + dest.name(): False + } + assert coord.backups()[1].getPurges() == { + source.name(): True, + dest.name(): True + } + + # should update on upload + await coord.uploadBackups(coord.backups()[0].slug()) + assert coord.backups()[0].getPurges() == { + dest.name(): False, + source.name(): True + } + assert coord.backups()[1].getPurges() == { + source.name(): False, + dest.name(): True + } + + +@pytest.mark.asyncio +async def test_upload(coord: Coordinator, source: HelperTestSource, dest: HelperTestSource, backup): + await coord.delete([source.name()], backup.slug()) + assert backup.getSource(source.name()) is None + await coord.uploadBackups(backup.slug()) + assert backup.getSource(source.name()) is not None + + with raises(LogicError): + await coord.uploadBackups(backup.slug()) + + with raises(NoBackup): + await coord.uploadBackups("bad slug") + + await coord.delete([dest.name()], backup.slug()) + with raises(NoBackup): + await coord.uploadBackups(backup.slug()) + + +@pytest.mark.asyncio +async def test_download(coord: Coordinator, source, dest, backup): + await coord.download(backup.slug()) + await coord.delete([source.name()], backup.slug()) + await coord.download(backup.slug()) + + with raises(NoBackup): + await coord.download("bad slug") + + +@pytest.mark.asyncio +async def test_backoff(coord: Coordinator, model, source: HelperTestSource, dest: HelperTestSource, backup, time: FakeTime, simple_config: Config): + assert await coord.check() + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.MAX_SYNC_INTERVAL_SECONDS, 60 * 60 * 6) + simple_config.override(Setting.DEFAULT_SYNC_INTERVAL_VARIATION, 0) + + assert coord.nextSyncAttempt() == time.now() + timedelta(hours=6) + assert not await coord.check() + old_sync = model.sync + model.sync = lambda s: doRaise(Exception("BOOM")) + await coord.sync() + + # first backoff should be 0 seconds + assert coord.nextSyncAttempt() == time.now() + assert await coord.check() + + # backoff maxes out at 2 hr = 7200 seconds + for seconds in [10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 7200, 7200]: + await coord.sync() + assert coord.nextSyncAttempt() == time.now() + timedelta(seconds=seconds) + assert not await coord.check() + assert not await coord.check() + assert not await coord.check() + + # a good sync resets it back to 6 hours from now + model.sync = old_sync + await coord.sync() + assert coord.nextSyncAttempt() == time.now() + 
timedelta(hours=6) + assert not await coord.check() + + # if the next backup is less that 6 hours from the last one, that that shoudl be when we sync + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1.0 / 24.0) + assert coord.nextSyncAttempt() == time.now() + timedelta(hours=1) + assert not await coord.check() + + time.advance(hours=2) + assert coord.nextSyncAttempt() == time.now() - timedelta(hours=1) + assert await coord.check() + + +def test_save_creds(coord: Coordinator, source, dest): + pass + + +@pytest.mark.asyncio +async def test_check_size_new_backup(coord: Coordinator, source: HelperTestSource, dest: HelperTestSource, time, fs: FsFaker): + skipForWindows() + fs.setFreeBytes(0) + with raises(LowSpaceError): + await coord.startBackup(CreateOptions(time.now(), "Test Name")) + + +@pytest.mark.asyncio +async def test_check_size_sync(coord: Coordinator, source: HelperTestSource, dest: HelperTestSource, time, fs: FsFaker, global_info: GlobalInfo): + skipForWindows() + fs.setFreeBytes(0) + await coord.sync() + assert len(coord.backups()) == 0 + assert global_info._last_error is not None + + await coord.sync() + assert len(coord.backups()) == 0 + assert global_info._last_error is not None + + # Verify it resets the global size skip check, but gets through once + global_info.setSkipSpaceCheckOnce(True) + await coord.sync() + assert len(coord.backups()) == 1 + assert global_info._last_error is None + assert not global_info.isSkipSpaceCheckOnce() + + # Next attempt to backup shoudl fail again. + time.advance(days=7) + await coord.sync() + assert len(coord.backups()) == 1 + assert global_info._last_error is not None + + +@pytest.mark.asyncio +async def test_cancel(coord: Coordinator, global_info: GlobalInfo): + coord._sync_wait.clear() + asyncio.create_task(coord.sync()) + await coord._sync_start.wait() + await coord.cancel() + assert isinstance(global_info._last_error, UserCancelledError) + + +@pytest.mark.asyncio +async def test_working_through_upload(coord: Coordinator, global_info: GlobalInfo, dest): + coord._sync_wait.clear() + assert not coord.isWorkingThroughUpload() + sync_task = asyncio.create_task(coord.sync()) + await coord._sync_start.wait() + assert not coord.isWorkingThroughUpload() + dest.working = True + assert coord.isWorkingThroughUpload() + coord._sync_wait.set() + await asyncio.wait([sync_task]) + assert not coord.isWorkingThroughUpload() + + +@pytest.mark.asyncio +async def test_alternate_timezone(coord: Coordinator, time: FakeTime, model: Model, dest, source, simple_config: Config): + time.setTimeZone("Europe/Stockholm") + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "12:00") + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + source.setMax(10) + source.insert("Fri", time.toUtc(time.local(2020, 3, 16, 18, 5))) + time.setNow(time.local(2020, 3, 16, 18, 6)) + model.reinitialize() + coord.reset() + await coord.sync() + assert not await coord.check() + assert coord.nextBackupTime() == time.local(2020, 3, 17, 12) + + time.setNow(time.local(2020, 3, 17, 11, 59)) + await coord.sync() + assert not await coord.check() + time.setNow(time.local(2020, 3, 17, 12)) + assert await coord.check() + + +@pytest.mark.asyncio +async def test_disabled_at_install(coord: Coordinator, dest, time): + """ + Verifies that at install time, if some backups are already present the + addon doesn't try to sync over and over when drive is disabled. This was + a problem at one point. 
+ """ + dest.setEnabled(True) + await coord.sync() + assert len(coord.backups()) == 1 + + dest.setEnabled(False) + time.advance(days=5) + assert await coord.check() + await coord.sync() + assert not await coord.check() + + +@pytest.mark.asyncio +async def test_only_source_configured(coord: Coordinator, dest: HelperTestSource, time, source: HelperTestSource): + source.setEnabled(True) + dest.setEnabled(False) + dest.setNeedsConfiguration(False) + await coord.sync() + assert len(coord.backups()) == 1 + + +@pytest.mark.asyncio +async def test_schedule_backup_next_sync_attempt(coord: Coordinator, model, source: HelperTestSource, dest: HelperTestSource, backup, time: FakeTime, simple_config: Config): + """ + Next backup is before max sync interval is reached + """ + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.MAX_SYNC_INTERVAL_SECONDS, 60 * 60) + simple_config.override(Setting.DEFAULT_SYNC_INTERVAL_VARIATION, 0) + + time.setTimeZone("Europe/Stockholm") + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "03:23") + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + source.setMax(10) + source.insert("Fri", time.toUtc(time.local(2020, 3, 16, 3, 33))) + + time.setNow(time.local(2020, 3, 17, 2, 29)) + model.reinitialize() + coord.reset() + await coord.sync() + assert coord.nextBackupTime() == time.local(2020, 3, 17, 3, 23) + assert coord.nextBackupTime() == coord.nextSyncAttempt() + + +@pytest.mark.asyncio +async def test_max_sync_interval_next_sync_attempt(coord: Coordinator, model, source: HelperTestSource, dest: HelperTestSource, backup, time: FakeTime, simple_config: Config): + """ + Next backup is after max sync interval is reached + """ + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.MAX_SYNC_INTERVAL_SECONDS, 60 * 60) + simple_config.override(Setting.DEFAULT_SYNC_INTERVAL_VARIATION, 0) + + time.setTimeZone("Europe/Stockholm") + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "03:23") + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + source.setMax(10) + source.insert("Fri", time.toUtc(time.local(2020, 3, 16, 3, 33))) + time.setNow(time.local(2020, 3, 17, 1, 29)) + model.reinitialize() + coord.reset() + await coord.sync() + assert coord.nextSyncAttempt() == time.local(2020, 3, 17, 2, 29) + assert coord.nextBackupTime() > coord.nextSyncAttempt() + + +@pytest.mark.asyncio +async def test_generational_only_ignored_snapshots(coord: Coordinator, model, source: HelperTestSource, dest: HelperTestSource, time: FakeTime, simple_config: Config, global_info: GlobalInfo): + """ + Verifies a sync with generational settings and only ignored snapshots doesn't cause an error. 
+ Setup is taken from https://github.com/sabeechen/hassio-google-drive-backup/issues/727 + """ + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 4) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 2) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 6) + + backup = source.insert("Fri", time.toUtc(time.local(2020, 3, 16, 3, 33))) + backup.setIgnore(True) + time.setNow(time.local(2020, 3, 16, 4, 0)) + dest.setEnabled(False) + source.setEnabled(True) + + await coord.sync() + assert global_info._last_error is None + + +@pytest.mark.asyncio +async def test_max_sync_interval_randomness(coord: Coordinator, model, source: HelperTestSource, dest: HelperTestSource, backup, time: FakeTime, simple_config: Config): + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.MAX_SYNC_INTERVAL_SECONDS, 60 * 60) + simple_config.override(Setting.DEFAULT_SYNC_INTERVAL_VARIATION, 0.5) + + time.setTimeZone("Europe/Stockholm") + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "03:23") + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + source.setMax(10) + source.insert("Fri", time.toUtc(time.local(2020, 3, 16, 3, 33))) + time.setNow(time.local(2020, 3, 17, 1, 29)) + model.reinitialize() + coord.reset() + await coord.sync() + next_attempt = coord.nextSyncAttempt() + + # verify its within expected range + assert next_attempt - time.now() <= timedelta(hours=1) + assert next_attempt - time.now() >= timedelta(hours=0.5) + + # verify it doesn't change + assert coord.nextSyncAttempt() == next_attempt + time.advance(minutes=1) + assert coord.nextSyncAttempt() == next_attempt + + # sync, and verify it does change + await coord.sync() + assert coord.nextSyncAttempt() != next_attempt + + +@pytest.mark.asyncio +async def test_precaching(coord: Coordinator, precache: DestinationPrecache, dest: HelperTestSource, time: FakeTime, global_info: GlobalInfo): + await coord.sync() + dest.reset() + + # Warm the cache + assert precache.getNextWarmDate() < coord.nextSyncAttempt() + assert precache.cached(dest.name(), time.now()) is None + assert dest.query_count == 0 + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert precache.cached(dest.name(), time.now()) is not None + assert dest.query_count == 1 + + # No queries should have been made to dest, and the cache should now be cleared + time.setNow(coord.nextSyncAttempt()) + assert precache.cached(dest.name(), time.now()) is not None + await coord.sync() + assert dest.query_count == 1 + assert precache.cached(dest.name(), time.now()) is None + assert global_info._last_error is None diff --git a/hassio-google-drive-backup/tests/test_data_cache.py b/hassio-google-drive-backup/tests/test_data_cache.py new file mode 100644 index 0000000..91401ab --- /dev/null +++ b/hassio-google-drive-backup/tests/test_data_cache.py @@ -0,0 +1,210 @@ +import pytest +import os +import json +from injector import Injector +from datetime import timedelta +from backup.config import Config, Setting, VERSION, Version +from backup.util import DataCache, UpgradeFlags, KEY_CREATED, KEY_LAST_SEEN, CACHE_EXPIRATION_DAYS +from backup.time import Time +from os.path import join + + +@pytest.mark.asyncio +async def test_read_and_write(config: Config, time: Time) -> None: + cache = DataCache(config, time) + assert len(cache.backups) == 0 + + 
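+ # Write an entry, verify the dirty flag toggles around saveIfDirty(), then reload the cache to confirm the entry was persisted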
cache.backup("test")[KEY_CREATED] = time.now().isoformat() + assert not cache._dirty + cache.makeDirty() + assert cache._dirty + cache.saveIfDirty() + assert not cache._dirty + + cache = DataCache(config, time) + assert cache.backup("test")[KEY_CREATED] == time.now().isoformat() + assert not cache._dirty + + +@pytest.mark.asyncio +async def test_backup_expiration(config: Config, time: Time) -> None: + cache = DataCache(config, time) + assert len(cache.backups) == 0 + + cache.backup("new")[KEY_LAST_SEEN] = time.now().isoformat() + cache.backup("old")[KEY_LAST_SEEN] = ( + time.now() - timedelta(days=CACHE_EXPIRATION_DAYS + 1)) .isoformat() + cache.makeDirty() + cache.saveIfDirty() + + assert len(cache.backups) == 1 + assert "new" in cache.backups + assert "old" not in cache.backups + + +@pytest.mark.asyncio +async def test_version_upgrades(time: Time, injector: Injector, config: Config) -> None: + # Simluate upgrading from an un-tracked version + assert not os.path.exists(config.get(Setting.DATA_CACHE_FILE_PATH)) + cache = injector.get(DataCache) + upgrade_time = time.now() + assert cache.previousVersion == Version.default() + assert cache.currentVersion == Version.parse(VERSION) + + assert os.path.exists(config.get(Setting.DATA_CACHE_FILE_PATH)) + with open(config.get(Setting.DATA_CACHE_FILE_PATH)) as f: + data = json.load(f) + assert data["upgrades"] == [{ + "prev_version": str(Version.default()), + "new_version": VERSION, + "date": upgrade_time.isoformat() + }] + + # Reload the data cache, verify there is no upgrade. + time.advance(days=1) + cache = DataCache(config, time) + assert cache.previousVersion == Version.parse(VERSION) + assert cache.currentVersion == Version.parse(VERSION) + assert os.path.exists(config.get(Setting.DATA_CACHE_FILE_PATH)) + + with open(config.get(Setting.DATA_CACHE_FILE_PATH)) as f: + data = json.load(f) + assert data["upgrades"] == [{ + "prev_version": str(Version.default()), + "new_version": VERSION, + "date": upgrade_time.isoformat() + }] + + # simulate upgrading to a new version, verify an upgrade gets identified. 
+ upgrade_version = Version.parse("200") + + class UpgradeCache(DataCache): + def __init__(self): + super().__init__(config, time) + + @property + def currentVersion(self): + return upgrade_version + + cache = UpgradeCache() + assert cache.previousVersion == Version.parse(VERSION) + assert cache.currentVersion == upgrade_version + assert os.path.exists(config.get(Setting.DATA_CACHE_FILE_PATH)) + + with open(config.get(Setting.DATA_CACHE_FILE_PATH)) as f: + data = json.load(f) + assert data["upgrades"] == [ + { + "prev_version": str(Version.default()), + "new_version": VERSION, + "date": upgrade_time.isoformat() + }, + { + "prev_version": VERSION, + "new_version": str(upgrade_version), + "date": time.now().isoformat() + } + ] + + next_upgrade_time = time.now() + time.advance(days=1) + # Verify version upgrade time queries work as expected + assert cache.getUpgradeTime(Version.parse(VERSION)) == upgrade_time + assert cache.getUpgradeTime(Version.default()) == upgrade_time + assert cache.getUpgradeTime(upgrade_version) == next_upgrade_time + + # degenerate case, should never happen but a sensible value needs to be returned + assert cache.getUpgradeTime(Version.parse("201")) == time.now() + + +@pytest.mark.asyncio +async def test_flag(config: Config, time: Time): + cache = DataCache(config, time) + assert not cache.checkFlag(UpgradeFlags.TESTING_FLAG) + assert not cache.dirty + + cache.addFlag(UpgradeFlags.TESTING_FLAG) + assert cache.dirty + assert cache.checkFlag(UpgradeFlags.TESTING_FLAG) + cache.saveIfDirty() + + cache = DataCache(config, time) + assert cache.checkFlag(UpgradeFlags.TESTING_FLAG) + + +@pytest.mark.asyncio +async def test_warn_upgrade_new_install(config: Config, time: Time): + """A fresh install of the addon should never warn about upgrade snapshots""" + cache = DataCache(config, time) + assert not cache.notifyForIgnoreUpgrades + assert cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + +@pytest.mark.asyncio +async def test_warn_upgrade_old_install(config: Config, time: Time): + """An old install of the addon warn about upgrade snapshots""" + with open(config.get(Setting.DATA_CACHE_FILE_PATH), "w") as f: + data = { + "upgrades": [ + { + "prev_version": str(Version.default()), + "new_version": "0.108.1", + "date": time.now().isoformat() + } + ] + } + json.dump(data, f) + cache = DataCache(config, time) + assert cache.notifyForIgnoreUpgrades + assert not cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + +@pytest.mark.asyncio +async def test_warn_upgrade_old_install_explicit_ignore_upgrades(config: Config, time: Time, cleandir: str): + """An old install of the addon should not warn about upgrade snapshots if it explicitly ignores them""" + with open(config.get(Setting.DATA_CACHE_FILE_PATH), "w") as f: + data = { + "upgrades": [ + { + "prev_version": str(Version.default()), + "new_version": "0.108.1", + "date": time.now().isoformat() + } + ] + } + json.dump(data, f) + config_path = join(cleandir, "config.json") + with open(config_path, "w") as f: + data = { + Setting.IGNORE_UPGRADE_BACKUPS.value: True, + Setting.DATA_CACHE_FILE_PATH.value: config.get(Setting.DATA_CACHE_FILE_PATH) + } + json.dump(data, f) + cache = DataCache(Config.fromFile(config_path), time) + assert not cache.notifyForIgnoreUpgrades + assert cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS) + + +@pytest.mark.asyncio +async def test_warn_upgrade_old_install_explicit_ignore_others(config: Config, time: Time, cleandir: str): + """An old install of the addon should not warn about upgrade snapshots if it 
explicitly ignores them""" + with open(config.get(Setting.DATA_CACHE_FILE_PATH), "w") as f: + data = { + "upgrades": [ + { + "prev_version": str(Version.default()), + "new_version": "0.108.1", + "date": time.now().isoformat() + } + ] + } + json.dump(data, f) + config_path = join(cleandir, "config.json") + with open(config_path, "w") as f: + data = { + Setting.IGNORE_OTHER_BACKUPS.value: True, + Setting.DATA_CACHE_FILE_PATH.value: config.get(Setting.DATA_CACHE_FILE_PATH) + } + json.dump(data, f) + cache = DataCache(Config.fromFile(config_path), time) + assert not cache.notifyForIgnoreUpgrades diff --git a/hassio-google-drive-backup/tests/test_debugworker.py b/hassio-google-drive-backup/tests/test_debugworker.py new file mode 100644 index 0000000..fdc2213 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_debugworker.py @@ -0,0 +1,142 @@ +import pytest + +from backup.config import Config, Setting +from backup.debugworker import DebugWorker +from backup.util import GlobalInfo +from backup.logger import getLogger +from dev.simulationserver import SimulationServer +from .helpers import skipForWindows +from backup.server import ErrorStore +from .conftest import FakeTime + + +@pytest.mark.asyncio +async def test_dns_info(debug_worker: DebugWorker, config: Config): + skipForWindows() + config.override(Setting.SEND_ERROR_REPORTS, True) + config.override(Setting.DRIVE_HOST_NAME, "localhost") + await debug_worker.doWork() + assert '127.0.0.1' in debug_worker.dns_info['localhost'] + assert 'localhost' in debug_worker.dns_info['localhost'] + + +@pytest.mark.asyncio +async def test_bad_host(debug_worker: DebugWorker, config: Config): + skipForWindows() + config.override(Setting.DRIVE_HOST_NAME, "dasdfdfgvxcvvsoejbr.com") + await debug_worker.doWork() + assert "Name or service not known" in debug_worker.dns_info['dasdfdfgvxcvvsoejbr.com']['dasdfdfgvxcvvsoejbr.com'] + +@pytest.mark.asyncio +async def test_send_error_report(time, debug_worker: DebugWorker, config: Config, global_info: GlobalInfo, server, error_store: ErrorStore): + config.override(Setting.SEND_ERROR_REPORTS, True) + config.override(Setting.DRIVE_HOST_NAME, "localhost") + global_info.sync() + global_info.success() + global_info.sync() + global_info.success() + global_info.sync() + global_info.failed(Exception()) + await debug_worker.doWork() + report = error_store.last_error + assert report['report']['sync_success_count'] == 2 + assert report['report']['sync_count'] == 3 + assert report['report']['failure_count'] == 1 + assert report['report']['sync_last_start'] == time.now().isoformat() + assert report['report']['failure_time'] == time.now().isoformat() + assert report['report']['error'] == getLogger("test").formatException(Exception()) + + +@pytest.mark.asyncio +async def test_dont_send_error_report(time, debug_worker: DebugWorker, config: Config, global_info: GlobalInfo, server: SimulationServer, error_store: ErrorStore): + config.override(Setting.SEND_ERROR_REPORTS, False) + config.override(Setting.DRIVE_HOST_NAME, "localhost") + global_info.failed(Exception()) + await debug_worker.doWork() + assert error_store.last_error is None + + +@pytest.mark.asyncio +async def test_only_send_duplicates(time, debug_worker: DebugWorker, config: Config, global_info: GlobalInfo, server, error_store: ErrorStore): + config.override(Setting.SEND_ERROR_REPORTS, True) + config.override(Setting.DRIVE_HOST_NAME, "localhost") + global_info.failed(Exception("boom1")) + firstExceptionTime = time.now() + await debug_worker.doWork() + report = 
error_store.last_error + assert report['report']["error"] == getLogger("test").formatException(Exception("boom1")) + assert report['report']["time"] == firstExceptionTime.isoformat() + + # Same exception shouldn't cause us to send the error report again + time.advance(days=1) + global_info.failed(Exception("boom1")) + await debug_worker.doWork() + report = error_store.last_error + assert report['report']["error"] == getLogger("test").formatException(Exception("boom1")) + assert report['report']["time"] == firstExceptionTime.isoformat() + + # Btu a new one will send a new report + global_info.failed(Exception("boom2")) + await debug_worker.doWork() + report = error_store.last_error + assert report['report']["error"] == getLogger("test").formatException(Exception("boom2")) + assert report['report']["time"] == time.now().isoformat() + + +@pytest.mark.asyncio +async def test_send_clear(time, debug_worker: DebugWorker, config: Config, global_info: GlobalInfo, server, error_store: ErrorStore): + config.override(Setting.SEND_ERROR_REPORTS, True) + config.override(Setting.DRIVE_HOST_NAME, "localhost") + + # Simulate failure + global_info.failed(Exception("boom")) + await debug_worker.doWork() + + # And then success + global_info.success() + time.advance(days=1) + await debug_worker.doWork() + report = error_store.last_error + assert report['report'] == { + 'duration': '1 day, 0:00:00' + } + + +@pytest.mark.asyncio +async def test_health_check_timing_success(server_url, time: FakeTime, debug_worker: DebugWorker, config: Config, server: SimulationServer): + # Only do successfull checks once a day + await debug_worker.doWork() + assert server.interceptor.urlWasCalled("/health") + server.interceptor.clear() + + await debug_worker.doWork() + assert not server.interceptor.urlWasCalled("/health") + + time.advance(hours=23) + await debug_worker.doWork() + assert not server.interceptor.urlWasCalled("/health") + + time.advance(hours=2) + await debug_worker.doWork() + assert server.interceptor.urlWasCalled("/health") + + +@pytest.mark.asyncio +async def test_health_check_timing_failure(server_url, time: FakeTime, debug_worker: DebugWorker, config: Config, server: SimulationServer): + # Failed helath checks retry after a minute + server.interceptor.setError("/health", 500) + + await debug_worker.doWork() + assert server.interceptor.urlWasCalled("/health") + server.interceptor.clear() + + await debug_worker.doWork() + assert not server.interceptor.urlWasCalled("/health") + + time.advance(seconds=59) + await debug_worker.doWork() + assert not server.interceptor.urlWasCalled("/health") + + time.advance(seconds=2) + await debug_worker.doWork() + assert server.interceptor.urlWasCalled("/health") diff --git a/hassio-google-drive-backup/tests/test_destinationprecache.py b/hassio-google-drive-backup/tests/test_destinationprecache.py new file mode 100644 index 0000000..d97bd53 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_destinationprecache.py @@ -0,0 +1,119 @@ + + +from backup.model import DestinationPrecache, Model, Coordinator +from backup.config import Config, Setting +from tests.faketime import FakeTime +from dev.request_interceptor import RequestInterceptor +from dev.simulated_google import URL_MATCH_DRIVE_API +from backup.drive import DriveSource +from datetime import timedelta +import pytest + + +@pytest.mark.asyncio +async def test_no_caching_before_cache_time(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: 
FakeTime) -> None: + await coord.sync() + + interceptor.clear() + await precache.checkForSmoothing() + assert precache.getNextWarmDate() > time.now() + assert not interceptor.urlWasCalled(URL_MATCH_DRIVE_API) + assert precache.cached(drive.name(), time.now()) is None + + +@pytest.mark.asyncio +async def test_no_caching_after_sync_time(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + + time.setNow(coord.nextSyncAttempt()) + interceptor.clear() + await precache.checkForSmoothing() + assert precache.getNextWarmDate() < time.now() + assert not interceptor.urlWasCalled(URL_MATCH_DRIVE_API) + assert precache.cached(drive.name(), time.now()) is None + + +@pytest.mark.asyncio +async def test_cache_after_warm_date(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + interceptor.clear() + assert precache.getNextWarmDate() < coord.nextSyncAttempt() + + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert interceptor.urlWasCalled(URL_MATCH_DRIVE_API) + assert precache.cached(drive.name(), time.now()) is not None + + +async def test_no_double_caching(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + interceptor.clear() + + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert precache.cached(drive.name(), time.now()) is not None + + interceptor.clear() + time.setNow(precache.getNextWarmDate() + (coord.nextSyncAttempt() - precache.getNextWarmDate()) / 2) + await precache.checkForSmoothing() + assert not interceptor.urlWasCalled(URL_MATCH_DRIVE_API) + assert precache.cached(drive.name(), time.now()) is not None + + +async def test_cache_expiration(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert precache.cached(drive.name(), time.now()) is not None + + time.setNow(coord.nextSyncAttempt() + timedelta(minutes=2)) + assert precache.cached(drive.name(), time.now()) is None + + +async def test_cache_clear(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert precache.cached(drive.name(), time.now()) is not None + + precache.clear() + assert precache.cached(drive.name(), time.now()) is None + + +async def test_cache_error_backoff(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: + await coord.sync() + + time.setNow(precache.getNextWarmDate()) + interceptor.setError(URL_MATCH_DRIVE_API, status=503) + await precache.checkForSmoothing() + + assert precache.cached(drive.name(), time.now()) is None + delta = precache.getNextWarmDate() - time.now() + assert delta >= timedelta(days=1) + + +async def test_cache_warm_date_stability(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime) -> None: 
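+ # The chosen warm date should stay fixed across repeated queries and only change after the cache has actually been warmed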
+ await coord.sync() + + # The warm date shouldn't change + last_warm = precache.getNextWarmDate() + assert precache.getNextWarmDate() == last_warm + time.setNow(last_warm - timedelta(minutes=1)) + assert precache.getNextWarmDate() == last_warm + + # Until the cached is warmed + time.setNow(last_warm) + await precache.checkForSmoothing() + assert precache.cached(drive.name(), time.now()) is not None + assert precache.getNextWarmDate() != last_warm + + +async def test_disable_caching(server, precache: DestinationPrecache, model: Model, drive: DriveSource, interceptor: RequestInterceptor, coord: Coordinator, time: FakeTime, config: Config) -> None: + await coord.sync() + config.override(Setting.CACHE_WARMUP_MAX_SECONDS, 0) + + time.setNow(precache.getNextWarmDate()) + await precache.checkForSmoothing() + assert precache.cached(drive.name(), time.now()) is None diff --git a/hassio-google-drive-backup/tests/test_drivesource.py b/hassio-google-drive-backup/tests/test_drivesource.py new file mode 100644 index 0000000..0be00c9 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_drivesource.py @@ -0,0 +1,1048 @@ +import os +import json +from time import sleep + +import pytest +import asyncio +from yarl import URL +from aiohttp.client_exceptions import ClientResponseError +from backup.config import Config, Setting +from dev.simulationserver import SimulationServer +from dev.simulated_google import SimulatedGoogle, URL_MATCH_UPLOAD_PROGRESS, URL_MATCH_FILE +from dev.request_interceptor import RequestInterceptor +from backup.drive import DriveSource, FolderFinder, DriveRequests, RETRY_SESSION_ATTEMPTS, UPLOAD_SESSION_EXPIRATION_DURATION, URL_START_UPLOAD +from backup.drive.driverequests import (BASE_CHUNK_SIZE, CHUNK_UPLOAD_TARGET_SECONDS) +from backup.drive.drivesource import FOLDER_MIME_TYPE +from backup.exceptions import (BackupFolderInaccessible, BackupFolderMissingError, + DriveQuotaExceeded, ExistingBackupFolderError, + GoogleCantConnect, GoogleCredentialsExpired, + GoogleInternalError, GoogleUnexpectedError, + GoogleSessionError, GoogleTimeoutError, CredRefreshMyError, CredRefreshGoogleError) +from backup.creds import Creds +from backup.model import DriveBackup, DummyBackup +from .faketime import FakeTime +from .helpers import compareStreams, createBackupTar + +RETRY_EXHAUSTION_SLEEPS = [2, 4, 8, 16, 32] + + +class BackupHelper(): + def __init__(self, uploader, time): + self.time = time + self.uploader = uploader + + async def createFile(self, size=1024 * 1024 * 2, slug="testslug", name="Test Name", note=None): + from_backup: DummyBackup = DummyBackup( + name, self.time.toUtc(self.time.local(1985, 12, 6)), "fake source", slug, note=note, size=size) + data = await self.uploader.upload(createBackupTar(slug, name, self.time.now(), size)) + return from_backup, data + + +@pytest.fixture +def backup_helper(uploader, time): + return BackupHelper(uploader, time) + + +@pytest.mark.asyncio +async def test_sync_empty(drive) -> None: + assert len(await drive.get()) == 0 + + +@pytest.mark.asyncio +async def test_CRUD(backup_helper, drive, time, session) -> None: + from_backup, data = await backup_helper.createFile() + backup: DriveBackup = await drive.save(from_backup, data) + assert backup.name() == "Test Name" + assert backup.date() == time.local(1985, 12, 6) + assert not backup.retained() + assert backup.size() == data.size() + assert backup.slug() == "testslug" + assert len(backup.id()) > 0 + assert backup.backupType() == from_backup.backupType() + assert backup.protected() == 
from_backup.protected()
+    from_backup.addSource(backup)
+
+    # download the item, its bytes should match up
+    download = await drive.read(from_backup)
+    data.position(0)
+    await compareStreams(data, download)
+
+    # read the item, make sure its data matches up
+    backups = await drive.get()
+    assert len(backups) == 1
+    backup = backups[from_backup.slug()]
+    assert backup.name() == "Test Name"
+    assert backup.date() == time.local(1985, 12, 6)
+    assert not backup.retained()
+    assert backup.size() == data.size()
+    assert backup.slug() == "testslug"
+    assert len(backup.id()) > 0
+    assert backup.backupType() == from_backup.backupType()
+    assert backup.protected() == from_backup.protected()
+
+    # update retention
+    assert not backup.retained()
+    await drive.retain(from_backup, True)
+    assert (await drive.get())[from_backup.slug()].retained()
+    await drive.retain(from_backup, False)
+    assert not (await drive.get())[from_backup.slug()].retained()
+
+    # Delete the item, make sure it's gone
+    await drive.delete(from_backup)
+    backups = await drive.get()
+    assert len(backups) == 0
+
+
+@pytest.mark.asyncio
+async def test_folder_creation(drive, time, config):
+    assert len(await drive.get()) == 0
+
+    folderId = await drive.getFolderId()
+    assert len(folderId) > 0
+
+    item = await drive.drivebackend.get(folderId)
+    assert not item["trashed"]
+    assert item["name"] == "Home Assistant Backups"
+    assert item["mimeType"] == FOLDER_MIME_TYPE
+    assert item["appProperties"]['backup_folder'] == 'true'
+
+    # sync again, assert the folder is reused
+    time.advanceDay()
+    os.remove(config.get(Setting.FOLDER_FILE_PATH))
+    assert len(await drive.get()) == 0
+    assert await drive.getFolderId() == folderId
+
+    # trash the folder, assert we create a new one on sync
+    await drive.drivebackend.update(folderId, {"trashed": True})
+    assert (await drive.drivebackend.get(folderId))["trashed"] is True
+    assert len(await drive.get()) == 0
+    time.advanceDay()
+    assert await drive.getFolderId() != folderId
+
+    # delete the folder, assert we create a new one
+    folderId = await drive.getFolderId()
+    await drive.drivebackend.delete(folderId)
+    time.advanceDay()
+    assert len(await drive.get()) == 0
+    time.advanceDay()
+    assert await drive.getFolderId() != folderId
+
+
+@pytest.mark.asyncio
+async def test_folder_selection(drive, time):
+    folder_metadata = {
+        'name': "Junk Data",
+        'mimeType': FOLDER_MIME_TYPE,
+        'appProperties': {
+            "backup_folder": "true",
+        },
+    }
+
+    # create two folders at different times
+    id_old = (await drive.drivebackend.createFolder(folder_metadata))['id']
+    sleep(2)
+    id_new = (await drive.drivebackend.createFolder(folder_metadata))['id']
+
+    # Verify we use the newest
+    await drive.get()
+    assert await drive.getFolderId() == id_new
+    assert await drive.getFolderId() != id_old
+
+
+@pytest.mark.asyncio
+async def test_bad_auth_creds(drive: DriveSource, time):
+    drive.drivebackend.creds._refresh_token = "not_allowed"
+    with pytest.raises(GoogleCredentialsExpired):
+        await drive.get()
+    assert time.sleeps == []
+
+
+@pytest.mark.asyncio
+async def test_out_of_space(backup_helper, drive: DriveSource, google: SimulatedGoogle):
+    google.setDriveSpaceAvailable(100)
+    from_backup, data = await backup_helper.createFile()
+    await drive.get()
+    with pytest.raises(DriveQuotaExceeded) as e:
+        await drive.save(from_backup, data)
+    assert e.value.data() == {
+        'backup_size': '2 MB',
+        'free_space': '100 B'
+    }
+
+
+@pytest.mark.asyncio
+async def test_out_of_space_error(backup_helper, drive: DriveSource, google: 
SimulatedGoogle): + google.setDriveSpaceAvailable(100) + from_backup, data = await backup_helper.createFile() + # Without calling drive.get(), the parser has no info about drive's space usage + with pytest.raises(DriveQuotaExceeded) as e: + await drive.save(from_backup, data) + assert e.value.data() == { + 'backup_size': 'Error', + 'free_space': 'Error' + } + + +@pytest.mark.asyncio +async def test_drive_dns_resolution_error(drive: DriveSource, config: Config, time): + config.override(Setting.DRIVE_URL, + "http://fsdfsdasdasdf.saasdsdfsdfsd.com:2567") + with pytest.raises(GoogleCantConnect): + await drive.get() + assert time.sleeps == [] + + +@pytest.mark.asyncio +async def test_drive_connect_error(drive: DriveSource, config: Config, time): + config.override(Setting.DRIVE_URL, "http://localhost:1034") + with pytest.raises(GoogleCantConnect): + await drive.get() + assert time.sleeps == [] + + +@pytest.mark.asyncio +async def test_upload_session_expired(drive, time, backup_helper, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, status=404) + with pytest.raises(GoogleSessionError): + await drive.save(from_backup, data) + assert time.sleeps == [] + + +@pytest.mark.asyncio +async def test_upload_resume(drive: DriveSource, time, backup_helper: BackupHelper, google: SimulatedGoogle, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, fail_after=1, status=500) + + # Upload, which will fail + with pytest.raises(GoogleInternalError): + await drive.save(from_backup, data) + + # Verify we uploaded one chunk + assert google.chunks == [BASE_CHUNK_SIZE] + + # Retry the upload, which shoudl now pass + interceptor.clear() + data.position(0) + drive_backup = await drive.save(from_backup, data) + from_backup.addSource(drive_backup) + assert google.chunks == [BASE_CHUNK_SIZE, + BASE_CHUNK_SIZE, (data.size()) - BASE_CHUNK_SIZE * 2] + + # Verify the data is correct + data.position(0) + await compareStreams(data, await drive.read(from_backup)) + + +def test_chunk_size(drive: DriveSource, config: Config): + max = config.get(Setting.MAXIMUM_UPLOAD_CHUNK_BYTES) / BASE_CHUNK_SIZE + assert drive.drivebackend._getNextChunkSize( + 1000000000, 0) == max + assert drive.drivebackend._getNextChunkSize( + 1, CHUNK_UPLOAD_TARGET_SECONDS) == 1 + assert drive.drivebackend._getNextChunkSize( + 1000000000, CHUNK_UPLOAD_TARGET_SECONDS) == max + assert drive.drivebackend._getNextChunkSize( + 1, CHUNK_UPLOAD_TARGET_SECONDS) == 1 + assert drive.drivebackend._getNextChunkSize( + 1, 1) == CHUNK_UPLOAD_TARGET_SECONDS + assert drive.drivebackend._getNextChunkSize( + 1, 1.01) == CHUNK_UPLOAD_TARGET_SECONDS - 1 + + +def test_chunk_size_limits(drive: DriveSource, config: Config): + config.override(Setting.MAXIMUM_UPLOAD_CHUNK_BYTES, 1) + assert drive.drivebackend._getNextChunkSize(1000000000, 0) == 1 + assert drive.drivebackend._getNextChunkSize(1, 1000000) == 1 + + config.override(Setting.MAXIMUM_UPLOAD_CHUNK_BYTES, BASE_CHUNK_SIZE * 1.5) + assert drive.drivebackend._getNextChunkSize(1000000000, 0) == 1 + assert drive.drivebackend._getNextChunkSize(1, 1000000) == 1 + + config.override(Setting.MAXIMUM_UPLOAD_CHUNK_BYTES, BASE_CHUNK_SIZE * 3.5) + assert drive.drivebackend._getNextChunkSize(1000000000, 0) == 3 + assert drive.drivebackend._getNextChunkSize(1, 1000000) == 1 + + +@pytest.mark.asyncio +async def test_working_through_upload(drive: DriveSource, server: 
SimulationServer, backup_helper: BackupHelper, interceptor: RequestInterceptor): + assert not drive.isWorking() + + # Let a single chunk upload, then wait + matcher = interceptor.setWaiter(URL_MATCH_UPLOAD_PROGRESS, attempts=1) + from_backup, data = await backup_helper.createFile(size=1024 * 1024 * 10) + save_task = asyncio.create_task(drive.save(from_backup, data)) + await matcher.waitForCall() + assert drive.isWorking() + + # let it complete + matcher.clear() + await save_task + assert not drive.isWorking() + + +@pytest.mark.asyncio +async def test_drive_timeout(drive, config, time: FakeTime): + # Ensure we have credentials + await drive.get() + + config.override(Setting.GOOGLE_DRIVE_TIMEOUT_SECONDS, 0.000001) + with pytest.raises(GoogleTimeoutError): + await drive.get() + assert time.sleeps == [] + + +@pytest.mark.asyncio +async def test_resume_upload_attempts_exhausted(drive: DriveSource, time, backup_helper, interceptor: RequestInterceptor, google: SimulatedGoogle): + # Allow an upload to update one chunk and then fail. + from_backup, data = await backup_helper.createFile() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, fail_after=1, status=500) + with pytest.raises(GoogleInternalError): + await drive.save(from_backup, data) + assert google.chunks == [BASE_CHUNK_SIZE] + + # Verify we have a cached location + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_count == 1 + last_location = drive.drivebackend.last_attempt_location + + for x in range(1, RETRY_SESSION_ATTEMPTS): + data.position(0) + with pytest.raises(GoogleInternalError): + await drive.save(from_backup, data) + assert drive.drivebackend.last_attempt_count == x + 1 + + # We should still be using the same location url + assert drive.drivebackend.last_attempt_location == last_location + + # Another attempt should use another location url + with pytest.raises(GoogleInternalError): + data.position(0) + await drive.save(from_backup, data) + assert drive.drivebackend.last_attempt_count == 0 + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_location != last_location + + # Now let it succeed + interceptor.clear() + data.position(0) + drive_backup = await drive.save(from_backup, data) + from_backup.addSource(drive_backup) + + # And verify the bytes are correct + data.position(0) + await compareStreams(data, await drive.read(from_backup)) + + +@pytest.mark.asyncio +async def test_google_internal_error(drive, server, time: FakeTime, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_FILE, 500) + with pytest.raises(GoogleInternalError): + await drive.get() + assert time.sleeps == RETRY_EXHAUSTION_SLEEPS + time.clearSleeps() + + interceptor.clear() + interceptor.setError(URL_MATCH_FILE, 500) + with pytest.raises(GoogleInternalError): + await drive.get() + assert time.sleeps == RETRY_EXHAUSTION_SLEEPS + + +@pytest.mark.asyncio +async def test_check_time(drive: DriveSource, drive_creds: Creds): + assert not await drive.check() + drive.saveCreds(drive_creds) + assert await drive.check() + + +@pytest.mark.asyncio +async def test_disable_upload(drive: DriveSource, config: Config): + assert drive.upload() + config.override(Setting.ENABLE_DRIVE_UPLOAD, False) + assert not drive.upload() + + +@pytest.mark.asyncio +async def test_resume_session_abandoned_on_http4XX(time, drive: DriveSource, config: Config, server, backup_helper, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile() + + # 
Configure the upload to fail after the first upload chunk + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 402, 1) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + # Verify a requst was made to start the upload but not cached + assert server.wasUrlRequested( + "/upload/drive/v3/files/?uploadType=resumable&supportsAllDrives=true") + assert drive.drivebackend.last_attempt_count == 1 + assert drive.drivebackend.last_attempt_location is None + assert drive.drivebackend.last_attempt_metadata is None + + # upload again, which should retry + server.urls.clear() + interceptor.clear() + data.position(0) + backup = await drive.save(from_backup, data) + assert server.wasUrlRequested(URL_START_UPLOAD) + + # Verify the uploaded bytes are identical + from_backup.addSource(backup) + download = await drive.read(from_backup) + data.position(0) + await compareStreams(data, download) + + +@pytest.mark.asyncio +async def test_resume_session_abandoned_after_a_long_time(time: FakeTime, drive: DriveSource, config: Config, server: SimulationServer, backup_helper, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile() + + # Configure the upload to fail after the first upload chunk + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501, 1) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + # Verify it reuses the session a few times + assert server.wasUrlRequested(URL_START_UPLOAD) + assert drive.drivebackend.last_attempt_count == 1 + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_metadata is not None + + data.position(0) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + assert drive.drivebackend.last_attempt_count == 2 + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_metadata is not None + last_location = drive.drivebackend.last_attempt_location + + # Fast forward a lot, then verify the session is restarted + server.urls.clear() + interceptor.clear() + time.advance(duration=UPLOAD_SESSION_EXPIRATION_DURATION) + data.position(0) + await drive.save(from_backup, data) + assert interceptor.urlWasCalled(URL_START_UPLOAD) + assert not interceptor.urlWasCalled(last_location) + + +@pytest.mark.asyncio +async def test_chunk_upload_resets_attempt_counter(time: FakeTime, drive: DriveSource, config: Config, server: SimulationServer, backup_helper: BackupHelper, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile(size=1024 * 1024 * 10) + + # Configure the upload to fail after the first upload chunk + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501, 1) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + data.position(0) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + # Verify the session was started + assert interceptor.urlWasCalled(URL_START_UPLOAD) + assert interceptor.urlWasCalled(URL_MATCH_UPLOAD_PROGRESS) + assert drive.drivebackend.last_attempt_count == 2 + location = drive.drivebackend.last_attempt_location + assert location is not None + + # Allow one more chunk to succeed + interceptor.clear() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501, 2) + data.position(0) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + # Verify the session was reused and the attempt counter was reset + assert not interceptor.urlWasCalled(URL_START_UPLOAD) 
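+    # (No request to URL_START_UPLOAD means no new resumable session was created; the asserts
+    # that follow check that the cached session location was reused and the retry counter reset.)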
+ assert interceptor.urlWasCalled(URL_MATCH_UPLOAD_PROGRESS) + assert interceptor.urlWasCalled(URL(location).path) + assert drive.drivebackend.last_attempt_count == 1 + assert drive.drivebackend.last_attempt_location == location + + +@pytest.mark.asyncio +async def test_resume_session_reused_on_http5XX(time, drive: DriveSource, config: Config, server, backup_helper, interceptor: RequestInterceptor): + await verify_upload_resumed(time, drive, config, server, interceptor, 550, backup_helper) + + +@pytest.mark.asyncio +async def test_resume_session_reused_abonded_after_retries(time, drive: DriveSource, config: Config, server: SimulationServer, backup_helper, interceptor: RequestInterceptor): + from_backup, data = await backup_helper.createFile() + + # Configure the upload to fail after the first upload chunk + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501, 1) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + + # Verify a requst was made to start the upload but not cached + assert server.wasUrlRequested(URL_START_UPLOAD) + assert drive.drivebackend.last_attempt_count == 1 + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_metadata is not None + last_location = drive.drivebackend.last_attempt_location + + for x in range(1, RETRY_SESSION_ATTEMPTS): + server.urls.clear() + interceptor.clear() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501) + data.position(0) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + assert not server.wasUrlRequested(URL_START_UPLOAD) + assert server.wasUrlRequested(last_location) + assert drive.drivebackend.last_attempt_count == x + 1 + assert drive.drivebackend.last_attempt_location is last_location + assert drive.drivebackend.last_attempt_metadata is not None + + # Next attempt should give up and restart the upload + server.urls.clear() + interceptor.clear() + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 501, 1) + data.position(0) + with pytest.raises(ClientResponseError): + await drive.save(from_backup, data) + assert server.wasUrlRequested(URL_START_UPLOAD) + assert not server.wasUrlRequested(last_location) + assert drive.drivebackend.last_attempt_count == 1 + + # upload again, which should retry + server.urls.clear() + interceptor.clear() + data.position(0) + backup = await drive.save(from_backup, data) + assert not server.wasUrlRequested(URL_START_UPLOAD) + + # Verify the uploaded bytes are identical + from_backup.addSource(backup) + download = await drive.read(from_backup) + data.position(0) + await compareStreams(data, download) + + +async def verify_upload_resumed(time, drive: DriveSource, config: Config, server: SimulationServer, interceptor: RequestInterceptor, status, backup_helper, expected=ClientResponseError): + from_backup, data = await backup_helper.createFile() + + # Configure the upload to fail after the first upload chunk + interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, status, 1) + with pytest.raises(expected): + await drive.save(from_backup, data) + + # Verify a requst was made to start the upload + assert server.wasUrlRequested(URL_START_UPLOAD) + assert drive.drivebackend.last_attempt_location is not None + assert drive.drivebackend.last_attempt_metadata is not None + last_location = drive.drivebackend.last_attempt_location + + # Retry the upload and let is succeed + server.urls.clear() + interceptor.clear() + data.position(0) + backup = await drive.save(from_backup, data) + + # We shoudl nto see the upload 
"initialize" url + assert not server.wasUrlRequested(URL_START_UPLOAD) + + # We should see the last location url (which has a unique token) reused to resume the upload + assert server.wasUrlRequested(last_location) + + # The saved metadata should be cleared out. + assert drive.drivebackend.last_attempt_count == 1 + assert drive.drivebackend.last_attempt_location is None + assert drive.drivebackend.last_attempt_metadata is None + + # Verify the uploaded bytes are identical + from_backup.addSource(backup) + download = await drive.read(from_backup) + data.position(0) + await compareStreams(data, download) + + +@pytest.mark.asyncio +async def test_recreate_folder_when_deleted(time, drive: DriveSource, config: Config, backup_helper, folder_finder: FolderFinder): + await drive.get() + id = await drive.getFolderId() + await drive.drivebackend.delete(id) + assert len(await drive.get()) == 0 + assert id != await drive.getFolderId() + + +@pytest.mark.asyncio +async def test_recreate_folder_when_losing_permissions(time, drive: DriveSource, config: Config, backup_helper, google: SimulatedGoogle): + await drive.get() + id = await drive.getFolderId() + google.lostPermission.append(id) + assert len(await drive.get()) == 0 + assert id != await drive.getFolderId() + + +@pytest.mark.asyncio +async def test_folder_missing_on_upload(time, drive: DriveSource, config: Config, backup_helper): + # Make the folder + await drive.get() + + # Require a specified folder so we don't query + config.override(Setting.SPECIFY_BACKUP_FOLDER, True) + + # Delete the folder + await drive.drivebackend.delete(await drive.getFolderId()) + + # Then try to make one + from_backup, data = await backup_helper.createFile() + + # Configure the upload to fail after the first upload chunk + with pytest.raises(BackupFolderInaccessible): + await drive.save(from_backup, data) + + +@pytest.mark.asyncio +async def test_folder_error_on_upload_lost_permission(time, drive: DriveSource, config: Config, google: SimulatedGoogle, backup_helper, session): + # Make the folder + await drive.get() + + # Require a specified folder so we don't query + config.override(Setting.SPECIFY_BACKUP_FOLDER, True) + + # Make the folder inaccessible + google.lostPermission.append(await drive.getFolderId()) + time.advanceDay() + + # Fail to upload + with pytest.raises(BackupFolderInaccessible): + await drive.save(*await backup_helper.createFile()) + + +@pytest.mark.asyncio +async def test_folder_error_on_upload_lost_permission_custom_client(time, drive: DriveSource, config: Config, google: SimulatedGoogle, backup_helper, session): + # Make the folder + await drive.get() + + # Require a specified folder so we don't query + config.override(Setting.SPECIFY_BACKUP_FOLDER, True) + + google._client_id_hack = config.get(Setting.DEFAULT_DRIVE_CLIENT_ID) + config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, "something-else") + + # Make the folder inaccessible + google.lostPermission.append(await drive.getFolderId()) + time.advanceDay() + + # Fail to upload + with pytest.raises(BackupFolderInaccessible): + await drive.save(*await backup_helper.createFile()) + + +@pytest.mark.asyncio +async def test_folder_error_on_query_lost_permission(time, drive: DriveSource, config: Config, google: SimulatedGoogle): + # Make the folder + await drive.get() + + # Require a specified folder so we don't query + config.override(Setting.SPECIFY_BACKUP_FOLDER, "true") + config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, "something") + + # Make the folder inaccessible + google.lostPermission.append(await 
drive.getFolderId()) + + # It shoudl fail! + with pytest.raises(BackupFolderInaccessible): + await drive.get() + + +@pytest.mark.asyncio +async def test_folder_error_on_query_deleted(time, drive: DriveSource, config: Config, server): + # Make the folder + await drive.get() + + # Require a specified folder so we don't query + config.override(Setting.SPECIFY_BACKUP_FOLDER, "true") + config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, "something") + + # Delete the folder + await drive.drivebackend.delete(await drive.getFolderId()) + + # It should fail! + with pytest.raises(BackupFolderInaccessible): + await drive.get() + + +@pytest.mark.asyncio +async def test_backup_folder_not_specified(time, drive: DriveSource, config: Config, server, backup_helper): + config.override(Setting.SPECIFY_BACKUP_FOLDER, "true") + + with pytest.raises(BackupFolderMissingError): + await drive.get() + + from_backup, data = await backup_helper.createFile() + with pytest.raises(BackupFolderMissingError): + await drive.save(from_backup, data) + + config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, "something") + with pytest.raises(BackupFolderMissingError): + await drive.get() + with pytest.raises(BackupFolderMissingError): + await drive.save(from_backup, data) + + +@pytest.mark.asyncio +async def test_folder_invalid_when_specified(time, drive: DriveSource, config: Config, server): + await drive.get() + + config.override(Setting.SPECIFY_BACKUP_FOLDER, "true") + await drive.drivebackend.update(await drive.getFolderId(), {"trashed": True}) + + time.advanceDay() + + with pytest.raises(BackupFolderInaccessible): + await drive.get() + + +@pytest.mark.asyncio +async def test_no_folder_when_required(time, drive: DriveSource, config: Config): + config.override(Setting.SPECIFY_BACKUP_FOLDER, "true") + with pytest.raises(BackupFolderMissingError): + await drive.get() + + +@pytest.mark.asyncio +async def test_existing_folder_already_exists(time, drive: DriveSource, config: Config, folder_finder: FolderFinder): + await drive.get() + drive.checkBeforeChanges() + + # Reset folder, try again + folder_finder.reset() + await drive.get() + with pytest.raises(ExistingBackupFolderError): + drive.checkBeforeChanges() + + +@pytest.mark.asyncio +async def test_existing_resolved_use_existing(time, drive: DriveSource, config: Config, folder_finder: FolderFinder): + await drive.get() + drive.checkBeforeChanges() + + folder_id = await drive.getFolderId() + + # Reset folder, try again + folder_finder.reset() + await drive.get() + with pytest.raises(ExistingBackupFolderError): + drive.checkBeforeChanges() + + folder_finder.resolveExisting(True) + await drive.get() + drive.checkBeforeChanges() + assert await drive.getFolderId() == folder_id + + +@pytest.mark.asyncio +async def test_existing_resolved_create_new(time, drive: DriveSource, config: Config, folder_finder: FolderFinder): + await drive.get() + drive.checkBeforeChanges() + + folder_id = await drive.getFolderId() + + # Reset folder, try again + folder_finder.reset() + await drive.get() + with pytest.raises(ExistingBackupFolderError): + drive.checkBeforeChanges() + + folder_finder.resolveExisting(False) + await drive.get() + drive.checkBeforeChanges() + assert await drive.getFolderId() != folder_id + + +@pytest.mark.asyncio +async def test_cred_refresh_with_secret(drive: DriveSource, google: SimulatedGoogle, time: FakeTime, config: Config): + google.resetDriveAuth() + with open(config.get(Setting.CREDENTIALS_FILE_PATH), "w") as f: + creds = google.creds() + creds._secret = 
config.get(Setting.DEFAULT_DRIVE_CLIENT_SECRET) + json.dump(creds.serialize(), f) + drive.drivebackend.tryLoadCredentials() + await drive.get() + old_creds = drive.drivebackend.creds + + # valid creds should be reused + await drive.get() + assert old_creds.access_token == drive.drivebackend.creds.access_token + + # then refreshed when they expire + time.advanceDay() + await drive.get() + assert old_creds.access_token != drive.drivebackend.creds.access_token + + # verify the client_secret is kept + with open(config.get(Setting.CREDENTIALS_FILE_PATH)) as f: + assert "client_secret" in json.load(f) + + +@pytest.mark.asyncio +async def test_cred_refresh_no_secret(drive: DriveSource, google: SimulatedGoogle, time: FakeTime, config: Config): + drive.saveCreds(google.creds()) + await drive.get() + old_creds = drive.drivebackend.creds + await drive.get() + assert old_creds.access_token == drive.drivebackend.creds.access_token + time.advanceDay() + await drive.get() + assert old_creds.access_token != drive.drivebackend.creds.access_token + with open(config.get(Setting.CREDENTIALS_FILE_PATH)) as f: + assert "client_secret" not in json.load(f) + + +@pytest.mark.asyncio +async def test_cred_refresh_upgrade_default_client(drive: DriveSource, server: SimulationServer, time: FakeTime, config: Config): + return + # TODO: Enable this when we start removing the default client_secret + config.override(Setting.DEFAULT_DRIVE_CLIENT_ID, server.getSetting("drive_client_id")) + creds = server.getCurrentCreds() + creds_with_secret = server.getCurrentCreds() + creds_with_secret._secret = server.getSetting("drive_client_secret") + with open(config.get(Setting.CREDENTIALS_FILE_PATH), "w") as f: + json.dump(creds_with_secret.serialize(), f) + + # reload the creds + drive.drivebackend.tryLoadCredentials() + + # Verify the "client secret" was removed + with open(config.get(Setting.CREDENTIALS_FILE_PATH)) as f: + saved_creds = json.load(f) + assert saved_creds == creds.serialize() + + await drive.get() + old_creds = drive.drivebackend.cred_bearer + await drive.get() + assert old_creds == drive.drivebackend.cred_bearer + time.advanceDay() + await drive.get() + assert old_creds != drive.drivebackend.cred_bearer + + +@pytest.mark.asyncio +async def test_cant_reach_refresh_server(drive: DriveSource, server: SimulationServer, config: Config, time): + config.override(Setting.TOKEN_SERVER_HOSTS, "http://lkasdpoiwehjhcty.com") + drive.drivebackend.creds._secret = None + time.advanceDay() + with pytest.raises(CredRefreshMyError) as error: + await drive.get() + assert error.value.data() == {"reason": "Couldn't communicate with lkasdpoiwehjhcty.com"} + + +@pytest.mark.asyncio +async def test_refresh_problem_with_google(drive: DriveSource, interceptor: RequestInterceptor, config: Config, time): + time.advanceDay() + interceptor.setError(".*/oauth2/v4/token.*", status=510) + drive.drivebackend.creds._secret = None + with pytest.raises(CredRefreshGoogleError) as error: + await drive.get() + assert error.value.data() == {"from_google": "Google returned HTTP 510"} + + +@pytest.mark.asyncio +async def test_ignore_trashed_backups(time, drive: DriveSource, config: Config, server, backup_helper): + backup = await backup_helper.createFile() + drive_backup = await drive.save(*backup) + + assert len(await drive.get()) == 1 + await drive.drivebackend.update(drive_backup.id(), {"trashed": True}) + + assert len(await drive.get()) == 0 + + +@pytest.mark.asyncio +async def test_download_timeout(time, drive: DriveSource, config: Config, interceptor: 
RequestInterceptor, backup_helper):
+    config.override(Setting.DOWNLOAD_TIMEOUT_SECONDS, 0.1)
+    from_backup, data = await backup_helper.createFile()
+    backup = await drive.save(from_backup, data)
+
+    # Verify the uploaded bytes are identical
+    from_backup.addSource(backup)
+    interceptor.setSleep(URL_MATCH_FILE, sleep=100)
+    download = await drive.read(from_backup)
+    data.position(0)
+
+    with pytest.raises(GoogleTimeoutError):
+        await compareStreams(data, download)
+
+
+@pytest.mark.asyncio
+async def test_resume_session_ignored_on_http404(time, drive: DriveSource, config: Config, server: SimulationServer, backup_helper: BackupHelper, interceptor: RequestInterceptor):
+    from_backup, data = await backup_helper.createFile()
+
+    # Configure the upload to fail
+    interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 500)
+    with pytest.raises(GoogleInternalError):
+        await drive.save(from_backup, data)
+
+    # Verify a request was made to start the upload
+    assert server.wasUrlRequested(URL_START_UPLOAD)
+    location = drive.drivebackend.last_attempt_location
+    assert location is not None
+
+    server.urls.clear()
+    interceptor.clear()
+    data.position(0)
+
+    interceptor.setError(location, 404)
+    with pytest.raises(GoogleUnexpectedError):
+        await drive.save(from_backup, data)
+    assert interceptor.urlWasCalled(URL(location).path)
+
+    data.position(0)
+
+    # Location should have been reset, and another attempt should succeed using a new url.
+    assert drive.drivebackend.last_attempt_location is None
+    await drive.save(from_backup, data)
+
+
+@pytest.mark.asyncio
+async def test_resume_session_ignored_on_http410(time, drive: DriveSource, config: Config, server: SimulationServer, backup_helper: BackupHelper, interceptor: RequestInterceptor):
+    from_backup, data = await backup_helper.createFile()
+
+    # Configure the upload to fail
+    interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 500)
+    with pytest.raises(GoogleInternalError):
+        await drive.save(from_backup, data)
+
+    # Verify a request was made to start the upload
+    assert server.wasUrlRequested(URL_START_UPLOAD)
+    location = drive.drivebackend.last_attempt_location
+    assert location is not None
+
+    server.urls.clear()
+    interceptor.clear()
+    data.position(0)
+
+    data.position(0)
+
+    interceptor.setError(location, 410)
+    with pytest.raises(GoogleUnexpectedError):
+        await drive.save(from_backup, data)
+    assert interceptor.urlWasCalled(URL(location).path)
+
+    # Location should have been reset, and another attempt should succeed using a new url.
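+    # (HTTP 404/410 on the cached session URI is treated as the session being gone for good:
+    # the saved location is dropped rather than retried, so the save() below has to start a
+    # fresh resumable upload.)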
+    assert drive.drivebackend.last_attempt_location is None
+    await drive.save(from_backup, data)
+
+
+@pytest.mark.asyncio
+async def test_resume_session_reused_on_http408(time, drive: DriveSource, config: Config, server: SimulationServer, backup_helper: BackupHelper, interceptor: RequestInterceptor):
+    from_backup, data = await backup_helper.createFile()
+
+    # Configure the upload to fail
+    interceptor.setError(URL_MATCH_UPLOAD_PROGRESS, 408)
+    with pytest.raises(GoogleTimeoutError):
+        await drive.save(from_backup, data)
+
+    # Verify a request was made to start the upload
+    assert server.wasUrlRequested(URL_START_UPLOAD)
+    location = drive.drivebackend.last_attempt_location
+    assert location is not None
+
+    server.urls.clear()
+    interceptor.clear()
+    data.position(0)
+
+    await drive.save(from_backup, data)
+    assert interceptor.urlWasCalled(URL(location).path)
+
+
+@pytest.mark.asyncio
+async def test_shared_drive_manager(drive: DriveSource, time: FakeTime, folder_finder: FolderFinder, backup_helper: BackupHelper, drive_requests: DriveRequests):
+    # Make a shared drive folder
+    folder_metadata = {
+        'name': "Shared Drive",
+        'mimeType': FOLDER_MIME_TYPE,
+        'driveId': "test_shared_drive_id",
+        'appProperties': {
+            "backup_folder": "true",
+        },
+    }
+    shared_drive_folder_id = (await drive.drivebackend.createFolder(folder_metadata))['id']
+    await folder_finder.save(shared_drive_folder_id)
+
+    # Save a backup
+    from_backup, data = await backup_helper.createFile()
+    backup = await drive.save(from_backup, data)
+    assert len(await drive.get()) == 1
+    from_backup.addSource(backup)
+
+    # Delete the backup, and verify it was deleted instead of trashed
+    await drive.delete(from_backup)
+    assert len(await drive.get()) == 0
+    with pytest.raises(ClientResponseError) as exc:
+        await drive_requests.get(backup.id())
+    assert exc.value.code == 404
+
+
+@pytest.mark.asyncio
+async def test_shared_drive_content_manager(drive: DriveSource, time: FakeTime, folder_finder: FolderFinder, backup_helper: BackupHelper, drive_requests: DriveRequests):
+    # Make a shared drive folder where the user has capabilities consistent with a "content manager" role.
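+    # (The capabilities below model that role: children can be trashed but not permanently
+    # deleted, so the delete at the end of this test is expected to fall back to trashing.)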
+ folder_metadata = { + 'name': "Shared Drive", + 'mimeType': FOLDER_MIME_TYPE, + 'driveId': "test_shared_drive_id", + 'appProperties': { + "backup_folder": "true", + }, + 'capabilities': { + 'canDeleteChildren': False, + 'canTrashChildren': True, + 'canDelete': False, + 'canTrash': True, + } + } + + shared_drive_folder_id = (await drive.drivebackend.createFolder(folder_metadata))['id'] + await folder_finder.save(shared_drive_folder_id) + + # Save a backup + from_backup, data = await backup_helper.createFile() + backup = await drive.save(from_backup, data) + assert len(await drive.get()) == 1 + from_backup.addSource(backup) + + # Delete the backup, and verify it was onyl trashed + await drive.delete(from_backup) + assert len(await drive.get()) == 0 + assert (await drive_requests.get(backup.id()))['trashed'] + + +@pytest.mark.asyncio +async def test_note(drive: DriveSource, time: FakeTime, folder_finder: FolderFinder, backup_helper: BackupHelper, drive_requests: DriveRequests): + from_backup, data = await backup_helper.createFile() + backup = await drive.save(from_backup, data) + assert backup.note() is None + + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + await drive.note(full, "new note") + assert backup.note() == "new note" + + await drive.note(full, None) + assert backup.note() is None + + +@pytest.mark.asyncio +async def test_note_truncation(drive: DriveSource, time: FakeTime, folder_finder: FolderFinder, backup_helper: BackupHelper, drive_requests: DriveRequests): + from_backup, data = await backup_helper.createFile() + backup = await drive.save(from_backup, data) + assert backup.note() is None + + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + long_note = "".join(["䷷" for x in range(500)]) + await drive.note(full, long_note) + assert backup.note() == "".join(["䷷" for x in range(38)]) + + +@pytest.mark.asyncio +async def test_note_creation(drive: DriveSource, time: FakeTime, folder_finder: FolderFinder, backup_helper: BackupHelper, drive_requests: DriveRequests): + from_backup, data = await backup_helper.createFile(note="test") + backup = await drive.save(from_backup, data) + assert backup.note() == "test" diff --git a/hassio-google-drive-backup/tests/test_duration_parser.py b/hassio-google-drive-backup/tests/test_duration_parser.py new file mode 100644 index 0000000..9d91920 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_duration_parser.py @@ -0,0 +1,89 @@ +from backup.config.durationparser import DurationParser +from datetime import timedelta + + +def test_parse_days(): + parser = DurationParser() + assert parser.parse("1 days") == timedelta(days=1) + assert parser.parse("5 days") == timedelta(days=5) + assert parser.parse("5 d") == timedelta(days=5) + assert parser.parse("5d") == timedelta(days=5) + assert parser.parse("5.0d") == timedelta(days=5) + assert parser.parse("5.0day") == timedelta(days=5) + assert parser.parse("5.0 day") == timedelta(days=5) + assert parser.parse("5.5 days") == timedelta(days=5, hours=12) + + +def test_parse_hours(): + parser = DurationParser() + assert parser.parse("1 hours") == timedelta(hours=1) + assert parser.parse("5 hours") == timedelta(hours=5) + assert parser.parse("5 h") == timedelta(hours=5) + assert parser.parse("5hour") == timedelta(hours=5) + assert parser.parse("5.0h") == timedelta(hours=5) + assert parser.parse("5.0 hour") == timedelta(hours=5) + assert parser.parse("5.5 h") == 
timedelta(hours=5, minutes=30) + + +def test_parse_minutes(): + parser = DurationParser() + assert parser.parse("1 minutes") == timedelta(minutes=1) + assert parser.parse("5 min") == timedelta(minutes=5) + assert parser.parse("5 m") == timedelta(minutes=5) + assert parser.parse("5mins") == timedelta(minutes=5) + assert parser.parse("5.0m") == timedelta(minutes=5) + assert parser.parse("5.0 min") == timedelta(minutes=5) + assert parser.parse("5.5 m") == timedelta(minutes=5, seconds=30) + + +def test_parse_seconds(): + parser = DurationParser() + assert parser.parse("1 seconds") == timedelta(seconds=1) + assert parser.parse("5 sec") == timedelta(seconds=5) + assert parser.parse("5 s") == timedelta(seconds=5) + assert parser.parse("5secs") == timedelta(seconds=5) + assert parser.parse("5.0s") == timedelta(seconds=5) + assert parser.parse("5.0 secs") == timedelta(seconds=5) + assert parser.parse("5.5 s") == timedelta(seconds=5, milliseconds=500) + + +def test_parse_multiple(): + parser = DurationParser() + assert parser.parse("1 day, 5 hours, 30 seconds") == timedelta(days=1, hours=5, seconds=30) + assert parser.parse("1 day 5 hours 30 seconds") == timedelta(days=1, hours=5, seconds=30) + assert parser.parse("1d 5 hours 30s") == timedelta(days=1, hours=5, seconds=30) + assert parser.parse("1d 5h 30s") == timedelta(days=1, hours=5, seconds=30) + assert parser.parse("5m 1d 5h 30s") == timedelta(days=1, hours=5, minutes=5, seconds=30) + + +def test_format(): + parser = DurationParser() + assert parser.format(timedelta(days=1)) == "1 days" + assert parser.format(timedelta(seconds=86400)) == "1 days" + assert parser.format(timedelta(hours=1)) == "1 hours" + assert parser.format(timedelta(minutes=1)) == "1 minutes" + assert parser.format(timedelta(seconds=60)) == "1 minutes" + assert parser.format(timedelta(seconds=5)) == "5 seconds" + assert parser.format(timedelta(seconds=1)) == "1 seconds" + assert parser.format(timedelta(days=5, hours=6, minutes=7)) == "5 days, 6 hours, 7 minutes" + assert parser.format(timedelta(days=5, hours=6, minutes=7, seconds=8)) == "5 days, 6 hours, 7 minutes, 8 seconds" + + +def test_back_and_forth(): + doTestConvert(timedelta(hours=5)) + doTestConvert(timedelta(minutes=600)) + doTestConvert(timedelta(days=30)) + doTestConvert(timedelta(days=5, minutes=6, hours=10, seconds=20)) + + +def doTestConvert(duration): + parser = DurationParser() + assert parser.parse(parser.format(duration)) == duration + + +def test_convert_empty_seconds(): + parser = DurationParser() + assert parser.parse("") == timedelta(seconds=0) + assert parser.parse("0") == timedelta(seconds=0) + assert parser.parse("30") == timedelta(seconds=30) + assert parser.parse(str(60 * 60)) == timedelta(seconds=60 * 60) diff --git a/hassio-google-drive-backup/tests/test_durationasstringvalidator.py b/hassio-google-drive-backup/tests/test_durationasstringvalidator.py new file mode 100644 index 0000000..933fe2b --- /dev/null +++ b/hassio-google-drive-backup/tests/test_durationasstringvalidator.py @@ -0,0 +1,28 @@ +from backup.config import DurationAsStringValidator +from backup.exceptions import InvalidConfigurationValue +from datetime import timedelta +import pytest + + +def test_minimum(): + parser = DurationAsStringValidator("test", minimum=10) + assert parser.validate("11 seconds") == 11 + assert parser.validate(11) == 11 + with pytest.raises(InvalidConfigurationValue): + parser.validate("9 seconds") + + +def test_maximum(): + parser = DurationAsStringValidator("test", maximum=10) + assert parser.validate("9 
seconds") == 9 + assert parser.validate(9) == 9 + with pytest.raises(InvalidConfigurationValue): + parser.validate("11 seconds") + assert parser.formatForUi(9) == "9 seconds" + + +def test_base(): + parser = DurationAsStringValidator("test", base_seconds=60) + assert parser.validate("60 seconds") == 1 + assert parser.validate(60) == 60 + assert parser.formatForUi(1) == "1 minutes" diff --git a/hassio-google-drive-backup/tests/test_estimator.py b/hassio-google-drive-backup/tests/test_estimator.py new file mode 100644 index 0000000..d2f11eb --- /dev/null +++ b/hassio-google-drive-backup/tests/test_estimator.py @@ -0,0 +1,13 @@ +import pytest +from backup.util import Estimator +from backup.config import Config, Setting +from backup.exceptions import LowSpaceError + +@pytest.mark.asyncio +async def test_check_space(estimator: Estimator, coord, config: Config): + estimator.refresh() + estimator.checkSpace(coord.backups()) + + config.override(Setting.LOW_SPACE_THRESHOLD, estimator.getBytesFree() + 1) + with pytest.raises(LowSpaceError): + estimator.checkSpace(coord.backups()) diff --git a/hassio-google-drive-backup/tests/test_exceptions.py b/hassio-google-drive-backup/tests/test_exceptions.py new file mode 100644 index 0000000..2af73ad --- /dev/null +++ b/hassio-google-drive-backup/tests/test_exceptions.py @@ -0,0 +1,49 @@ +from bs4 import BeautifulSoup +import backup.exceptions +import inspect +import pytest +from backup.exceptions import GoogleCredGenerateError, KnownError, KnownTransient, SimulatedError, GoogleDrivePermissionDenied, InvalidConfigurationValue, LogicError, ProtocolError, NoBackup, NotUploadable, PleaseWait, UploadFailed +from .conftest import ReaderHelper + + +@pytest.mark.asyncio +async def test_verify_coverage(ui_server, reader: ReaderHelper): + # Get the list of exception codes + ignore = [ + KnownError, + KnownTransient, + SimulatedError, + GoogleDrivePermissionDenied, + InvalidConfigurationValue, + LogicError, + NoBackup, + NotUploadable, + PleaseWait, + ProtocolError, + UploadFailed, + GoogleCredGenerateError, + ] + codes = {} + for name, obj in inspect.getmembers(backup.exceptions): + if inspect.isclass(obj) and (KnownError in obj.__bases__) and obj not in ignore: + codes[obj().code()] = obj + + # Get the list of ui dialogs + document = await reader.get("", json=False) + page = BeautifulSoup(document, 'html.parser') + + dialogs = {} + for div in page.find_all("div"): + cls = div.get("class") + if cls is None: + continue + if "error_card" in cls: + for specific_class in cls: + if specific_class in dialogs: + dialogs[specific_class] = dialogs[specific_class] + 1 + else: + dialogs[specific_class] = 1 + + # Make sure exactly one dialog has the class + for code in codes.keys(): + assert dialogs[code] == 1 diff --git a/hassio-google-drive-backup/tests/test_exchanger.py b/hassio-google-drive-backup/tests/test_exchanger.py new file mode 100644 index 0000000..9293e21 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_exchanger.py @@ -0,0 +1,186 @@ +import pytest + +from dev.simulationserver import SimulationServer, RequestInterceptor +from backup.time import Time +from backup.config import Config, Setting +from backup.drive import DriveRequests +from backup.exceptions import CredRefreshMyError, GoogleCredentialsExpired, CredRefreshGoogleError +from backup.tracing_session import TracingSession +from yarl import URL + + +@pytest.mark.asyncio +async def test_correct_host(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: 
DriveRequests, server_url, interceptor: RequestInterceptor): + # Verify the correct endpoitns get called for a successful request + session.record = True + await drive_requests.exchanger.refresh(drive_requests.creds) + assert interceptor.urlWasCalled("/drive/refresh") + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_some_bad_hosts(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, server_url, interceptor: RequestInterceptor): + session.record = True + config.override(Setting.EXCHANGER_TIMEOUT_SECONDS, 1) + config.override(Setting.TOKEN_SERVER_HOSTS, "https://this.goes.nowhere.info," + str(server_url)) + + await drive_requests.exchanger.refresh(drive_requests.creds) + assert interceptor.urlWasCalled("/drive/refresh") + + # Verify both hosts were checked + session._records[0]['url'] == URL("https://this.goes.nowhere.info").with_path("/drive/refresh") + session._records[1]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_all_bad_hosts(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor): + session.record = True + config.override(Setting.EXCHANGER_TIMEOUT_SECONDS, 1) + config.override(Setting.TOKEN_SERVER_HOSTS, "https://this.goes.nowhere.info,http://also.a.bad.host") + + with pytest.raises(CredRefreshMyError) as e: + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Error should be about the last host name + assert e.value.reason.index("also.a.bad.host") >= 0 + + # Verify both hosts were checked + session._records[0]['url'] == URL("https://this.goes.nowhere.info").with_path("/drive/refresh") + session._records[1]['url'] == URL("http://also.a.bad.host").with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_exchange_timeout(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setSleep("/drive/refresh", sleep=10) + + config.override(Setting.EXCHANGER_TIMEOUT_SECONDS, 0.1) + + with pytest.raises(CredRefreshMyError) as e: + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Error should be about the last host name + assert e.value.reason == "Timed out communicating with localhost" + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_exchange_invalid_creds(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + drive_requests.creds._refresh_token = "fail" + with pytest.raises(GoogleCredentialsExpired): + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_503_with_error(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setError("^/drive/refresh$", 503, response={'error': 'test_value'}) + with pytest.raises(CredRefreshGoogleError) as e: + await drive_requests.exchanger.refresh(drive_requests.creds) + assert e.value.message() == 
"Couldn't refresh your credentials with Google because: 'test_value'" + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_503_invalid_grant(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setError("^/drive/refresh$", 503, response={'error': 'invalid_grant'}) + with pytest.raises(GoogleCredentialsExpired): + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_503_with_invalid_json(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setError("^/drive/refresh$", 503, response={'ignored': 'nothing'}) + with pytest.raises(CredRefreshMyError) as e: + await drive_requests.exchanger.refresh(drive_requests.creds) + assert e.value.message() == "Couldn't refresh Google Drive credentials because: HTTP 503 from localhost" + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_503_with_no_data(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setError("^/drive/refresh$", 503) + with pytest.raises(CredRefreshMyError) as e: + await drive_requests.exchanger.refresh(drive_requests.creds) + assert e.value.message() == "Couldn't refresh Google Drive credentials because: HTTP 503 from localhost" + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_401(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + interceptor.setError("^/drive/refresh$", 401) + with pytest.raises(GoogleCredentialsExpired): + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_fail_401_no_fall_through(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + config.override(Setting.TOKEN_SERVER_HOSTS, str(server_url) + "," + str(server_url)) + interceptor.setError("^/drive/refresh$", 401) + with pytest.raises(GoogleCredentialsExpired): + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + assert len(session._records) == 1 + + +@pytest.mark.asyncio +async def test_invalid_grant_no_fall_through(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + config.override(Setting.TOKEN_SERVER_HOSTS, str(server_url) + "," + str(server_url)) + interceptor.setError("^/drive/refresh$", 503, 
response={'error': 'invalid_grant'}) + with pytest.raises(GoogleCredentialsExpired): + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + assert len(session._records) == 1 + + +@pytest.mark.asyncio +async def test_timeout_fall_through(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + config.override(Setting.EXCHANGER_TIMEOUT_SECONDS, 0.1) + config.override(Setting.TOKEN_SERVER_HOSTS, str(server_url) + "," + str(server_url)) + interceptor.setSleep("^/drive/refresh$", sleep=10, wait_for=1) + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + session._records[1]['url'] == server_url.with_path("/drive/refresh") + + +@pytest.mark.asyncio +async def test_anything_else_through(time: Time, session: TracingSession, config: Config, server: SimulationServer, drive_requests: DriveRequests, interceptor: RequestInterceptor, server_url: URL): + session.record = True + config.override(Setting.TOKEN_SERVER_HOSTS, str(server_url) + "," + str(server_url)) + interceptor.setError("^/drive/refresh$", status=500, fail_for=1) + await drive_requests.exchanger.refresh(drive_requests.creds) + + # Verify both hosts were checked + session._records[0]['url'] == server_url.with_path("/drive/refresh") + session._records[1]['url'] == server_url.with_path("/drive/refresh") diff --git a/hassio-google-drive-backup/tests/test_file.py b/hassio-google-drive-backup/tests/test_file.py new file mode 100644 index 0000000..3c93e35 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_file.py @@ -0,0 +1,60 @@ + + +from backup.file import File +from os.path import exists, join +from os import remove +import pytest +import json + +TEST_DATA = "when you press my special key I play a little melody" + + +def readfile(path): + with open(path) as f: + return f.read() + + +@pytest.mark.asyncio +async def test_basic(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + backup_path = join(tmpdir, "test.json.backup") + + assert not File.exists(path) + File.write(path, TEST_DATA) + assert File.exists(path) + assert readfile(path) == TEST_DATA + assert readfile(backup_path) == TEST_DATA + assert File.read(path) == TEST_DATA + + File.delete(path) + assert not exists(path) + assert not exists(backup_path) + assert not File.exists(path) + + +@pytest.mark.asyncio +async def test_file_deleted(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + File.write(path, TEST_DATA) + remove(path) + assert File.read(path) == TEST_DATA + + +@pytest.mark.asyncio +async def test_backup_deleted(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + backup_path = join(tmpdir, "test.json.backup") + File.write(path, TEST_DATA) + remove(backup_path) + assert File.read(path) == TEST_DATA + +@pytest.mark.asyncio +async def test_decode_error(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + File.write(path, TEST_DATA) + with open(path, "w"): + # emptys the file contents + pass + with open(path) as f: + assert len(f.read()) == 0 + assert File.read(path) == TEST_DATA diff --git a/hassio-google-drive-backup/tests/test_hasource.py b/hassio-google-drive-backup/tests/test_hasource.py new file mode 100644 index 0000000..2cd7702 --- /dev/null +++ 
b/hassio-google-drive-backup/tests/test_hasource.py @@ -0,0 +1,1168 @@ +import asyncio +from datetime import timedelta +import os + +import pytest +from aiohttp.client_exceptions import ClientResponseError + +from backup.config import Config, Setting, CreateOptions, Version +from backup.const import SOURCE_HA +from backup.exceptions import (HomeAssistantDeleteError, BackupInProgress, + BackupPasswordKeyInvalid, UploadFailed, SupervisorConnectionError, SupervisorPermissionError, SupervisorTimeoutError, UnknownNetworkStorageError, InactiveNetworkStorageError) +from backup.util import GlobalInfo, DataCache, KEY_CREATED, KEY_LAST_SEEN, KEY_NAME +from backup.ha import HaSource, PendingBackup, EVENT_BACKUP_END, EVENT_BACKUP_START, HABackup, Password, AddonStopper +from backup.model import DummyBackup +from dev.simulationserver import SimulationServer +from .faketime import FakeTime +from .helpers import all_addons, all_folders, createBackupTar, getTestStream +from dev.simulated_supervisor import SimulatedSupervisor, URL_MATCH_SELF_OPTIONS, URL_MATCH_START_ADDON, URL_MATCH_STOP_ADDON, URL_MATCH_BACKUP_FULL, URL_MATCH_BACKUP_DELETE, URL_MATCH_MISC_INFO, URL_MATCH_BACKUP_DOWNLOAD, URL_MATCH_BACKUPS, URL_MATCH_SNAPSHOT, URL_MATCH_MOUNT +from dev.request_interceptor import RequestInterceptor +from backup.model import Model +from backup.time import Time +from yarl import URL + + +@pytest.mark.asyncio +async def test_sync_empty(ha) -> None: + assert len(await ha.get()) == 0 + + +@pytest.mark.asyncio +async def test_CRUD(ha: HaSource, time, interceptor: RequestInterceptor, data_cache: DataCache) -> None: + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + + assert backup.name() == "Test Name" + assert type(backup) is HABackup + assert not backup.retained() + assert backup.backupType() == "full" + assert not backup.protected() + assert backup.name() == "Test Name" + assert backup.source() == SOURCE_HA + assert not backup.ignore() + assert backup.madeByTheAddon() + assert "pending" not in data_cache.backups + + # read the item directly, its metadata should match + from_ha = await ha.harequests.backup(backup.slug()) + assert from_ha.size() == backup.size() + assert from_ha.slug() == backup.slug() + assert from_ha.source() == SOURCE_HA + + backups = await ha.get() + assert len(backups) == 1 + assert backup.slug() in backups + + full = DummyBackup(from_ha.name(), from_ha.date(), + from_ha.size(), from_ha.slug(), "dummy") + full.addSource(backup) + + # download the item, its bytes should match up + download = await ha.read(full) + await download.setup() + direct_download = await ha.harequests.download(backup.slug()) + await direct_download.setup() + while True: + from_file = await direct_download.read(1024 * 1024) + from_download = await download.read(1024 * 1024) + if len(from_file.getbuffer()) == 0: + assert len(from_download.getbuffer()) == 0 + break + assert from_file.getbuffer() == from_download.getbuffer() + + # update retention + assert not backup.retained() + await ha.retain(full, True) + assert (await ha.get())[full.slug()].retained() + await ha.retain(full, False) + assert not (await ha.get())[full.slug()].retained() + + # Delete the item, make sure its gone + await ha.delete(full) + assert full.getSource(ha.name()) is None + backups = await ha.get() + assert len(backups) == 0 + + +@pytest.mark.asyncio +@pytest.mark.timeout(10) +async def test_pending_backup_nowait(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, 
data_cache: DataCache): + interceptor.setSleep(URL_MATCH_BACKUP_FULL, sleep=5) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.1) + backup_immediate: PendingBackup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert isinstance(backup_immediate, PendingBackup) + backup_pending: HABackup = (await ha.get())['pending'] + + assert isinstance(backup_immediate, PendingBackup) + assert isinstance(backup_pending, PendingBackup) + assert backup_immediate is backup_pending + assert backup_immediate.name() == "Test Name" + assert backup_immediate.slug() == "pending" + assert not backup_immediate.uploadable() + assert backup_immediate.backupType() == "Full" + assert backup_immediate.source() == SOURCE_HA + assert backup_immediate.date() == time.now() + assert not backup_immediate.protected() + assert not backup_immediate.ignore() + assert backup_immediate.madeByTheAddon() + assert data_cache.backup("pending") == { + KEY_CREATED: time.now().isoformat(), + KEY_LAST_SEEN: time.now().isoformat(), + KEY_NAME: "Test Name" + } + + # Might be a little flaky but...whatever + await asyncio.wait({ha._pending_backup_task}) + + backups = await ha.get() + assert 'pending' not in backups + assert len(backups) == 1 + backup = next(iter(backups.values())) + assert isinstance(backup, HABackup) + assert not backup.ignore() + assert backup.madeByTheAddon() + assert data_cache.backup(backup.slug())[KEY_LAST_SEEN] == time.now().isoformat() + assert "pending" not in data_cache.backups + + return + # ignore events for now + assert supervisor.getEvents() == [ + (EVENT_BACKUP_START, { + 'backup_name': backup_immediate.name(), + 'backup_time': str(backup_immediate.date())})] + ha.backup_thread.join() + assert supervisor.getEvents() == [ + (EVENT_BACKUP_START, { + 'backup_name': backup_immediate.name(), + 'backup_time': str(backup_immediate.date())}), + (EVENT_BACKUP_END, { + 'completed': True, + 'backup_name': backup_immediate.name(), + 'backup_time': str(backup_immediate.date())})] + + +@pytest.mark.asyncio +async def test_pending_backup_already_in_progress(ha, time, config: Config, supervisor: SimulatedSupervisor): + await ha.create(CreateOptions(time.now(), "Test Name")) + assert len(await ha.get()) == 1 + + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + await supervisor.toggleBlockBackup() + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Test Name")) + backups = list((await ha.get()).values()) + assert len(backups) == 2 + backup = backups[1] + + assert isinstance(backup, PendingBackup) + assert backup.name() == "Pending Backup" + assert backup.slug() == "pending" + assert not backup.uploadable() + assert backup.backupType() == "unknown" + assert backup.source() == SOURCE_HA + assert backup.date() == time.now() + assert not backup.protected() + + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Test Name")) + + +@pytest.mark.asyncio +async def test_partial_backup(ha, time, server, config: Config): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + for folder in all_folders: + config.override(Setting.EXCLUDE_FOLDERS, folder) + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + + assert backup.backupType() == "partial" + for search in all_folders: + if search == folder: + assert search not in backup.details()['folders'] + else: + assert search in backup.details()['folders'] + + for addon in all_addons: + config.override(Setting.EXCLUDE_ADDONS, addon['slug']) + backup: HABackup = await 
ha.create(CreateOptions(time.now(), "Test Name")) + assert backup.backupType() == "partial" + list_of_addons = [] + for included in backup.details()['addons']: + list_of_addons.append(included['slug']) + for search in list_of_addons: + if search == addon: + assert search not in list_of_addons + else: + assert search in list_of_addons + + # excluding addon/folders that don't exist should actually make a full backup + config.override(Setting.EXCLUDE_ADDONS, "none,of.these,are.addons") + config.override(Setting.EXCLUDE_FOLDERS, "not,folders,either") + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert backup.backupType() == "full" + + +@pytest.mark.asyncio +async def test_backup_password(ha: HaSource, config: Config, time): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert not backup.protected() + + config.override(Setting.BACKUP_PASSWORD, 'test') + backup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert backup.protected() + + config.override(Setting.BACKUP_PASSWORD, 'test') + assert Password(ha.config).resolve() == 'test' + + config.override(Setting.BACKUP_PASSWORD, '!secret for_unit_tests') + assert Password(ha.config).resolve() == 'password value' + + config.override(Setting.BACKUP_PASSWORD, '!secret bad_key') + with pytest.raises(BackupPasswordKeyInvalid): + Password(config).resolve() + + config.override(Setting.SECRETS_FILE_PATH, "/bad/file/path") + config.override(Setting.BACKUP_PASSWORD, '!secret for_unit_tests') + with pytest.raises(BackupPasswordKeyInvalid): + Password(ha.config).resolve() + + +@pytest.mark.asyncio +async def test_backup_name(time: FakeTime, ha): + time.setNow(time.local(1985, 12, 6, 15, 8, 9, 10)) + await assertName(ha, time.now(), "{type}", "Full") + await assertName(ha, time.now(), "{year}", "1985") + await assertName(ha, time.now(), "{year_short}", "85") + await assertName(ha, time.now(), "{weekday}", "Friday") + await assertName(ha, time.now(), "{weekday_short}", "Fri") + await assertName(ha, time.now(), "{month}", "12") + await assertName(ha, time.now(), "{month_long}", "December") + await assertName(ha, time.now(), "{month_short}", "Dec") + await assertName(ha, time.now(), "{ms}", "000010") + await assertName(ha, time.now(), "{day}", "06") + await assertName(ha, time.now(), "{hr24}", "15") + await assertName(ha, time.now(), "{hr12}", "03") + await assertName(ha, time.now(), "{min}", "08") + await assertName(ha, time.now(), "{sec}", "09") + await assertName(ha, time.now(), "{ampm}", "PM") + await assertName(ha, time.now(), "{version_ha}", "ha version") + await assertName(ha, time.now(), "{version_hassos}", "hassos version") + await assertName(ha, time.now(), "{version_super}", "super version") + await assertName(ha, time.now(), "{date}", "12/06/85") + await assertName(ha, time.now(), "{time}", "15:08:09") + await assertName(ha, time.now(), "{datetime}", "Fri Dec 6 15:08:09 1985") + await assertName(ha, time.now(), "{isotime}", "1985-12-06T15:08:09.000010-05:00") + + +async def assertName(ha: HaSource, time, template: str, expected: str): + backup: HABackup = await ha.create(CreateOptions(time, template)) + assert backup.name() == expected + + +@pytest.mark.asyncio +async def test_default_name(time: FakeTime, ha, server): + backup = await ha.create(CreateOptions(time.now(), "")) + assert backup.name() == "Full Backup 1985-12-06 00:00:00" + + +@pytest.mark.asyncio +async def test_pending_backup_timeout(time: FakeTime, 
ha: HaSource, config: Config, interceptor: RequestInterceptor, supervisor: SimulatedSupervisor): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 1) + config.override(Setting.FAILED_BACKUP_TIMEOUT_SECONDS, 1) + config.override(Setting.PENDING_BACKUP_TIMEOUT_SECONDS, 1) + + async with supervisor._backup_inner_lock: + backup_immediate: PendingBackup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert isinstance(backup_immediate, PendingBackup) + assert backup_immediate.name() == "Test Name" + assert not await ha.check() + assert ha.pending_backup is backup_immediate + + await asyncio.wait({ha._pending_backup_task}) + assert ha.pending_backup is backup_immediate + assert await ha.check() + assert not await ha.check() + + time.advance(minutes=1) + assert await ha.check() + assert len(await ha.get()) == 0 + assert not await ha.check() + assert ha.pending_backup is None + assert backup_immediate.isStale() + + +@pytest.mark.asyncio +async def test_pending_backup_timeout_external(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + # now configure a snapshot to start outside of the addon + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + await supervisor.toggleBlockBackup() + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Ignored")) + backup_immediate = (await ha.get())['pending'] + await supervisor.toggleBlockBackup() + assert isinstance(backup_immediate, PendingBackup) + assert backup_immediate.name() == "Pending Backup" + assert await ha.check() + assert not await ha.check() + assert ha.pending_backup is backup_immediate + + # should clean up after a day, since we're still waiting on the backup thread. + time.advanceDay() + assert await ha.check() + assert len(await ha.get()) == 0 + + +@pytest.mark.asyncio +async def test_pending_backup_replaces_original(time, ha: HaSource, config: Config, supervisor: SimulatedSupervisor): + # now configure a snapshot to start outside of the addon + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + await supervisor.toggleBlockBackup() + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Ignored")) + backup_immediate = (await ha.get())['pending'] + await supervisor.toggleBlockBackup() + assert isinstance(backup_immediate, PendingBackup) + assert backup_immediate.name() == "Pending Backup" + assert await ha.check() + assert ha.pending_backup is backup_immediate + assert await ha.get() == {backup_immediate.slug(): backup_immediate} + + # create a new backup behind the scenes; the pending backup should get replaced with the new one + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", "hardlock": True}))['slug'] + results = await ha.get() + assert len(results) == 1 + assert slug in results + assert results[slug].name() == "Suddenly Appears" + assert not results[slug].retained() + + +def test_retryable_errors(): + # SOMEDAY: retryable errors should be retried in the future + pass + + +@pytest.mark.asyncio +async def test_retained_on_finish(ha: HaSource, server, time, config: Config, supervisor: SimulatedSupervisor): + async with supervisor._backup_inner_lock: + retention = {ha.name(): True} + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.0001) + pending = await ha.create(CreateOptions(time.now(), "Test Name", retention)) + results = await ha.get() + assert pending.name() == "Test Name" + assert results == {pending.slug(): pending} + assert type(pending) == PendingBackup + assert not ha._pending_backup_task.done() + + await 
asyncio.wait({ha._pending_backup_task}) + results = list((await ha.get()).values()) + assert len(results) == 1 + assert results[0].name() == "Test Name" + assert type(results[0]) == HABackup + assert results[0].retained() + assert config.isRetained(results[0].slug()) + + +@pytest.mark.asyncio +async def test_upload(time, ha, server, uploader): + data = await uploader.upload(createBackupTar("slug", "Test Name", time.now(), 1024 * 1024)) + dummy = DummyBackup("Test Name", time.now(), "src", "slug", "dummy") + backup: HABackup = await ha.save(dummy, data) + assert backup.name() == "Test Name" + assert backup.slug() == "slug" + assert backup.size() == round(data.size() / 1024.0 / 1024.0, 2) * 1024 * 1024 + assert backup.retained() + # ensure its still retained on a refresh + assert list((await ha.get()).values())[0].retained() + + +@pytest.mark.asyncio +async def test_corrupt_upload(time, ha, server, uploader): + # verify a corrupt backup throws the right exception + bad_data = await uploader.upload(getTestStream(100)) + dummy = DummyBackup("Test Name", time.now(), "src", "slug2", "dummy") + + with pytest.raises(UploadFailed): + await ha.save(dummy, bad_data) + + +@pytest.mark.asyncio +async def test_upload_wrong_slug(time, ha, server, uploader): + # verify a backup with the wrong slug also throws + bad_data = await uploader.upload(createBackupTar("wrongslug", "Test Name", time.now(), 1024 * 1024)) + dummy = DummyBackup("Test Name", time.now(), "src", "slug", "dummy") + with pytest.raises(UploadFailed): + await ha.save(dummy, bad_data) + + +@pytest.mark.asyncio +async def test_failed_backup(time, ha: HaSource, supervisor: SimulatedSupervisor, config: Config, interceptor: RequestInterceptor): + # create a blocking backup + interceptor.setError(URL_MATCH_BACKUP_FULL, 524) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0) + await supervisor.toggleBlockBackup() + backup_immediate = await ha.create(CreateOptions(time.now(), "Some Name")) + assert isinstance(backup_immediate, PendingBackup) + assert backup_immediate.name() == "Some Name" + assert not await ha.check() + assert not backup_immediate.isFailed() + await supervisor.toggleBlockBackup() + + # let the backup attempt to complete + await asyncio.wait({ha._pending_backup_task}) + + # verify it failed with the expected http error + assert backup_immediate.isFailed() + assert backup_immediate._exception.status == 524 + + backups = list((await ha.get()).values()) + assert len(backups) == 1 + assert backups[0] is backup_immediate + + # verify we can create a new backup immediately + interceptor.clear() + await ha.create(CreateOptions(time.now(), "Some Name")) + assert len(await ha.get()) == 1 + + +@pytest.mark.asyncio +async def test_failed_backup_retry(ha: HaSource, time: FakeTime, config: Config, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + # create a blocking backup + interceptor.setError(URL_MATCH_BACKUP_FULL, 524) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0) + await supervisor.toggleBlockBackup() + backup_immediate = await ha.create(CreateOptions(time.now(), "Some Name")) + assert isinstance(backup_immediate, PendingBackup) + assert backup_immediate.name() == "Some Name" + assert not await ha.check() + assert not backup_immediate.isFailed() + await supervisor.toggleBlockBackup() + + # let the backup attempt to complete + await asyncio.wait({ha._pending_backup_task}) + + # verify it failed with the expected http error + assert backup_immediate.isFailed() + assert backup_immediate._exception.status == 
524 + + assert await ha.check() + assert not await ha.check() + time.advance(seconds=config.get(Setting.FAILED_BACKUP_TIMEOUT_SECONDS)) + + # should trigger a sync after the failed backup timeout + assert await ha.check() + await ha.get() + assert not await ha.check() + + +@pytest.mark.asyncio +async def test_immediate_backup_failure(time: FakeTime, ha: HaSource, config: Config, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_BACKUP_FULL, 524) + with pytest.raises(ClientResponseError) as thrown: + await ha.create(CreateOptions(time.now(), "Some Name")) + assert thrown.value.status == 524 + + assert ha.pending_backup is not None + backups = list((await ha.get()).values()) + assert len(backups) == 1 + assert backups[0].isFailed() + + # Failed backup should go away after it times out + assert await ha.check() + assert not await ha.check() + time.advance(seconds=config.get( + Setting.FAILED_BACKUP_TIMEOUT_SECONDS) + 1) + assert await ha.check() + + assert len(await ha.get()) == 0 + assert not await ha.check() + + +@pytest.mark.asyncio +async def test_delete_error(time, ha: HaSource, interceptor: RequestInterceptor): + backup = await ha.create(CreateOptions(time.now(), "Some Name")) + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + interceptor.setError(URL_MATCH_BACKUP_DELETE, 400) + with pytest.raises(HomeAssistantDeleteError): + await ha.delete(full) + + interceptor.clear() + await ha.delete(full) + + +@pytest.mark.asyncio +async def test_hostname(time, ha: HaSource, server, global_info: GlobalInfo): + await ha.init() + assert global_info.url == "/hassio/ingress/self_slug" + + +@pytest.mark.asyncio +async def test_supervisor_error(time, ha: HaSource, server: SimulationServer, global_info: GlobalInfo): + await server.stop() + with pytest.raises(SupervisorConnectionError): + await ha.init() + + +@pytest.mark.asyncio +async def test_supervisor_permission_error(time, ha: HaSource, interceptor: RequestInterceptor, global_info: GlobalInfo): + interceptor.setError(URL_MATCH_MISC_INFO, 403) + with pytest.raises(SupervisorPermissionError): + await ha.init() + + interceptor.clear() + interceptor.setError(URL_MATCH_MISC_INFO, 404) + with pytest.raises(ClientResponseError): + await ha.init() + + +@pytest.mark.asyncio +async def test_download_timeout(ha: HaSource, time, interceptor: RequestInterceptor, config: Config) -> None: + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + from_ha = await ha.harequests.backup(backup.slug()) + full = DummyBackup(from_ha.name(), from_ha.date(), + from_ha.size(), from_ha.slug(), "dummy") + full.addSource(backup) + + interceptor.setSleep(URL_MATCH_BACKUP_DOWNLOAD, sleep=100) + config.override(Setting.DOWNLOAD_TIMEOUT_SECONDS, 1) + direct_download = await ha.harequests.download(backup.slug()) + + with pytest.raises(SupervisorTimeoutError): + await direct_download.setup() + await direct_download.read(1) + + +@pytest.mark.asyncio +async def test_start_and_stop_addon(ha: HaSource, time, interceptor: RequestInterceptor, config: Config, supervisor: SimulatedSupervisor, addon_stopper: AddonStopper) -> None: + addon_stopper.allowRun() + slug = "test_slug" + supervisor.installAddon(slug, "Test description") + config.override(Setting.STOP_ADDONS, slug) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.001) + + assert supervisor.addon(slug)["state"] == "started" + async with supervisor._backup_inner_lock: 
+ await ha.create(CreateOptions(time.now(), "Test Name")) + assert supervisor.addon(slug)["state"] == "stopped" + await ha._pending_backup_task + assert supervisor.addon(slug)["state"] == "started" + + +@pytest.mark.asyncio +async def test_start_and_stop_two_addons(ha: HaSource, time, interceptor: RequestInterceptor, config: Config, supervisor: SimulatedSupervisor, addon_stopper: AddonStopper) -> None: + addon_stopper.allowRun() + slug1 = "test_slug_1" + supervisor.installAddon(slug1, "Test description") + + slug2 = "test_slug_2" + supervisor.installAddon(slug2, "Test description") + config.override(Setting.STOP_ADDONS, ",".join([slug1, slug2])) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.001) + + assert supervisor.addon(slug1)["state"] == "started" + assert supervisor.addon(slug2)["state"] == "started" + async with supervisor._backup_inner_lock: + await ha.create(CreateOptions(time.now(), "Test Name")) + assert supervisor.addon(slug1)["state"] == "stopped" + assert supervisor.addon(slug2)["state"] == "stopped" + await ha._pending_backup_task + assert supervisor.addon(slug1)["state"] == "started" + assert supervisor.addon(slug2)["state"] == "started" + + +@pytest.mark.asyncio +async def test_stop_addon_failure(ha: HaSource, time, interceptor: RequestInterceptor, config: Config, supervisor: SimulatedSupervisor, addon_stopper: AddonStopper) -> None: + addon_stopper.allowRun() + slug = "test_slug" + supervisor.installAddon(slug, "Test description") + config.override(Setting.STOP_ADDONS, slug) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.001) + interceptor.setError(URL_MATCH_STOP_ADDON, 400) + + assert supervisor.addon(slug)["state"] == "started" + async with supervisor._backup_inner_lock: + await ha.create(CreateOptions(time.now(), "Test Name")) + assert supervisor.addon(slug)["state"] == "started" + await ha._pending_backup_task + assert supervisor.addon(slug)["state"] == "started" + assert len(await ha.get()) == 1 + + +@pytest.mark.asyncio +async def test_start_addon_failure(ha: HaSource, time, interceptor: RequestInterceptor, config: Config, supervisor: SimulatedSupervisor, addon_stopper: AddonStopper) -> None: + addon_stopper.allowRun() + slug = "test_slug" + supervisor.installAddon(slug, "Test description") + config.override(Setting.STOP_ADDONS, slug) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.001) + interceptor.setError(URL_MATCH_START_ADDON, 400) + + assert supervisor.addon(slug)["state"] == "started" + async with supervisor._backup_inner_lock: + await ha.create(CreateOptions(time.now(), "Test Name")) + assert supervisor.addon(slug)["state"] == "stopped" + await ha._pending_backup_task + assert supervisor.addon(slug)["state"] == "stopped" + assert len(await ha.get()) == 1 + + +@pytest.mark.asyncio +async def test_ignore_self_when_stopping(ha: HaSource, time, interceptor: RequestInterceptor, config: Config, supervisor: SimulatedSupervisor, addon_stopper: AddonStopper) -> None: + addon_stopper.allowRun() + slug = supervisor._addon_slug + config.override(Setting.STOP_ADDONS, slug) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.001) + interceptor.setError(URL_MATCH_START_ADDON, 400) + + assert supervisor.addon(slug)["state"] == "started" + async with supervisor._backup_inner_lock: + await ha.create(CreateOptions(time.now(), "Test Name")) + assert supervisor.addon(slug)["state"] == "started" + await ha._pending_backup_task + assert supervisor.addon(slug)["state"] == "started" + assert not interceptor.urlWasCalled(URL_MATCH_START_ADDON) + assert not 
interceptor.urlWasCalled(URL_MATCH_STOP_ADDON) + assert len(await ha.get()) == 1 + + +@pytest.mark.asyncio +async def test_dont_purge_pending_backup(ha: HaSource, time, config: Config, supervisor: SimulatedSupervisor, model: Model, interceptor): + config.override(Setting.MAX_BACKUPS_IN_HA, 4) + await ha.create(CreateOptions(time.now(), "Test Name 1")) + await ha.create(CreateOptions(time.now(), "Test Name 2")) + await ha.create(CreateOptions(time.now(), "Test Name 3")) + await ha.create(CreateOptions(time.now(), "Test Name 4")) + await model.sync(time.now()) + + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.1) + interceptor.setSleep(URL_MATCH_BACKUP_FULL, sleep=2) + await ha.create(CreateOptions(time.now(), "Test Name")) + backups = list((await ha.get()).values()) + assert len(backups) == 5 + backup = backups[4] + assert isinstance(backup, PendingBackup) + + # no backup should get purged yet because the pending backup isn't considered for purging. + await model.sync(time.now()) + backups = list((await ha.get()).values()) + assert len(backups) == 5 + + # Wait for the backup to finish, then verify one gets purged. + await ha._pending_backup_task + await model.sync(time.now()) + backups = list((await ha.get()).values()) + assert len(backups) == 4 + + +@pytest.mark.asyncio +async def test_matching_pending_backup(ha: HaSource, time: Time, config: Config, supervisor: SimulatedSupervisor, model: Model, interceptor, data_cache: DataCache): + ''' + A pending backup with the same name and within a day of the backup time should be considered + made by the addon + ''' + data_cache.backup("pending")[KEY_NAME] = "Test Backup" + data_cache.backup("pending")[KEY_CREATED] = time.now().isoformat() + data_cache.backup("pending")[KEY_LAST_SEEN] = time.now().isoformat() + + await supervisor.createBackup({"name": "Test Backup"}, date=time.now() - timedelta(hours=12)) + + backups = await ha.get() + assert len(backups) == 1 + backup = next(iter(backups.values())) + assert backup.madeByTheAddon() + + +@pytest.mark.asyncio +async def test_date_match_wrong_pending_backup(ha: HaSource, time: Time, config: Config, supervisor: SimulatedSupervisor, model: Model, interceptor, data_cache: DataCache): + ''' + A pending backup with the same name but with the wrong date should not be considered made by the addon + ''' + data_cache.backup("pending")[KEY_NAME] = "Test Backup" + data_cache.backup("pending")[KEY_CREATED] = time.now().isoformat() + data_cache.backup("pending")[KEY_LAST_SEEN] = time.now().isoformat() + + await supervisor.createBackup({"name": "Test Backup"}, date=time.now() - timedelta(hours=25)) + + backups = await ha.get() + assert len(backups) == 1 + backups = next(iter(backups.values())) + assert not backups.madeByTheAddon() + + +@pytest.mark.asyncio +async def test_name_wrong_match_pending_backup(ha: HaSource, time: Time, config: Config, supervisor: SimulatedSupervisor, model: Model, interceptor, data_cache: DataCache): + ''' + A pending backup with the wrong name should not be considered made by the addon + ''' + data_cache.backup("pending")[KEY_NAME] = "Test Backup" + data_cache.backup("pending")[KEY_CREATED] = time.now().isoformat() + data_cache.backup("pending")[KEY_LAST_SEEN] = time.now().isoformat() + + await supervisor.createBackup({"name": "Wrong Name"}, date=time.now() - timedelta(hours=12)) + + backups = await ha.get() + assert len(backups) == 1 + backup = next(iter(backups.values())) + assert not backup.madeByTheAddon() + + +@pytest.mark.asyncio +async def test_bump_last_seen(ha: 
HaSource, time: Time, config: Config, supervisor: SimulatedSupervisor, model: Model, interceptor, data_cache: DataCache): + backup = await ha.create(CreateOptions(time.now(), "Test Name")) + time.advance(days=1) + assert backup.slug() in await ha.get() + assert data_cache.backup(backup.slug())[KEY_LAST_SEEN] == time.now().isoformat() + + time.advance(days=1) + assert backup.slug() in await ha.get() + assert data_cache.backup(backup.slug())[KEY_LAST_SEEN] == time.now().isoformat() + + +@pytest.mark.asyncio +async def test_backup_supervisor_path(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + supervisor._super_version = Version(2021, 8) + await ha.get() + assert interceptor.urlWasCalled(URL_MATCH_BACKUPS) + assert not interceptor.urlWasCalled(URL_MATCH_SNAPSHOT) + + +@pytest.mark.asyncio +async def test_backup_supervisor_path_old_version(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + supervisor._super_version = Version(2021, 7) + await ha.get() + assert not interceptor.urlWasCalled(URL_MATCH_BACKUPS) + assert interceptor.urlWasCalled(URL_MATCH_SNAPSHOT) + + +@pytest.mark.asyncio +async def test_supervisor_host(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, server_url): + assert ha.harequests.getSupervisorURL() == server_url + + config.override(Setting.SUPERVISOR_URL, "") + assert ha.harequests.getSupervisorURL() == URL("http://hassio") + + os.environ['SUPERVISOR_TOKEN'] = "test" + assert ha.harequests.getSupervisorURL() == URL("http://supervisor") + + +@pytest.mark.asyncio +async def test_upgrade_default_config(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, server_url): + """Verify that converting the original default config options works as expected""" + + # overwrite the addon options with old values + supervisor._options = { + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 4, + Setting.DEPRECTAED_MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DEPRECATED_DAYS_BETWEEN_BACKUPS.value: 3, + Setting.USE_SSL.value: False, + } + + await ha.init() + + assert not config.mustSaveUpgradeChanges() + assert interceptor.urlWasCalled(URL_MATCH_SELF_OPTIONS) + + # Verify the config was upgraded + assert supervisor._options == { + Setting.MAX_BACKUPS_IN_HA.value: 4, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DAYS_BETWEEN_BACKUPS.value: 3, + Setting.CALL_BACKUP_SNAPSHOT.value: True, + } + + +@pytest.mark.asyncio +async def test_upgrade_all_config(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, server_url): + """Verify that converting all upgradeable config options works as expected""" + + # overwrite the addon options with old values + supervisor._options = { + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 1, + Setting.DEPRECTAED_MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 2, + Setting.DEPRECATED_DAYS_BETWEEN_BACKUPS.value: 5, + Setting.DEPRECTAED_IGNORE_OTHER_BACKUPS.value: True, + Setting.DEPRECTAED_IGNORE_UPGRADE_BACKUPS.value: True, + Setting.DEPRECTAED_BACKUP_TIME_OF_DAY.value: "01:11", + Setting.DEPRECTAED_DELETE_BEFORE_NEW_BACKUP.value: True, + Setting.DEPRECTAED_BACKUP_NAME.value: "test", + Setting.DEPRECTAED_SPECIFY_BACKUP_FOLDER.value: True, + Setting.DEPRECTAED_NOTIFY_FOR_STALE_BACKUPS.value: False, + Setting.DEPRECTAED_ENABLE_BACKUP_STALE_SENSOR.value: False, + Setting.DEPRECTAED_ENABLE_BACKUP_STATE_SENSOR.value: False, + Setting.DEPRECATED_BACKUP_PASSWORD.value: "test password", 
+ } + + await ha.init() + assert not config.mustSaveUpgradeChanges() + assert interceptor.urlWasCalled(URL_MATCH_SELF_OPTIONS) + + # Verify the config was upgraded + assert supervisor._options == { + Setting.MAX_BACKUPS_IN_HA.value: 1, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 2, + Setting.DAYS_BETWEEN_BACKUPS.value: 5, + Setting.IGNORE_OTHER_BACKUPS.value: True, + Setting.IGNORE_UPGRADE_BACKUPS.value: True, + Setting.BACKUP_TIME_OF_DAY.value: "01:11", + Setting.DELETE_BEFORE_NEW_BACKUP.value: True, + Setting.BACKUP_NAME.value: "test", + Setting.SPECIFY_BACKUP_FOLDER.value: True, + Setting.NOTIFY_FOR_STALE_BACKUPS.value: False, + Setting.ENABLE_BACKUP_STALE_SENSOR.value: False, + Setting.ENABLE_BACKUP_STATE_SENSOR.value: False, + Setting.BACKUP_PASSWORD.value: "test password", + Setting.CALL_BACKUP_SNAPSHOT.value: True, + } + + interceptor.clear() + + await ha.init() + assert not interceptor.urlWasCalled(URL_MATCH_SELF_OPTIONS) + + +@pytest.mark.asyncio +async def test_upgrade_some_config(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, server_url): + """Verify that converting a mix of upgradeable and not upgradeable config works""" + + # overwrite the addon options with old values + supervisor._options = { + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 4, + Setting.DEPRECTAED_MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DEPRECATED_DAYS_BETWEEN_BACKUPS.value: 3, + Setting.DEPRECTAED_BACKUP_TIME_OF_DAY.value: "01:11", + Setting.EXCLUDE_ADDONS.value: "test", + Setting.USE_SSL.value: False, + } + + await ha.init() + + assert not config.mustSaveUpgradeChanges() + assert interceptor.urlWasCalled(URL_MATCH_SELF_OPTIONS) + + # Verify the config was upgraded + assert supervisor._options == { + Setting.MAX_BACKUPS_IN_HA.value: 4, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DAYS_BETWEEN_BACKUPS.value: 3, + Setting.EXCLUDE_ADDONS.value: "test", + Setting.BACKUP_TIME_OF_DAY.value: "01:11", + Setting.CALL_BACKUP_SNAPSHOT.value: True, + } + + +@pytest.mark.asyncio +async def test_upgrade_no_config(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, server_url): + """Verifies that config not in need of an upgrade doesn't get upgraded""" + + # overwrite the addon options with old values + supervisor._options = { + Setting.MAX_BACKUPS_IN_HA.value: 4, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DAYS_BETWEEN_BACKUPS.value: 3, + Setting.BACKUP_TIME_OF_DAY.value: "01:11", + Setting.EXCLUDE_ADDONS.value: "test" + } + + await ha.init() + + assert not config.mustSaveUpgradeChanges() + assert not interceptor.urlWasCalled(URL_MATCH_SELF_OPTIONS) + + # Verify the config was upgraded + assert supervisor._options == { + Setting.MAX_BACKUPS_IN_HA.value: 4, + Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE.value: 4, + Setting.DAYS_BETWEEN_BACKUPS.value: 3, + Setting.BACKUP_TIME_OF_DAY.value: "01:11", + Setting.EXCLUDE_ADDONS.value: "test", + } + + +@pytest.mark.asyncio +async def test_old_delete_path(ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, time: FakeTime): + supervisor._super_version = Version(2020, 8) + await ha.get() + backup: HABackup = await ha.create(CreateOptions(time.now(), "Test Name")) + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + await ha.delete(full) + assert interceptor.urlWasCalled("/snapshots/{0}/remove".format(backup.slug())) + + +@pytest.mark.asyncio +async def 
test_ignore_upgrade_backup_ha_config(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, data_cache: DataCache): + config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", 'folders': ['homeassistant'], 'addons': []}))['slug'] + + backups = await ha.get() + assert len(backups) == 1 + assert slug in backups + assert backups[slug].ignore() + + +@pytest.mark.asyncio +async def test_ignore_upgrade_backup_single_folder(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, data_cache: DataCache): + config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", 'folders': ['share'], 'addons': []}))['slug'] + + backups = await ha.get() + assert len(backups) == 1 + assert slug in backups + assert backups[slug].ignore() + + +@pytest.mark.asyncio +async def test_ignore_upgrade_backup_single_addon(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, data_cache: DataCache): + config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", 'folders': [], 'addons': ["particla_accel"]}))['slug'] + + backups = await ha.get() + assert len(backups) == 1 + assert slug in backups + assert backups[slug].ignore() + + +@pytest.mark.asyncio +async def test_ignore_upgrade_backup_two_folders(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, data_cache: DataCache): + config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", 'folders': ['homeassistant', "share"], 'addons': []}))['slug'] + + backups = await ha.get() + assert len(backups) == 1 + assert slug in backups + assert not backups[slug].ignore() + + +@pytest.mark.asyncio +async def test_ignore_upgrade_backup_empty(ha: HaSource, time: Time, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor, config: Config, data_cache: DataCache): + config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + slug = (await ha.harequests.createBackup({'name': "Suddenly Appears", 'folders': [], 'addons': []}))['slug'] + + backups = await ha.get() + assert len(backups) == 1 + assert slug in backups + assert not backups[slug].ignore() + + +@pytest.mark.asyncio +async def test_very_long_running_backup(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 1) + config.override(Setting.PENDING_BACKUP_TIMEOUT_SECONDS, 2) + config.override(Setting.IGNORE_OTHER_BACKUPS, True) + + async with supervisor._backup_inner_lock: + await ha.create(CreateOptions(time.now(), "Actually gets made")) + + for _ in range(4): + time.advance(hours=1) + assert "pending" in (await ha.get()) + + # Let the task fail + await ha._pending_backup_task + + # after 4 hours the pending backup should be assumed to have failed and cleaned up + time.advance(hours=1) + assert len(await ha.get()) == 0 + + for _ in range(4): + time.advance(hours=1) + # Making a backup should keep failing + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Ignored")) + backups = await ha.get() + assert len(backups) == 1 + assert "pending" in backups + + # Wait for the backup to complete + async with supervisor._backup_lock: + pass + + time.advance(hours=1) + backups 
= await ha.get() + assert len(backups) == 1 + assert "pending" not in backups + assert list(backups.values())[0].madeByTheAddon() + + +@pytest.mark.asyncio +async def test_note(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + backup = await ha.create(CreateOptions(time.now(), "Backup")) + assert isinstance(backup, HABackup) + assert backup.note() is None + + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + await ha.note(full, "new note") + assert backup.note() == "new note" + + await ha.note(full, None) + assert backup.note() is None + + +@pytest.mark.asyncio +async def test_note_creation(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + backup = await ha.create(CreateOptions(time.now(), "Backup", note="creation note")) + assert isinstance(backup, HABackup) + assert backup.note() == "creation note" + assert (await ha.get())[backup.slug()].note() == "creation note" + + full = DummyBackup(backup.name(), backup.date(), + backup.size(), backup.slug(), "dummy") + full.addSource(backup) + await ha.note(full, "new note") + assert backup.note() == "new note" + assert (await ha.get())[full.slug()].note() == "new note" + + await ha.note(full, None) + assert backup.note() is None + + +@pytest.mark.asyncio +async def test_note_long_backup(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 1) + + async with supervisor._backup_inner_lock: + backup = await ha.create(CreateOptions(time.now(), "Backup", note="Creation note")) + assert isinstance(backup, PendingBackup) + assert backup.note() == "Creation note" + + pending = (await ha.get())[backup.slug()] + assert pending.note() == "Creation note" + assert ha._pending_backup_task is not None + await ha._pending_backup_task + completed = next(iter((await ha.get()).values())) + assert not isinstance(completed, PendingBackup) + assert completed.note() == "Creation note" + + +@pytest.mark.asyncio +async def test_note_long_backup_changed_during_creation(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 1) + + async with supervisor._backup_inner_lock: + backup = await ha.create(CreateOptions(time.now(), "Backup", note="Creation note")) + assert isinstance(backup, PendingBackup) + assert backup.note() == "Creation note" + + pending = (await ha.get())[backup.slug()] + assert pending.note() == "Creation note" + + full = DummyBackup(pending.name(), pending.date(), pending.size(), pending.slug(), "dummy") + full.addSource(pending) + await ha.note(full, "changed") + + still_pending = next(iter((await ha.get()).values())) + assert isinstance(still_pending, PendingBackup) + assert still_pending.note() == "changed" + + assert ha._pending_backup_task is not None + await ha._pending_backup_task + completed = next(iter((await ha.get()).values())) + assert not isinstance(completed, PendingBackup) + assert completed.note() == "changed" + + +@pytest.mark.asyncio +async def test_note_change_external_backup(time, config, ha: HaSource, supervisor: SimulatedSupervisor): + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 100) + + await supervisor.toggleBlockBackup() + with pytest.raises(BackupInProgress): + await ha.create(CreateOptions(time.now(), "Ignored", note="ignored")) + pending = next(iter((await ha.get()).values())) + assert isinstance(pending, PendingBackup) + assert pending.note() is None + + full = DummyBackup(pending.name(), pending.date(), 
pending.size(), pending.slug(), "dummy") + full.addSource(pending) + await ha.note(full, "changed note") + assert full.note() == "changed note" + + # create a new backup in the background; it should get the note of the pending backup. + await supervisor.toggleBlockBackup() + await supervisor.createBackup({"name": "Test Backup"}, date=time.now() - timedelta(hours=12)) + completed = next(iter((await ha.get()).values())) + assert not isinstance(completed, PendingBackup) + assert completed.note() == "changed note" + + +# Verify that if the supervisor is below the minimum version, we don't query for mount information and it's populated with a reasonable default. +@pytest.mark.asyncio +async def test_mount_info_old_supervisor(time, config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + supervisor._super_version = Version.parse("2023.5") + await ha.refresh() + assert not interceptor.urlWasCalled(URL_MATCH_MOUNT) + assert len(ha.mount_info.get("mounts")) == 0 + + +# Verify that if the supervisor is above the minimum version, we do query for mount info and populate it +@pytest.mark.asyncio +async def test_mount_info_new_supervisor(time, config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + supervisor._super_version = Version.parse("2023.6") + await ha.refresh() + assert interceptor.urlWasCalled(URL_MATCH_MOUNT) + assert len(ha.mount_info.get("mounts")) > 0 + + +# Verify that the default backup location is HA's configured default if the backup location is unspecified +# and the supervisor is above the minimum version +@pytest.mark.asyncio +async def test_default_backup_location_new_supervisor(time, config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + await ha.refresh() + req, _name, _protected = ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + assert req.get("location") is None + assert 'location' in req + + supervisor._mounts["default_backup_mount"] = "my_backup_share" + + await ha.refresh() + req, _name, _protected = ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + assert req.get("location") == "my_backup_share" + + +# Verify that having a backup storage location of "local-disk" always uses the default even if HA has another default configured +@pytest.mark.asyncio +async def test_default_backup_location_local_disk(time, config: Config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + config.override(Setting.BACKUP_STORAGE, "local-disk") + await ha.refresh() + req, _name, _protected = ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + assert req.get("location") is None + assert 'location' in req + + supervisor._mounts["default_backup_mount"] = "my_backup_share" + await ha.refresh() + req, _name, _protected = ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + assert req.get("location") is None + assert 'location' in req + + +# Verify that using a non-active share results in an error before attempting to request the backup +@pytest.mark.asyncio +async def test_inactive_backup_location(time, config: Config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + config.override(Setting.BACKUP_STORAGE, "my_backup_share") + supervisor._mounts["mounts"][1]["state"] = "starting" + await ha.refresh() + with pytest.raises(InactiveNetworkStorageError): + ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + + +# Verify that using a non-existent share results in an error before attempting to 
request the backup +@pytest.mark.asyncio +async def test_unknown_backup_location(time, config: Config, ha: HaSource, supervisor: SimulatedSupervisor, interceptor: RequestInterceptor): + config.override(Setting.BACKUP_STORAGE, "doesn't_exists") + await ha.refresh() + with pytest.raises(UnknownNetworkStorageError): + ha._buildBackupInfo(CreateOptions(time.now(), "Backup")) + + +@pytest.mark.asyncio +async def test_exclude_database(ha: HaSource, time, config: Config, supervisor: SimulatedSupervisor) -> None: + config.override(Setting.EXCLUDE_HA_DATABASE, True) + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 0.1) + async with supervisor._backup_inner_lock: + backup = await ha.create(CreateOptions(time.now(), "Test Name")) + assert isinstance(backup, PendingBackup) + assert backup._request_info['homeassistant_exclude_database'] diff --git a/hassio-google-drive-backup/tests/test_haupdater.py b/hassio-google-drive-backup/tests/test_haupdater.py new file mode 100644 index 0000000..478db64 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_haupdater.py @@ -0,0 +1,416 @@ +from datetime import timedelta +from backup.model.backups import Backup +import pytest + +from backup.util import GlobalInfo +from backup.ha import HaUpdater +from backup.ha.haupdater import REASSURING_MESSAGE +from .faketime import FakeTime +from .helpers import HelperTestSource +from dev.simulationserver import SimulationServer +from backup.logger import getLast +from backup.util import Estimator +from dev.simulated_supervisor import SimulatedSupervisor, URL_MATCH_CORE_API +from dev.request_interceptor import RequestInterceptor +from backup.model import Coordinator +from backup.config import Config, Setting + +STALE_ATTRIBUTES = { + "friendly_name": "Backups Stale", + "device_class": "problem" +} + + +@pytest.fixture +def source(): + return HelperTestSource("Source") + + +@pytest.fixture +def dest(): + return HelperTestSource("Dest") + + +@pytest.mark.asyncio +async def test_init(updater: HaUpdater, global_info, supervisor: SimulatedSupervisor, server, time: FakeTime): + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + verifyEntity(supervisor, "binary_sensor.backups_stale", + "off", STALE_ATTRIBUTES) + verifyEntity(supervisor, "sensor.backup_state", "waiting", { + 'friendly_name': 'Backup State', + 'last_backup': 'Never', + 'next_backup': time.now().isoformat(), + 'last_uploaded': 'Never', + 'backups': [], + 'backups_in_google_drive': 0, + 'free_space_in_google_drive': "", + 'backups_in_home_assistant': 0, + 'size_in_google_drive': "0.0 B", + 'size_in_home_assistant': '0.0 B' + }) + assert supervisor.getNotification() is None + + global_info.success() + assert not updater._stale() + assert updater._state() == "backed_up" + + +@pytest.mark.asyncio +async def test_init_failure(updater: HaUpdater, global_info: GlobalInfo, time: FakeTime, server, supervisor: SimulatedSupervisor): + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + + global_info.failed(Exception()) + assert not updater._stale() + assert updater._state() == "backed_up" + assert supervisor.getNotification() is None + + time.advanceDay() + assert updater._stale() + assert updater._state() == "error" + await updater.update() + assert supervisor.getNotification() == { + 'message': 'The add-on is having trouble making backups and needs attention. 
Please visit the add-on status page for details.', + 'title': 'Home Assistant Google Drive Backup is Having Trouble', + 'notification_id': 'backup_broken' + } + + +@pytest.mark.asyncio +async def test_failure_backoff_502(updater: HaUpdater, server, time: FakeTime, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_CORE_API, 502) + for x in range(9): + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + + interceptor.clear() + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + + +@pytest.mark.asyncio +async def test_failure_backoff_510(updater: HaUpdater, server, time: FakeTime, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_CORE_API, 502) + for x in range(9): + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + + interceptor.clear() + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + + +@pytest.mark.asyncio +async def test_failure_backoff_other(updater: HaUpdater, server, time: FakeTime, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_CORE_API, 400) + for x in range(9): + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + interceptor.clear() + await updater.update() + assert time.sleeps == [60, 120, 240, 300, 300, 300, 300, 300, 300] + + +@pytest.mark.asyncio +async def test_update_backups(updater: HaUpdater, server, time: FakeTime, supervisor: SimulatedSupervisor): + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + verifyEntity(supervisor, "binary_sensor.backups_stale", + "off", STALE_ATTRIBUTES) + verifyEntity(supervisor, "sensor.backup_state", "waiting", { + 'friendly_name': 'Backup State', + 'last_backup': 'Never', + 'next_backup': time.now().isoformat(), + 'last_uploaded': 'Never', + 'backups': [], + 'backups_in_google_drive': 0, + 'backups_in_home_assistant': 0, + 'size_in_home_assistant': "0.0 B", + 'size_in_google_drive': "0.0 B", + 'free_space_in_google_drive': '' + }) + + +@pytest.mark.asyncio +async def test_update_backups_no_next_backup(updater: HaUpdater, server, time: FakeTime, supervisor: SimulatedSupervisor, config: Config): + config.override(Setting.DAYS_BETWEEN_BACKUPS, 0) + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + verifyEntity(supervisor, "binary_sensor.backups_stale", + "off", STALE_ATTRIBUTES) + verifyEntity(supervisor, "sensor.backup_state", "waiting", { + 'friendly_name': 'Backup State', + 'last_backup': 'Never', + 'next_backup': None, + 'last_uploaded': 'Never', + 'backups': [], + 'backups_in_google_drive': 0, + 'backups_in_home_assistant': 0, + 'size_in_home_assistant': "0.0 B", + 'size_in_google_drive': "0.0 B", + 'free_space_in_google_drive': '' + }) + + +@pytest.mark.asyncio +async def test_update_backups_sync(updater: HaUpdater, server, time: FakeTime, backup: Backup, supervisor: SimulatedSupervisor, config: Config): + await updater.update() + assert not updater._stale() + assert updater._state() == "backed_up" + verifyEntity(supervisor, "binary_sensor.backups_stale", + "off", STALE_ATTRIBUTES) + date = '1985-12-06T05:00:00+00:00' + verifyEntity(supervisor, "sensor.backup_state", "backed_up", { + 'friendly_name': 'Backup State', + 'last_backup': date, + 'last_uploaded': date, + 'next_backup': (backup.date() + timedelta(days=config.get(Setting.DAYS_BETWEEN_BACKUPS))).isoformat(), + 'backups': [{ + 
'date': date, + 'name': backup.name(), + 'size': backup.sizeString(), + 'state': backup.status(), + 'slug': backup.slug() + } + ], + 'backups_in_google_drive': 1, + 'backups_in_home_assistant': 1, + 'size_in_home_assistant': Estimator.asSizeString(backup.size()), + 'size_in_google_drive': Estimator.asSizeString(backup.size()), + 'free_space_in_google_drive': '5.0 GB' + }) + + +@pytest.mark.asyncio +async def test_notification_link(updater: HaUpdater, server, time: FakeTime, global_info, supervisor: SimulatedSupervisor): + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + verifyEntity(supervisor, "binary_sensor.backups_stale", + "off", STALE_ATTRIBUTES) + verifyEntity(supervisor, "sensor.backup_state", "waiting", { + 'friendly_name': 'Backup State', + 'last_backup': 'Never', + 'next_backup': time.now().isoformat(), + 'last_uploaded': 'Never', + 'backups': [], + 'backups_in_google_drive': 0, + 'backups_in_home_assistant': 0, + 'size_in_home_assistant': "0.0 B", + 'size_in_google_drive': "0.0 B", + 'free_space_in_google_drive': '' + }) + assert supervisor.getNotification() is None + + global_info.failed(Exception()) + global_info.url = "http://localhost/test" + time.advanceDay() + await updater.update() + assert supervisor.getNotification() == { + 'message': 'The add-on is having trouble making backups and needs attention. Please visit the add-on [status page](http://localhost/test) for details.', + 'title': 'Home Assistant Google Drive Backup is Having Trouble', + 'notification_id': 'backup_broken' + } + + +@pytest.mark.asyncio +async def test_notification_clears(updater: HaUpdater, server, time: FakeTime, global_info, supervisor: SimulatedSupervisor): + await updater.update() + assert not updater._stale() + assert updater._state() == "waiting" + assert supervisor.getNotification() is None + + global_info.failed(Exception()) + time.advance(hours=8) + await updater.update() + assert supervisor.getNotification() is not None + + global_info.success() + await updater.update() + assert supervisor.getNotification() is None + + +@pytest.mark.asyncio +async def test_publish_for_failure(updater: HaUpdater, server, time: FakeTime, global_info: GlobalInfo, supervisor: SimulatedSupervisor): + global_info.success() + await updater.update() + assert supervisor.getNotification() is None + + time.advance(hours=8) + global_info.failed(Exception()) + await updater.update() + assert supervisor.getNotification() is not None + + time.advance(hours=8) + global_info.failed(Exception()) + await updater.update() + assert supervisor.getNotification() is not None + + global_info.success() + await updater.update() + assert supervisor.getNotification() is None + + +@pytest.mark.asyncio +async def test_failure_logging(updater: HaUpdater, server, time: FakeTime, interceptor: RequestInterceptor): + interceptor.setError(URL_MATCH_CORE_API, 501) + assert getLast() is None + await updater.update() + assert getLast() is None + + time.advance(minutes=1) + await updater.update() + assert getLast() is None + + time.advance(minutes=5) + await updater.update() + assert getLast().msg == REASSURING_MESSAGE.format(501) + + last_log = getLast() + time.advance(minutes=5) + await updater.update() + assert getLast() is not last_log + assert getLast().msg == REASSURING_MESSAGE.format(501) + + last_log = getLast() + interceptor.clear() + await updater.update() + assert getLast() is last_log + + +@pytest.mark.asyncio +async def test_publish_retries(updater: HaUpdater, server: SimulationServer, 
time: FakeTime, backup, drive, supervisor: SimulatedSupervisor): + await updater.update() + assert supervisor.getEntity("sensor.backup_state") is not None + + # Shouldn't update after 59 minutes + supervisor.clearEntities() + time.advance(minutes=59) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") is None + + # after that it should + supervisor.clearEntities() + time.advance(minutes=2) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") is not None + + supervisor.clearEntities() + await drive.delete(backup) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") is not None + + +@pytest.mark.asyncio +async def test_ignored_backups(updater: HaUpdater, time: FakeTime, server: SimulationServer, backup: Backup, supervisor: SimulatedSupervisor, coord: Coordinator, config: Config): + config.override(Setting.IGNORE_OTHER_BACKUPS, True) + time.advance(hours=1) + await supervisor.createBackup({'name': "test_backup"}, date=time.now()) + await coord.sync() + await updater.update() + state = supervisor.getAttributes("sensor.backup_state") + assert state["backups_in_google_drive"] == 1 + assert state["backups_in_home_assistant"] == 1 + assert len(state["backups"]) == 1 + assert state['last_backup'] == backup.date().isoformat() + + +@pytest.mark.asyncio +async def test_update_backups_old_names(updater: HaUpdater, server, backup: Backup, time: FakeTime, supervisor: SimulatedSupervisor, config: Config): + config.override(Setting.CALL_BACKUP_SNAPSHOT, True) + await updater.update() + assert not updater._stale() + assert updater._state() == "backed_up" + verifyEntity(supervisor, "binary_sensor.snapshots_stale", + "off", {"friendly_name": "Snapshots Stale", + "device_class": "problem"}) + date = '1985-12-06T05:00:00+00:00' + verifyEntity(supervisor, "sensor.snapshot_backup", "backed_up", { + 'friendly_name': 'Snapshot State', + 'last_snapshot': date, + 'snapshots': [{ + 'date': date, + 'name': backup.name(), + 'size': backup.sizeString(), + 'state': backup.status(), + 'slug': backup.slug() + } + ], + 'snapshots_in_google_drive': 1, + 'snapshots_in_home_assistant': 1, + 'snapshots_in_hassio': 1, + 'size_in_home_assistant': Estimator.asSizeString(backup.size()), + 'size_in_google_drive': Estimator.asSizeString(backup.size()) + }) + + +@pytest.mark.asyncio +async def test_drive_free_space(updater: HaUpdater, time: FakeTime, server: SimulationServer, supervisor: SimulatedSupervisor, coord: Coordinator, config: Config): + await updater.update() + state = supervisor.getAttributes("sensor.backup_state") + assert state["free_space_in_google_drive"] == "" + + await coord.sync() + await updater.update() + state = supervisor.getAttributes("sensor.backup_state") + assert state["free_space_in_google_drive"] == "5.0 GB" + + +@pytest.mark.asyncio +async def test_stale_backup_is_error(updater: HaUpdater, server, backup: Backup, time: FakeTime, supervisor: SimulatedSupervisor, config: Config): + config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "backed_up" + + time.advance(days=1) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "backed_up" + + time.advance(days=1) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "error" + + time.advance(days=1) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "error" + + +@pytest.mark.asyncio +async def 
test_stale_backup_ignores_pending(updater: HaUpdater, server, backup: Backup, time: FakeTime, supervisor: SimulatedSupervisor, config: Config, coord: Coordinator): + config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + config.override(Setting.NEW_BACKUP_TIMEOUT_SECONDS, 1) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "backed_up" + + time.advance(days=2) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "error" + + async with supervisor._backup_inner_lock: + await coord.sync() + assert coord.getBackup("pending") is not None + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "error" + + +@pytest.mark.asyncio +async def test_stale_backups_fine_for_no_creation(updater: HaUpdater, server, backup: Backup, time: FakeTime, supervisor: SimulatedSupervisor, config: Config, coord: Coordinator): + config.override(Setting.DAYS_BETWEEN_BACKUPS, 0) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "backed_up" + + # backups shouldn't become stale because the addon doesn't create them. + time.advance(days=100) + await updater.update() + assert supervisor.getEntity("sensor.backup_state") == "backed_up" + + +def verifyEntity(backend: SimulatedSupervisor, name, state, attributes): + assert backend.getEntity(name) == state + assert backend.getAttributes(name) == attributes diff --git a/hassio-google-drive-backup/tests/test_jsonfilesaver.py b/hassio-google-drive-backup/tests/test_jsonfilesaver.py new file mode 100644 index 0000000..7baf6c6 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_jsonfilesaver.py @@ -0,0 +1,63 @@ + + +from backup.file import JsonFileSaver +from os.path import exists, join +from os import remove +import pytest +import json + +TEST_DATA = { + 'info': "and the value", + 'some': 3 +} + + +def readfile(path): + with open(path) as f: + return json.load(f) + + +@pytest.mark.asyncio +async def test_basic(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + backup_path = join(tmpdir, "test.json.backup") + + assert not JsonFileSaver.exists(path) + JsonFileSaver.write(path, TEST_DATA) + assert JsonFileSaver.exists(path) + assert readfile(path) == TEST_DATA + assert readfile(backup_path) == TEST_DATA + assert JsonFileSaver.read(path) == TEST_DATA + + JsonFileSaver.delete(path) + assert not exists(path) + assert not exists(backup_path) + assert not JsonFileSaver.exists(path) + + +@pytest.mark.asyncio +async def test_file_deleted(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + JsonFileSaver.write(path, TEST_DATA) + remove(path) + assert JsonFileSaver.read(path) == TEST_DATA + + +@pytest.mark.asyncio +async def test_backup_deleted(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + backup_path = join(tmpdir, "test.json.backup") + JsonFileSaver.write(path, TEST_DATA) + remove(backup_path) + assert JsonFileSaver.read(path) == TEST_DATA + +@pytest.mark.asyncio +async def test_decode_error(tmpdir: str) -> None: + path = join(tmpdir, "test.json") + JsonFileSaver.write(path, TEST_DATA) + with open(path, "w"): + # emptys the file contents + pass + with open(path) as f: + assert len(f.read()) == 0 + assert JsonFileSaver.read(path) == TEST_DATA diff --git a/hassio-google-drive-backup/tests/test_model.py b/hassio-google-drive-backup/tests/test_model.py new file mode 100644 index 0000000..34cd7d1 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_model.py @@ -0,0 +1,1349 @@ +from datetime import datetime, timedelta, timezone + +import pytest +from 
dateutil.tz import gettz + +from backup.config import Config, Setting, CreateOptions +from backup.exceptions import DeleteMutlipleBackupsError +from backup.util import GlobalInfo, DataCache +from backup.model import Model, BackupSource +from .faketime import FakeTime +from .helpers import HelperTestSource, IntentionalFailure + +test_tz = gettz('EST') + +default_source = BackupSource() + + +@pytest.fixture +def source(): + return HelperTestSource("Source", is_destination=False) + + +@pytest.fixture +def dest(): + return HelperTestSource("Dest", is_destination=True) + + +@pytest.fixture +def simple_config() -> Config: + config = createConfig() + return config + + +@pytest.fixture +def model(source, dest, time, simple_config, global_info, estimator, data_cache): + return Model(simple_config, time, source, dest, global_info, estimator, data_cache) + + +def createConfig() -> Config: + return Config().override(Setting.BACKUP_STARTUP_DELAY_MINUTES, 0) + + +def test_timeOfDay(estimator, model: Model) -> None: + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '00:00') + model.reinitialize() + assert model.getTimeOfDay() == (0, 0) + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '23:59') + model.reinitialize() + assert model.getTimeOfDay() == (23, 59) + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '24:59') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '24:60') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '-1:60') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '24:-1') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, 'boop:60') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '24:boop') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '24:10:22') + model.reinitialize() + assert model.getTimeOfDay() is None + + model.config.override(Setting.BACKUP_TIME_OF_DAY, '10') + model.reinitialize() + assert model.getTimeOfDay() is None + + +def test_next_time(estimator, data_cache): + time: FakeTime = FakeTime() + now: datetime = datetime(1985, 12, 6, 1, 0, 0).astimezone(timezone.utc) + time.setNow(now) + info = GlobalInfo(time) + config: Config = createConfig().override(Setting.DAYS_BETWEEN_BACKUPS, 0) + model: Model = Model(config, time, default_source, + default_source, info, estimator, data_cache) + assert model._nextBackup(now=now, last_backup=None) is None + assert model._nextBackup(now=now, last_backup=now) is None + + config: Config = createConfig().override(Setting.DAYS_BETWEEN_BACKUPS, 1) + model: Model = Model(config, time, default_source, + default_source, info, estimator, data_cache) + assert model._nextBackup( + now=now, last_backup=None) == now + assert model._nextBackup( + now=now, last_backup=now) == now + timedelta(days=1) + assert model._nextBackup( + now=now, last_backup=now - timedelta(days=1)) == now + assert model._nextBackup( + now=now, last_backup=now + timedelta(days=1)) == now + timedelta(days=2) + + +def test_next_time_of_day(estimator, data_cache): + time: FakeTime = FakeTime() + now: datetime = datetime(1985, 12, 6, 1, 0, 0).astimezone(timezone.utc) + time.setNow(now) + info = GlobalInfo(time) + config: Config = 
createConfig().override(Setting.DAYS_BETWEEN_BACKUPS, 1).override( + Setting.BACKUP_TIME_OF_DAY, '08:00') + model: Model = Model(config, time, default_source, + default_source, info, estimator, data_cache) + + assert model._nextBackup( + now=now, last_backup=None) == now + assert model._nextBackup( + now=now, last_backup=now - timedelta(days=1)) == now + assert model._nextBackup(now=now, last_backup=now) == datetime( + 1985, 12, 6, 8, 0, tzinfo=test_tz) + assert model._nextBackup(now=now, last_backup=datetime( + 1985, 12, 6, 8, 0, tzinfo=test_tz)) == datetime(1985, 12, 7, 8, 0, tzinfo=test_tz) + assert model._nextBackup(now=datetime(1985, 12, 6, 8, 0, tzinfo=test_tz), last_backup=datetime( + 1985, 12, 6, 8, 0, tzinfo=test_tz)) == datetime(1985, 12, 7, 8, 0, tzinfo=test_tz) + + +def test_next_time_of_day_drift(estimator, data_cache): + time: FakeTime = FakeTime() + now: datetime = datetime(1985, 12, 6, 1, 0, 0).astimezone(timezone.utc) + time.setNow(now) + info = GlobalInfo(time) + + # ignore the backup cooldown for this test + info.triggerBackupCooldown(timedelta(days=-7)) + config: Config = createConfig().override(Setting.DAYS_BETWEEN_BACKUPS, 1).override( + Setting.BACKUP_TIME_OF_DAY, '08:00') + model: Model = Model(config, time, default_source, + default_source, info, estimator, data_cache) + + assert model._nextBackup( + now=now, last_backup=None) == now + assert model._nextBackup( + now=now, last_backup=now - timedelta(days=1) + timedelta(minutes=1)) == datetime(1985, 12, 5, 8, 0, tzinfo=test_tz) + + +def test_next_time_of_day_dest_disabled(model, time, source, dest): + dest.setEnabled(True) + assert model._nextBackup( + now=time.now(), last_backup=None) == time.now() + dest.setEnabled(False) + assert model._nextBackup(now=time.now(), last_backup=None) is None + + +@pytest.mark.asyncio +async def test_sync_empty(model, time, source, dest): + source.setEnabled(False) + dest.setEnabled(False) + await model.sync(time.now()) + assert len(model.backups) == 0 + + +@pytest.mark.asyncio +async def test_sync_single_source(model: Model, source, dest, time): + backup = await source.create(CreateOptions(time.now(), "name")) + dest.setEnabled(False) + await model.sync(time.now()) + assert len(model.backups) == 1 + assert backup.slug() in model.backups + assert model.backups[backup.slug()].getSource( + source.name()) is backup + assert model.backups[backup.slug()].getSource(dest.name()) is None + + +@pytest.mark.asyncio +async def test_sync_source_and_dest(model: Model, time, source, dest: HelperTestSource): + backup_source = await source.create(CreateOptions(time.now(), "name")) + await model._syncBackups([source, dest], time.now()) + assert len(model.backups) == 1 + + backup_dest = await dest.save(model.backups[backup_source.slug()]) + await model._syncBackups([source, dest], time.now()) + assert len(model.backups) == 1 + assert model.backups[backup_source.slug()].getSource( + source.name()) is backup_source + assert model.backups[backup_source.slug()].getSource( + dest.name()) is backup_dest + + +@pytest.mark.asyncio +async def test_sync_different_sources(model: Model, time, source, dest): + backup_source = await source.create(CreateOptions(time.now(), "name")) + backup_dest = await dest.create(CreateOptions(time.now(), "name")) + + await model._syncBackups([source, dest], time.now()) + assert len(model.backups) == 2 + assert model.backups[backup_source.slug()].getSource( + source.name()) is backup_source + assert model.backups[backup_dest.slug()].getSource( + dest.name()) is backup_dest + + 
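+# The tests below exercise backup creation, upload, and deletion as sources and
+# destinations are enabled, disabled, or exceed their configured maximums.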
+@pytest.mark.asyncio +async def test_removal(model: Model, time, source, dest): + await source.create(CreateOptions(time.now(), "name")) + await model._syncBackups([source, dest], time.now()) + assert len(model.backups) == 1 + source.current = {} + await model._syncBackups([source, dest], time.now()) + assert len(model.backups) == 0 + + +@pytest.mark.asyncio +async def test_new_backup(model: Model, source, dest, time): + await model.sync(time.now()) + assert len(model.backups) == 1 + assert len(source.created) == 1 + assert source.created[0].date() == time.now() + assert len(source.current) == 1 + assert len(dest.current) == 1 + + +@pytest.mark.asyncio +async def test_upload_backup(time, model: Model, dest, source): + dest.setEnabled(True) + await model.sync(time.now()) + assert len(model.backups) == 1 + source.assertThat(created=1, current=1) + assert len(source.created) == 1 + assert source.created[0].date() == time.now() + assert len(source.current) == 1 + assert len(dest.current) == 1 + assert len(dest.saved) == 1 + + +@pytest.mark.asyncio +async def test_disabled(time, model: Model, source, dest): + # create two disabled sources + source.setEnabled(False) + source.insert("newer", time.now(), "slug1") + dest.setEnabled(False) + dest.insert("s2", time.now(), "slug2") + await model.sync(time.now()) + source.assertUnchanged() + dest.assertUnchanged() + assert len(model.backups) == 0 + + +@pytest.mark.asyncio +async def test_delete_source(time, model: Model, source, dest): + time = FakeTime() + now = time.now() + + # create two source backups + source.setMax(1) + older = source.insert("older", now - timedelta(minutes=1), "older") + newer = source.insert("newer", now, "newer") + + # configure only one to be kept + await model.sync(now) + assert len(model.backups) == 1 + assert len(source.saved) == 0 + assert source.deleted == [older] + assert len(source.saved) == 0 + assert newer.slug() in model.backups + assert model.backups[newer.slug()].getSource(source.name()) == newer + + +@pytest.mark.asyncio +async def test_delete_dest(time, model: Model, source, dest): + now = time.now() + + # create two source backups + dest.setMax(1) + older = dest.insert("older", now - timedelta(minutes=1), "older") + newer = dest.insert("newer", now, "newer") + + # configure only one to be kept + await model.sync(now) + assert len(model.backups) == 1 + assert len(dest.saved) == 0 + assert dest.deleted == [older] + assert len(source.saved) == 0 + assert newer.slug() in model.backups + assert model.backups[newer.slug()].getSource(dest.name()) == newer + source.assertUnchanged() + + +@pytest.mark.asyncio +async def test_new_upload_with_delete(time, model: Model, source, dest, simple_config): + now = time.now() + + # create a single old backups + source.setMax(1) + dest.setMax(1) + backup_dest = dest.insert("older", now - timedelta(days=1), "older") + backup_source = source.insert("older", now - timedelta(days=1), "older") + + # configure only one to be kept in both places + simple_config.config.update({ + "days_between_backups": 1 + }) + model.reinitialize() + await model.sync(now) + + # Old snapshto shoudl be deleted, new one shoudl be created and uploaded. 
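+    # (the end state is a single new backup present in both the source and the destination)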
+ source.assertThat(current=1, created=1, deleted=1) + dest.assertThat(current=1, saved=1, deleted=1) + assert dest.deleted == [backup_dest] + assert source.deleted == [backup_source] + + assert len(model.backups) == 1 + assertBackup(model, [source.created[0], dest.saved[0]]) + + +@pytest.mark.asyncio +async def test_new_upload_no_delete(time, model: Model, source, dest, simple_config): + now = time.now() + + # create a single old backup + source.setMax(2) + dest.setMax(2) + backup_dest = dest.insert("older", now - timedelta(days=1), "older") + backup_source = source.insert("older", now - timedelta(days=1), "older") + + # configure keeping two in both places + simple_config.config.update({ + "days_between_backups": 1 + }) + model.reinitialize() + await model.sync(now) + + # Another backup should have been created and saved + source.assertThat(current=2, created=1) + dest.assertThat(current=2, saved=1) + assert len(model.backups) == 2 + assertBackup(model, [source.created[0], dest.saved[0]]) + assertBackup(model, [backup_dest, backup_source]) + + +@pytest.mark.asyncio +async def test_multiple_deletes_allowed(time, model: Model, source, dest, simple_config): + now = time.now() + simple_config.config.update({"confirm_multiple_deletes": False}) + # create 4 backups in dest + dest.setMax(1) + + current = dest.insert("current", now, "current") + old = dest.insert("old", now - timedelta(days=1), "old") + older = dest.insert("older", now - timedelta(days=2), "older") + oldest = dest.insert("oldest", now - timedelta(days=3), "oldest") + + # configure keeping 1 + simple_config.config.update({ + "max_backups_in_google_drive": 1, + }) + model.reinitialize() + await model.sync(now) + + source.assertUnchanged() + dest.assertThat(current=1, deleted=3) + assert dest.deleted == [oldest, older, old] + assert len(model.backups) == 1 + assertBackup(model, [current]) + + +@pytest.mark.asyncio +async def test_confirm_multiple_deletes(time, model: Model, source, dest, simple_config): + now = time.now() + dest.setMax(1) + source.setMax(1) + + dest.insert("current", now, "current") + dest.insert("old", now - timedelta(days=1), "old") + dest.insert("older", now - timedelta(days=2), "older") + dest.insert("oldest", now - timedelta(days=2), "olderest") + + source.insert("current", now, "current") + source.insert("old", now - timedelta(days=1), "old") + source.insert("older", now - timedelta(days=2), "older") + + with pytest.raises(DeleteMutlipleBackupsError) as thrown: + await model.sync(now) + + thrown.value.data() == { + source.name(): 2, + dest.name(): 3 + } + + source.assertUnchanged() + dest.assertUnchanged() + + +@pytest.mark.asyncio +async def test_dont_upload_deletable(time, model: Model, source, dest): + now = time.now() + + # a new backup in Drive and an old backup in HA + dest.setMax(1) + current = dest.insert("current", now, "current") + old = source.insert("old", now - timedelta(days=1), "old") + + # configure keeping 1 + await model.sync(now) + + # Nothing should happen, because the upload from hassio would have to be deleted right after it's uploaded. + source.assertUnchanged() + dest.assertUnchanged() + assert len(model.backups) == 2 + assertBackup(model, [current]) + assertBackup(model, [old]) + + +@pytest.mark.asyncio +async def test_dont_upload_when_disabled(time, model: Model, source, dest): + now = time.now() + + # Make an enabled destination but with upload diabled. 
+    dest.setMax(1)
+    dest.setUpload(False)
+
+    await model.sync(now)
+
+    # Verify the backup was created at the source but not uploaded.
+    source.assertThat(current=1, created=1)
+    dest.assertUnchanged()
+    assert len(model.backups) == 1
+
+
+@pytest.mark.asyncio
+async def test_dont_delete_purgable(time, model: Model, source, dest, simple_config):
+    now = time.now()
+
+    # create a single old backup, retained
+    source.setMax(1)
+    dest.setMax(1)
+    backup_dest = dest.insert("older", now - timedelta(days=1), "older")
+    backup_dest.setRetained(True)
+    backup_source = source.insert("older", now - timedelta(days=1), "older")
+    backup_source.setRetained(True)
+
+    # configure only one to be kept in both places
+    simple_config.config.update({
+        "days_between_backups": 1
+    })
+    model.reinitialize()
+    await model.sync(now)
+
+    # Old snapshot should be kept, new one should be created and uploaded.
+    source.assertThat(current=2, created=1)
+    dest.assertThat(current=2, saved=1)
+
+    assert len(model.backups) == 2
+    assertBackup(model, [backup_dest, backup_source])
+    assertBackup(model, [source.created[0], dest.saved[0]])
+
+
+@pytest.mark.asyncio
+async def test_generational_delete(time, model: Model, dest, source, simple_config):
+    time.setNow(time.local(2019, 5, 10))
+    now = time.now()
+
+    # Create 4 backups, configured to keep 3
+    source.setMax(3)
+    source.insert("Fri", time.local(2019, 5, 10, 1))
+    source.insert("Thu", time.local(2019, 5, 9, 1))
+    wed = source.insert("Wed", time.local(2019, 5, 8, 1))
+    source.insert("Mon", time.local(2019, 5, 6, 1))
+
+    # configure generational retention (1 week, 2 days)
+    simple_config.config.update({
+        "days_between_backups": 1,
+        "generational_weeks": 1,
+        "generational_days": 2
+    })
+    model.reinitialize()
+    await model.sync(now)
+
+    # Should only delete wed, since it isn't kept in the generational backup config
+    source.assertThat(current=3, deleted=1)
+    assert source.deleted == [wed]
+    assert len(model.backups) == 3
+    dest.assertThat(current=3, saved=3)
+
+
+@pytest.mark.asyncio
+async def test_delete_when_drive_disabled(time, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config):
+    time.setNow(time.local(2019, 5, 10))
+    now = time.now()
+    dest.setEnabled(False)
+    dest.setNeedsConfiguration(False)
+
+    # Create 4 backups, configured to keep 3
+    source.setMax(3)
+    source.insert("Fri", time.local(2019, 5, 10, 1))
+    source.insert("Thu", time.local(2019, 5, 9, 1))
+    source.insert("Wed", time.local(2019, 5, 8, 1))
+    mon = source.insert("Mon", time.local(2019, 5, 7, 1))
+
+    await model.sync(now)
+
+    # Should only delete mon, the oldest one
+    source.assertThat(current=3, deleted=1)
+    assert source.deleted == [mon]
+    assert len(model.backups) == 3
+    dest.assertThat(current=0)
+
+
+@pytest.mark.asyncio
+async def test_wait_for_startup_no_backup(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo):
+    time.setNow(time.toUtc(time.local(2019, 5, 10)))
+    global_info.__init__(time)
+    global_info.triggerBackupCooldown(timedelta(minutes=10))
+    assert model.nextBackup(time.now()) == time.now() + timedelta(minutes=10)
+    assert model.nextBackup(time.now()) == global_info.backupCooldownTime()
+    assert model.waiting_for_startup
+
+    time.advance(minutes=10)
+    assert model.nextBackup(time.now()) == time.now()
+    assert not model.waiting_for_startup
+
+
+@pytest.mark.asyncio
+async def test_wait_for_startup_with_backup(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo):
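+    # Same as above, but with an existing (old) backup at the source: the
+    # startup cooldown should still delay the next scheduled backup.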
+ time.setNow(time.local(2019, 5, 10)) + global_info.__init__(time) + global_info.triggerBackupCooldown(timedelta(minutes=10)) + + source.setMax(3) + source.insert("old", time.now() - timedelta(days=7)) + + assert model.nextBackup(time.now()) == time.now() + timedelta(minutes=10) + assert model.nextBackup(time.now()) == global_info.backupCooldownTime() + assert model.waiting_for_startup + + time.advance(minutes=10) + assert model.nextBackup(time.now()) == time.now() + assert not model.waiting_for_startup + + +@pytest.mark.asyncio +async def test_wait_for_startup_with_backup_during_cooldown(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo, simple_config: Config): + global_info.triggerBackupCooldown(timedelta(minutes=10)) + + source.setMax(3) + source.insert("old", time.now()) + + backup_time = time.toLocal(time.now()) + timedelta(minutes=5) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, f"{backup_time.hour}:{backup_time.minute}") + model.reinitialize() + await model.sync(time.now()) + assert model.nextBackup(time.now()) == global_info.backupCooldownTime() + assert model.waiting_for_startup + + time.advance(minutes=15) + assert model.nextBackup(time.now()) == global_info.backupCooldownTime() + assert not model.waiting_for_startup + + + +@pytest.mark.asyncio +async def test_ignore_startup_delay(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo): + time.setNow(time.local(2019, 5, 10)) + global_info.__init__(time) + global_info.triggerBackupCooldown(timedelta(minutes=10)) + model.ignore_startup_delay = True + assert model.nextBackup(time.now()) == time.now() + assert not model.waiting_for_startup + + +def assertBackup(model, sources): + matches = {} + for source in sources: + matches[source.source()] = source + slug = source.slug() + assert slug in model.backups + assert model.backups[slug].sources == matches + + +@pytest.mark.asyncio +async def test_delete_after_upload(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo): + model.config.override(Setting.DELETE_AFTER_UPLOAD, True) + source.setMax(100) + dest.setMax(100) + dest.insert("Destination 1", time.now()) + dest.reset() + + # Nothing should happen on a sync, the backup is already backed up. 
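+    # (the destination already holds it, so nothing is created or uploaded)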
+    await model.sync(time.now())
+    dest.assertThat(current=1)
+    source.assertThat()
+
+    time.advance(days=7)
+    source.insert("Source 1", time.now())
+    source.reset()
+
+    # Source backup should get backed up and then deleted
+    await model.sync(time.now())
+    source.assertThat(deleted=1, current=0)
+    dest.assertThat(saved=1, current=2)
+
+
+@pytest.mark.asyncio
+async def test_delete_after_upload_multiple_deletes(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo):
+    model.config.override(Setting.DELETE_AFTER_UPLOAD, True)
+    source.setMax(100)
+    dest.setMax(100)
+    source.insert("Src 1", time.now())
+    time.advance(days=1)
+    source.insert("Src 2", time.now())
+    source.reset()
+
+    # Deleting multiple backups should still fail with DELETE_AFTER_UPLOAD:True
+    with pytest.raises(DeleteMutlipleBackupsError):
+        await model.sync(time.now())
+
+    # But the backup should still get backed up
+    source.assertThat(current=2)
+    dest.assertThat(saved=2, current=2)
+
+
+@pytest.mark.asyncio
+async def test_delete_after_upload_simple_sync(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, global_info: GlobalInfo):
+    model.config.override(Setting.DELETE_AFTER_UPLOAD, True)
+    source.setMax(100)
+    dest.setMax(100)
+
+    # A sync should create a backup, back it up to dest, and then delete it from source.
+    await model.sync(time.now())
+    source.assertThat(created=1, deleted=1, current=0)
+    dest.assertThat(saved=1, current=1)
+
+    time.advance(hours=1)
+    source.reset()
+    dest.reset()
+
+    # Next sync should do nothing
+    await model.sync(time.now())
+    source.assertThat()
+    dest.assertThat(current=1)
+
+
+@pytest.mark.asyncio
+async def test_never_delete_ignored_backups(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource):
+    source.setMax(1)
+    dest.setMax(1)
+
+    # A sync should create a backup and back it up to dest.
+    await model.sync(time.now())
+    source.assertThat(created=1, current=1)
+    dest.assertThat(saved=1, current=1)
+
+    source.reset()
+    dest.reset()
+
+    # Another sync should delete a backup, which is just a sanity check.
+    time.advance(days=5)
+    await model.sync(time.now())
+    source.assertThat(created=1, current=1, deleted=1)
+    dest.assertThat(saved=1, current=1, deleted=1)
+    assert model.nextBackup(time.now()) == time.now() + timedelta(days=3)
+    source.reset()
+    dest.reset()
+
+    # Make the backup ignored, which should cause a new backup to be created
+    # and synced without the ignored one getting deleted.
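+    # Mark the only remaining backup in both the source and the destination as ignored.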
+ next(iter((await dest.get()).values())).setIgnore(True) + next(iter((await source.get()).values())).setIgnore(True) + assert model.nextBackup(time.now()) < time.now() + await model.sync(time.now()) + source.assertThat(created=1, current=2) + dest.assertThat(saved=1, current=2) + + +@pytest.mark.asyncio +async def test_ignored_backups_dont_upload(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource): + source.setMax(2) + dest.setMax(2) + + older = source.insert("older", time.now() - timedelta(days=1), slug="older") + older.setIgnore(True) + source.insert("newer", time.now(), slug="newer") + source.reset() + + # A sync should backup the last backup and ignore the older one + await model.sync(time.now()) + source.assertThat(created=0, current=2) + dest.assertThat(saved=1, current=1) + + uploaded = await dest.get() + assert len(uploaded) == 1 + assert next(iter(uploaded.values())).name() == "newer" + + +@pytest.mark.asyncio +async def test_dirty_cache_gets_saved(time: FakeTime, model: Model, data_cache: DataCache): + data_cache.makeDirty() + await model.sync(time.now()) + assert not data_cache.dirty + + +@pytest.mark.asyncio +async def test_delete_after_upload_with_no_backups(source: HelperTestSource, dest: HelperTestSource, time: FakeTime, model: Model, data_cache: DataCache, simple_config: Config): + source.setMax(0) + dest.setMax(2) + simple_config.override(Setting.DELETE_AFTER_UPLOAD, True) + + source.insert("older", time.now() - timedelta(days=1), slug="older") + source.insert("newer", time.now(), slug="newer") + source.reset() + + with pytest.raises(DeleteMutlipleBackupsError): + await model.sync(time.now()) + + simple_config.override(Setting.CONFIRM_MULTIPLE_DELETES, False) + await model.sync(time.now()) + + dest.assertThat(saved=2, current=2) + source.assertThat(deleted=2, current=0) + + +@pytest.mark.asyncio +async def test_purge_before_upload(source: HelperTestSource, dest: HelperTestSource, time: FakeTime, model: Model, data_cache: DataCache, simple_config: Config): + source.setMax(2) + dest.setMax(2) + older = source.insert("older", time.now() - timedelta(days=7), slug="older") + oldest = source.insert("oldest", time.now() - timedelta(days=14), slug="oldest") + await model.sync(time.now() - timedelta(days=7)) + + source.allow_create = False + dest.allow_save = False + + dest.reset() + source.reset() + + # trying to sync now should do nothing. + with pytest.raises(IntentionalFailure): + await model.sync(time.now()) + source.assertThat(current=2) + dest.assertThat(current=2) + + simple_config.override(Setting.DELETE_BEFORE_NEW_BACKUP, True) + # Trying to sync should delete the backup before syncing and then fail to create a new one. + with pytest.raises(IntentionalFailure): + await model.sync(time.now()) + source.assertThat(deleted=1, current=1) + assert oldest.slug() not in (await source.get()).keys() + dest.assertThat(current=2) + + # Trying to do it again should do nothing (eg not delete another backup) + with pytest.raises(IntentionalFailure): + await model.sync(time.now()) + source.assertThat(deleted=1, current=1) + dest.assertThat(current=2) + + # let the new source backup get created, which then deletes the destination but fails to save + source.allow_create = True + with pytest.raises(IntentionalFailure): + await model.sync(time.now()) + source.assertThat(deleted=1, current=2, created=1) + dest.assertThat(current=1, deleted=1) + assert oldest.slug() not in (await dest.get()).keys() + + # now let the new backup get saved. 
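+    # (re-enabling saves lets the pending upload complete; the purged backup stays gone)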
+ dest.allow_save = True + await model.sync(time.now()) + source.assertThat(deleted=1, current=2, created=1) + dest.assertThat(current=2, deleted=1, saved=1) + + assert oldest.slug() not in (await source.get()).keys() + assert older.slug() in (await source.get()).keys() + assert oldest.slug() not in (await dest.get()).keys() + assert older.slug() in (await dest.get()).keys() + + +@pytest.mark.asyncio +async def test_generational_empty(time, model: Model, dest, source, simple_config: Config): + time.setNow(time.local(2019, 5, 10)) + now = time.now() + + simple_config.config.update({ + "days_between_backups": 1, + "generational_weeks": 1, + "generational_days": 2 + }) + + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + + model.reinitialize() + assert len(model.backups) == 0 + await model.sync(now) + assert len(model.backups) == 1 + + +@pytest.mark.asyncio +async def test_delete_ignored_other_backup_after_some_time(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + # Create a few backups, one ignored + source.setMax(2) + dest.setMax(2) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 0) + simple_config.override(Setting.IGNORE_OTHER_BACKUPS, True) + simple_config.override(Setting.DELETE_IGNORED_AFTER_DAYS, 1) + ignored = source.insert("Ignored", time.now()) + source.insert("Existing", time.now()) + ignored.setIgnore(True) + await model.sync(time.now()) + source.assertThat(current=2) + dest.assertThat(saved=1, current=1) + source.reset() + dest.reset() + + time.advance(hours=23) + await model.sync(time.now()) + + source.assertThat(current=2) + dest.assertThat(current=1) + + time.advance(hours=2) + await model.sync(time.now()) + source.assertThat(current=1, deleted=1) + dest.assertThat(current=1) + + +@pytest.mark.asyncio +async def test_delete_ignored_upgrade_backup_after_some_time(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + # Create a few backups, one ignored + source.setMax(2) + dest.setMax(2) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 0) + simple_config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + simple_config.override(Setting.DELETE_IGNORED_AFTER_DAYS, 1) + ignored = source.insert("Ignored", time.now()) + source.insert("Existing", time.now()) + ignored.setIgnore(True) + await model.sync(time.now()) + source.assertThat(current=2) + dest.assertThat(saved=1, current=1) + source.reset() + dest.reset() + + time.advance(hours=23) + await model.sync(time.now()) + + source.assertThat(current=2) + dest.assertThat(current=1) + + time.advance(hours=2) + await model.sync(time.now()) + source.assertThat(current=1, deleted=1) + dest.assertThat(current=1) + + +@pytest.mark.asyncio +async def test_zero_config_whiled_deleting_backups(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + """ + Issue #745 identified that setting setting destination max backups to 0 and "delete after upload"=True would cause destination + backups to get deleted due to an error in the logic for handling purges. This test verifies that no longer happens. 
+ """ + source.setMax(1) + dest.setMax(1) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.DELETE_AFTER_UPLOAD, True) + source.insert("Backup", time.now()) + await model.sync(time.now()) + source.assertThat(current=0, deleted=1) + dest.assertThat(current=1, saved=1) + source.reset() + dest.reset() + + dest.setMax(0) + await model.sync(time.now()) + dest.assertThat(current=1) + source.assertThat() + + +async def simulate_backups_timeline(time: FakeTime, model: Model, start: datetime, end: datetime): + time.setNow(time.toUtc(start)) + await model.sync(time.now()) + while time.now() < end: + next = model.nextBackup(time.now()) + assert next is not None + assert next > time.now() + time.setNow(next) + await model.sync(time.now()) + + +@pytest.mark.asyncio +async def test_generational_delete_issue602(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + start = time.local(2021, 1, 1, 1) + end = time.local(2023, 5, 1, 1) + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 50) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.IGNORE_OTHER_BACKUPS, True) + simple_config.override(Setting.IGNORE_UPGRADE_BACKUPS, True) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "00:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 14) + simple_config.override(Setting.GENERATIONAL_WEEKS, 5) + simple_config.override(Setting.GENERATIONAL_MONTHS, 13) + simple_config.override(Setting.GENERATIONAL_YEARS, 1) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(1) + dest.setMax(30) + model.reinitialize() + + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.parse('2022-04-30T22:00:00+00:00'), + time.parse('2022-05-31T22:00:00+00:00'), + time.parse('2022-06-30T22:00:00+00:00'), + time.parse('2022-07-31T22:00:00+00:00'), + time.parse('2022-08-31T22:00:00+00:00'), + time.parse('2022-09-30T22:00:00+00:00'), + time.parse('2022-10-31T23:00:00+00:00'), + time.parse('2022-11-30T23:00:00+00:00'), + time.parse('2022-12-31T23:00:00+00:00'), + time.parse('2023-01-31T23:00:00+00:00'), + time.parse('2023-02-28T23:00:00+00:00'), + time.parse('2023-03-31T22:00:00+00:00'), + time.parse('2023-04-02T22:00:00+00:00'), + time.parse('2023-04-09T22:00:00+00:00'), + time.parse('2023-04-16T22:00:00+00:00'), + time.parse('2023-04-18T22:00:00+00:00'), + time.parse('2023-04-19T22:00:00+00:00'), + time.parse('2023-04-20T22:00:00+00:00'), + time.parse('2023-04-21T22:00:00+00:00'), + time.parse('2023-04-22T22:00:00+00:00'), + time.parse('2023-04-23T22:00:00+00:00'), + time.parse('2023-04-24T22:00:00+00:00'), + time.parse('2023-04-25T22:00:00+00:00'), + time.parse('2023-04-26T22:00:00+00:00'), + time.parse('2023-04-27T22:00:00+00:00'), + time.parse('2023-04-28T22:00:00+00:00'), + time.parse('2023-04-29T22:00:00+00:00'), + time.parse('2023-04-30T22:00:00+00:00'), + time.parse('2023-05-01T22:00:00+00:00')] + + +@pytest.mark.asyncio +async def test_generational_delete_issue809(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("America/Los_Angeles") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 10) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 32) + 
simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 7) + simple_config.override(Setting.GENERATIONAL_WEEKS, 4) + simple_config.override(Setting.GENERATIONAL_MONTHS, 6) + simple_config.override(Setting.GENERATIONAL_YEARS, 10) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2021, 1, 1) + end = time.local(2024, 5, 1) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.parse('2021-01-01 10:00:00+00:00'), + time.parse('2022-01-01 10:00:00+00:00'), + time.parse('2023-01-01 10:00:00+00:00'), + time.parse('2023-12-01 10:00:00+00:00'), + time.parse('2024-01-01 10:00:00+00:00'), + time.parse('2024-02-01 10:00:00+00:00'), + time.parse('2024-03-01 10:00:00+00:00'), + time.parse('2024-04-01 09:00:00+00:00'), + time.parse('2024-04-08 09:00:00+00:00'), + time.parse('2024-04-09 09:00:00+00:00'), + time.parse('2024-04-10 09:00:00+00:00'), + time.parse('2024-04-11 09:00:00+00:00'), + time.parse('2024-04-12 09:00:00+00:00'), + time.parse('2024-04-13 09:00:00+00:00'), + time.parse('2024-04-14 09:00:00+00:00'), + time.parse('2024-04-15 09:00:00+00:00'), + time.parse('2024-04-16 09:00:00+00:00'), + time.parse('2024-04-17 09:00:00+00:00'), + time.parse('2024-04-18 09:00:00+00:00'), + time.parse('2024-04-19 09:00:00+00:00'), + time.parse('2024-04-20 09:00:00+00:00'), + time.parse('2024-04-21 09:00:00+00:00'), + time.parse('2024-04-22 09:00:00+00:00'), + time.parse('2024-04-23 09:00:00+00:00'), + time.parse('2024-04-24 09:00:00+00:00'), + time.parse('2024-04-25 09:00:00+00:00'), + time.parse('2024-04-26 09:00:00+00:00'), + time.parse('2024-04-27 09:00:00+00:00'), + time.parse('2024-04-28 09:00:00+00:00'), + time.parse('2024-04-29 09:00:00+00:00'), + time.parse('2024-04-30 09:00:00+00:00'), + time.parse('2024-05-01 09:00:00+00:00')] + + +async def test_generational_delete_dst_start_rome(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 1, 1) + end = time.local(2023, 3, 25) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 2, 1, 2), + time.local(2023, 3, 1, 2), + time.local(2023, 3, 6, 2), + time.local(2023, 3, 13, 2), + time.local(2023, 3, 20, 2), + time.local(2023, 3, 23, 2), + time.local(2023, 3, 24, 2), + time.local(2023, 3, 25, 2)] + + assert time.now() == time.local(2023, 3, 25, 2) + assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + 
assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 2) + + time.setNow(time.toUtc(time.local(2023, 3, 26, 2))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 3, 26, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 27, 2) + + +async def test_generational_delete_dst_start_los_angeles(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("America/Los_Angeles") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 1, 1) + end = time.local(2023, 3, 11) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 2, 1, 2), + time.local(2023, 2, 20, 2), + time.local(2023, 2, 27, 2), + time.local(2023, 3, 1, 2), + time.local(2023, 3, 6, 2), + time.local(2023, 3, 9, 2), + time.local(2023, 3, 10, 2), + time.local(2023, 3, 11, 2)] + + assert time.now() == time.local(2023, 3, 11, 2) + assert model.nextBackup(time.now()) == time.local(2023, 3, 12, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 12, 2) + + time.setNow(time.toUtc(time.local(2023, 3, 12, 2))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 3, 12, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 13, 2) + + +async def test_generational_delete_dst_start_rome_2_30(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:30") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 1, 1) + end = time.local(2023, 3, 25) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 2, 1, 2, 30), + time.local(2023, 3, 1, 2, 30), + time.local(2023, 3, 6, 2, 30), + time.local(2023, 3, 13, 2, 30), + time.local(2023, 3, 20, 2, 30), + time.local(2023, 3, 23, 2, 30), + time.local(2023, 3, 24, 2, 30), + time.local(2023, 3, 25, 2, 30)] + + assert time.now() == 
time.local(2023, 3, 25, 2, 30) + assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 2, 30) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 2, 30) + + time.setNow(time.toUtc(time.local(2023, 3, 26, 2, 30))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 3, 26, 2, 30) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 27, 2, 30) + + +async def test_generational_delete_dst_start_rome_3_00(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "03:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 1, 1) + end = time.local(2023, 3, 25) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 2, 1, 3), + time.local(2023, 3, 1, 3), + time.local(2023, 3, 6, 3), + time.local(2023, 3, 13, 3), + time.local(2023, 3, 20, 3), + time.local(2023, 3, 23, 3), + time.local(2023, 3, 24, 3), + time.local(2023, 3, 25, 3)] + + assert time.now() == time.local(2023, 3, 25, 3) + assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 3) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 26, 3) + + time.setNow(time.toUtc(time.local(2023, 3, 26, 3))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 3, 26, 3) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 3, 27, 3) + + +async def test_generational_delete_dst_end_rome(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 6, 1) + end = time.local(2023, 10, 28) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 9, 1, 2), + time.local(2023, 10, 1, 2), + time.local(2023, 10, 9, 2), + time.local(2023, 10, 16, 2), + 
time.local(2023, 10, 23, 2), + time.local(2023, 10, 26, 2), + time.local(2023, 10, 27, 2), + time.local(2023, 10, 28, 2)] + + assert time.now() == time.local(2023, 10, 28, 2) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 2) + + time.setNow(time.toUtc(time.local(2023, 10, 29, 2))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 10, 29, 2) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 30, 2) + + +async def test_generational_delete_dst_end_rome_2_30(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "02:30") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 6, 1) + end = time.local(2023, 10, 28) + await simulate_backups_timeline(time, model, start, end) + + dates = list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 9, 1, 2, 30), + time.local(2023, 10, 1, 2, 30), + time.local(2023, 10, 9, 2, 30), + time.local(2023, 10, 16, 2, 30), + time.local(2023, 10, 23, 2, 30), + time.local(2023, 10, 26, 2, 30), + time.local(2023, 10, 27, 2, 30), + time.local(2023, 10, 28, 2, 30)] + + assert time.now() == time.local(2023, 10, 28, 2, 30) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 2, 30) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 2, 30) + + time.setNow(time.toUtc(time.local(2023, 10, 29, 2, 30))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 10, 29, 2, 30) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 30, 2, 30) + + +async def test_generational_delete_dst_end_rome_3_00(time: FakeTime, model: Model, dest: HelperTestSource, source: HelperTestSource, simple_config: Config): + time.setTimeZone("Europe/Rome") + + simple_config.override(Setting.MAX_BACKUPS_IN_HA, 1) + simple_config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 8) + simple_config.override(Setting.DAYS_BETWEEN_BACKUPS, 1) + simple_config.override(Setting.BACKUP_TIME_OF_DAY, "03:00") + simple_config.override(Setting.GENERATIONAL_DAYS, 3) + simple_config.override(Setting.GENERATIONAL_WEEKS, 3) + simple_config.override(Setting.GENERATIONAL_MONTHS, 2) + simple_config.override(Setting.GENERATIONAL_DELETE_EARLY, True) + + source.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_HA)) + dest.setMax(simple_config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE)) + model.reinitialize() + + start = time.local(2023, 6, 1) + end = time.local(2023, 10, 28) + await simulate_backups_timeline(time, model, start, end) + + dates = 
list([x.date() for x in dest.current.values()]) + dates.sort() + assert dates == [time.local(2023, 9, 1, 3), + time.local(2023, 10, 1, 3), + time.local(2023, 10, 9, 3), + time.local(2023, 10, 16, 3), + time.local(2023, 10, 23, 3), + time.local(2023, 10, 26, 3), + time.local(2023, 10, 27, 3), + time.local(2023, 10, 28, 3)] + + assert time.now() == time.local(2023, 10, 28, 3) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 3) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 29, 3) + + time.setNow(time.toUtc(time.local(2023, 10, 29, 3))) + await model.sync(time.now()) + assert max([x.date() for x in dest.current.values()]) == time.local(2023, 10, 29, 3) + + for x in range(0, 24 * 15): + time.advance(minutes=15) + assert model.nextBackup(time.now()) == time.local(2023, 10, 30, 3) + + +def test_next_time_over_a_day(estimator, data_cache): + time: FakeTime = FakeTime() + now: datetime = time.localize(datetime(1985, 12, 6)) + time.setNow(now) + info = GlobalInfo(time) + + config: Config = createConfig() + config.override(Setting.BACKUP_TIME_OF_DAY, "00:00") + config.override(Setting.DAYS_BETWEEN_BACKUPS, 2.0) + model: Model = Model(config, time, default_source, + default_source, info, estimator, data_cache) + assert model._nextBackup( + now=now, last_backup=None) == now + assert model._nextBackup( + now=now, last_backup=now) == now + timedelta(days=2) diff --git a/hassio-google-drive-backup/tests/test_rangelookup.py b/hassio-google-drive-backup/tests/test_rangelookup.py new file mode 100644 index 0000000..5003ab1 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_rangelookup.py @@ -0,0 +1,26 @@ +from backup.util import RangeLookup + + +def test_lookup(): + data = [1, 3, 5] + lookup = RangeLookup(data, lambda x: x) + assert list(lookup.matches(-1, 0)) == [] + assert list(lookup.matches(6, 7)) == [] + assert list(lookup.matches(2, 2)) == [] + assert list(lookup.matches(4, 4)) == [] + assert list(lookup.matches(6, 6)) == [] + + assert list(lookup.matches(0, 6)) == [1, 3, 5] + assert list(lookup.matches(1, 5)) == [1, 3, 5] + + assert list(lookup.matches(1, 3)) == [1, 3] + assert list(lookup.matches(0, 4)) == [1, 3] + assert list(lookup.matches(3, 5)) == [3, 5] + assert list(lookup.matches(2, 6)) == [3, 5] + + assert list(lookup.matches(0, 2)) == [1] + assert list(lookup.matches(1, 1)) == [1] + assert list(lookup.matches(3, 3)) == [3] + assert list(lookup.matches(2, 4)) == [3] + assert list(lookup.matches(5, 5)) == [5] + assert list(lookup.matches(4, 5)) == [5] diff --git a/hassio-google-drive-backup/tests/test_resolver.py b/hassio-google-drive-backup/tests/test_resolver.py new file mode 100644 index 0000000..26f7e5f --- /dev/null +++ b/hassio-google-drive-backup/tests/test_resolver.py @@ -0,0 +1,46 @@ +import pytest +import socket + +from backup.config import Config, Setting +from backup.util import Resolver + + +@pytest.mark.asyncio +async def test_empty_name_server(resolver: Resolver, config: Config): + assert resolver._alt_dns.nameservers == ["8.8.8.8", "8.8.4.4"] + assert resolver._resolver is resolver._original_dns + config.override(Setting.ALTERNATE_DNS_SERVERS, "") + resolver.updateConfig() + assert resolver._resolver is resolver._alt_dns + + # make sure the value is cached + prev = resolver._alt_dns + resolver.updateConfig() + assert resolver._alt_dns is prev + + +@pytest.mark.asyncio +async def test_toggle(resolver: Resolver): + assert resolver._resolver is resolver._original_dns + 
resolver.toggle() + assert resolver._resolver is resolver._alt_dns + resolver.toggle() + assert resolver._resolver is resolver._original_dns + + +@pytest.mark.asyncio +async def test_hard_resolve(resolver: Resolver, config: Config): + expected = [{ + 'family': 0, + 'flags': socket.AddressInfo.AI_NUMERICHOST, + 'port': 1234, + 'proto': 0, + 'host': "1.2.3.4", + 'hostname': "www.googleapis.com" + }] + config.override(Setting.DRIVE_IPV4, "1.2.3.4") + assert await resolver.resolve("www.googleapis.com", 1234, 0) == expected + resolver.toggle() + assert await resolver.resolve("www.googleapis.com", 1234, 0) == expected + resolver.toggle() + assert await resolver.resolve("www.googleapis.com", 1234, 0) == expected diff --git a/hassio-google-drive-backup/tests/test_scheme.py b/hassio-google-drive-backup/tests/test_scheme.py new file mode 100644 index 0000000..b4fe62b --- /dev/null +++ b/hassio-google-drive-backup/tests/test_scheme.py @@ -0,0 +1,442 @@ +from datetime import datetime, timedelta + +import pytest +from dateutil.tz import tzutc +from pytest import fail + +from backup.model import GenConfig, GenerationalScheme, DummyBackup, Backup +from backup.time import Time + + +def test_timezone(time) -> None: + assert time.local_tz is not None + + +def test_trivial(time) -> None: + config = GenConfig(days=1) + + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + makeBackup("single", time.local(1928, 12, 6)) + ] + + assert scheme.getOldest(backups)[1].date() == time.local(1928, 12, 6) + + +def test_trivial_empty(time): + config = GenConfig(days=1) + scheme = GenerationalScheme(time, config, count=0) + assert scheme.getOldest([])[1] is None + + +def test_trivial_oldest(time: Time) -> None: + config = GenConfig(days=1) + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + makeBackup("test", time.local(1985, 12, 6, 10)), + makeBackup("test", time.local(1985, 12, 6, 12)), + makeBackup("test", time.local(1985, 12, 6, 13)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1985, 12, 6, 10), + time.local(1985, 12, 6, 12), + time.local(1985, 12, 6, 13) + ]) + + +def test_duplicate_weeks(time): + config = GenConfig(weeks=1, day_of_week='wed') + + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + makeBackup("test", time.local(1985, 12, 5)), + makeBackup("test", time.local(1985, 12, 4)), + makeBackup("test", time.local(1985, 12, 1)), + makeBackup("test", time.local(1985, 12, 2)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1985, 12, 1), + time.local(1985, 12, 2), + time.local(1985, 12, 5), + time.local(1985, 12, 4) + ]) + + +def test_duplicate_months(time) -> None: + config = GenConfig(months=2, day_of_month=15) + + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + makeBackup("test", time.local(1985, 12, 6)), + makeBackup("test", time.local(1985, 12, 15)), + makeBackup("test", time.local(1985, 11, 20)), + makeBackup("test", time.local(1985, 11, 15)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1985, 11, 20), + time.local(1985, 12, 6), + time.local(1985, 11, 15), + time.local(1985, 12, 15) + ]) + + +def test_duplicate_years(time): + config = GenConfig(years=2, day_of_year=1) + + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + makeBackup("test", time.local(1985, 12, 31)), + makeBackup("test", time.local(1985, 1, 1)), + makeBackup("test", time.local(1984, 12, 31)), + makeBackup("test", time.local(1984, 1, 1)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1984, 12, 31), + 
time.local(1985, 12, 31), + time.local(1984, 1, 1), + time.local(1985, 1, 1) + ]) + + +def test_removal_order(time) -> None: + config = GenConfig(days=5, weeks=2, months=2, years=2, + day_of_week='mon', day_of_month=15, day_of_year=1) + + scheme = GenerationalScheme(time, config, count=0) + + backups = [ + # 5 days, week 1 + makeBackup("test", time.local(1985, 12, 7)), # day 1 + makeBackup("test", time.local(1985, 12, 6)), # day 2 + makeBackup("test", time.local(1985, 12, 5)), # day 3 + makeBackup("test", time.local(1985, 12, 4)), # day 4 + makeBackup("test", time.local(1985, 12, 3)), # day 5 + + makeBackup("test", time.local(1985, 12, 1)), # 1st week pref + + # week 2 + makeBackup("test", time.local(1985, 11, 25)), # 1st month pref + + # month2 + makeBackup("test", time.local(1985, 11, 15)), # 2nd month pref + + # year 1 + makeBackup("test", time.local(1985, 1, 1)), # 1st year preference + makeBackup("test", time.local(1985, 1, 2)), + + # year 2 + makeBackup("test", time.local(1984, 6, 1)), # 2nd year pref + makeBackup("test", time.local(1984, 7, 1)), + + # year 3 + makeBackup("test", time.local(1983, 1, 1)), + ] + assertRemovalOrder(scheme, backups, [ + time.local(1983, 1, 1), + time.local(1984, 7, 1), + time.local(1985, 1, 2), + + time.local(1984, 6, 1), + time.local(1985, 1, 1), + time.local(1985, 11, 15), + time.local(1985, 11, 25), + time.local(1985, 12, 1), + time.local(1985, 12, 3), + time.local(1985, 12, 4), + time.local(1985, 12, 5), + time.local(1985, 12, 6), + time.local(1985, 12, 7) + ]) + + +@pytest.mark.timeout(60) +def test_simulate_daily_backup_for_4_years(time): + config = GenConfig(days=4, weeks=4, months=4, years=4, + day_of_week='mon', day_of_month=1, day_of_year=1) + scheme = GenerationalScheme(time, config, count=16) + backups = simulate(time.local(2019, 1, 1), + time.local(2022, 12, 31), + scheme) + assertRemovalOrder(GenerationalScheme(time, config, count=0), backups, [ + # 4 years + time.local(2019, 1, 1), + time.local(2020, 1, 1), + time.local(2021, 1, 1), + time.local(2022, 1, 1), + + # 4 months + time.local(2022, 9, 1), + time.local(2022, 10, 1), + time.local(2022, 11, 1), + time.local(2022, 12, 1), + + # 4 weeks + time.local(2022, 12, 5), + time.local(2022, 12, 12), + time.local(2022, 12, 19), + time.local(2022, 12, 26), + + # 4 days + time.local(2022, 12, 28), + time.local(2022, 12, 29), + time.local(2022, 12, 30), + time.local(2022, 12, 31) + ]) + + +@pytest.mark.timeout(60) +def test_simulate_agressive_daily_backup_for_4_years(time): + config = GenConfig(days=4, weeks=4, months=4, years=4, + day_of_week='mon', day_of_month=1, day_of_year=1, aggressive=True) + scheme = GenerationalScheme(time, config, count=16) + backups = simulate(time.local(2019, 1, 1), + time.local(2022, 12, 31), + scheme) + + assertRemovalOrder(GenerationalScheme(time, config, count=0), backups, [ + # 4 years + time.local(2019, 1, 1), + time.local(2020, 1, 1), + time.local(2021, 1, 1), + time.local(2022, 1, 1), + + # 4 months + time.local(2022, 9, 1), + time.local(2022, 10, 1), + time.local(2022, 11, 1), + time.local(2022, 12, 1), + + # 4 weeks + time.local(2022, 12, 5), + time.local(2022, 12, 12), + time.local(2022, 12, 19), + time.local(2022, 12, 26), + + # 4 days + time.local(2022, 12, 28), + time.local(2022, 12, 29), + time.local(2022, 12, 30), + time.local(2022, 12, 31), + ]) + + +def test_count_limit(time): + config = GenConfig(years=2, day_of_year=1) + scheme = GenerationalScheme(time, config, count=1) + backups = [ + makeBackup("test", time.local(1985, 1, 1)), + 
makeBackup("test", time.local(1984, 1, 1)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1984, 1, 1) + ]) + + +def test_aggressive_removal_below_limit(time): + config = GenConfig(years=2, day_of_year=1, aggressive=True) + scheme = GenerationalScheme(time, config, count=5) + backups = [ + makeBackup("test", time.local(1985, 1, 1)), + makeBackup("test", time.local(1985, 1, 2)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1985, 1, 2) + ]) + + +def test_aggressive_removal_at_limit_ok(time): + config = GenConfig(years=2, day_of_year=1, aggressive=True) + scheme = GenerationalScheme(time, config, count=2) + backups = [ + makeBackup("test", time.local(1985, 1, 1)), + makeBackup("test", time.local(1984, 1, 1)) + ] + assertRemovalOrder(scheme, backups, []) + + +def test_aggressive_removal_over_limit(time): + config = GenConfig(years=2, day_of_year=1, aggressive=True) + scheme = GenerationalScheme(time, config, count=2) + backups = [ + makeBackup("test", time.local(1985, 1, 1)), + makeBackup("test", time.local(1984, 1, 1)), + makeBackup("test", time.local(1983, 1, 1)), + makeBackup("test", time.local(1983, 1, 2)) + ] + assertRemovalOrder(scheme, backups, [ + time.local(1983, 1, 1), + time.local(1983, 1, 2) + ]) + + +def test_removal_order_week(time: Time): + config = GenConfig(weeks=1, day_of_week='wed', aggressive=True) + scheme = GenerationalScheme(time, config, count=1) + backups = [ + makeBackup("test", time.local(2019, 10, 28)), + makeBackup("test", time.local(2019, 10, 29)), + makeBackup("test", time.local(2019, 10, 30, 1)), + makeBackup("test", time.local(2019, 10, 30, 2)), + makeBackup("test", time.local(2019, 10, 31)), + makeBackup("test", time.local(2019, 11, 1)), + makeBackup("test", time.local(2019, 11, 2)), + makeBackup("test", time.local(2019, 11, 3)), + ] + assertRemovalOrder(scheme, backups, [ + time.local(2019, 10, 28), + time.local(2019, 10, 29), + time.local(2019, 10, 30, 1), + time.local(2019, 10, 31), + time.local(2019, 11, 1), + time.local(2019, 11, 2), + time.local(2019, 11, 3) + ]) + + +def test_removal_order_month(time): + config = GenConfig(months=1, day_of_month=20, aggressive=True) + + scheme = GenerationalScheme(time, config, count=1) + + backups = [ + makeBackup("test", time.local(2019, 1, 1)), + makeBackup("test", time.local(2019, 1, 2)), + makeBackup("test", time.local(2019, 1, 20, 1)), + makeBackup("test", time.local(2019, 1, 20, 2)), + makeBackup("test", time.local(2019, 1, 21)), + makeBackup("test", time.local(2019, 1, 25)), + makeBackup("test", time.local(2019, 1, 26)), + makeBackup("test", time.local(2019, 1, 27)), + ] + assertRemovalOrder(scheme, backups, [ + time.local(2019, 1, 1), + time.local(2019, 1, 2), + time.local(2019, 1, 20, 1), + time.local(2019, 1, 21), + time.local(2019, 1, 25), + time.local(2019, 1, 26), + time.local(2019, 1, 27) + ]) + + +def test_removal_order_many_months(time): + config = GenConfig(months=70, day_of_month=20, aggressive=True) + + scheme = GenerationalScheme(time, config, count=10) + + backups = [ + makeBackup("test", time.local(2019, 7, 20)), # preferred + makeBackup("test", time.local(2018, 7, 18)), # preferred + makeBackup("test", time.local(2018, 7, 21)), + makeBackup("test", time.local(2017, 1, 19)), + makeBackup("test", time.local(2017, 1, 20)), # preferred + makeBackup("test", time.local(2017, 1, 31)), + makeBackup("test", time.local(2016, 12, 1)), # preferred + makeBackup("test", time.local(2014, 1, 31)), + makeBackup("test", time.local(2014, 1, 1)), # preferred + ] + assertRemovalOrder(scheme, 
backups, [
+        time.local(2014, 1, 31),
+        time.local(2017, 1, 19),
+        time.local(2017, 1, 31),
+        time.local(2018, 7, 21),
+    ])
+
+
+def test_removal_order_years(time):
+    config = GenConfig(years=2, day_of_year=15, aggressive=True)
+
+    scheme = GenerationalScheme(time, config, count=10)
+
+    backups = [
+        makeBackup("test", time.local(2019, 2, 15)),
+        makeBackup("test", time.local(2019, 1, 15)),  # keep
+        makeBackup("test", time.local(2018, 1, 14)),
+        makeBackup("test", time.local(2018, 1, 15)),  # keep
+        makeBackup("test", time.local(2018, 1, 16)),
+        makeBackup("test", time.local(2017, 1, 15)),
+    ]
+    assertRemovalOrder(scheme, backups, [
+        time.local(2017, 1, 15),
+        time.local(2018, 1, 14),
+        time.local(2018, 1, 16),
+        time.local(2019, 2, 15),
+    ])
+
+
+@pytest.mark.asyncio
+async def test_ignored_generational_labels(time):
+    config = GenConfig(days=2)
+
+    scheme = GenerationalScheme(time, config, count=10)
+    backup1 = makeBackup("test", time.local(2019, 2, 15))
+    backup2 = makeBackup("test", time.local(2019, 2, 14))
+    backup3 = makeBackup("test", time.local(2019, 2, 13), ignore=True)
+    backups = [backup1, backup2, backup3]
+    scheme.handleNaming(backups)
+    assert backup1.getStatusDetail() == ['Day 1 of 2']
+    assert backup2.getStatusDetail() == ['Day 2 of 2']
+    assert backup3.getStatusDetail() is None
+
+
+def getRemovalOrder(scheme, toCheck):
+    backups = list(toCheck)
+    removed = []
+    while True:
+        # getOldest() returns a (reason, backup) tuple
+        reason, oldest = scheme.getOldest(backups)
+        if oldest is None:
+            break
+        removed.append(oldest.date())
+        backups.remove(oldest)
+    return removed
+
+
+def assertRemovalOrder(scheme, toCheck, expected):
+    backups = list(toCheck)
+    removed = []
+    index = 0
+    time = scheme.time
+    while True:
+        reason, oldest = scheme.getOldest(backups)
+        if index >= len(expected):
+            if oldest is not None:
+                fail("at index {0}, expected 'None' but got {1}".format(
+                    index, time.toLocal(oldest.date())))
+            break
+        if oldest.date() != expected[index]:
+            fail("at index {0}, expected {1} but got {2}".format(
+                index, time.toLocal(expected[index]), time.toLocal(oldest.date())))
+        removed.append(oldest.date())
+        backups.remove(oldest)
+        index += 1
+    return removed
+
+
+def makeBackup(slug, date, name=None, ignore=False) -> Backup:
+    if not name:
+        name = slug
+    return DummyBackup(name, date.astimezone(tzutc()), "src", slug, ignore=ignore)
+
+
+def simulate(start: datetime, end: datetime, scheme: GenerationalScheme, backups=None):
+    # Use a fresh list per call; a mutable default argument would leak
+    # backups between the two simulation tests above.
+    if backups is None:
+        backups = []
+    today = start
+    while today <= end:
+        backups.append(makeBackup("test", today))
+        reason, oldest = scheme.getOldest(backups)
+        while oldest is not None:
+            backups.remove(oldest)
+            reason, oldest = scheme.getOldest(backups)
+        today = today + timedelta(hours=27)
+        today = scheme.time.local(today.year, today.month, today.day)
+    return backups
diff --git a/hassio-google-drive-backup/tests/test_server.py b/hassio-google-drive-backup/tests/test_server.py
new file mode 100644
index 0000000..b530d98
--- /dev/null
+++ b/hassio-google-drive-backup/tests/test_server.py
@@ -0,0 +1,59 @@
+
+import pytest
+from yarl import URL
+from dev.simulationserver import SimulationServer
+from aiohttp import ClientSession, hdrs
+from backup.config import Config
+from .faketime import FakeTime
+import json
+
+@pytest.mark.asyncio
+async def test_refresh_known_error(server: SimulationServer, session: ClientSession, config: Config, server_url: URL):
+    async with session.post(server_url.with_path("drive/refresh"), json={"blah": "blah"}) as r:
+        assert r.status ==
503 + assert await r.json() == { + 'error': "Required key 'refresh_token' was missing from the request payload" + } + + +@pytest.mark.asyncio +async def test_refresh_unknown_error(server: SimulationServer, session: ClientSession, config: Config, server_url: URL): + async with session.post(server_url.with_path("drive/refresh"), data={}) as r: + assert r.status == 500 + assert len((await r.json())["error"]) > 0 + + +@pytest.mark.asyncio +async def test_old_auth_method(server: SimulationServer, session: ClientSession, server_url: URL): + start_auth = server_url.with_path("drive/authorize").with_query({ + "redirectbacktoken": "http://example.com" + }) + + # Verify the redirect to Drive's oauthv2 endpoint + async with session.get(start_auth, data={}, allow_redirects=False) as r: + assert r.status == 303 + redirect = URL(r.headers[hdrs.LOCATION]) + assert redirect.path == "/o/oauth2/v2/auth" + assert redirect.host == "localhost" + + # Verify the redirect back to the server's oauth page + async with session.get(redirect, data={}, allow_redirects=False) as r: + assert r.status == 303 + redirect = URL(r.headers[hdrs.LOCATION]) + assert redirect.path == "/drive/authorize" + assert redirect.host == "localhost" + + # Verify we gte redirected back to the addon (example.com) with creds + async with session.get(redirect, data={}, allow_redirects=False) as r: + assert r.status == 303 + redirect = URL(r.headers[hdrs.LOCATION]) + assert redirect.query.get("creds") is not None + assert redirect.host == "example.com" + + +async def test_log_to_firestore(time: FakeTime, server: SimulationServer, session: ClientSession, server_url: URL): + data = {"info": "testing"} + async with session.post(server_url.with_path("logerror"), data=json.dumps(data)) as r: + assert r.status == 200 + assert server._authserver.error_store.last_error is not None + assert server._authserver.error_store.last_error['report'] == data diff --git a/hassio-google-drive-backup/tests/test_settings.py b/hassio-google-drive-backup/tests/test_settings.py new file mode 100644 index 0000000..9b204fe --- /dev/null +++ b/hassio-google-drive-backup/tests/test_settings.py @@ -0,0 +1,37 @@ +from backup.config import Setting, addon_config, _CONFIG + + +def test_defaults(): + # all settings should have a default + for setting in Setting: + if setting is not Setting.DEBUGGER_PORT: + assert setting.default() is not None, setting.value + " has no default" + + +def test_validators(): + # all defaults shoudl have a validator + for setting in Setting: + assert setting.validator() is not None, setting.value + " has no validator" + + +def test_defaults_are_valid(): + # all defaults values should be valid and validate to their own value + for setting in Setting: + assert setting.validator().validate(setting.default()) == setting.default() + + +def test_setting_configuration(): + # All settings in the default config should have the exact same parse expression + for setting in Setting: + if setting.value in addon_config["schema"]: + if setting != Setting.GENERATIONAL_DAY_OF_WEEK: + assert _CONFIG[setting] == addon_config["schema"][setting.value], setting.value + + +def test_settings_present(): + all = set() + for setting in Setting: + all.add(setting.value) + + for setting in addon_config["schema"]: + assert setting in all, setting + " not present in config.json" diff --git a/hassio-google-drive-backup/tests/test_starter.py b/hassio-google-drive-backup/tests/test_starter.py new file mode 100644 index 0000000..eb8192c --- /dev/null +++ 
b/hassio-google-drive-backup/tests/test_starter.py @@ -0,0 +1,22 @@ +import pytest +import os +from backup.module import MainModule, BaseModule +from backup.starter import Starter +from backup.config import Config, Setting +from injector import Injector + + +@pytest.mark.asyncio +async def test_bootstrap_requirements(cleandir): + # This just verifies we're able to satisfy starter's injector requirements. + injector = Injector([BaseModule(), MainModule()]) + config = injector.get(Config) + config.override(Setting.DATA_CACHE_FILE_PATH, os.path.join(cleandir, "data_cache.json")) + injector.get(Starter) + + +@pytest.mark.asyncio +async def test_start_and_stop(injector): + starter = injector.get(Starter) + await starter.start() + await starter.stop() diff --git a/hassio-google-drive-backup/tests/test_timezone.py b/hassio-google-drive-backup/tests/test_timezone.py new file mode 100644 index 0000000..691f7a0 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_timezone.py @@ -0,0 +1,64 @@ +import datetime +import os +from backup.time import Time, _infer_timezone_from_env, _infer_timezone_from_name, _infer_timezone_from_offset, _infer_timezone_from_system +from .faketime import FakeTime + + +def test_parse() -> None: + time = Time.parse("1985-12-06 01:01:01.0001") + assert str(time) == "1985-12-06 01:01:01.000100+00:00" + + time = Time.parse("1985-12-06 01:01:01.0001+01:00") + assert str(time) == "1985-12-06 01:01:01.000100+01:00" + + +def test_parse_timezone(time) -> None: + assertUtc(Time.parse("1985-12-06")) + assertUtc(Time.parse("1985-12-06 21:21")) + assertUtc(Time.parse("1985-12-06 21:21+00:00")) + assertUtc(Time.parse("1985-12-06 21:21 UTC")) + assertUtc(Time.parse("1985-12-06 21:21 GGGR")) + + assertOffset(Time.parse("1985-12-06 21:21+10"), 10) + assertOffset(Time.parse("1985-12-06 21:21-10"), -10) + + +def assertOffset(time, hours): + assert time.tzinfo.utcoffset(time) == datetime.timedelta(hours=hours) + + +def assertUtc(time): + assertOffset(time, 0) + + +def test_common_timezones(time: FakeTime): + assert _infer_timezone_from_system() is not None + assert _infer_timezone_from_name() is not None + assert _infer_timezone_from_offset() is not None + assert _infer_timezone_from_env() is None + + os.environ["TZ"] = "America/Denver" + assert _infer_timezone_from_env().tzname(None) == "America/Denver" + + os.environ["TZ"] = "Australia/Brisbane" + assert _infer_timezone_from_env().tzname(None) == "Australia/Brisbane" + + tzs = {"SYSTEM": _infer_timezone_from_system(), + "ENV": _infer_timezone_from_env(), + "OFFSET": _infer_timezone_from_offset(), + "NAME": _infer_timezone_from_name()} + + for name, tz in tzs.items(): + print(name) + time.setTimeZone(tz) + time.now() + time.nowLocal() + time.localize(datetime.datetime(1985, 12, 6)) + time.local(1985, 12, 6) + time.toLocal(time.now()) + time.toUtc(time.nowLocal()) + + +def test_system_timezone(time: FakeTime): + tz = _infer_timezone_from_system() + assert tz.tzname(time.now()) == "UTC" diff --git a/hassio-google-drive-backup/tests/test_uiserver.py b/hassio-google-drive-backup/tests/test_uiserver.py new file mode 100644 index 0000000..cff2135 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_uiserver.py @@ -0,0 +1,1180 @@ +from datetime import timedelta +import os +import json +from os.path import abspath, join +from shutil import copyfile +from urllib.parse import quote + +import aiohttp +import pytest +import asyncio +import base64 +from aiohttp import BasicAuth +from aiohttp.client import ClientSession + +from backup.file import 
File +from backup.util import AsyncHttpGetter, GlobalInfo, DataCache, UpgradeFlags +from backup.ui import UiServer, Restarter +from backup.config import Config, Setting, CreateOptions +from backup.const import (ERROR_CREDS_EXPIRED, ERROR_EXISTING_FOLDER, + ERROR_MULTIPLE_DELETES, ERROR_NO_BACKUP, + SOURCE_GOOGLE_DRIVE, SOURCE_HA) +from backup.creds import Creds +from backup.model import Coordinator, Backup +from backup.drive import DriveSource, FolderFinder, OOB_CRED_CUTOFF +from backup.drive.drivesource import FOLDER_MIME_TYPE, DriveRequests +from backup.ha import HaSource, HaUpdater +from backup.config import VERSION +from .faketime import FakeTime +from .helpers import compareStreams +from yarl import URL +from dev.ports import Ports +from dev.simulated_supervisor import SimulatedSupervisor +from dev.simulationserver import SimulationServer +from dev.simulated_google import SimulatedGoogle +from bs4 import BeautifulSoup +from .conftest import ReaderHelper + + +@pytest.fixture +def source(ha): + return ha + + +@pytest.fixture +def dest(drive): + return drive + + +@pytest.fixture +def simple_config(config): + return config + + +@pytest.fixture +async def restarter(injector, server): + restarter = injector.get(Restarter) + await restarter.start() + return restarter + + +@pytest.mark.asyncio +async def test_uiserver_start(ui_server: UiServer): + assert ui_server.running + + +@pytest.mark.asyncio +@pytest.mark.timeout(10) +async def test_uiserver_static_files(reader: ReaderHelper): + await reader.get("") + await reader.get("reauthenticate") + await reader.get("pp") + await reader.get("tos") + + +@pytest.mark.asyncio +async def test_getstatus(reader, config: Config, ha, server, ports: Ports): + File.touch(config.get(Setting.INGRESS_TOKEN_FILE_PATH)) + await ha.init() + data = await reader.getjson("getstatus") + assert data['ask_error_reports'] is True + assert data['cred_version'] == 0 + assert data['firstSync'] is True + assert data['folder_id'] is None + assert data['last_error'] is None + assert data['last_backup_text'] == "Never" + assert data['next_backup_text'] == "right now" + assert data['backup_name_template'] == config.get(Setting.BACKUP_NAME) + assert data['warn_ingress_upgrade'] is False + assert len(data['backups']) == 0 + assert data['sources'][SOURCE_GOOGLE_DRIVE] == { + 'deletable': 0, + 'name': SOURCE_GOOGLE_DRIVE, + 'retained': 0, + 'backups': 0, + 'latest': None, + 'size': '0.0 B', + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE), + 'title': "Google Drive", + 'icon': 'google-drive', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + assert data['sources'][SOURCE_HA] == { + 'deletable': 0, + 'name': SOURCE_HA, + 'retained': 0, + 'backups': 0, + 'latest': None, + 'size': '0.0 B', + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_HA), + 'title': "Home Assistant", + 'free_space': "0.0 B", + 'icon': 'home-assistant', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + assert len(data['sources']) == 2 + + +@pytest.mark.asyncio +async def test_getstatus_sync(reader, config: Config, backup: Backup, time: FakeTime): + data = await reader.getjson("getstatus") + assert data['firstSync'] is False + assert data['folder_id'] is not None + assert data['last_error'] is None + assert data['last_backup_text'] != "Never" + assert data['next_backup_text'] != "right now" + assert len(data['backups']) == 1 + assert data['sources'][SOURCE_GOOGLE_DRIVE] == { + 'deletable': 1, + 'name': SOURCE_GOOGLE_DRIVE, + 'retained': 0, + 
'backups': 1, + 'latest': time.asRfc3339String(time.now()), + 'size': data['sources'][SOURCE_GOOGLE_DRIVE]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE), + 'title': "Google Drive", + 'icon': 'google-drive', + 'free_space': "5.0 GB", + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "testing@no.where", + } + assert data['sources'][SOURCE_HA] == { + 'deletable': 1, + 'name': SOURCE_HA, + 'retained': 0, + 'backups': 1, + 'latest': time.asRfc3339String(time.now()), + 'size': data['sources'][SOURCE_HA]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_HA), + 'title': "Home Assistant", + 'free_space': data['sources'][SOURCE_HA]['free_space'], + 'icon': 'home-assistant', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + assert len(data['sources']) == 2 + + +@pytest.mark.asyncio +async def test_retain(reader: ReaderHelper, config: Config, backup: Backup, coord: Coordinator, time: FakeTime): + slug = backup.slug() + assert await reader.getjson("retain", json={'slug': slug, 'sources': {"GoogleDrive": True, "HomeAssistant": True}}) == { + 'message': "Updated the backup's settings" + } + status = await reader.getjson("getstatus") + assert status['sources'][SOURCE_GOOGLE_DRIVE] == { + 'deletable': 0, + 'name': SOURCE_GOOGLE_DRIVE, + 'retained': 1, + 'backups': 1, + 'latest': time.asRfc3339String(backup.date()), + 'size': status['sources'][SOURCE_GOOGLE_DRIVE]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE), + 'title': "Google Drive", + 'icon': 'google-drive', + 'free_space': "5.0 GB", + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "testing@no.where", + } + assert status['sources'][SOURCE_HA] == { + 'deletable': 0, + 'name': SOURCE_HA, + 'retained': 1, + 'backups': 1, + 'latest': time.asRfc3339String(backup.date()), + 'size': status['sources'][SOURCE_HA]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_HA), + 'title': "Home Assistant", + 'free_space': status['sources'][SOURCE_HA]["free_space"], + 'icon': 'home-assistant', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + + await reader.getjson("retain", json={'slug': slug, 'sources': {"GoogleDrive": False, "HomeAssistant": False}}) + status = await reader.getjson("getstatus") + assert status['sources'][SOURCE_GOOGLE_DRIVE] == { + 'deletable': 1, + 'name': SOURCE_GOOGLE_DRIVE, + 'retained': 0, + 'backups': 1, + 'latest': time.asRfc3339String(backup.date()), + 'size': status['sources'][SOURCE_GOOGLE_DRIVE]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE), + 'title': "Google Drive", + 'icon': 'google-drive', + 'free_space': "5.0 GB", + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "testing@no.where", + } + assert status['sources'][SOURCE_HA] == { + 'deletable': 1, + 'name': SOURCE_HA, + 'retained': 0, + 'backups': 1, + 'latest': time.asRfc3339String(backup.date()), + 'size': status['sources'][SOURCE_HA]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_HA), + 'title': "Home Assistant", + 'free_space': status['sources'][SOURCE_HA]["free_space"], + 'icon': 'home-assistant', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + delete_req = { + "slug": slug, + "sources": ["GoogleDrive"] + } + await reader.getjson("deleteSnapshot", json=delete_req) + await reader.getjson("retain", json={'slug': slug, 'sources': {"HomeAssistant": True}}) + status = await reader.getjson("getstatus") + assert status['sources'][SOURCE_GOOGLE_DRIVE] 
== { + 'deletable': 0, + 'name': SOURCE_GOOGLE_DRIVE, + 'retained': 0, + 'backups': 0, + 'latest': None, + 'size': status['sources'][SOURCE_GOOGLE_DRIVE]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE), + 'title': "Google Drive", + 'icon': 'google-drive', + 'free_space': "5.0 GB", + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "testing@no.where", + } + assert status['sources'][SOURCE_HA] == { + 'deletable': 0, + 'name': SOURCE_HA, + 'retained': 1, + 'backups': 1, + 'latest': time.asRfc3339String(backup.date()), + 'size': status['sources'][SOURCE_HA]['size'], + 'enabled': True, + 'max': config.get(Setting.MAX_BACKUPS_IN_HA), + 'title': "Home Assistant", + 'free_space': status['sources'][SOURCE_HA]["free_space"], + 'icon': 'home-assistant', + 'ignored': 0, + 'ignored_size': '0.0 B', + 'detail': "", + } + + # sync again, which should upoload the backup to Drive + await coord.sync() + status = await reader.getjson("getstatus") + assert status['sources'][SOURCE_GOOGLE_DRIVE]['backups'] == 1 + assert status['sources'][SOURCE_GOOGLE_DRIVE]['retained'] == 0 + assert status['sources'][SOURCE_GOOGLE_DRIVE]['backups'] == 1 + + +@pytest.mark.asyncio +async def test_note(reader: ReaderHelper, config: Config, backup: Backup, coord: Coordinator, time: FakeTime): + slug = backup.slug() + assert backup.note() is None + assert await reader.getjson("note", json={'slug': slug, 'note': "This is the note"}) == { + 'message': "Updated the backup's settings" + } + status = await reader.getjson("getstatus") + assert backup.note() == "This is the note" + assert status['backups'][0]['note'] == "This is the note" + + +@pytest.mark.asyncio +async def test_sync(reader, ui_server, coord: Coordinator, time: FakeTime, session): + assert len(coord.backups()) == 0 + status = await reader.getjson("sync") + assert len(coord.backups()) == 1 + assert status == await reader.getjson("getstatus") + time.advance(days=7) + assert len((await reader.getjson("sync"))['backups']) == 2 + + +@pytest.mark.asyncio +async def test_delete(reader: ReaderHelper, ui_server, backup): + slug = backup.slug() + + data = {"slug": "bad_slug", "sources": ["GoogleDrive"]} + await reader.assertError("deleteSnapshot", json=data, error_type=ERROR_NO_BACKUP) + status = await reader.getjson("getstatus") + assert len(status['backups']) == 1 + data["slug"] = slug + assert await reader.getjson("deleteSnapshot", json=data) == {"message": "Deleted from 1 place(s)"} + await reader.assertError("deleteSnapshot", json=data, error_type=ERROR_NO_BACKUP) + status = await reader.getjson("getstatus") + assert len(status['backups']) == 1 + assert status['sources'][SOURCE_GOOGLE_DRIVE]['backups'] == 0 + data["sources"] = ["HomeAssistant"] + assert await reader.getjson("deleteSnapshot", json=data) == {"message": "Deleted from 1 place(s)"} + status = await reader.getjson("getstatus") + assert len(status['backups']) == 0 + data["sources"] = [] + await reader.assertError("deleteSnapshot", json=data, error_type=ERROR_NO_BACKUP) + + +@pytest.mark.asyncio +async def test_backup_now(reader, ui_server, time: FakeTime, backup: Backup, coord: Coordinator): + assert len(coord.backups()) == 1 + assert (await reader.getjson("getstatus"))["backups"][0]["date"] == time.toLocal(time.now()).strftime("%c") + + time.advance(hours=1) + assert await reader.getjson("backup?custom_name=TestName&retain_drive=False&retain_ha=False") == { + 'message': "Requested backup 'TestName'" + } + status = await reader.getjson('getstatus') + assert 
len(status["backups"]) == 2 + assert status["backups"][1]["date"] == time.toLocal(time.now()).strftime("%c") + assert status["backups"][1]["name"] == "TestName" + assert status["backups"][1]["note"] is None + assert status["backups"][1]['sources'][0]['retained'] is False + assert len(status["backups"][1]['sources']) == 1 + + time.advance(hours=1) + assert await reader.getjson("backup?custom_name=TestName2&retain_drive=True&retain_ha=False") == { + 'message': "Requested backup 'TestName2'" + } + await coord.sync() + status = await reader.getjson('getstatus') + assert len(status["backups"]) == 3 + assert status["backups"][2]["date"] == time.toLocal(time.now()).strftime("%c") + assert status["backups"][2]["name"] == "TestName2" + assert status["backups"][2]['sources'][0]['retained'] is False + assert status["backups"][2]['sources'][1]['retained'] is True + + time.advance(hours=1) + assert await reader.getjson("backup?custom_name=TestName3&retain_drive=False&retain_ha=True") == { + 'message': "Requested backup 'TestName3'" + } + await coord.sync() + status = await reader.getjson('getstatus') + assert len(status["backups"]) == 4 + assert status["backups"][3]['sources'][0]['retained'] is True + assert status["backups"][3]['sources'][1]['retained'] is False + assert status["backups"][3]["date"] == time.toLocal(time.now()).strftime("%c") + assert status["backups"][3]["name"] == "TestName3" + + +@pytest.mark.asyncio +async def test_backup_now_with_note(reader, ui_server, time: FakeTime, coord: Coordinator): + assert len(coord.backups()) == 0 + + time.advance(hours=1) + assert await reader.getjson("backup?custom_name=TestName&retain_drive=False&retain_ha=False¬e=ThisIsTheNote") == { + 'message': "Requested backup 'TestName'" + } + await coord.sync() + status = await reader.getjson('getstatus') + assert status['backups'][0]["note"] == "ThisIsTheNote" + + +@pytest.mark.asyncio +async def test_config(reader, ui_server, config: Config, supervisor: SimulatedSupervisor): + update = { + "config": { + "days_between_backups": 20, + "drive_ipv4": "" + }, + "backup_folder": "unused" + } + assert ui_server._starts == 1 + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + assert config.get(Setting.DAYS_BETWEEN_BACKUPS) == 20 + assert supervisor._options["days_between_backups"] == 20 + assert ui_server._starts == 1 + + +@pytest.mark.asyncio +async def test_auth_and_restart(reader, ui_server, config: Config, restarter, coord: Coordinator, supervisor: SimulatedSupervisor): + update = {"config": {"require_login": True, + "expose_extra_server": True}, "backup_folder": "unused"} + assert ui_server._starts == 1 + assert not config.get(Setting.REQUIRE_LOGIN) + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + assert config.get(Setting.REQUIRE_LOGIN) + assert supervisor._options['require_login'] + assert ui_server._starts == 2 + + await reader.get("getstatus", status=401, ingress=False) + await reader.get("getstatus", auth=BasicAuth("user", "badpassword"), status=401, ingress=False) + await reader.get("getstatus", auth=BasicAuth("user", "pass"), ingress=False) + await coord.waitForSyncToFinish() + status = await reader.getjson("getstatus", auth=BasicAuth("user", "pass"), ingress=False) + + # verify a the sync succeeded (no errors) + assert status["last_error"] is None + + # The ingress server shouldn't require login, even though its turned on for the extra server 
+ await reader.get("getstatus") + # even a bad user/pass should work + await reader.get("getstatus", auth=BasicAuth("baduser", "badpassword")) + + +@pytest.mark.asyncio +@pytest.mark.timeout(100) +async def test_expose_extra_server_option(reader, ui_server: UiServer, config: Config): + with pytest.raises(aiohttp.client_exceptions.ClientConnectionError): + await reader.getjson("sync", ingress=False) + config.override(Setting.EXPOSE_EXTRA_SERVER, True) + await ui_server.run() + await reader.getjson("sync", ingress=False) + await ui_server.run() + await reader.getjson("sync", ingress=False) + config.override(Setting.EXPOSE_EXTRA_SERVER, False) + await ui_server.run() + with pytest.raises(aiohttp.client_exceptions.ClientConnectionError): + await reader.getjson("sync", ingress=False) + await reader.getjson("sync") + + +@pytest.mark.asyncio +async def test_update_error_reports_true(reader, ui_server, config: Config, supervisor: SimulatedSupervisor): + assert config.get(Setting.SEND_ERROR_REPORTS) is False + assert not config.isExplicit(Setting.SEND_ERROR_REPORTS) + assert await reader.getjson("errorreports?send=true") == {'message': 'Configuration updated'} + assert config.get(Setting.SEND_ERROR_REPORTS) is True + assert config.isExplicit(Setting.SEND_ERROR_REPORTS) + assert supervisor._options["send_error_reports"] is True + + +@pytest.mark.asyncio +async def test_update_error_reports_false(reader, ui_server, config: Config, supervisor: SimulatedSupervisor): + assert config.get(Setting.SEND_ERROR_REPORTS) is False + assert not config.isExplicit(Setting.SEND_ERROR_REPORTS) + assert await reader.getjson("errorreports?send=false") == {'message': 'Configuration updated'} + assert config.get(Setting.SEND_ERROR_REPORTS) is False + assert config.isExplicit(Setting.SEND_ERROR_REPORTS) + assert supervisor._options["send_error_reports"] is False + + +@pytest.mark.asyncio +async def test_drive_cred_generation(reader: ReaderHelper, ui_server: UiServer, backup, config: Config, global_info: GlobalInfo, session: ClientSession, google): + status = await reader.getjson("getstatus") + assert len(status["backups"]) == 1 + assert global_info.credVersion == 0 + # Invalidate the drive creds, sync, then verify we see an error + google.expireCreds() + status = await reader.getjson("sync") + assert status["last_error"]["error_type"] == ERROR_CREDS_EXPIRED + + # simulate the user going through the Drive authentication workflow + auth_url = URL(status['authenticate_url']).with_query({ + "redirectbacktoken": reader.getUrl(True) + "token", + "version": VERSION, + "return": reader.getUrl(True) + }) + async with session.get(auth_url) as resp: + resp.raise_for_status() + html = await resp.text() + page = BeautifulSoup(html, 'html.parser') + area = page.find("textarea") + creds = str(area.getText()).strip() + + cred_url = URL(reader.getUrl(True) + "token").with_query({"creds": creds, "host": reader.getUrl(True)}) + async with session.get(cred_url) as resp: + resp.raise_for_status() + # verify we got redirected to the addon main page. 
+ assert resp.url == URL(reader.getUrl(True)) + await ui_server.sync(None) + assert global_info._last_error is None + assert global_info.credVersion == 1 + + +@pytest.mark.asyncio +async def test_confirm_multiple_deletes(reader, ui_server, server, config: Config, time: FakeTime, ha: HaSource): + # reconfigure to only store 1 backup + config.override(Setting.MAX_BACKUPS_IN_GOOGLE_DRIVE, 1) + config.override(Setting.MAX_BACKUPS_IN_HA, 1) + + # create three backups + await ha.create(CreateOptions(time.now(), "Name1")) + await ha.create(CreateOptions(time.now(), "Name2")) + await ha.create(CreateOptions(time.now(), "Name3")) + + # verify we have 3 backups an the multiple delete error + status = await reader.getjson("sync") + assert len(status['backups']) == 3 + assert status["last_error"]["error_type"] == ERROR_MULTIPLE_DELETES + assert status["last_error"]["data"] == { + SOURCE_GOOGLE_DRIVE: 0, + SOURCE_GOOGLE_DRIVE + "_desc": '', + SOURCE_HA: 2, + SOURCE_HA + "_desc": "Name1\nName2" + } + + # request that multiple deletes be allowed + assert await reader.getjson("confirmdelete?always=false") == { + 'message': 'Backups deleted this one time' + } + assert config.get(Setting.CONFIRM_MULTIPLE_DELETES) + + # backup, verify the deletes go through + status = await reader.getjson("sync") + assert status["last_error"] is None + assert len(status["backups"]) == 1 + + # create another backup, verify we delete the one + await ha.create(CreateOptions(time.now(), "Name1")) + status = await reader.getjson("sync") + assert len(status['backups']) == 1 + assert status["last_error"] is None + + # create two more backups, verify we see the error again + await ha.create(CreateOptions(time.now(), "Name1")) + await ha.create(CreateOptions(time.now(), "Name2")) + status = await reader.getjson("sync") + assert len(status['backups']) == 3 + assert status["last_error"]["error_type"] == ERROR_MULTIPLE_DELETES + assert status["last_error"]["data"] == { + SOURCE_GOOGLE_DRIVE: 0, + SOURCE_GOOGLE_DRIVE + "_desc": '', + SOURCE_HA: 2, + SOURCE_HA + "_desc": "Name1\nName1" + } + + +@pytest.mark.asyncio +async def test_update_multiple_deletes_setting(reader, ui_server, server, config: Config, time: FakeTime, ha: HaSource, global_info: GlobalInfo): + assert await reader.getjson("confirmdelete?always=true") == { + 'message': 'Configuration updated, I\'ll never ask again' + } + assert not config.get(Setting.CONFIRM_MULTIPLE_DELETES) + + +@pytest.mark.asyncio +async def test_resolve_folder_reuse(reader, config: Config, backup, time, drive): + # Simulate an existing folder error + old_folder = await drive.getFolderId() + File.delete(config.get(Setting.FOLDER_FILE_PATH)) + time.advance(days=1) + status = await reader.getjson("sync") + assert status["last_error"]["error_type"] == ERROR_EXISTING_FOLDER + + assert (await reader.getjson("resolvefolder?use_existing=true")) == {'message': 'Done'} + status = await reader.getjson("sync") + assert status["last_error"] is None + assert old_folder == await drive.getFolderId() + + +@pytest.mark.asyncio +async def test_resolve_folder_new(reader, config: Config, backup, time, drive): + # Simulate an existing folder error + old_folder = await drive.getFolderId() + File.delete(config.get(Setting.FOLDER_FILE_PATH)) + time.advance(days=1) + status = await reader.getjson("sync") + assert status["last_error"]["error_type"] == ERROR_EXISTING_FOLDER + + assert (await reader.getjson("resolvefolder?use_existing=false")) == {'message': 'Done'} + status = await reader.getjson("sync") + assert 
status["last_error"] is None + assert old_folder != await drive.getFolderId() + + +@pytest.mark.asyncio +async def test_ssl_server(reader: ReaderHelper, ui_server: UiServer, config, server, cleandir, restarter): + ssl_dir = abspath(join(__file__, "..", "..", "dev", "ssl")) + copyfile(join(ssl_dir, "localhost.crt"), join(cleandir, "localhost.crt")) + copyfile(join(ssl_dir, "localhost.key"), join(cleandir, "localhost.key")) + update = { + "config": { + "use_ssl": True, + "expose_extra_server": True, + "certfile": join(cleandir, "localhost.crt"), + "keyfile": join(cleandir, "localhost.key") + }, + "backup_folder": "unused" + } + assert ui_server._starts == 1 + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + assert ui_server._starts == 2 + + +@pytest.mark.asyncio +async def test_bad_ssl_config_missing_files(reader: ReaderHelper, ui_server: UiServer, config, server, cleandir, restarter): + update = { + "config": { + "use_ssl": True, + "expose_extra_server": True, + "certfile": join(cleandir, "localhost.crt"), + "keyfile": join(cleandir, "localhost.key") + }, + "backup_folder": "unused" + } + assert ui_server._starts == 1 + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + assert ui_server._starts == 2 + + # Verify the ingress endpoint is still up, but not the SSL one + await reader.getjson("getstatus") + with pytest.raises(aiohttp.client_exceptions.ClientConnectionError): + await reader.getjson("getstatus", ingress=False, ssl=True, sslcontext=False) + + +@pytest.mark.asyncio +async def test_bad_ssl_config_wrong_files(reader: ReaderHelper, ui_server: UiServer, config, server, cleandir, restarter): + ssl_dir = abspath(join(__file__, "..", "..", "dev", "ssl")) + copyfile(join(ssl_dir, "localhost.crt"), join(cleandir, "localhost.crt")) + copyfile(join(ssl_dir, "localhost.key"), join(cleandir, "localhost.key")) + update = { + "config": { + "use_ssl": True, + "expose_extra_server": True, + "certfile": join(cleandir, "localhost.key"), + "keyfile": join(cleandir, "localhost.crt") + }, + "backup_folder": "unused" + } + assert ui_server._starts == 1 + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + assert ui_server._starts == 2 + + # Verify the ingress endpoint is still up, but not the SSL one + await reader.getjson("getstatus") + with pytest.raises(aiohttp.client_exceptions.ClientConnectionError): + await reader.getjson("getstatus", ingress=False, ssl=True, sslcontext=False) + + +@pytest.mark.asyncio +async def test_download_drive(reader, ui_server, backup, drive: DriveSource, ha: HaSource, session, time): + await ha.delete(backup) + # download the item from Google Drive + from_drive = await drive.read(backup) + # Download rom the web server + from_server = AsyncHttpGetter( + reader.getUrl() + "download?slug=" + backup.slug(), {}, session, time=time) + await compareStreams(from_drive, from_server) + + +@pytest.mark.asyncio +async def test_download_home_assistant(reader: ReaderHelper, ui_server, backup, drive: DriveSource, ha: HaSource, session, time): + await drive.delete(backup) + # download the item from Google Drive + from_ha = await ha.read(backup) + # Download rom the web server + from_server = AsyncHttpGetter( + reader.getUrl() + "download?slug=" + backup.slug(), {}, session, time=time) + await 
compareStreams(from_ha, from_server) + + +@pytest.mark.asyncio +async def test_cancel_and_startsync(reader: ReaderHelper, coord: Coordinator): + coord._sync_wait.set() + status = await reader.getjson("startSync") + assert status["syncing"] + cancel = await reader.getjson('cancelSync') + assert not cancel["syncing"] + assert cancel["last_error"]["error_type"] == "cancelled" + + +@pytest.mark.asyncio +async def test_token(reader: ReaderHelper, coord: Coordinator, ha, drive: DriveSource): + creds = { + "client_id": "new_access_token", + "access_token": "new_access_token", + "refresh_token": "new_refresh_token", + "token_expiry": "2022-01-01T00:00:00" + } + serialized = str(base64.b64encode(json.dumps(creds).encode("utf-8")), "utf-8") + await reader.get("token?creds={0}&host={1}".format(quote(serialized), quote(reader.getUrl(True)))) + assert drive.drivebackend.creds.access_token == 'new_access_token' + assert drive.drivebackend.creds.refresh_token == 'new_refresh_token' + assert drive.drivebackend.creds.secret is None + + +@pytest.mark.asyncio +async def test_token_with_secret(reader: ReaderHelper, coord: Coordinator, ha, drive: DriveSource): + creds = { + "client_id": "new_access_token", + "client_secret": "new_client_secret", + "access_token": "new_access_token", + "refresh_token": "new_refresh_token", + "token_expiry": "2022-01-01T00:00:00" + } + serialized = str(base64.b64encode(json.dumps(creds).encode("utf-8")), "utf-8") + await reader.get("token?creds={0}&host={1}".format(quote(serialized), quote(reader.getUrl(True)))) + assert drive.drivebackend.creds.access_token == 'new_access_token' + assert drive.drivebackend.creds.refresh_token == 'new_refresh_token' + assert drive.drivebackend.creds.secret == 'new_client_secret' + + +@pytest.mark.asyncio +async def test_token_extra_server(reader: ReaderHelper, coord: Coordinator, ha, drive: DriveSource, restarter, time): + update = { + "config": { + "expose_extra_server": True + }, + "backup_folder": "unused" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + creds = Creds(time, "id", time.now(), "token", "refresh") + serialized = str(base64.b64encode(json.dumps(creds.serialize()).encode("utf-8")), "utf-8") + await reader.get("token?creds={0}&host={1}".format(quote(serialized), quote(reader.getUrl(False))), ingress=False) + assert drive.drivebackend.creds.access_token == 'token' + + +@pytest.mark.asyncio +async def test_changefolder(reader: ReaderHelper, coord: Coordinator, ha, ui_server, folder_finder: FolderFinder): + assert await reader.get("changefolder?id=12345") == '{}' + assert await folder_finder.get() == "12345" + + +@pytest.mark.asyncio +async def test_changefolder_extra_server(reader: ReaderHelper, coord: Coordinator, ha, drive: DriveSource, restarter, ui_server, folder_finder: FolderFinder): + update = { + "config": { + "expose_extra_server": True + }, + "backup_folder": "unused" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + await restarter.waitForRestart() + + # create a folder + folder_metadata = { + 'name': "Other Folder", + 'mimeType': FOLDER_MIME_TYPE, + 'appProperties': { + "backup_folder": "true", + }, + } + + # create two folders at different times + id = (await drive.drivebackend.createFolder(folder_metadata))['id'] + + await reader.get("changefolder?id=" + str(id), ingress=False) + assert await folder_finder.get() == id + + +@pytest.mark.asyncio 
+async def test_update_sync_interval(reader, ui_server, config: Config, supervisor: SimulatedSupervisor): + # Make sure the default saves nothing + update = { + "config": { + "max_sync_interval_seconds": '3 hours', + }, + "backup_folder": "unused" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + assert config.get(Setting.MAX_SYNC_INTERVAL_SECONDS) == 60 * 60 * 3 + assert "max_sync_interval_seconds" not in supervisor._options + + # Update custom + update = { + "config": { + "max_sync_interval_seconds": '2 hours', + }, + "backup_folder": "unused" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + assert config.get(Setting.MAX_SYNC_INTERVAL_SECONDS) == 60 * 60 * 2 + assert supervisor._options["max_sync_interval_seconds"] == 60 * 60 * 2 + + +@pytest.mark.asyncio +async def test_manual_creds(reader: ReaderHelper, ui_server: UiServer, config: Config, server: SimulationServer, session, drive: DriveSource): + periodic_check = await reader.getjson("checkManualAuth") + assert periodic_check['message'] == "No request for authorization is in progress." + drive.saveCreds(None) + assert not drive.enabled() + + await setup_manual_creds(reader, server, drive, session) + + # Verify creds are saved and drive is enabled + assert drive.enabled() + assert drive.isCustomCreds() + + +@pytest.mark.asyncio +async def test_manual_creds_failure(reader: ReaderHelper, ui_server: UiServer, config: Config, server: SimulationServer, session, drive: DriveSource): + drive.saveCreds(None) + assert not drive.enabled() + + # Try with a bad client_id + req_path = URL("manualauth").with_query({ + "client_id": "wrong_id", + "client_secret": server.google._custom_drive_client_secret}) + data = await reader.getjson(str(req_path), status=500) + assert data["message"] == "Google responded with error status HTTP 401. Please verify your credentials are set up correctly." + + # Try with a bad client_secret + req_path = URL("manualauth").with_query({ + "client_id": server.google._custom_drive_client_id, + "client_secret": "wrong_secret"}) + data = await reader.getjson(str(req_path)) + + await asyncio.sleep(2) + + periodic_check = await reader.getjson("checkManualAuth", status=500) + assert periodic_check['message'] == "Failed unexpectedly while trying to reach Google. See the add-on logs for details." 
+ + # verify creds are saved and drive is enabled + assert not drive.enabled() + + +@pytest.mark.asyncio +async def test_setting_cancels_and_resyncs(reader: ReaderHelper, ui_server: UiServer, config: Config, server, session, drive: DriveSource, coord: Coordinator): + # Create a blocking sync task + coord._sync_wait.set() + sync = asyncio.create_task(coord.sync(), name="Sync from saving settings") + await coord._sync_start.wait() + assert not sync.cancelled() + assert not sync.done() + + # Change some config + update = { + "config": { + "days_between_backups": 20, + "drive_ipv4": "" + }, + "backup_folder": "unused" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + + # verify the previous sync is done and another one is running + assert sync.done() + assert coord.isSyncing() + + +@pytest.mark.asyncio +async def test_change_specify_folder_setting(reader: ReaderHelper, server, session, coord: Coordinator, folder_finder: FolderFinder): + await coord.sync() + assert folder_finder.getCachedFolder() is not None + + old_folder = folder_finder.getCachedFolder() + # Change some config + update = { + "config": { + "specify_backup_folder": True + }, + "backup_folder": "" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + + # verify the backup folder was reset, which triggers the error dialog to find a new folder + assert folder_finder.getCachedFolder() == old_folder + + await coord.waitForSyncToFinish() + result = await reader.postjson("getstatus") + assert result["last_error"] is None + + +@pytest.mark.asyncio +async def test_change_specify_folder_setting_with_manual_creds(reader: ReaderHelper, google: SimulatedGoogle, session, coord: Coordinator, folder_finder: FolderFinder, drive: DriveSource, config): + google.resetDriveAuth() + drive.saveCreds(None) + assert not drive.enabled() + + # get the auth url + req_path = URL("manualauth").with_query({ + "client_id": google._custom_drive_client_id, + "client_secret": google._custom_drive_client_secret}) + data = await reader.getjson(str(req_path)) + + # Authorize the device using the url and device code provided + authorize_url = URL(data["auth_url"]).with_query({"code": data['code']}) + async with session.get(str(authorize_url), allow_redirects=False) as resp: + resp.raise_for_status() + + # TODO: wait for creds in a smarter way + await asyncio.sleep(2) + + assert drive.enabled() + assert drive.isCustomCreds() + + await coord.sync() + assert folder_finder.getCachedFolder() is not None + + # Specify the backup folder, which should cache the new one + update = { + "config": { + Setting.SPECIFY_BACKUP_FOLDER.value: True + }, + "backup_folder": "12345" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + assert folder_finder.getCachedFolder() == "12345" + + # Un change the folder, which should keep the existing folder + update = { + "config": { + Setting.SPECIFY_BACKUP_FOLDER.value: False + }, + "backup_folder": "" + } + assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False} + assert folder_finder.getCachedFolder() == "12345" + + +@pytest.mark.asyncio +async def test_update_non_ui_setting(reader: ReaderHelper, server, session, coord: Coordinator, folder_finder: FolderFinder, config: Config): + await coord.sync() + # Change some config + update = { + "config": { + Setting.NEW_BACKUP_TIMEOUT_SECONDS.value: 
10
+        },
+        "backup_folder": ""
+    }
+    assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False}
+
+    assert config.get(Setting.NEW_BACKUP_TIMEOUT_SECONDS) == 10
+
+    update = {
+        "config": {
+            Setting.MAX_BACKUPS_IN_HA.value: 1
+        },
+        "backup_folder": ""
+    }
+    assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": False}
+    assert config.get(Setting.NEW_BACKUP_TIMEOUT_SECONDS) == 10
+
+
+@pytest.mark.asyncio
+async def test_update_disable_drive(reader: ReaderHelper, server, coord: Coordinator, config: Config, drive_requests: DriveRequests):
+    # Disable drive
+    drive_requests.creds = None
+    os.remove(config.get(Setting.CREDENTIALS_FILE_PATH))
+    assert not coord.enabled()
+    await coord.sync()
+    assert len(coord.backups()) == 0
+
+    # Disable Drive Upload
+    update = {
+        "config": {
+            Setting.ENABLE_DRIVE_UPLOAD.value: False
+        },
+        "backup_folder": ""
+    }
+    assert await reader.postjson("saveconfig", json=update) == {'message': 'Settings saved', "reload_page": True}
+    assert config.get(Setting.ENABLE_DRIVE_UPLOAD) is False
+
+    # Verify the app is working fine.
+    assert coord.enabled()
+    await coord.waitForSyncToFinish()
+    assert len(coord.backups()) == 1
+
+
+@pytest.mark.asyncio
+async def test_update_ignore(reader: ReaderHelper, time: FakeTime, coord: Coordinator, config: Config, supervisor: SimulatedSupervisor, ha: HaSource, drive: DriveSource):
+    config.override(Setting.IGNORE_UPGRADE_BACKUPS, True)
+    config.override(Setting.DAYS_BETWEEN_BACKUPS, 0)
+
+    # make an ignored backup
+    slug = await supervisor.createBackup({'name': "Ignore_me", 'folders': ['homeassistant'], 'addons': []}, date=time.now())
+
+    await coord.sync()
+    assert len(await drive.get()) == 0
+    assert len(await ha.get()) == 1
+    assert len(coord.backups()) == 1
+
+    # Stop ignoring the backup
+    update = {
+        "ignore": False,
+        "slug": slug,
+    }
+    await reader.postjson("ignore", json=update)
+    await coord.waitForSyncToFinish()
+    assert len(coord.backups()) == 1
+    assert len(await drive.get()) == 1
+    assert len(await ha.get()) == 1
+
+
+@pytest.mark.asyncio
+async def test_check_ignored_backup_notification(reader: ReaderHelper, time: FakeTime, coord: Coordinator, config: Config, supervisor: SimulatedSupervisor, ha: HaSource, drive: DriveSource):
+    # Create an "ignored" backup after upgrade to the current version.
+    time.advance(days=1)
+    await supervisor.createBackup({'name': "test_name"}, date=time.now())
+
+    # create one that isn't ignored.
+    time.advance(days=1)
+    await ha.create(CreateOptions(time.now(), name_template=None))
+
+    update = {
+        "config": {
+            Setting.IGNORE_OTHER_BACKUPS.value: True
+        },
+        "backup_folder": ""
+    }
+    await reader.postjson("saveconfig", json=update)
+    await coord.waitForSyncToFinish()
+
+    status = await reader.getjson("getstatus")
+    assert status["backups"][0]["ignored"]
+    assert not status["backups"][1]["ignored"]
+    assert not status["notify_check_ignored"]
+
+    # Create an ignored backup from "before" the addon was upgraded to v0.104.0
+    await supervisor.createBackup({'name': "test_name"}, date=time.now() - timedelta(days=10))
+    await coord.sync()
+
+    # The UI should notify about checking ignored backups
+    status = await reader.getjson("getstatus")
+    assert status["backups"][0]["ignored"]
+    assert status["backups"][1]["ignored"]
+    assert not status["backups"][2]["ignored"]
+    assert status["notify_check_ignored"]
+
+    # Acknowledge the notification
+    assert await reader.postjson("ackignorecheck") == {'message': "Acknowledged."}
+    status = await reader.getjson("getstatus")
+    assert not status["notify_check_ignored"]
+
+
+@pytest.mark.asyncio
+async def test_snapshot_to_backup_upgrade_use_new_values(reader: ReaderHelper, time: FakeTime, coord: Coordinator, config: Config, supervisor: SimulatedSupervisor, ha: HaSource, drive: DriveSource, data_cache: DataCache, updater: HaUpdater):
+    """ Test the path where a user upgrades from the addon before the backup rename and then chooses to use the new names"""
+    status = await reader.getjson("getstatus")
+    assert not status["warn_backup_upgrade"]
+
+    # simulate upgrading config
+    supervisor._options = {
+        Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 7
+    }
+    await coord.sync()
+    assert Setting.CALL_BACKUP_SNAPSHOT.value in supervisor._options
+    assert Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value not in supervisor._options
+    assert config.get(Setting.CALL_BACKUP_SNAPSHOT)
+
+    status = await reader.getjson("getstatus")
+    assert status["warn_backup_upgrade"]
+    assert not data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_BACKUP_RENAME)
+    assert not updater._trigger_once
+
+    # simulate user clicking the button to use new names
+    assert await reader.getjson("callbackupsnapshot?switch=true") == {'message': 'Configuration updated'}
+    assert data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_BACKUP_RENAME)
+    assert not config.get(Setting.CALL_BACKUP_SNAPSHOT)
+    status = await reader.getjson("getstatus")
+    assert not status["warn_backup_upgrade"]
+    assert updater._trigger_once
+
+
+@pytest.mark.asyncio
+async def test_snapshot_to_backup_upgrade_use_old_values(reader: ReaderHelper, time: FakeTime, coord: Coordinator, config: Config, supervisor: SimulatedSupervisor, ha: HaSource, drive: DriveSource, data_cache: DataCache, updater: HaUpdater):
+    """ Test the path where a user upgrades from the addon before the backup rename and then chooses to use the old names"""
+    status = await reader.getjson("getstatus")
+    assert not status["warn_backup_upgrade"]
+
+    # simulate upgrading config
+    supervisor._options = {
+        Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 7
+    }
+    await coord.sync()
+    assert Setting.CALL_BACKUP_SNAPSHOT.value in supervisor._options
+    assert config.get(Setting.CALL_BACKUP_SNAPSHOT)
+
+    status = await reader.getjson("getstatus")
+    assert status["warn_backup_upgrade"]
+    assert not data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_BACKUP_RENAME)
+    assert not updater._trigger_once
+
+    # simulate user clicking the button to use the old names
+    assert await
reader.getjson("callbackupsnapshot?switch=false") == {'message': 'Configuration updated'} + assert data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_BACKUP_RENAME) + status = await reader.getjson("getstatus") + assert not status["warn_backup_upgrade"] + assert config.get(Setting.CALL_BACKUP_SNAPSHOT) + + +@pytest.mark.asyncio +async def test_snapshot_to_backup_upgrade_avoid_default_overwrite(reader: ReaderHelper, time: FakeTime, coord: Coordinator, config: Config, supervisor: SimulatedSupervisor, ha: HaSource, drive: DriveSource, data_cache: DataCache, updater: HaUpdater): + """ Test the upgrade path where a new setting left at its default value gets overwritten by the migrated deprecated value""" + status = await reader.getjson("getstatus") + assert not status["warn_backup_upgrade"] + + # simulate upgrading config + supervisor._options = { + Setting.DEPRECTAED_MAX_BACKUPS_IN_HA.value: 7, + Setting.MAX_BACKUPS_IN_HA.value: 4 # default, should get overridden + } + await coord.sync() + assert Setting.CALL_BACKUP_SNAPSHOT.value in supervisor._options + assert config.get(Setting.CALL_BACKUP_SNAPSHOT) + assert config.get(Setting.MAX_BACKUPS_IN_HA) == 7 + + +@pytest.mark.asyncio +async def test_ha_upload(reader: ReaderHelper, backup_helper, ui_server: UiServer, drive: DriveSource, ha: HaSource, config: Config, model, time): + from_backup, data = await backup_helper.createFile() + backup = await drive.save(from_backup, data) + + config.override(Setting.DAYS_BETWEEN_BACKUPS, 0) + await model.sync(time.now()) + assert len(await ha.get()) == 0 + assert len(await drive.get()) == 1 + + reply = await reader.getjson(str(URL("upload").with_query({"slug": backup.slug()}))) + assert reply['message'] == "Uploading backup in the background" + await ui_server.waitForUpload() + assert len(await ha.get()) == 1 + + +async def setup_manual_creds(reader: ReaderHelper, server: SimulationServer, drive: DriveSource, session: ClientSession): + # get the auth url + req_path = URL("manualauth").with_query({ + "client_id": server.google._custom_drive_client_id, + "client_secret": server.google._custom_drive_client_secret}) + data = await reader.getjson(str(req_path)) + assert "auth_url" in data + assert "code" in data + assert "expires" in data + + periodic_check = await reader.getjson("checkManualAuth") + assert periodic_check['message'] == "Waiting for you to authorize the add-on."
+ + # Authorize the device using the url and device code provided + drive._cred_trigger.clear() + authorize_url = URL(data["auth_url"]).with_query({"code": data['code']}) + async with session.get(str(authorize_url), allow_redirects=False) as resp: + resp.raise_for_status() + + await drive.debug_wait_for_credentials() + + # verify creds are saved and drive is enabled + assert drive.enabled() + assert drive.isCustomCreds() + + +@pytest.mark.asyncio +async def test_oob_warning(reader: ReaderHelper, ui_server: UiServer, config: Config, server: SimulationServer, session, drive: DriveSource, data_cache: DataCache): + server.google._custom_drive_client_expiration = OOB_CRED_CUTOFF + timedelta(seconds=1) + assert not data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_OOB_FLOW) + await setup_manual_creds(reader, server, drive, session) + assert data_cache.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_OOB_FLOW) + data_cache.TESTS_ONLY_clearFlags() + status = await reader.getjson("getstatus") + assert status['warn_oob_oauth'] is False + + server.google._custom_drive_client_expiration = OOB_CRED_CUTOFF - timedelta(seconds=1) + await setup_manual_creds(reader, server, drive, session) + data_cache.TESTS_ONLY_clearFlags() + status = await reader.getjson("getstatus") + assert status['warn_oob_oauth'] is True + + data_cache.addFlag(UpgradeFlags.NOTIFIED_ABOUT_OOB_FLOW) + status = await reader.getjson("getstatus") + assert status['warn_oob_oauth'] is False diff --git a/hassio-google-drive-backup/tests/test_version.py b/hassio-google-drive-backup/tests/test_version.py new file mode 100644 index 0000000..5303dc8 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_version.py @@ -0,0 +1,47 @@ +from backup.config import Version + + +def test_default(): + assert Version.default() == Version.default() + assert not Version.default() > Version.default() + assert not Version.default() < Version.default() + assert not Version.default() != Version.default() + assert Version.default() >= Version.default() + assert Version.default() <= Version.default() + + +def test_version(): + assert Version(1, 2, 3) == Version(1, 2, 3) + assert Version(1, 2, 3) >= Version(1, 2, 3) + assert Version(1, 2, 3) <= Version(1, 2, 3) + assert Version(1, 2, 3) > Version(1, 2) + assert Version(1) < Version(2) + assert Version(2) > Version(1) + assert Version(1) != Version(2) + assert Version(1, 2) > Version(1) + assert Version(1) < Version(1, 2) + + +def test_parse(): + assert Version.parse("1.0") == Version(1, 0) + assert Version.parse("1.2.3") == Version(1, 2, 3) + + +def test_parse_staging(): + assert Version.parse("1.0.staging.1") == Version(1, 0, 1) + assert Version.parse("1.0.staging.1").staging + assert Version.parse("1.0.staging.1") > Version(1.0) + assert Version.parse("1.2.3") == Version(1, 2, 3) + + +def test_junk_strings(): + assert Version.parse("1-.2.3.1") == Version(1, 2, 3, 1) + assert Version.parse("ignore-1.2.3.1") == Version(1, 2, 3, 1) + assert Version.parse("1.2.ignore.this.text.3.and...andhere.too.1") == Version(1, 2, 3, 1) + + +def test_broken_versions(): + assert Version.parse("") == Version.default() + assert Version.parse(".") == Version.default() + assert Version.parse("empty") == Version.default() + assert Version.parse("no.version.here") == Version.default() diff --git a/hassio-google-drive-backup/tests/test_watcher.py b/hassio-google-drive-backup/tests/test_watcher.py new file mode 100644 index 0000000..798b422 --- /dev/null +++ b/hassio-google-drive-backup/tests/test_watcher.py @@ -0,0 +1,119 @@ +from backup.watcher 
import Watcher +from backup.config import Config, Setting, CreateOptions +from backup.ha import HaSource +from os.path import join +from .faketime import FakeTime +from asyncio import sleep +import pytest +import os + +TEST_FILE_NAME = "test.tar" + + +@pytest.mark.asyncio +async def test_watcher_trigger_on_backup(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await watcher.start() + assert not await watcher.check() + watcher.noticed_change_signal.clear() + await simulateBackup(config, TEST_FILE_NAME, ha, time) + await watcher.noticed_change_signal.wait() + time.advance(minutes=11) + assert await watcher.check() + + +@pytest.mark.asyncio +async def test_disable_watching(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + config.override(Setting.WATCH_BACKUP_DIRECTORY, False) + await watcher.start() + assert not await watcher.check() + await simulateBackup(config, TEST_FILE_NAME, ha, time) + await sleep(1) + time.advance(minutes=11) + assert not await watcher.check() + + +@pytest.mark.asyncio +async def test_watcher_doesnt_trigger_on_no_backup(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await watcher.start() + assert not await watcher.check() + file = join(config.get(Setting.BACKUP_DIRECTORY_PATH), TEST_FILE_NAME) + watcher.noticed_change_signal.clear() + with open(file, "w"): + pass + await watcher.noticed_change_signal.wait() + time.advance(minutes=11) + assert not await watcher.check() + + +@pytest.mark.asyncio +async def test_watcher_below_wait_threshold(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await watcher.start() + assert not await watcher.check() + for x in range(10): + watcher.noticed_change_signal.clear() + await simulateBackup(config, f"{TEST_FILE_NAME}.{x}", ha, time) + await watcher.noticed_change_signal.wait() + time.advance(seconds=9) + assert not await watcher.check() + time.advance(minutes=11) + assert await watcher.check() + + +@pytest.mark.asyncio +async def test_watcher_triggers_for_deletes(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await simulateBackup(config, TEST_FILE_NAME, ha, time) + + await watcher.start() + assert not await watcher.check() + watcher.noticed_change_signal.clear() + os.remove(join(config.get(Setting.BACKUP_DIRECTORY_PATH), TEST_FILE_NAME)) + await watcher.noticed_change_signal.wait() + + time.advance(seconds=30) + assert await watcher.check() + + +@pytest.mark.asyncio +async def test_moves_out_trigger(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await simulateBackup(config, TEST_FILE_NAME, ha, time) + await watcher.start() + watcher.noticed_change_signal.clear() + os.mkdir(join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir")) + os.rename(join(config.get(Setting.BACKUP_DIRECTORY_PATH), TEST_FILE_NAME), join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir", TEST_FILE_NAME)) + await watcher.noticed_change_signal.wait() + time.advance(minutes=11) + assert await watcher.check() + +# Check if move ins are really necessary +# @pytest.mark.asyncio +# async def test_moves_in_trigger(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): +# os.mkdir(join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir")) +# await simulateBackup(config, "subdir/" + TEST_FILE_NAME, ha, time) +# await watcher.start() +# watcher.noticed_change_signal.clear() +# os.rename(join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir", TEST_FILE_NAME), 
join(config.get(Setting.BACKUP_DIRECTORY_PATH), TEST_FILE_NAME)) +# await watcher.noticed_change_signal.wait() +# time.advance(minutes=11) +# assert await watcher.check() + + +@pytest.mark.asyncio +async def test_subdirs_dont_trigger(server, watcher: Watcher, config: Config, time: FakeTime, ha: HaSource): + await simulateBackup(config, TEST_FILE_NAME, ha, time) + await watcher.start() + watcher.noticed_change_signal.clear() + os.mkdir(join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir")) + with open(join(config.get(Setting.BACKUP_DIRECTORY_PATH), "subdir", "ignored.txt"), "w"): + pass + assert not await watcher.check() + time.advance(minutes=11) + assert not await watcher.check() + + +async def simulateBackup(config, file_name, ha, time): + file = join(config.get(Setting.BACKUP_DIRECTORY_PATH), file_name) + with open(file, "w"): + pass + await ha.create(CreateOptions(time.now(), file_name)) + +# Verify that subdirectories get ignored diff --git a/hassio-google-drive-backup/tests/test_worker.py b/hassio-google-drive-backup/tests/test_worker.py new file mode 100644 index 0000000..98bfd1e --- /dev/null +++ b/hassio-google-drive-backup/tests/test_worker.py @@ -0,0 +1,46 @@ +import asyncio + +import pytest + +from backup.worker import StopWorkException, Worker +from .faketime import FakeTime + + +@pytest.mark.asyncio +async def test_worker(time: FakeTime): + data = {'count': 0} + + async def work(): + if data['count'] >= 5: + raise StopWorkException() + data['count'] += 1 + + worker = Worker("test", work, time, 1) + task = await worker.start() + await asyncio.wait([task]) + assert not worker.isRunning() + assert data['count'] == 5 + assert time.sleeps == [1, 1, 1, 1, 1] + # assert worker._task.name == "test" + assert worker.getLastError() is None + + +@pytest.mark.asyncio +async def test_worker_error(time: FakeTime): + data = {'count': 0} + + async def work(): + if data['count'] >= 5: + raise StopWorkException() + data['count'] += 1 + raise OSError() + + worker = Worker("test", work, time, 1) + task = await worker.start() + await asyncio.wait([task]) + assert not worker.isRunning() + assert data['count'] == 5 + assert time.sleeps == [1, 1, 1, 1, 1] + # assert worker.getName() == "test" + assert worker.getLastError() is not None + assert type(worker.getLastError()) is OSError diff --git a/hassio-google-drive-backup/tests/util/__init__.py b/hassio-google-drive-backup/tests/util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hassio-google-drive-backup/tests/util/test_token_bucket.py b/hassio-google-drive-backup/tests/util/test_token_bucket.py new file mode 100644 index 0000000..bdb2ae7 --- /dev/null +++ b/hassio-google-drive-backup/tests/util/test_token_bucket.py @@ -0,0 +1,51 @@ +from backup.util import TokenBucket +from ..faketime import FakeTime + + +async def test_consume(time: FakeTime): + bucket = TokenBucket(time, 10, 1, 1) + assert bucket.consume(1) + assert not bucket.consume(1) + + time.advance(seconds=1) + assert bucket.consume(1) + assert not bucket.consume(1) + + +async def test_async_consume(time: FakeTime): + bucket = TokenBucket(time, 10, 1, 1) + assert await bucket.consumeWithWait(1, 2) == 1 + assert len(time.sleeps) == 0 + + time.advance(seconds=2) + assert await bucket.consumeWithWait(1, 2) == 2 + assert len(time.sleeps) == 0 + + assert await bucket.consumeWithWait(1, 2) == 1 + assert len(time.sleeps) == 1 + assert time.sleeps[0] == 1 + + +async def test_capacity(time: FakeTime): + bucket = TokenBucket(time, 10, 1) + assert await 
bucket.consumeWithWait(1, 10) == 10 + assert len(time.sleeps) == 0 + + assert await bucket.consumeWithWait(5, 10) == 5 + assert len(time.sleeps) == 1 + assert time.sleeps[0] == 5 + + time.clearSleeps() + assert await bucket.consumeWithWait(20, 20) == 20 + assert len(time.sleeps) == 1 + assert time.sleeps[0] == 20 + + time.clearSleeps() + time.advance(seconds=5) + assert await bucket.consumeWithWait(1, 10) == 5 + + +async def test_higher_fill_rate(time: FakeTime): + bucket = TokenBucket(time, capacity=1000, fill_rate=100) + assert await bucket.consumeWithWait(1, 1000) == 1000 + assert len(time.sleeps) == 0 diff --git a/proxy/CHANGELOG.md b/proxy/CHANGELOG.md new file mode 100755 index 0000000..cf921a9 --- /dev/null +++ b/proxy/CHANGELOG.md @@ -0,0 +1,3 @@ +# 1.0.0 + +- initial release diff --git a/proxy/Dockerfile b/proxy/Dockerfile new file mode 100755 index 0000000..7c5ef53 --- /dev/null +++ b/proxy/Dockerfile @@ -0,0 +1,23 @@ +ARG BUILD_ARCH +# hadolint ignore=DL3006 +FROM ghcr.io/hassio-addons/debian-base/${BUILD_ARCH}:5.1.1 + +# Set shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Setup base +# hadolint ignore=DL3003 +RUN apt-get -qq update \ + && apt-get -qq install --no-install-recommends -y nginx \ + && (apt-get autoremove -y; apt-get autoclean -y) + +# Copy root filesystem +COPY rootfs / + +ARG BUILD_ARCH +ARG BUILD_DATE +ARG BUILD_DESCRIPTION +ARG BUILD_NAME +ARG BUILD_REF +ARG BUILD_REPOSITORY +ARG BUILD_VERSION \ No newline at end of file diff --git a/proxy/README.md b/proxy/README.md new file mode 100755 index 0000000..62b3c12 --- /dev/null +++ b/proxy/README.md @@ -0,0 +1,21 @@ +[![Double Take](https://badgen.net/github/release/jakowenko/double-take/stable)](https://github.com/jakowenko/double-take) [![Double Take](https://badgen.net/github/stars/jakowenko/double-take)](https://github.com/jakowenko/double-take/stargazers) [![Docker Pulls](https://flat.badgen.net/docker/pulls/jakowenko/double-take)](https://hub.docker.com/r/jakowenko/double-take) [![Discord](https://flat.badgen.net/discord/members/3pumsskdN5?label=Discord)](https://discord.gg/3pumsskdN5) + +![amd64][amd64-shield] ![armv7][armv7-shield] ![aarch64][aarch64-shield] ![armhf][armhf-shield] ![i386][i386-shield] + +# Double Take + +Unified UI and API for processing and training images for facial recognition. + +[Documentation](https://github.com/jakowenko/double-take/tree/beta#readme) + +--- + +This add-on creates a proxy to a Double Take instance running separately from Home Assistant. 
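For illustration, a minimal options payload for this proxy add-on might look as follows; the option name `server` and its `host:port` format come from the add-on's config.json further below, while the host and port shown here are only placeholders:

```json
{
  "server": "192.168.1.50:3000"
}
```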
+ +_Note: this add-on does not run Double Take itself._ + +[aarch64-shield]: https://img.shields.io/badge/aarch64-yes-green.svg +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg +[armhf-shield]: https://img.shields.io/badge/armhf-yes-green.svg +[armv7-shield]: https://img.shields.io/badge/armv7-yes-green.svg +[i386-shield]: https://img.shields.io/badge/i386-yes-green.svg diff --git a/proxy/config.json b/proxy/config.json new file mode 100755 index 0000000..83171ef --- /dev/null +++ b/proxy/config.json @@ -0,0 +1,20 @@ +{ + "name": "Double Take Proxy", + "version": "1.0.0", + "url": "https://github.com/jakowenko/double-take", + "panel_icon": "mdi:face-recognition", + "slug": "double-take-proxy", + "description": "Unified UI and API for processing and training images for facial recognition", + "arch": ["aarch64", "amd64", "armhf", "armv7", "i386"], + "startup": "application", + "boot": "auto", + "ingress": true, + "ingress_port": 3000, + "panel_admin": false, + "options": { + "server": "double-take.local:3000" + }, + "schema": { + "server": "match(^.+:\\d+$)" + } +} diff --git a/proxy/icon.png b/proxy/icon.png new file mode 100755 index 0000000..ed7663c Binary files /dev/null and b/proxy/icon.png differ diff --git a/proxy/rootfs/etc/cont-init.d/nginx.sh b/proxy/rootfs/etc/cont-init.d/nginx.sh new file mode 100755 index 0000000..21ad794 --- /dev/null +++ b/proxy/rootfs/etc/cont-init.d/nginx.sh @@ -0,0 +1,18 @@ +#!/usr/bin/with-contenv bashio +# ============================================================================== +# Configures NGINX for use with this add-on. +# ============================================================================== +declare server + +bashio::var.json \ + entry "$(bashio::addon.ingress_entry)" \ + | tempio \ + -template /etc/nginx/templates/ingress.gtpl \ + -out /etc/nginx/servers/ingress.conf + +server=$(bashio::config 'server') + +echo '{"server":"'"$server"'"}' \ + | tempio \ + -template /etc/nginx/templates/upstream.gtpl \ + -out /etc/nginx/includes/upstream.conf diff --git a/proxy/rootfs/etc/nginx/includes/mime.types b/proxy/rootfs/etc/nginx/includes/mime.types new file mode 100755 index 0000000..7c7cdef --- /dev/null +++ b/proxy/rootfs/etc/nginx/includes/mime.types @@ -0,0 +1,96 @@ +types { + text/html html htm shtml; + text/css css; + text/xml xml; + image/gif gif; + image/jpeg jpeg jpg; + application/javascript js; + application/atom+xml atom; + application/rss+xml rss; + + text/mathml mml; + text/plain txt; + text/vnd.sun.j2me.app-descriptor jad; + text/vnd.wap.wml wml; + text/x-component htc; + + image/png png; + image/svg+xml svg svgz; + image/tiff tif tiff; + image/vnd.wap.wbmp wbmp; + image/webp webp; + image/x-icon ico; + image/x-jng jng; + image/x-ms-bmp bmp; + + font/woff woff; + font/woff2 woff2; + + application/java-archive jar war ear; + application/json json; + application/mac-binhex40 hqx; + application/msword doc; + application/pdf pdf; + application/postscript ps eps ai; + application/rtf rtf; + application/vnd.apple.mpegurl m3u8; + application/vnd.google-earth.kml+xml kml; + application/vnd.google-earth.kmz kmz; + application/vnd.ms-excel xls; + application/vnd.ms-fontobject eot; + application/vnd.ms-powerpoint ppt; + application/vnd.oasis.opendocument.graphics odg; + application/vnd.oasis.opendocument.presentation odp; + application/vnd.oasis.opendocument.spreadsheet ods; + application/vnd.oasis.opendocument.text odt; + application/vnd.openxmlformats-officedocument.presentationml.presentation + pptx; + 
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + xlsx; + application/vnd.openxmlformats-officedocument.wordprocessingml.document + docx; + application/vnd.wap.wmlc wmlc; + application/x-7z-compressed 7z; + application/x-cocoa cco; + application/x-java-archive-diff jardiff; + application/x-java-jnlp-file jnlp; + application/x-makeself run; + application/x-perl pl pm; + application/x-pilot prc pdb; + application/x-rar-compressed rar; + application/x-redhat-package-manager rpm; + application/x-sea sea; + application/x-shockwave-flash swf; + application/x-stuffit sit; + application/x-tcl tcl tk; + application/x-x509-ca-cert der pem crt; + application/x-xpinstall xpi; + application/xhtml+xml xhtml; + application/xspf+xml xspf; + application/zip zip; + + application/octet-stream bin exe dll; + application/octet-stream deb; + application/octet-stream dmg; + application/octet-stream iso img; + application/octet-stream msi msp msm; + + audio/midi mid midi kar; + audio/mpeg mp3; + audio/ogg ogg; + audio/x-m4a m4a; + audio/x-realaudio ra; + + video/3gpp 3gpp 3gp; + video/mp2t ts; + video/mp4 mp4; + video/mpeg mpeg mpg; + video/quicktime mov; + video/webm webm; + video/x-flv flv; + video/x-m4v m4v; + video/x-mng mng; + video/x-ms-asf asx asf; + video/x-ms-wmv wmv; + video/x-msvideo avi; +} diff --git a/proxy/rootfs/etc/nginx/includes/proxy_params.conf b/proxy/rootfs/etc/nginx/includes/proxy_params.conf new file mode 100755 index 0000000..fcd1576 --- /dev/null +++ b/proxy/rootfs/etc/nginx/includes/proxy_params.conf @@ -0,0 +1,17 @@ +proxy_http_version 1.1; +proxy_ignore_client_abort off; +proxy_read_timeout 86400s; +proxy_redirect off; +proxy_send_timeout 86400s; +proxy_max_temp_file_size 0; + +proxy_set_header Accept-Encoding ""; +proxy_set_header Connection $connection_upgrade; +proxy_set_header Host $http_host; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-NginX-Proxy true; +proxy_set_header X-Real-IP $remote_addr; + + diff --git a/proxy/rootfs/etc/nginx/includes/server_params.conf b/proxy/rootfs/etc/nginx/includes/server_params.conf new file mode 100755 index 0000000..09c0654 --- /dev/null +++ b/proxy/rootfs/etc/nginx/includes/server_params.conf @@ -0,0 +1,6 @@ +root /dev/null; +server_name $hostname; + +add_header X-Content-Type-Options nosniff; +add_header X-XSS-Protection "1; mode=block"; +add_header X-Robots-Tag none; diff --git a/proxy/rootfs/etc/nginx/includes/ssl_params.conf b/proxy/rootfs/etc/nginx/includes/ssl_params.conf new file mode 100755 index 0000000..e6789cb --- /dev/null +++ b/proxy/rootfs/etc/nginx/includes/ssl_params.conf @@ -0,0 +1,8 @@ +ssl_protocols TLSv1.2 TLSv1.3; +ssl_prefer_server_ciphers off; +ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; +ssl_session_timeout 10m; +ssl_session_cache shared:SSL:10m; +ssl_session_tickets off; +ssl_stapling on; +ssl_stapling_verify on; diff --git a/proxy/rootfs/etc/nginx/nginx.conf b/proxy/rootfs/etc/nginx/nginx.conf new file mode 100755 index 0000000..11e15c7 --- /dev/null +++ b/proxy/rootfs/etc/nginx/nginx.conf @@ -0,0 +1,44 @@ +# Run nginx in foreground. +daemon off; + +# This is run inside Docker. +user root; + +# Pid storage location. +pid /var/run/nginx.pid; + +# Set number of worker processes. 
+worker_processes 1; + +# Enables the use of JIT for regular expressions to speed-up their processing. +pcre_jit on; + +# Write error log to the add-on log. +error_log /proc/1/fd/1 error; + +# Max num of simultaneous connections by a worker process. +events { + worker_connections 512; +} + +http { + include /etc/nginx/includes/mime.types; + + access_log off; + client_max_body_size 4G; + default_type application/octet-stream; + gzip on; + keepalive_timeout 65; + sendfile on; + server_tokens off; + tcp_nodelay on; + tcp_nopush on; + + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + + include /etc/nginx/includes/upstream.conf; + include /etc/nginx/servers/*.conf; +} diff --git a/proxy/rootfs/etc/nginx/servers/.gitkeep b/proxy/rootfs/etc/nginx/servers/.gitkeep new file mode 100755 index 0000000..85ad51b --- /dev/null +++ b/proxy/rootfs/etc/nginx/servers/.gitkeep @@ -0,0 +1 @@ +Without requirements or design, programming is the art of adding bugs to an empty text file. (Louis Srygley) diff --git a/proxy/rootfs/etc/nginx/templates/ingress.gtpl b/proxy/rootfs/etc/nginx/templates/ingress.gtpl new file mode 100755 index 0000000..a0d2705 --- /dev/null +++ b/proxy/rootfs/etc/nginx/templates/ingress.gtpl @@ -0,0 +1,14 @@ +server { + listen 3000 default_server; + + include /etc/nginx/includes/server_params.conf; + + location / { + allow 172.30.32.2; + deny all; + + proxy_pass http://backend; + proxy_set_header X-Ingress-Path {{ .entry }}; + include /etc/nginx/includes/proxy_params.conf; + } +} \ No newline at end of file diff --git a/proxy/rootfs/etc/nginx/templates/upstream.gtpl b/proxy/rootfs/etc/nginx/templates/upstream.gtpl new file mode 100755 index 0000000..873b90c --- /dev/null +++ b/proxy/rootfs/etc/nginx/templates/upstream.gtpl @@ -0,0 +1,3 @@ +upstream backend { + server {{ .server }}; +} diff --git a/proxy/rootfs/etc/services.d/nginx/finish b/proxy/rootfs/etc/services.d/nginx/finish new file mode 100755 index 0000000..5a6fa73 --- /dev/null +++ b/proxy/rootfs/etc/services.d/nginx/finish @@ -0,0 +1,8 @@ +#!/usr/bin/execlineb -S0 +# ============================================================================== +# Take down the S6 supervision tree when NGINX fails +# ============================================================================== +if -n { s6-test $# -ne 0 } +if -n { s6-test ${1} -eq 256 } + +s6-svscanctl -t /var/run/s6/services diff --git a/proxy/rootfs/etc/services.d/nginx/run b/proxy/rootfs/etc/services.d/nginx/run new file mode 100755 index 0000000..1ede88a --- /dev/null +++ b/proxy/rootfs/etc/services.d/nginx/run @@ -0,0 +1,7 @@ +#!/usr/bin/with-contenv bashio +# ============================================================================== +# Runs the NGINX daemon +# ============================================================================== + +bashio::log.info "Starting NGINX..." +exec nginx diff --git a/stable/CHANGELOG.md b/stable/CHANGELOG.md new file mode 100755 index 0000000..0cb3f44 --- /dev/null +++ b/stable/CHANGELOG.md @@ -0,0 +1 @@ +Please reference the [release notes](https://github.com/jakowenko/double-take/releases) for changes. 
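For clarity, here is a sketch of what the proxy add-on's tempio templates above would render to at container start, assuming the default `server` option value and an illustrative ingress entry (the real entry is assigned by the Supervisor):

```nginx
# Rendered /etc/nginx/includes/upstream.conf (from upstream.gtpl)
upstream backend {
    server double-take.local:3000;
}

# Rendered /etc/nginx/servers/ingress.conf (from ingress.gtpl);
# only the Supervisor ingress IP may reach the proxied backend.
server {
    listen 3000 default_server;

    include /etc/nginx/includes/server_params.conf;

    location / {
        allow 172.30.32.2;
        deny all;

        proxy_pass http://backend;
        proxy_set_header X-Ingress-Path /api/hassio_ingress/example-token;
        include /etc/nginx/includes/proxy_params.conf;
    }
}
```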
diff --git a/stable/Dockerfile b/stable/Dockerfile new file mode 100644 index 0000000..0eea2a5 --- /dev/null +++ b/stable/Dockerfile @@ -0,0 +1 @@ +FROM jakowenko/double-take:1.13.1 \ No newline at end of file diff --git a/stable/README.md b/stable/README.md new file mode 100755 index 0000000..49b89e0 --- /dev/null +++ b/stable/README.md @@ -0,0 +1,13 @@ +[![Double Take](https://badgen.net/github/release/jakowenko/double-take/stable)](https://github.com/jakowenko/double-take) [![Double Take](https://badgen.net/github/stars/jakowenko/double-take)](https://github.com/jakowenko/double-take/stargazers) [![Docker Pulls](https://flat.badgen.net/docker/pulls/jakowenko/double-take)](https://hub.docker.com/r/jakowenko/double-take) [![Discord](https://flat.badgen.net/discord/members/3pumsskdN5?label=Discord)](https://discord.gg/3pumsskdN5) + +![amd64][amd64-shield] ![armv7][armv7-shield] ![aarch64][aarch64-shield] + +# Double Take + +Unified UI and API for processing and training images for facial recognition. + +[Documentation](https://github.com/jakowenko/double-take#readme) + +[aarch64-shield]: https://img.shields.io/badge/aarch64-yes-green.svg +[amd64-shield]: https://img.shields.io/badge/amd64-yes-green.svg +[armv7-shield]: https://img.shields.io/badge/armv7-yes-green.svg diff --git a/stable/config.json b/stable/config.json new file mode 100755 index 0000000..e6536a1 --- /dev/null +++ b/stable/config.json @@ -0,0 +1,35 @@ +{ + "name": "Double Take", + "version": "1.13.1", + "url": "https://github.com/jakowenko/double-take", + "panel_icon": "mdi:face-recognition", + "slug": "double-take", + "description": "Unified UI and API for processing and training images for facial recognition", + "arch": ["armv7", "aarch64", "amd64"], + "startup": "application", + "boot": "auto", + "ingress": true, + "ingress_port": 3000, + "ports": { + "3000/tcp": 3000 + }, + "ports_description": { + "3000/tcp": "Web interface (not required for Home Assistant ingress)" + }, + "map": ["media:rw", "config:rw"], + "environment": { + "HA_ADDON": "true" + }, + "options": { + "STORAGE_PATH": "/config/double-take", + "CONFIG_PATH": "/config/double-take", + "SECRETS_PATH": "/config", + "MEDIA_PATH": "/media/double-take" + }, + "schema": { + "STORAGE_PATH": "str", + "CONFIG_PATH": "str", + "SECRETS_PATH": "str", + "MEDIA_PATH": "str" + } +} diff --git a/stable/icon.png b/stable/icon.png new file mode 100755 index 0000000..ed7663c Binary files /dev/null and b/stable/icon.png differ