Mirror of https://github.com/pi-hole/pi-hole.git (synced 2024-11-21 05:33:45 +00:00)

Merge branch 'development' into fix_FTLcheckUpdate

Signed-off-by: RD WebDesign <github@rdwebdesign.com.br>

Commit 63fb9be79a: 83 changed files with 3,028 additions and 6,418 deletions

@@ -1,3 +1,4 @@
doubleclick
wan
nwe
padd

.github/workflows/codeql-analysis.yml (vendored, 8 changes)

@@ -25,16 +25,16 @@ jobs:
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3.3.0
+      uses: actions/checkout@v4.2.0

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: 'python'

     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v3

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3

.github/workflows/merge-conflict.yml (vendored, 2 changes)

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check if PRs have merge conflicts
-        uses: eps1lon/actions-label-merge-conflict@v2.1.0
+        uses: eps1lon/actions-label-merge-conflict@v3.0.2
         with:
           dirtyLabel: "PR: Merge Conflict"
           repoToken: "${{ secrets.GITHUB_TOKEN }}"

.github/workflows/stale.yml (vendored, 29 changes)

@@ -4,23 +4,44 @@ on:
   schedule:
     - cron: '0 8 * * *'
   workflow_dispatch:
+  issue_comment:
+
+env:
+  stale_label: stale

 jobs:
-  stale:
+  stale_action:
+    if: github.event_name != 'issue_comment'
     runs-on: ubuntu-latest
     permissions:
       issues: write

     steps:
-      - uses: actions/stale@v7.0.0
+      - uses: actions/stale@v9.0.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           days-before-stale: 30
           days-before-close: 5
           stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
-          stale-issue-label: 'stale'
+          stale-issue-label: '${{ env.stale_label }}'
           exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
           exempt-all-issue-assignees: true
           operations-per-run: 300
           close-issue-reason: 'not_planned'
+
+  remove_stale:
+    # trigger "stale" removal immediately when stale issues are commented on
+    # we need to explicitly check that the trigger does not run on comment on a PR as
+    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only
+    if: ${{ !github.event.issue.pull_request && github.event_name != 'schedule' }}
+    permissions:
+      contents: read # for actions/checkout
+      issues: write # to edit issues label
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4.2.0
+      - name: Remove 'stale' label
+        run: gh issue edit ${{ github.event.issue.number }} --remove-label ${{ env.stale_label }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/stale_pr.yml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/stale@v7.0.0
+      - uses: actions/stale@v9.0.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          # Do not automatically mark PR/issue as stale

.github/workflows/sync-back-to-dev.yml (vendored, 24 changes)

@@ -5,13 +5,35 @@ on:
     branches:
       - master

+# The section is needed to drop the default write-all permissions for all jobs
+# that are granted on `push` event. By specifying any permission explicitly
+# all others are set to none. By using the principle of least privilege the damage a compromised
+# workflow can do (because of an injection or compromised third party tool or
+# action) is restricted. Adding labels to issues, commenting
+# on pull-requests, etc. may need additional permissions:
+#
+# Syntax for this section:
+# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+#
+# Reference for how to assign permissions on a job-by-job basis:
+# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+#
+# Reference for available permissions that we can enable if needed:
+# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
+permissions: {}
+
 jobs:
   sync-branches:
+    # The job needs to be able to pull the code and create a pull request.
+    permissions:
+      contents: read # for actions/checkout
+      pull-requests: write # to create pull request
+
     runs-on: ubuntu-latest
     name: Syncing branches
     steps:
       - name: Checkout
-        uses: actions/checkout@v3.3.0
+        uses: actions/checkout@v4.2.0
       - name: Opening pull request
         run: gh pr create -B development -H master --title 'Sync master back into development' --body 'Created by Github action' --label 'internal'
         env:

.github/workflows/test.yml (vendored, 22 changes)

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3.3.0
+        uses: actions/checkout@v4.2.0

       - name: Check scripts in repository are executable
         run: |

@@ -23,6 +23,13 @@ jobs:
           # If FAIL is 1 then we fail.
           [[ $FAIL == 1 ]] && exit 1 || echo "Scripts are executable!"

+      - name: Run shellcheck
+        uses: ludeeus/action-shellcheck@master
+        with:
+          check_together: 'yes'
+          format: tty
+          severity: error
+
       - name: Spell-Checking
         uses: codespell-project/actions-codespell@master
         with:

@@ -49,23 +56,24 @@ jobs:
       matrix:
         distro:
           [
             debian_10,
             debian_11,
             debian_12,
             ubuntu_20,
             ubuntu_22,
-            centos_8,
+            ubuntu_23,
+            ubuntu_24,
             centos_9,
-            fedora_36,
-            fedora_37,
+            fedora_39,
+            fedora_40,
           ]
     env:
       DISTRO: ${{matrix.distro}}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3.3.0
+        uses: actions/checkout@v4.2.0

       - name: Set up Python 3.10
-        uses: actions/setup-python@v4.5.0
+        uses: actions/setup-python@v5.2.0
         with:
           python-version: "3.10"

@@ -1,10 +0,0 @@
---
linters:
  shellcheck:
    shell: bash
  phpcs:
  flake8:
    max-line-length: 120
  yamllint:
    config: ./.yamllint.conf
  remarklint:

@@ -1,3 +0,0 @@
rules:
  line-length: disable
  document-start: disable

@@ -33,7 +33,9 @@ The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) th

 Those who want to get started quickly and conveniently may install Pi-hole using the following command:

-### `curl -sSL https://install.pi-hole.net | bash`
+```bash
+curl -sSL https://install.pi-hole.net | bash
+```

 ## Alternative Install Methods
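The piped one-liner above is the quick path; the README's Alternative Install Methods cover downloading the installer first so it can be reviewed before it runs with root rights. A minimal sketch of that flow (the local basic-install.sh filename is the conventional choice, not part of this hunk):

```bash
# Sketch of the "inspect before you run" alternative to piping curl into bash.
wget -O basic-install.sh https://install.pi-hole.net   # download the installer
less basic-install.sh                                  # review what it will do
sudo bash basic-install.sh                             # then run it as root
```
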
@@ -150,7 +152,7 @@ You can read our [Core Feature Breakdown](https://docs.pi-hole.net/core/pihole-c

 ### The Web Interface Dashboard

-This [optional dashboard](https://github.com/pi-hole/AdminLTE) allows you to view stats, change settings, and configure your Pi-hole. It's the power of the Command Line Interface, with none of the learning curve!
+This [optional dashboard](https://github.com/pi-hole/web) allows you to view stats, change settings, and configure your Pi-hole. It's the power of the Command Line Interface, with none of the learning curve!

 Some notable features include:

@@ -1,35 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Dnsmasq config for Pi-hole's FTLDNS
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.

###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE.           #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
#                                                                             #
# IF YOU WISH TO CHANGE THE UPSTREAM SERVERS, CHANGE THEM IN:                 #
# /etc/pihole/setupVars.conf                                                  #
#                                                                             #
# ANY OTHER CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE                  #
# WITHIN /etc/dnsmasq.d/yourname.conf                                         #
###############################################################################

addn-hosts=/etc/pihole/local.list
addn-hosts=/etc/pihole/custom.list

domain-needed

localise-queries

bogus-priv

no-resolv

log-queries
log-facility=/var/log/pihole/pihole.log

log-async

@@ -1,42 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2021 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# RFC 6761 config file for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.

###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE.           #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
#                                                                             #
# CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE                            #
# WITHIN /etc/dnsmasq.d/yourname.conf                                         #
###############################################################################

# RFC 6761: Caching DNS servers SHOULD recognize
# test, localhost, invalid
# names as special and SHOULD NOT attempt to look up NS records for them, or
# otherwise query authoritative DNS servers in an attempt to resolve these
# names.
server=/test/
server=/localhost/
server=/invalid/

# The same RFC requests something similar for
# 10.in-addr.arpa.      21.172.in-addr.arpa.  27.172.in-addr.arpa.
# 16.172.in-addr.arpa.  22.172.in-addr.arpa.  28.172.in-addr.arpa.
# 17.172.in-addr.arpa.  23.172.in-addr.arpa.  29.172.in-addr.arpa.
# 18.172.in-addr.arpa.  24.172.in-addr.arpa.  30.172.in-addr.arpa.
# 19.172.in-addr.arpa.  25.172.in-addr.arpa.  31.172.in-addr.arpa.
# 20.172.in-addr.arpa.  26.172.in-addr.arpa.  168.192.in-addr.arpa.
# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
# 01-pihole.conf) because this also covers IPv6.

# OpenWRT furthermore blocks bind, local, onion domains
# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
server=/bind/
server=/onion/

@@ -1,5 +1,5 @@
 # Determine if terminal is capable of showing colors
-if ([[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]) || [[ "${WEBCALL}" ]]; then
+if ([ -t 1 ] && [ $(tput colors) -ge 8 ]) || [ "${WEBCALL}" ]; then
    # Bold and underline may not show up on all clients
    # If something MUST be emphasized, use both
    COL_BOLD='[1m'
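The switch from `[[ … ]]` to `[ … ]` matters because this color table is sourced not only from bash scripts but also from POSIX sh scripts such as the new api.sh below, and plain sh has no `[[` builtin. A minimal sketch of such a consumer; the /opt/pihole install path is taken from the other scripts in this diff, the rest is illustrative:

```sh
#!/bin/sh
# Illustrative only: a POSIX sh consumer of COL_TABLE. Because the file now
# restricts itself to [ ] tests, dash/sh can source it without a syntax error.
. /opt/pihole/COL_TABLE
printf '%sAll good%s\n' "${COL_GREEN}" "${COL_NC}"
```
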
advanced/Scripts/api.sh (new executable file, 290 lines)

@@ -0,0 +1,290 @@
#!/usr/bin/env sh
# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions

# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Script to hold api functions for use in other scripts
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.

# The basic usage steps are
# 1) Test Availability of the API
# 2) Try to authenticate (read password if needed)
# 3) Get the data from the API endpoint
# 4) Delete the session

TestAPIAvailability() {

    # as we are running locally, we can get the port value from FTL directly
    local chaos_api_list availabilityResponse

    # Query the API URLs from FTL using CHAOS TXT local.api.ftl
    # The result is a space-separated enumeration of full URLs
    # e.g., "http://localhost:80/api/" "https://localhost:443/api/"
    chaos_api_list="$(dig +short chaos txt local.api.ftl @127.0.0.1)"

    # If the query was not successful, the variable is empty
    if [ -z "${chaos_api_list}" ]; then
        echo "API not available. Please check connectivity"
        exit 1
    fi

    # Iterate over space-separated list of URLs
    while [ -n "${chaos_api_list}" ]; do
        # Get the first URL
        API_URL="${chaos_api_list%% *}"
        # Strip leading and trailing quotes
        API_URL="${API_URL%\"}"
        API_URL="${API_URL#\"}"

        # Test if the API is available at this URL
        availabilityResponse=$(curl -skS -o /dev/null -w "%{http_code}" "${API_URL}auth")

        # Test if http status code was 200 (OK) or 401 (authentication required)
        if [ ! "${availabilityResponse}" = 200 ] && [ ! "${availabilityResponse}" = 401 ]; then
            # API is not available at this port/protocol combination
            API_PORT=""
        else
            # API is available at this URL combination

            if [ "${availabilityResponse}" = 200 ]; then
                # API is available without authentication
                needAuth=false
            fi

            break
        fi

        # Remove the first URL from the list
        local last_api_list
        last_api_list="${chaos_api_list}"
        chaos_api_list="${chaos_api_list#* }"

        # If the list did not change, we are at the last element
        if [ "${last_api_list}" = "${chaos_api_list}" ]; then
            # Remove the last element
            chaos_api_list=""
        fi
    done

    # if API_PORT is empty, no working API port was found
    if [ -n "${API_PORT}" ]; then
        echo "API not available at: ${API_URL}"
        echo "Exiting."
        exit 1
    fi
}

LoginAPI() {
    # If the API URL is not set, test the availability
    if [ -z "${API_URL}" ]; then
        TestAPIAvailability
    fi

    # Exit early if authentication is not needed
    if [ "${needAuth}" = false ]; then
        if [ "${1}" = "verbose" ]; then
            echo "API Authentication: Not needed"
        fi
        return
    fi

    # Try to read the CLI password (if enabled and readable by the current user)
    if [ -r /etc/pihole/cli_pw ]; then
        password=$(cat /etc/pihole/cli_pw)

        if [ "${1}" = "verbose" ]; then
            echo "API Authentication: Trying to use CLI password"
        fi

        # Try to authenticate using the CLI password
        Authentication "${1}"

    elif [ "${1}" = "verbose" ]; then
        echo "API Authentication: CLI password not available"
    fi

    # If this did not work, ask the user for the password
    while [ "${validSession}" = false ] || [ -z "${validSession}" ] ; do
        echo "Authentication failed. Please enter your Pi-hole password"

        # secretly read the password
        secretRead; printf '\n'

        # Try to authenticate again
        Authentication "${1}"
    done
}

Authentication() {
    sessionResponse="$(curl -skS -X POST "${API_URL}auth" --user-agent "Pi-hole cli " --data "{\"password\":\"${password}\"}" )"

    if [ -z "${sessionResponse}" ]; then
        echo "No response from FTL server. Please check connectivity"
        exit 1
    fi
    # obtain validity and session ID from session response
    validSession=$(echo "${sessionResponse}"| jq .session.valid 2>/dev/null)
    SID=$(echo "${sessionResponse}"| jq --raw-output .session.sid 2>/dev/null)

    if [ "${1}" = "verbose" ]; then
        if [ "${validSession}" = true ]; then
            echo "API Authentication: ${COL_GREEN}Success${COL_NC}"
        else
            echo "API Authentication: ${COL_RED}Failed${COL_NC}"
        fi
    fi
}

LogoutAPI() {
    # if a valid Session exists (no password required or successful Authentication) and
    # SID is not null (successful Authentication only), delete the session
    if [ "${validSession}" = true ] && [ ! "${SID}" = null ]; then
        # Try to delete the session. Omit the output, but get the http status code
        deleteResponse=$(curl -skS -o /dev/null -w "%{http_code}" -X DELETE "${API_URL}auth" -H "Accept: application/json" -H "sid: ${SID}")

        case "${deleteResponse}" in
            "401") echo "Logout attempt without a valid session. Unauthorized!";;
            "204") if [ "${1}" = "verbose" ]; then echo "API Logout: ${COL_GREEN}Success${COL_NC} (session deleted)"; fi;;
        esac;
    elif [ "${1}" = "verbose" ]; then
        echo "API Logout: ${COL_GREEN}Success${COL_NC} (no valid session)"
    fi
}

GetFTLData() {
    local data response status
    # get the data from querying the API as well as the http status code
    response=$(curl -skS -w "%{http_code}" -X GET "${API_URL}$1" -H "Accept: application/json" -H "sid: ${SID}" )

    # status are the last 3 characters
    status="${response#"${response%???}"}"
    # data is everything from response without the last 3 characters
    data="${response%???}"

    if [ "${2}" = "raw" ]; then
        # return the raw response
        echo "${response}"
    else
        # return only the data
        if [ "${status}" = 200 ]; then
            # response OK
            echo "${data}"
        else
            # connection lost
            echo "${status}"
        fi
    fi
}

PostFTLData() {
    local data response status
    # send the data to the API
    response=$(curl -skS -w "%{http_code}" -X POST "${API_URL}$1" --data-raw "$2" -H "Accept: application/json" -H "sid: ${SID}" )
    # data is everything from response without the last 3 characters
    if [ "${3}" = "status" ]; then
        # Keep the status code appended if requested
        printf %s "${response}"
    else
        # Strip the status code
        printf %s "${response%???}"
    fi
}

secretRead() {

    # POSIX compliant function to read user-input and
    # mask every character entered by (*)
    #
    # This is challenging, because in POSIX, `read` does not support
    # `-s` option (suppressing the input) or
    # `-n` option (reading n chars)

    # This workaround changes the terminal characteristics to not echo input and later resets this option
    # credits https://stackoverflow.com/a/4316765
    # showing asterisk instead of password
    # https://stackoverflow.com/a/24600839
    # https://unix.stackexchange.com/a/464963

    # Save current terminal settings (needed for later restore after password prompt)
    stty_orig=$(stty -g)

    stty -echo # do not echo user input
    stty -icanon min 1 time 0 # disable canonical mode https://man7.org/linux/man-pages/man3/termios.3.html

    unset password
    unset key
    unset charcount
    charcount=0
    while key=$(dd ibs=1 count=1 2>/dev/null); do #read one byte of input
        if [ "${key}" = "$(printf '\0' | tr -d '\0')" ] ; then
            # Enter - accept password
            break
        fi
        if [ "${key}" = "$(printf '\177')" ] ; then
            # Backspace
            if [ $charcount -gt 0 ] ; then
                charcount=$((charcount-1))
                printf '\b \b'
                password="${password%?}"
            fi
        else
            # any other character
            charcount=$((charcount+1))
            printf '*'
            password="$password$key"
        fi
    done

    # restore original terminal settings
    stty "${stty_orig}"
}

apiFunc() {
    local data response status status_col

    # Authenticate with the API
    LoginAPI verbose
    echo ""

    echo "Requesting: ${COL_PURPLE}GET ${COL_CYAN}${API_URL}${COL_YELLOW}$1${COL_NC}"
    echo ""

    # Get the data from the API
    response=$(GetFTLData "$1" raw)

    # status are the last 3 characters
    status="${response#"${response%???}"}"
    # data is everything from response without the last 3 characters
    data="${response%???}"

    # Output the status (200 -> green, else red)
    if [ "${status}" = 200 ]; then
        status_col="${COL_GREEN}"
    else
        status_col="${COL_RED}"
    fi
    echo "Status: ${status_col}${status}${COL_NC}"

    # Output the data. Format it with jq if available and data is actually JSON.
    # Otherwise just print it
    echo "Data:"
    if command -v jq >/dev/null && echo "${data}" | jq . >/dev/null 2>&1; then
        echo "${data}" | jq .
    else
        echo "${data}"
    fi

    # Delete the session
    LogoutAPI verbose
}
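The four usage steps listed at the top of api.sh map directly onto its functions. Below is a minimal sketch of how another Pi-hole script might consume the file; the /opt/pihole source path mirrors the other helper scripts in this repository, and the stats/summary endpoint is only an illustrative choice, not something fixed by this commit:

```sh
#!/bin/sh
# Illustrative consumer of api.sh; paths and endpoint are assumptions.
. /opt/pihole/COL_TABLE   # color variables (COL_GREEN, COL_RED, COL_NC) used by api.sh
. /opt/pihole/api.sh

TestAPIAvailability                     # 1) discover a working API URL via CHAOS TXT local.api.ftl
LoginAPI verbose                        # 2) authenticate (CLI password file or interactive prompt)
summary=$(GetFTLData "stats/summary")   # 3) query an endpoint using the session ID
echo "${summary}"
LogoutAPI verbose                       # 4) delete the session again
```
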
@ -1,577 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Calculates stats and displays to an LCD
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
LC_ALL=C
|
||||
LC_NUMERIC=C
|
||||
|
||||
# Retrieve stats from FTL engine
|
||||
pihole-FTL() {
|
||||
local ftl_port LINE
|
||||
# shellcheck disable=SC1091
|
||||
. /opt/pihole/utils.sh
|
||||
ftl_port=$(getFTLAPIPort)
|
||||
if [[ -n "$ftl_port" ]]; then
|
||||
# Open connection to FTL
|
||||
exec 3<>"/dev/tcp/127.0.0.1/$ftl_port"
|
||||
|
||||
# Test if connection is open
|
||||
if { "true" >&3; } 2> /dev/null; then
|
||||
# Send command to FTL and ask to quit when finished
|
||||
echo -e ">$1 >quit" >&3
|
||||
|
||||
# Read input until we received an empty string and the connection is
|
||||
# closed
|
||||
read -r -t 1 LINE <&3
|
||||
until [[ -z "${LINE}" ]] && [[ ! -t 3 ]]; do
|
||||
echo "$LINE" >&1
|
||||
read -r -t 1 LINE <&3
|
||||
done
|
||||
|
||||
# Close connection
|
||||
exec 3>&-
|
||||
exec 3<&-
|
||||
fi
|
||||
else
|
||||
echo "0"
|
||||
fi
|
||||
}
|
||||
|
||||
# Print spaces to align right-side additional text
|
||||
printFunc() {
|
||||
local text_last
|
||||
|
||||
title="$1"
|
||||
title_len="${#title}"
|
||||
|
||||
text_main="$2"
|
||||
text_main_nocol="$text_main"
|
||||
if [[ "${text_main:0:1}" == "" ]]; then
|
||||
text_main_nocol=$(sed 's/\[[0-9;]\{1,5\}m//g' <<< "$text_main")
|
||||
fi
|
||||
text_main_len="${#text_main_nocol}"
|
||||
|
||||
text_addn="$3"
|
||||
if [[ "$text_addn" == "last" ]]; then
|
||||
text_addn=""
|
||||
text_last="true"
|
||||
fi
|
||||
|
||||
# If there is additional text, define max length of text_main
|
||||
if [[ -n "$text_addn" ]]; then
|
||||
case "$scr_cols" in
|
||||
[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-4]) text_main_max_len="9";;
|
||||
4[5-9]) text_main_max_len="14";;
|
||||
*) text_main_max_len="19";;
|
||||
esac
|
||||
fi
|
||||
|
||||
[[ -z "$text_addn" ]] && text_main_max_len="$(( scr_cols - title_len ))"
|
||||
|
||||
# Remove excess characters from main text
|
||||
if [[ "$text_main_len" -gt "$text_main_max_len" ]]; then
|
||||
# Trim text without colors
|
||||
text_main_trim="${text_main_nocol:0:$text_main_max_len}"
|
||||
# Replace with trimmed text
|
||||
text_main="${text_main/$text_main_nocol/$text_main_trim}"
|
||||
fi
|
||||
|
||||
# Determine amount of spaces for each line
|
||||
if [[ -n "$text_last" ]]; then
|
||||
# Move cursor to end of screen
|
||||
spc_num=$(( scr_cols - ( title_len + text_main_len ) ))
|
||||
else
|
||||
spc_num=$(( text_main_max_len - text_main_len ))
|
||||
fi
|
||||
|
||||
[[ "$spc_num" -le 0 ]] && spc_num="0"
|
||||
spc=$(printf "%${spc_num}s")
|
||||
#spc="${spc// /.}" # Debug: Visualize spaces
|
||||
|
||||
printf "%s%s$spc" "$title" "$text_main"
|
||||
|
||||
if [[ -n "$text_addn" ]]; then
|
||||
printf "%s(%s)%s\\n" "$COL_NC$COL_DARK_GRAY" "$text_addn" "$COL_NC"
|
||||
else
|
||||
# Do not print trailing newline on final line
|
||||
[[ -z "$text_last" ]] && printf "%s\\n" "$COL_NC"
|
||||
fi
|
||||
}
|
||||
|
||||
# Perform on first Chrono run (not for JSON formatted string)
|
||||
get_init_stats() {
|
||||
calcFunc(){ awk "BEGIN {print $*}" 2> /dev/null; }
|
||||
|
||||
# Convert bytes to human-readable format
|
||||
hrBytes() {
|
||||
awk '{
|
||||
num=$1;
|
||||
if(num==0) {
|
||||
print "0 B"
|
||||
} else {
|
||||
xxx=(num<0?-num:num)
|
||||
sss=(num<0?-1:1)
|
||||
split("B KB MB GB TB PB",type)
|
||||
for(i=5;yyy < 1;i--) {
|
||||
yyy=xxx / (2^(10*i))
|
||||
}
|
||||
printf "%.0f " type[i+2], yyy*sss
|
||||
}
|
||||
}' <<< "$1";
|
||||
}
|
||||
|
||||
# Convert seconds to human-readable format
|
||||
hrSecs() {
|
||||
day=$(( $1/60/60/24 )); hrs=$(( $1/3600%24 ))
|
||||
mins=$(( ($1%3600)/60 )); secs=$(( $1%60 ))
|
||||
[[ "$day" -ge "2" ]] && plu="s"
|
||||
[[ "$day" -ge "1" ]] && days="$day day${plu}, " || days=""
|
||||
printf "%s%02d:%02d:%02d\\n" "$days" "$hrs" "$mins" "$secs"
|
||||
}
|
||||
|
||||
# Set Color Codes
|
||||
coltable="/opt/pihole/COL_TABLE"
|
||||
if [[ -f "${coltable}" ]]; then
|
||||
source ${coltable}
|
||||
else
|
||||
COL_NC="[0m"
|
||||
COL_DARK_GRAY="[1;30m"
|
||||
COL_LIGHT_GREEN="[1;32m"
|
||||
COL_LIGHT_BLUE="[1;34m"
|
||||
COL_LIGHT_RED="[1;31m"
|
||||
COL_YELLOW="[1;33m"
|
||||
COL_LIGHT_RED="[1;31m"
|
||||
COL_URG_RED="[39;41m"
|
||||
fi
|
||||
|
||||
# Get RPi throttle state (RPi 3B only) & model number, or OS distro info
|
||||
if command -v vcgencmd &> /dev/null; then
|
||||
local sys_throttle_raw
|
||||
local sys_rev_raw
|
||||
|
||||
sys_throttle_raw=$(vgt=$(sudo vcgencmd get_throttled); echo "${vgt##*x}")
|
||||
|
||||
# Active Throttle Notice: https://bit.ly/2gnunOo
|
||||
if [[ "$sys_throttle_raw" != "0" ]]; then
|
||||
case "$sys_throttle_raw" in
|
||||
*0001) thr_type="${COL_YELLOW}Under Voltage";;
|
||||
*0002) thr_type="${COL_LIGHT_BLUE}Arm Freq Cap";;
|
||||
*0003) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_BLUE}AFC";;
|
||||
*0004) thr_type="${COL_LIGHT_RED}Throttled";;
|
||||
*0005) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
|
||||
*0006) thr_type="${COL_LIGHT_BLUE}AFC${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
|
||||
*0007) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_BLUE}AFC${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
|
||||
esac
|
||||
[[ -n "$thr_type" ]] && sys_throttle="$thr_type${COL_DARK_GRAY}"
|
||||
fi
|
||||
|
||||
sys_rev_raw=$(awk '/Revision/ {print $3}' < /proc/cpuinfo)
|
||||
case "$sys_rev_raw" in
|
||||
000[2-6]) sys_model=" 1, Model B";; # 256MB
|
||||
000[7-9]) sys_model=" 1, Model A";; # 256MB
|
||||
000d|000e|000f) sys_model=" 1, Model B";; # 512MB
|
||||
0010|0013) sys_model=" 1, Model B+";; # 512MB
|
||||
0012|0015) sys_model=" 1, Model A+";; # 256MB
|
||||
a0104[0-1]|a21041|a22042) sys_model=" 2, Model B";; # 1GB
|
||||
900021) sys_model=" 1, Model A+";; # 512MB
|
||||
900032) sys_model=" 1, Model B+";; # 512MB
|
||||
90009[2-3]|920093) sys_model=" Zero";; # 512MB
|
||||
9000c1) sys_model=" Zero W";; # 512MB
|
||||
a02082|a[2-3]2082) sys_model=" 3, Model B";; # 1GB
|
||||
a020d3) sys_model=" 3, Model B+";; # 1GB
|
||||
*) sys_model="";;
|
||||
esac
|
||||
sys_type="Raspberry Pi$sys_model"
|
||||
else
|
||||
source "/etc/os-release"
|
||||
CODENAME=$(sed 's/[()]//g' <<< "${VERSION/* /}")
|
||||
sys_type="${NAME/ */} ${CODENAME^} $VERSION_ID"
|
||||
fi
|
||||
|
||||
# Get core count
|
||||
sys_cores=$(grep -c "^processor" /proc/cpuinfo)
|
||||
|
||||
# Test existence of clock speed file for ARM CPU
|
||||
if [[ -f "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq" ]]; then
|
||||
scaling_freq_file="/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"
|
||||
fi
|
||||
|
||||
# Test existence of temperature file
|
||||
if [[ -f "/sys/class/thermal/thermal_zone0/temp" ]]; then
|
||||
temp_file="/sys/class/thermal/thermal_zone0/temp"
|
||||
elif [[ -f "/sys/class/hwmon/hwmon0/temp1_input" ]]; then
|
||||
temp_file="/sys/class/hwmon/hwmon0/temp1_input"
|
||||
else
|
||||
temp_file=""
|
||||
fi
|
||||
|
||||
# Test existence of setupVars config
|
||||
if [[ -f "/etc/pihole/setupVars.conf" ]]; then
|
||||
setupVars="/etc/pihole/setupVars.conf"
|
||||
fi
|
||||
}
|
||||
|
||||
get_sys_stats() {
|
||||
local ph_ver_raw
|
||||
local cpu_raw
|
||||
local ram_raw
|
||||
local disk_raw
|
||||
|
||||
# Update every 12 refreshes (Def: every 60s)
|
||||
count=$((count+1))
|
||||
if [[ "$count" == "1" ]] || (( "$count" % 12 == 0 )); then
|
||||
# Do not source setupVars if file does not exist
|
||||
[[ -n "$setupVars" ]] && source "$setupVars"
|
||||
|
||||
mapfile -t ph_ver_raw < <(pihole -v -c 2> /dev/null | sed -n 's/^.* v/v/p')
|
||||
if [[ -n "${ph_ver_raw[0]}" ]]; then
|
||||
ph_core_ver="${ph_ver_raw[0]}"
|
||||
if [[ ${#ph_ver_raw[@]} -eq 2 ]]; then
|
||||
# AdminLTE not installed
|
||||
ph_lte_ver="(not installed)"
|
||||
ph_ftl_ver="${ph_ver_raw[1]}"
|
||||
else
|
||||
ph_lte_ver="${ph_ver_raw[1]}"
|
||||
ph_ftl_ver="${ph_ver_raw[2]}"
|
||||
fi
|
||||
else
|
||||
ph_core_ver="-1"
|
||||
fi
|
||||
|
||||
sys_name=$(hostname)
|
||||
|
||||
[[ -n "$TEMPERATUREUNIT" ]] && temp_unit="${TEMPERATUREUNIT^^}" || temp_unit="C"
|
||||
|
||||
# Get storage stats for partition mounted on /
|
||||
read -r -a disk_raw <<< "$(df -B1 / 2> /dev/null | awk 'END{ print $3,$2,$5 }')"
|
||||
disk_used="${disk_raw[0]}"
|
||||
disk_total="${disk_raw[1]}"
|
||||
disk_perc="${disk_raw[2]}"
|
||||
|
||||
net_gateway=$(ip route | grep default | cut -d ' ' -f 3 | head -n 1)
|
||||
|
||||
# Get DHCP stats, if feature is enabled
|
||||
if [[ "$DHCP_ACTIVE" == "true" ]]; then
|
||||
ph_dhcp_max=$(( ${DHCP_END##*.} - ${DHCP_START##*.} + 1 ))
|
||||
fi
|
||||
|
||||
# Get DNS server count
|
||||
dns_count="0"
|
||||
[[ -n "${PIHOLE_DNS_1}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_2}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_3}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_4}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_5}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_6}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_7}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_8}" ]] && dns_count=$((dns_count+1))
|
||||
[[ -n "${PIHOLE_DNS_9}" ]] && dns_count="$dns_count+"
|
||||
fi
|
||||
|
||||
# Get screen size
|
||||
read -r -a scr_size <<< "$(stty size 2>/dev/null || echo 24 80)"
|
||||
scr_lines="${scr_size[0]}"
|
||||
scr_cols="${scr_size[1]}"
|
||||
|
||||
# Determine Chronometer size behavior
|
||||
if [[ "$scr_cols" -ge 58 ]]; then
|
||||
chrono_width="large"
|
||||
elif [[ "$scr_cols" -gt 40 ]]; then
|
||||
chrono_width="medium"
|
||||
else
|
||||
chrono_width="small"
|
||||
fi
|
||||
|
||||
# Determine max length of divider string
|
||||
scr_line_len=$(( scr_cols - 2 ))
|
||||
[[ "$scr_line_len" -ge 58 ]] && scr_line_len="58"
|
||||
scr_line_str=$(printf "%${scr_line_len}s")
|
||||
scr_line_str="${scr_line_str// /—}"
|
||||
|
||||
sys_uptime=$(hrSecs "$(cut -d. -f1 /proc/uptime)")
|
||||
sys_loadavg=$(cut -d " " -f1,2,3 /proc/loadavg)
|
||||
|
||||
# Get CPU usage, only counting processes over 1% as active
|
||||
# shellcheck disable=SC2009
|
||||
cpu_raw=$(ps -eo pcpu,rss --no-headers | grep -E -v " 0")
|
||||
cpu_tasks=$(wc -l <<< "$cpu_raw")
|
||||
cpu_taskact=$(sed -r "/(^ 0.)/d" <<< "$cpu_raw" | wc -l)
|
||||
cpu_perc=$(awk '{sum+=$1} END {printf "%.0f\n", sum/'"$sys_cores"'}' <<< "$cpu_raw")
|
||||
|
||||
# Get CPU clock speed
|
||||
if [[ -n "$scaling_freq_file" ]]; then
|
||||
cpu_mhz=$(( $(< /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq) / 1000 ))
|
||||
else
|
||||
cpu_mhz=$(lscpu | awk -F ":" '/MHz/ {print $2;exit}')
|
||||
cpu_mhz=$(printf "%.0f" "${cpu_mhz//[[:space:]]/}")
|
||||
fi
|
||||
|
||||
# Determine whether to display CPU clock speed as MHz or GHz
|
||||
if [[ -n "$cpu_mhz" ]]; then
|
||||
[[ "$cpu_mhz" -le "999" ]] && cpu_freq="$cpu_mhz MHz" || cpu_freq="$(printf "%.1f" $(calcFunc "$cpu_mhz"/1000)) GHz"
|
||||
[[ "${cpu_freq}" == *".0"* ]] && cpu_freq="${cpu_freq/.0/}"
|
||||
fi
|
||||
|
||||
# Determine color for temperature
|
||||
if [[ -n "$temp_file" ]]; then
|
||||
if [[ "$temp_unit" == "C" ]]; then
|
||||
cpu_temp=$(printf "%.0fc\\n" "$(calcFunc "$(< $temp_file) / 1000")")
|
||||
|
||||
case "${cpu_temp::-1}" in
|
||||
-*|[0-9]|[1-3][0-9]) cpu_col="$COL_LIGHT_BLUE";;
|
||||
4[0-9]) cpu_col="";;
|
||||
5[0-9]) cpu_col="$COL_YELLOW";;
|
||||
6[0-9]) cpu_col="$COL_LIGHT_RED";;
|
||||
*) cpu_col="$COL_URG_RED";;
|
||||
esac
|
||||
|
||||
# $COL_NC$COL_DARK_GRAY is needed for $COL_URG_RED
|
||||
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
|
||||
|
||||
elif [[ "$temp_unit" == "F" ]]; then
|
||||
cpu_temp=$(printf "%.0ff\\n" "$(calcFunc "($(< $temp_file) / 1000) * 9 / 5 + 32")")
|
||||
|
||||
case "${cpu_temp::-1}" in
|
||||
-*|[0-9]|[0-9][0-9]) cpu_col="$COL_LIGHT_BLUE";;
|
||||
1[0-1][0-9]) cpu_col="";;
|
||||
1[2-3][0-9]) cpu_col="$COL_YELLOW";;
|
||||
1[4-5][0-9]) cpu_col="$COL_LIGHT_RED";;
|
||||
*) cpu_col="$COL_URG_RED";;
|
||||
esac
|
||||
|
||||
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
|
||||
|
||||
else
|
||||
cpu_temp_str=$(printf " @ %.0fk\\n" "$(calcFunc "($(< $temp_file) / 1000) + 273.15")")
|
||||
fi
|
||||
else
|
||||
cpu_temp_str=""
|
||||
fi
|
||||
|
||||
read -r -a ram_raw <<< "$(awk '/MemTotal:/{total=$2} /MemFree:/{free=$2} /Buffers:/{buffers=$2} /^Cached:/{cached=$2} END {printf "%.0f %.0f %.0f", (total-free-buffers-cached)*100/total, (total-free-buffers-cached)*1024, total*1024}' /proc/meminfo)"
|
||||
ram_perc="${ram_raw[0]}"
|
||||
ram_used="${ram_raw[1]}"
|
||||
ram_total="${ram_raw[2]}"
|
||||
|
||||
if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
|
||||
ph_status="${COL_LIGHT_GREEN}Active"
|
||||
else
|
||||
ph_status="${COL_LIGHT_RED}Offline"
|
||||
fi
|
||||
|
||||
if [[ "$DHCP_ACTIVE" == "true" ]]; then
|
||||
local ph_dhcp_range
|
||||
|
||||
ph_dhcp_range=$(seq -s "|" -f "${DHCP_START%.*}.%g" "${DHCP_START##*.}" "${DHCP_END##*.}")
|
||||
|
||||
# Count dynamic leases from available range, and not static leases
|
||||
ph_dhcp_num=$(grep -cE "$ph_dhcp_range" "/etc/pihole/dhcp.leases")
|
||||
ph_dhcp_percent=$(( ph_dhcp_num * 100 / ph_dhcp_max ))
|
||||
fi
|
||||
}
|
||||
|
||||
get_ftl_stats() {
|
||||
local stats_raw
|
||||
|
||||
mapfile -t stats_raw < <(pihole-FTL "stats")
|
||||
domains_being_blocked_raw="${stats_raw[0]#* }"
|
||||
dns_queries_today_raw="${stats_raw[1]#* }"
|
||||
ads_blocked_today_raw="${stats_raw[2]#* }"
|
||||
ads_percentage_today_raw="${stats_raw[3]#* }"
|
||||
queries_forwarded_raw="${stats_raw[5]#* }"
|
||||
queries_cached_raw="${stats_raw[6]#* }"
|
||||
|
||||
# Only retrieve these stats when not called from jsonFunc
|
||||
if [[ -z "$1" ]]; then
|
||||
local top_ad_raw
|
||||
local top_domain_raw
|
||||
local top_client_raw
|
||||
|
||||
domains_being_blocked=$(printf "%.0f\\n" "${domains_being_blocked_raw}" 2> /dev/null)
|
||||
dns_queries_today=$(printf "%.0f\\n" "${dns_queries_today_raw}")
|
||||
ads_blocked_today=$(printf "%.0f\\n" "${ads_blocked_today_raw}")
|
||||
ads_percentage_today=$(printf "%'.0f\\n" "${ads_percentage_today_raw}")
|
||||
queries_cached_percentage=$(printf "%.0f\\n" "$(calcFunc "$queries_cached_raw * 100 / ( $queries_forwarded_raw + $queries_cached_raw )")")
|
||||
recent_blocked=$(pihole-FTL recentBlocked)
|
||||
read -r -a top_ad_raw <<< "$(pihole-FTL "top-ads (1)")"
|
||||
read -r -a top_domain_raw <<< "$(pihole-FTL "top-domains (1)")"
|
||||
read -r -a top_client_raw <<< "$(pihole-FTL "top-clients (1)")"
|
||||
|
||||
top_ad="${top_ad_raw[2]}"
|
||||
top_domain="${top_domain_raw[2]}"
|
||||
if [[ "${top_client_raw[3]}" ]]; then
|
||||
top_client="${top_client_raw[3]}"
|
||||
else
|
||||
top_client="${top_client_raw[2]}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
get_strings() {
|
||||
# Expand or contract strings depending on screen size
|
||||
if [[ "$chrono_width" == "large" ]]; then
|
||||
phc_str=" ${COL_DARK_GRAY}Core"
|
||||
lte_str=" ${COL_DARK_GRAY}Web"
|
||||
ftl_str=" ${COL_DARK_GRAY}FTL"
|
||||
api_str="${COL_LIGHT_RED}API Offline"
|
||||
|
||||
host_info="$sys_type"
|
||||
sys_info="$sys_throttle"
|
||||
sys_info2="Active: $cpu_taskact of $cpu_tasks tasks"
|
||||
used_str="Used: "
|
||||
leased_str="Leased: "
|
||||
domains_being_blocked=$(printf "%'.0f" "$domains_being_blocked")
|
||||
ads_blocked_today=$(printf "%'.0f" "$ads_blocked_today")
|
||||
dns_queries_today=$(printf "%'.0f" "$dns_queries_today")
|
||||
ph_info="Blocking: $domains_being_blocked sites"
|
||||
total_str="Total: "
|
||||
else
|
||||
phc_str=" ${COL_DARK_GRAY}Core"
|
||||
lte_str=" ${COL_DARK_GRAY}Web"
|
||||
ftl_str=" ${COL_DARK_GRAY}FTL"
|
||||
api_str="${COL_LIGHT_RED}API Down"
|
||||
ph_info="$domains_being_blocked blocked"
|
||||
fi
|
||||
|
||||
[[ "$sys_cores" -ne 1 ]] && sys_cores_txt="${sys_cores}x "
|
||||
cpu_info="$sys_cores_txt$cpu_freq$cpu_temp_str"
|
||||
ram_info="$used_str$(hrBytes "$ram_used") of $(hrBytes "$ram_total")"
|
||||
disk_info="$used_str$(hrBytes "$disk_used") of $(hrBytes "$disk_total")"
|
||||
|
||||
lan_info="Gateway: $net_gateway"
|
||||
dhcp_info="$leased_str$ph_dhcp_num of $ph_dhcp_max"
|
||||
|
||||
ads_info="$total_str$ads_blocked_today of $dns_queries_today"
|
||||
dns_info="$dns_count DNS servers"
|
||||
|
||||
[[ "$recent_blocked" == "0" ]] && recent_blocked="${COL_LIGHT_RED}FTL offline${COL_NC}"
|
||||
}
|
||||
|
||||
chronoFunc() {
|
||||
local extra_arg="$1"
|
||||
local extra_value="$2"
|
||||
|
||||
get_init_stats
|
||||
|
||||
for (( ; ; )); do
|
||||
get_sys_stats
|
||||
get_ftl_stats
|
||||
get_strings
|
||||
|
||||
# Strip excess development version numbers
|
||||
if [[ "$ph_core_ver" != "-1" ]]; then
|
||||
phc_ver_str="$phc_str: ${ph_core_ver%-*}${COL_NC}"
|
||||
lte_ver_str="$lte_str: ${ph_lte_ver%-*}${COL_NC}"
|
||||
ftl_ver_str="$ftl_str: ${ph_ftl_ver%-*}${COL_NC}"
|
||||
else
|
||||
phc_ver_str="$phc_str: $api_str${COL_NC}"
|
||||
fi
|
||||
|
||||
# Get refresh number
|
||||
if [[ "${extra_arg}" = "refresh" ]]; then
|
||||
num="${extra_value}"
|
||||
num_str="Refresh set for every $num seconds"
|
||||
else
|
||||
num_str=""
|
||||
fi
|
||||
|
||||
clear
|
||||
|
||||
# Remove exit message heading on third refresh
|
||||
if [[ "$count" -le 2 ]] && [[ "${extra_arg}" != "exit" ]]; then
|
||||
echo -e " ${COL_LIGHT_GREEN}Pi-hole Chronometer${COL_NC}
|
||||
$num_str
|
||||
${COL_LIGHT_RED}Press Ctrl-C to exit${COL_NC}
|
||||
${COL_DARK_GRAY}$scr_line_str${COL_NC}"
|
||||
else
|
||||
echo -e "[0;1;31;91m|¯[0;1;33;93m¯[0;1;32;92m¯[0;1;32;92m(¯[0;1;36;96m)[0;1;34;94m_[0;1;35;95m|[0;1;33;93m¯[0;1;31;91m|_ [0;1;32;92m__[0;1;36;96m_|[0;1;31;91m¯[0;1;34;94m|[0;1;35;95m__[0;1;31;91m_[0m$phc_ver_str\\n[0;1;33;93m| ¯[0;1;32;92m_[0;1;36;96m/¯[0;1;34;94m|[0;1;35;95m_[0;1;31;91m| [0;1;33;93m' [0;1;32;92m\\/ [0;1;36;96m_ [0;1;34;94m\\ [0;1;35;95m/ [0;1;31;91m-[0;1;33;93m_)[0m$lte_ver_str\\n[0;1;32;92m|_[0;1;36;96m| [0;1;34;94m|_[0;1;35;95m| [0;1;33;93m|_[0;1;32;92m||[0;1;36;96m_\\[0;1;34;94m__[0;1;35;95m_/[0;1;31;91m_\\[0;1;33;93m__[0;1;32;92m_|[0m$ftl_ver_str\\n ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
|
||||
fi
|
||||
|
||||
printFunc " Hostname: " "$sys_name" "$host_info"
|
||||
printFunc " Uptime: " "$sys_uptime" "$sys_info"
|
||||
printFunc " Task Load: " "$sys_loadavg" "$sys_info2"
|
||||
printFunc " CPU usage: " "$cpu_perc%" "$cpu_info"
|
||||
printFunc " RAM usage: " "$ram_perc%" "$ram_info"
|
||||
printFunc " HDD usage: " "$disk_perc" "$disk_info"
|
||||
|
||||
if [[ "$DHCP_ACTIVE" == "true" ]]; then
|
||||
printFunc "DHCP usage: " "$ph_dhcp_percent%" "$dhcp_info"
|
||||
fi
|
||||
|
||||
printFunc " Pi-hole: " "$ph_status" "$ph_info"
|
||||
printFunc " Blocked: " "$ads_percentage_today%" "$ads_info"
|
||||
printFunc "Local Qrys: " "$queries_cached_percentage%" "$dns_info"
|
||||
|
||||
printFunc "Last Block: " "$recent_blocked"
|
||||
printFunc " Top Block: " "$top_ad"
|
||||
|
||||
# Provide more stats on screens with more lines
|
||||
if [[ "$scr_lines" -eq 17 ]]; then
|
||||
if [[ "$DHCP_ACTIVE" == "true" ]]; then
|
||||
printFunc "Top Domain: " "$top_domain" "last"
|
||||
else
|
||||
print_client="true"
|
||||
fi
|
||||
else
|
||||
print_client="true"
|
||||
fi
|
||||
|
||||
if [[ -n "$print_client" ]]; then
|
||||
printFunc "Top Domain: " "$top_domain"
|
||||
printFunc "Top Client: " "$top_client" "last"
|
||||
fi
|
||||
|
||||
# Handle exit/refresh options
|
||||
if [[ "${extra_arg}" == "exit" ]]; then
|
||||
exit 0
|
||||
else
|
||||
if [[ "${extra_arg}" == "refresh" ]]; then
|
||||
sleep "$num"
|
||||
else
|
||||
sleep 5
|
||||
fi
|
||||
fi
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
jsonFunc() {
|
||||
get_ftl_stats "json"
|
||||
echo "{\"domains_being_blocked\":${domains_being_blocked_raw},\"dns_queries_today\":${dns_queries_today_raw},\"ads_blocked_today\":${ads_blocked_today_raw},\"ads_percentage_today\":${ads_percentage_today_raw}}"
|
||||
}
|
||||
|
||||
helpFunc() {
|
||||
if [[ "$1" == "?" ]]; then
|
||||
echo "Unknown option. Please view 'pihole -c --help' for more information"
|
||||
else
|
||||
echo "Usage: pihole -c [options]
|
||||
Example: 'pihole -c -j'
|
||||
Calculates stats and displays to an LCD
|
||||
|
||||
Options:
|
||||
-j, --json Output stats as JSON formatted string
|
||||
-r, --refresh Set update frequency (in seconds)
|
||||
-e, --exit Output stats and exit without refreshing
|
||||
-h, --help Display this help text"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
||||
if [[ $# = 0 ]]; then
|
||||
chronoFunc
|
||||
fi
|
||||
|
||||
case "$1" in
|
||||
"-j" | "--json" ) jsonFunc;;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
"-r" | "--refresh" ) chronoFunc refresh "$2";;
|
||||
"-e" | "--exit" ) chronoFunc exit;;
|
||||
* ) helpFunc "?";;
|
||||
esac
|
|
@ -13,119 +13,150 @@
|
|||
readonly scriptPath="/etc/.pihole/advanced/Scripts/database_migration/gravity"
|
||||
|
||||
upgrade_gravityDB(){
|
||||
local database piholeDir auditFile version
|
||||
database="${1}"
|
||||
piholeDir="${2}"
|
||||
auditFile="${piholeDir}/auditlog.list"
|
||||
local database piholeDir auditFile version
|
||||
database="${1}"
|
||||
piholeDir="${2}"
|
||||
auditFile="${piholeDir}/auditlog.list"
|
||||
|
||||
# Get database version
|
||||
version="$(pihole-FTL sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
|
||||
# Exit early if the database does not exist (e.g. in CI tests)
|
||||
if [[ ! -f "${database}" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "$version" == "1" ]]; then
|
||||
# This migration script upgrades the gravity.db file by
|
||||
# adding the domain_audit table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 1 to 2"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
|
||||
version=2
|
||||
# Get database version
|
||||
version="$(pihole-FTL sqlite3 -ni "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
|
||||
|
||||
# Store audit domains in database table
|
||||
if [ -e "${auditFile}" ]; then
|
||||
echo -e " ${INFO} Migrating content of ${auditFile} into new database"
|
||||
# database_table_from_file is defined in gravity.sh
|
||||
database_table_from_file "domain_audit" "${auditFile}"
|
||||
fi
|
||||
fi
|
||||
if [[ "$version" == "2" ]]; then
|
||||
# This migration script upgrades the gravity.db file by
|
||||
# renaming the regex table to regex_blacklist, and
|
||||
# creating a new regex_whitelist table + corresponding linking table and views
|
||||
echo -e " ${INFO} Upgrading gravity database from version 2 to 3"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
|
||||
version=3
|
||||
fi
|
||||
if [[ "$version" == "3" ]]; then
|
||||
# This migration script unifies the formally separated domain
|
||||
# lists into a single table with a UNIQUE domain constraint
|
||||
echo -e " ${INFO} Upgrading gravity database from version 3 to 4"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
|
||||
version=4
|
||||
fi
|
||||
if [[ "$version" == "4" ]]; then
|
||||
# This migration script upgrades the gravity and list views
|
||||
# implementing necessary changes for per-client blocking
|
||||
echo -e " ${INFO} Upgrading gravity database from version 4 to 5"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
|
||||
version=5
|
||||
fi
|
||||
if [[ "$version" == "5" ]]; then
|
||||
# This migration script upgrades the adlist view
|
||||
# to return an ID used in gravity.sh
|
||||
echo -e " ${INFO} Upgrading gravity database from version 5 to 6"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
|
||||
version=6
|
||||
fi
|
||||
if [[ "$version" == "6" ]]; then
|
||||
# This migration script adds a special group with ID 0
|
||||
# which is automatically associated to all clients not
|
||||
# having their own group assignments
|
||||
echo -e " ${INFO} Upgrading gravity database from version 6 to 7"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
|
||||
version=7
|
||||
fi
|
||||
if [[ "$version" == "7" ]]; then
|
||||
# This migration script recreated the group table
|
||||
# to ensure uniqueness on the group name
|
||||
# We also add date_added and date_modified columns
|
||||
echo -e " ${INFO} Upgrading gravity database from version 7 to 8"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
|
||||
version=8
|
||||
fi
|
||||
if [[ "$version" == "8" ]]; then
|
||||
# This migration fixes some issues that were introduced
|
||||
# in the previous migration script.
|
||||
echo -e " ${INFO} Upgrading gravity database from version 8 to 9"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
|
||||
version=9
|
||||
fi
|
||||
if [[ "$version" == "9" ]]; then
|
||||
# This migration drops unused tables and creates triggers to remove
|
||||
# obsolete groups assignments when the linked items are deleted
|
||||
echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
|
||||
version=10
|
||||
fi
|
||||
if [[ "$version" == "10" ]]; then
|
||||
# This adds timestamp and an optional comment field to the client table
|
||||
# These fields are only temporary and will be replaces by the columns
|
||||
# defined in gravity.db.sql during gravity swapping. We add them here
|
||||
# to keep the copying process generic (needs the same columns in both the
|
||||
# source and the destination databases).
|
||||
echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
|
||||
version=11
|
||||
fi
|
||||
if [[ "$version" == "11" ]]; then
|
||||
# Rename group 0 from "Unassociated" to "Default"
|
||||
echo -e " ${INFO} Upgrading gravity database from version 11 to 12"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
|
||||
version=12
|
||||
fi
|
||||
if [[ "$version" == "12" ]]; then
|
||||
# Add column date_updated to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 12 to 13"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
|
||||
version=13
|
||||
fi
|
||||
if [[ "$version" == "13" ]]; then
|
||||
# Add columns number and status to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 13 to 14"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
|
||||
version=14
|
||||
fi
|
||||
if [[ "$version" == "14" ]]; then
|
||||
# Changes the vw_adlist created in 5_to_6
|
||||
echo -e " ${INFO} Upgrading gravity database from version 14 to 15"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
|
||||
version=15
|
||||
fi
|
||||
if [[ "$version" == "1" ]]; then
|
||||
# This migration script upgrades the gravity.db file by
|
||||
# adding the domain_audit table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 1 to 2"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/1_to_2.sql"
|
||||
version=2
|
||||
|
||||
# Store audit domains in database table
|
||||
if [ -e "${auditFile}" ]; then
|
||||
echo -e " ${INFO} Migrating content of ${auditFile} into new database"
|
||||
# database_table_from_file is defined in gravity.sh
|
||||
database_table_from_file "domain_audit" "${auditFile}"
|
||||
fi
|
||||
fi
|
||||
if [[ "$version" == "2" ]]; then
|
||||
# This migration script upgrades the gravity.db file by
|
||||
# renaming the regex table to regex_blacklist, and
|
||||
# creating a new regex_whitelist table + corresponding linking table and views
|
||||
echo -e " ${INFO} Upgrading gravity database from version 2 to 3"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/2_to_3.sql"
|
||||
version=3
|
||||
fi
|
||||
if [[ "$version" == "3" ]]; then
|
||||
# This migration script unifies the formally separated domain
|
||||
# lists into a single table with a UNIQUE domain constraint
|
||||
echo -e " ${INFO} Upgrading gravity database from version 3 to 4"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/3_to_4.sql"
|
||||
version=4
|
||||
fi
|
||||
if [[ "$version" == "4" ]]; then
|
||||
# This migration script upgrades the gravity and list views
|
||||
# implementing necessary changes for per-client blocking
|
||||
echo -e " ${INFO} Upgrading gravity database from version 4 to 5"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/4_to_5.sql"
|
||||
version=5
|
||||
fi
|
||||
if [[ "$version" == "5" ]]; then
|
||||
# This migration script upgrades the adlist view
|
||||
# to return an ID used in gravity.sh
|
||||
echo -e " ${INFO} Upgrading gravity database from version 5 to 6"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/5_to_6.sql"
|
||||
version=6
|
||||
fi
|
||||
if [[ "$version" == "6" ]]; then
|
||||
# This migration script adds a special group with ID 0
|
||||
# which is automatically associated to all clients not
|
||||
# having their own group assignments
|
||||
echo -e " ${INFO} Upgrading gravity database from version 6 to 7"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/6_to_7.sql"
|
||||
version=7
|
||||
fi
|
||||
if [[ "$version" == "7" ]]; then
|
||||
# This migration script recreated the group table
|
||||
# to ensure uniqueness on the group name
|
||||
# We also add date_added and date_modified columns
|
||||
echo -e " ${INFO} Upgrading gravity database from version 7 to 8"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/7_to_8.sql"
|
||||
version=8
|
||||
fi
|
||||
if [[ "$version" == "8" ]]; then
|
||||
# This migration fixes some issues that were introduced
|
||||
# in the previous migration script.
|
||||
echo -e " ${INFO} Upgrading gravity database from version 8 to 9"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/8_to_9.sql"
|
||||
version=9
|
||||
fi
|
||||
if [[ "$version" == "9" ]]; then
|
||||
# This migration drops unused tables and creates triggers to remove
|
||||
# obsolete groups assignments when the linked items are deleted
|
||||
echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/9_to_10.sql"
|
||||
version=10
|
||||
fi
|
||||
if [[ "$version" == "10" ]]; then
|
||||
# This adds timestamp and an optional comment field to the client table
|
||||
# These fields are only temporary and will be replaces by the columns
|
||||
# defined in gravity.db.sql during gravity swapping. We add them here
|
||||
# to keep the copying process generic (needs the same columns in both the
|
||||
# source and the destination databases).
|
||||
echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/10_to_11.sql"
|
||||
version=11
|
||||
fi
|
||||
if [[ "$version" == "11" ]]; then
|
||||
# Rename group 0 from "Unassociated" to "Default"
|
||||
echo -e " ${INFO} Upgrading gravity database from version 11 to 12"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/11_to_12.sql"
|
||||
version=12
|
||||
fi
|
||||
if [[ "$version" == "12" ]]; then
|
||||
# Add column date_updated to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 12 to 13"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/12_to_13.sql"
|
||||
version=13
|
||||
fi
|
||||
if [[ "$version" == "13" ]]; then
|
||||
# Add columns number and status to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 13 to 14"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/13_to_14.sql"
|
||||
version=14
|
||||
fi
|
||||
if [[ "$version" == "14" ]]; then
|
||||
# Changes the vw_adlist created in 5_to_6
|
||||
echo -e " ${INFO} Upgrading gravity database from version 14 to 15"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/14_to_15.sql"
|
||||
version=15
|
||||
fi
|
||||
if [[ "$version" == "15" ]]; then
|
||||
# Add column abp_entries to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 15 to 16"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/15_to_16.sql"
|
||||
version=16
|
||||
fi
|
||||
if [[ "$version" == "16" ]]; then
|
||||
# Add antigravity table
|
||||
# Add column type to adlist table (to support adlist types)
|
||||
echo -e " ${INFO} Upgrading gravity database from version 16 to 17"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/16_to_17.sql"
|
||||
version=17
|
||||
fi
|
||||
if [[ "$version" == "17" ]]; then
|
||||
# Add adlist.id to vw_gravity and vw_antigravity
|
||||
echo -e " ${INFO} Upgrading gravity database from version 17 to 18"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/17_to_18.sql"
|
||||
version=18
|
||||
fi
|
||||
if [[ "$version" == "18" ]]; then
|
||||
# Modify DELETE triggers to delete BEFORE instead of AFTER to prevent
|
||||
# foreign key constraint violations
|
||||
echo -e " ${INFO} Upgrading gravity database from version 18 to 19"
|
||||
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/18_to_19.sql"
|
||||
version=19
|
||||
fi
|
||||
}
|
||||
|
|
11
advanced/Scripts/database_migration/gravity/15_to_16.sql
Normal file
11
advanced/Scripts/database_migration/gravity/15_to_16.sql
Normal file
|
@ -0,0 +1,11 @@
|
|||
.timeout 30000
|
||||
|
||||
PRAGMA FOREIGN_KEYS=OFF;
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
ALTER TABLE adlist ADD COLUMN abp_entries INTEGER NOT NULL DEFAULT 0;
|
||||
|
||||
UPDATE info SET value = 16 WHERE property = 'version';
|
||||
|
||||
COMMIT;
|
33 advanced/Scripts/database_migration/gravity/16_to_17.sql Normal file

@@ -0,0 +1,33 @@
.timeout 30000

PRAGMA FOREIGN_KEYS=OFF;

BEGIN TRANSACTION;

ALTER TABLE adlist ADD COLUMN type INTEGER NOT NULL DEFAULT 0;

UPDATE adlist SET type = 0;

CREATE TABLE IF NOT EXISTS antigravity
(
    domain TEXT NOT NULL,
    adlist_id INTEGER NOT NULL REFERENCES adlist (id)
);

CREATE VIEW vw_antigravity AS SELECT domain, adlist_by_group.group_id AS group_id
    FROM antigravity
    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
    LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;

DROP VIEW vw_adlist;

CREATE VIEW vw_adlist AS SELECT DISTINCT address, id, type
    FROM adlist
    WHERE enabled = 1
    ORDER BY id;

UPDATE info SET value = 17 WHERE property = 'version';

COMMIT;

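To illustrate what the new view exposes (an example query, not part of the migration; "example.com" is a placeholder), a client can check whether a domain is carried by an enabled allow-type (antigravity) list with:

    pihole-FTL sqlite3 -ni "/etc/pihole/gravity.db" \
        "SELECT domain, group_id FROM vw_antigravity WHERE domain = 'example.com';"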
25 advanced/Scripts/database_migration/gravity/17_to_18.sql Normal file

@@ -0,0 +1,25 @@
.timeout 30000

PRAGMA FOREIGN_KEYS=OFF;

BEGIN TRANSACTION;

DROP VIEW vw_gravity;
CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
    FROM gravity
    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
    LEFT JOIN adlist ON adlist.id = gravity.adlist_id
    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);

DROP VIEW vw_antigravity;
CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
    FROM antigravity
    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
    LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;

UPDATE info SET value = 18 WHERE property = 'version';

COMMIT;

27 advanced/Scripts/database_migration/gravity/18_to_19.sql Normal file

@@ -0,0 +1,27 @@
.timeout 30000

PRAGMA FOREIGN_KEYS=OFF;

BEGIN TRANSACTION;

DROP TRIGGER tr_domainlist_delete;
CREATE TRIGGER tr_domainlist_delete BEFORE DELETE ON domainlist
    BEGIN
        DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
    END;

DROP TRIGGER tr_adlist_delete;
CREATE TRIGGER tr_adlist_delete BEFORE DELETE ON adlist
    BEGIN
        DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
    END;

DROP TRIGGER tr_client_delete;
CREATE TRIGGER tr_client_delete BEFORE DELETE ON client
    BEGIN
        DELETE FROM client_by_group WHERE client_id = OLD.id;
    END;

UPDATE info SET value = 19 WHERE property = 'version';

COMMIT;

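The practical effect of switching these triggers to BEFORE DELETE (a sketch, assuming foreign-key enforcement is active and using a made-up row id): removing a list or client now clears its *_by_group rows within the same statement, so the parent delete no longer violates the referencing table's constraint.

    pihole-FTL sqlite3 -ni "/etc/pihole/gravity.db" \
        "PRAGMA foreign_keys=ON; DELETE FROM adlist WHERE id = 5;"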
|
@ -4,9 +4,9 @@ BEGIN TRANSACTION;
|
|||
|
||||
CREATE TABLE domain_audit
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
|
||||
);
|
||||
|
||||
UPDATE info SET value = 2 WHERE property = 'version';
|
||||
|
|
|
@ -8,9 +8,9 @@ ALTER TABLE regex RENAME TO regex_blacklist;
|
|||
|
||||
CREATE TABLE regex_blacklist_by_group
|
||||
(
|
||||
regex_blacklist_id INTEGER NOT NULL REFERENCES regex_blacklist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (regex_blacklist_id, group_id)
|
||||
regex_blacklist_id INTEGER NOT NULL REFERENCES regex_blacklist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (regex_blacklist_id, group_id)
|
||||
);
|
||||
|
||||
INSERT INTO regex_blacklist_by_group SELECT * FROM regex_by_group;
|
||||
|
@ -32,19 +32,19 @@ CREATE TRIGGER tr_regex_blacklist_update AFTER UPDATE ON regex_blacklist
|
|||
|
||||
CREATE TABLE regex_whitelist
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
);
|
||||
|
||||
CREATE TABLE regex_whitelist_by_group
|
||||
(
|
||||
regex_whitelist_id INTEGER NOT NULL REFERENCES regex_whitelist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (regex_whitelist_id, group_id)
|
||||
regex_whitelist_id INTEGER NOT NULL REFERENCES regex_whitelist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (regex_whitelist_id, group_id)
|
||||
);
|
||||
|
||||
CREATE VIEW vw_regex_whitelist AS SELECT DISTINCT domain
|
||||
|
|
|
@ -6,13 +6,13 @@ BEGIN TRANSACTION;
|
|||
|
||||
CREATE TABLE domainlist
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type INTEGER NOT NULL DEFAULT 0,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type INTEGER NOT NULL DEFAULT 0,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
);
|
||||
|
||||
ALTER TABLE whitelist ADD COLUMN type INTEGER;
|
||||
|
@ -41,9 +41,9 @@ DROP TABLE regex_whitelist_by_group;
|
|||
DROP TABLE regex_blacklist_by_group;
|
||||
CREATE TABLE domainlist_by_group
|
||||
(
|
||||
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (domainlist_id, group_id)
|
||||
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (domainlist_id, group_id)
|
||||
);
|
||||
|
||||
DROP TRIGGER tr_whitelist_update;
|
||||
|
|
|
@ -7,9 +7,9 @@ BEGIN TRANSACTION;
|
|||
DROP TABLE gravity;
|
||||
CREATE TABLE gravity
|
||||
(
|
||||
domain TEXT NOT NULL,
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
|
||||
PRIMARY KEY(domain, adlist_id)
|
||||
domain TEXT NOT NULL,
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
|
||||
PRIMARY KEY(domain, adlist_id)
|
||||
);
|
||||
|
||||
DROP VIEW vw_gravity;
|
||||
|
@ -22,15 +22,15 @@ CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
|
|||
|
||||
CREATE TABLE client
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT NOT NULL UNIQUE
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT NOT NULL UNIQUE
|
||||
);
|
||||
|
||||
CREATE TABLE client_by_group
|
||||
(
|
||||
client_id INTEGER NOT NULL REFERENCES client (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (client_id, group_id)
|
||||
client_id INTEGER NOT NULL REFERENCES client (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (client_id, group_id)
|
||||
);
|
||||
|
||||
UPDATE info SET value = 5 WHERE property = 'version';
|
||||
|
|
|
@ -15,4 +15,3 @@ CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
|
|||
UPDATE info SET value = 6 WHERE property = 'version';
|
||||
|
||||
COMMIT;
|
||||
|
||||
|
|
|
@ -8,12 +8,12 @@ ALTER TABLE "group" RENAME TO "group__";
|
|||
|
||||
CREATE TABLE "group"
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
description TEXT
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
description TEXT
|
||||
);
|
||||
|
||||
CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
|
||||
|
|
|
@ -5,261 +5,187 @@
|
|||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Whitelist and blacklist domains
|
||||
# allowlist and denylist domains
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# Globals
|
||||
piholeDir="/etc/pihole"
|
||||
GRAVITYDB="${piholeDir}/gravity.db"
|
||||
# Source pihole-FTL from install script
|
||||
pihole_FTL="${piholeDir}/pihole-FTL.conf"
|
||||
if [[ -f "${pihole_FTL}" ]]; then
|
||||
source "${pihole_FTL}"
|
||||
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
|
||||
readonly utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
readonly apifile="${PI_HOLE_SCRIPT_DIR}/api.sh"
|
||||
source "${apifile}"
|
||||
|
||||
# Determine database location
|
||||
DBFILE=$(getFTLConfigValue "files.database")
|
||||
if [ -z "$DBFILE" ]; then
|
||||
DBFILE="/etc/pihole/pihole-FTL.db"
|
||||
fi
|
||||
|
||||
# Set this only after sourcing pihole-FTL.conf as the gravity database path may
|
||||
# have changed
|
||||
gravityDBfile="${GRAVITYDB}"
|
||||
# Determine gravity database location
|
||||
GRAVITYDB=$(getFTLConfigValue "files.gravity")
|
||||
if [ -z "$GRAVITYDB" ]; then
|
||||
GRAVITYDB="/etc/pihole/gravity.db"
|
||||
fi
|
||||
|
||||
noReloadRequested=false
|
||||
addmode=true
|
||||
verbose=true
|
||||
wildcard=false
|
||||
web=false
|
||||
|
||||
domList=()
|
||||
|
||||
typeId=""
|
||||
comment=""
|
||||
declare -i domaincount
|
||||
domaincount=0
|
||||
reload=false
|
||||
|
||||
colfile="/opt/pihole/COL_TABLE"
|
||||
source ${colfile}
|
||||
|
||||
# IDs are hard-wired to domain interpretation in the gravity database scheme
|
||||
# Clients (including FTL) will read them through the corresponding views
|
||||
readonly whitelist="0"
|
||||
readonly blacklist="1"
|
||||
readonly regex_whitelist="2"
|
||||
readonly regex_blacklist="3"
|
||||
|
||||
GetListnameFromTypeId() {
|
||||
if [[ "$1" == "${whitelist}" ]]; then
|
||||
echo "whitelist"
|
||||
elif [[ "$1" == "${blacklist}" ]]; then
|
||||
echo "blacklist"
|
||||
elif [[ "$1" == "${regex_whitelist}" ]]; then
|
||||
echo "regex whitelist"
|
||||
elif [[ "$1" == "${regex_blacklist}" ]]; then
|
||||
echo "regex blacklist"
|
||||
fi
|
||||
}
|
||||
|
||||
GetListParamFromTypeId() {
|
||||
if [[ "${typeId}" == "${whitelist}" ]]; then
|
||||
echo "w"
|
||||
elif [[ "${typeId}" == "${blacklist}" ]]; then
|
||||
echo "b"
|
||||
elif [[ "${typeId}" == "${regex_whitelist}" && "${wildcard}" == true ]]; then
|
||||
echo "-white-wild"
|
||||
elif [[ "${typeId}" == "${regex_whitelist}" ]]; then
|
||||
echo "-white-regex"
|
||||
elif [[ "${typeId}" == "${regex_blacklist}" && "${wildcard}" == true ]]; then
|
||||
echo "-wild"
|
||||
elif [[ "${typeId}" == "${regex_blacklist}" ]]; then
|
||||
echo "-regex"
|
||||
fi
|
||||
}
|
||||
|
||||
helpFunc() {
|
||||
local listname param
|
||||
|
||||
listname="$(GetListnameFromTypeId "${typeId}")"
|
||||
param="$(GetListParamFromTypeId)"
|
||||
|
||||
echo "Usage: pihole -${param} [options] <domain> <domain2 ...>
|
||||
Example: 'pihole -${param} site.com', or 'pihole -${param} site1.com site2.com'
|
||||
${listname^} one or more domains
|
||||
echo "Usage: pihole ${abbrv} [options] <domain> <domain2 ...>
|
||||
Example: 'pihole ${abbrv} site.com', or 'pihole ${abbrv} site1.com site2.com'
|
||||
${typeId^} one or more ${kindId} domains
|
||||
|
||||
Options:
|
||||
-d, --delmode Remove domain(s) from the ${listname}
|
||||
-nr, --noreload Update ${listname} without reloading the DNS server
|
||||
remove, delete, -d Remove domain(s)
|
||||
-q, --quiet Make output less verbose
|
||||
-h, --help Show this help dialog
|
||||
-l, --list Display all your ${listname}listed domains
|
||||
--nuke Removes all entries in a list
|
||||
-l, --list Display domains
|
||||
--comment \"text\" Add a comment to the domain. If adding multiple domains the same comment will be used for all"
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
||||
ValidateDomain() {
|
||||
# Convert to lowercase
|
||||
domain="${1,,}"
|
||||
local str validDomain
|
||||
|
||||
# Check validity of domain (don't check for regex entries)
|
||||
if [[ ( "${typeId}" == "${regex_blacklist}" || "${typeId}" == "${regex_whitelist}" ) && "${wildcard}" == false ]]; then
|
||||
validDomain="${domain}"
|
||||
else
|
||||
# Check max length
|
||||
if [[ "${#domain}" -le 253 ]]; then
|
||||
validDomain=$(grep -P "^((-|_)*[a-z\\d]((-|_)*[a-z\\d])*(-|_)*)(\\.(-|_)*([a-z\\d]((-|_)*[a-z\\d])*))*$" <<< "${domain}") # Valid chars check
|
||||
validDomain=$(grep -P "^[^\\.]{1,63}(\\.[^\\.]{1,63})*$" <<< "${validDomain}") # Length of each label
|
||||
# set error string
|
||||
str="is not a valid argument or domain name!"
|
||||
else
|
||||
validDomain=
|
||||
str="is too long!"
|
||||
|
||||
fi
|
||||
CreateDomainList() {
|
||||
# Format domain into regex filter if requested
|
||||
local dom=${1}
|
||||
if [[ "${wildcard}" == true ]]; then
|
||||
dom="(\\.|^)${dom//\./\\.}$"
|
||||
fi
|
||||
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
domList=("${domList[@]}" "${validDomain}")
|
||||
else
|
||||
echo -e " ${CROSS} ${domain} ${str}"
|
||||
fi
|
||||
|
||||
domaincount=$((domaincount+1))
|
||||
}
|
||||
|
||||
ProcessDomainList() {
|
||||
for dom in "${domList[@]}"; do
|
||||
# Format domain into regex filter if requested
|
||||
if [[ "${wildcard}" == true ]]; then
|
||||
dom="(\\.|^)${dom//\./\\.}$"
|
||||
fi
|
||||
|
||||
# Logic: If addmode then add to desired list and remove from the other;
|
||||
# if delmode then remove from desired list but do not add to the other
|
||||
if ${addmode}; then
|
||||
AddDomain "${dom}"
|
||||
else
|
||||
RemoveDomain "${dom}"
|
||||
fi
|
||||
done
|
||||
domList=("${domList[@]}" "${dom}")
|
||||
}
|
||||
|
||||
AddDomain() {
|
||||
local domain num requestedListname existingTypeId existingListname
|
||||
domain="$1"
|
||||
local json num data
|
||||
|
||||
# Is the domain in the list we want to add it to?
|
||||
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
# Authenticate with the API
|
||||
LoginAPI
|
||||
|
||||
if [[ "${num}" -ne 0 ]]; then
|
||||
existingTypeId="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
|
||||
if [[ "${existingTypeId}" == "${typeId}" ]]; then
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
|
||||
# Prepare request to POST /api/domains/{type}/{kind}
|
||||
# Build JSON object of the following form
|
||||
# {
|
||||
# "domain": [ <domains> ],
|
||||
# "comment": <comment>
|
||||
# }
|
||||
# where <domains> is an array of domain strings and <comment> is a string
|
||||
# We use jq to build the JSON object
|
||||
json=$(jq --null-input --compact-output --arg domains "${domList[*]}" --arg comment "${comment}" '{domain: $domains | split(" "), comment: $comment}')
|
||||
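# (Illustrative, not part of the change) With hypothetical inputs
# domList=(example.com ads.example.net) and comment="testing", the jq call above emits:
#   {"domain":["example.com","ads.example.net"],"comment":"testing"}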
|
||||
# Send the request
|
||||
data=$(PostFTLData "domains/${typeId}/${kindId}" "${json}")
|
||||
|
||||
# Display domain(s) added
|
||||
# (they are listed in .processed.success, use jq)
|
||||
num=$(echo "${data}" | jq '.processed.success | length')
|
||||
if [[ "${num}" -gt 0 ]] && [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${TICK} Added ${num} domain(s):"
|
||||
for i in $(seq 0 $((num-1))); do
|
||||
echo -e " - ${COL_BLUE}$(echo "${data}" | jq --raw-output ".processed.success[$i].item")${COL_NC}"
|
||||
done
|
||||
fi
|
||||
# Display failed domain(s)
|
||||
# (they are listed in .processed.errors, use jq)
|
||||
num=$(echo "${data}" | jq '.processed.errors | length')
|
||||
if [[ "${num}" -gt 0 ]] && [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${CROSS} Failed to add ${num} domain(s):"
|
||||
for i in $(seq 0 $((num-1))); do
|
||||
echo -e " - ${COL_BLUE}$(echo "${data}" | jq --raw-output ".processed.errors[$i].item")${COL_NC}"
|
||||
error=$(echo "${data}" | jq --raw-output ".processed.errors[$i].error")
|
||||
if [[ "${error}" == "UNIQUE constraint failed: domainlist.domain, domainlist.type" ]]; then
|
||||
error="Domain already in the specified list"
|
||||
fi
|
||||
else
|
||||
existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
|
||||
fi
|
||||
fi
|
||||
return
|
||||
echo -e " ${error}"
|
||||
done
|
||||
fi
|
||||
|
||||
# Domain not found in the table, add it!
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} Adding ${domain} to the ${requestedListname}..."
|
||||
fi
|
||||
reload=true
|
||||
# Insert only the domain here. The enabled and date_added fields will be filled
|
||||
# with their default values (enabled = true, date_added = current timestamp)
|
||||
if [[ -z "${comment}" ]]; then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
|
||||
else
|
||||
# also add comment when variable has been set through the "--comment" option
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
|
||||
fi
|
||||
# Log out
|
||||
LogoutAPI
|
||||
}
|
||||
|
||||
RemoveDomain() {
|
||||
local domain num requestedListname
|
||||
domain="$1"
|
||||
local json num data status
|
||||
|
||||
# Is the domain in the list we want to remove it from?
|
||||
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
|
||||
# Authenticate with the API
|
||||
LoginAPI
|
||||
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
# Prepare request to POST /api/domains:batchDelete
|
||||
# Build JSON object of the following form
|
||||
# [{
|
||||
# "item": <domain>,
|
||||
# "type": "${typeId}",
|
||||
# "kind": "${kindId}",
|
||||
# }]
|
||||
# where <domain> is the domain string and ${typeId} and ${kindId} are the type and kind IDs
|
||||
# We use jq to build the JSON object)
|
||||
json=$(jq --null-input --compact-output --arg domains "${domList[*]}" --arg typeId "${typeId}" --arg kindId "${kindId}" '[ $domains | split(" ")[] as $item | {item: $item, type: $typeId, kind: $kindId} ]')
|
||||
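# (Illustrative, not part of the change) For a hypothetical domList=(example.com)
# with typeId="deny" and kindId="exact", the jq call above emits:
#   [{"item":"example.com","type":"deny","kind":"exact"}]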
|
||||
if [[ "${num}" -eq 0 ]]; then
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${domain} does not exist in ${requestedListname}, no need to remove!"
|
||||
fi
|
||||
return
|
||||
# Send the request
|
||||
data=$(PostFTLData "domains:batchDelete" "${json}" "status")
|
||||
# Separate the status from the data
|
||||
status=$(printf %s "${data#"${data%???}"}")
|
||||
data=$(printf %s "${data%???}")
|
||||
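# (Illustrative) The last three characters of the returned string carry the HTTP
# status, so a response like '{"processed":null}204' splits into status="204" plus
# the JSON body: ${data%???} drops the final three characters and
# ${data#"${data%???}"} keeps only them.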
|
||||
# If there is an .error object in the returned data, display it
|
||||
local error
|
||||
error=$(jq --compact-output <<< "${data}" '.error')
|
||||
if [[ $error != "null" && $error != "" ]]; then
|
||||
echo -e " ${CROSS} Failed to remove domain(s):"
|
||||
echo -e " $(jq <<< "${data}" '.error')"
|
||||
elif [[ "${verbose}" == true && "${status}" == "204" ]]; then
|
||||
echo -e " ${TICK} Domain(s) removed from the ${kindId} ${typeId}list"
|
||||
elif [[ "${verbose}" == true && "${status}" == "404" ]]; then
|
||||
echo -e " ${TICK} Requested domain(s) not found on ${kindId} ${typeId}list"
|
||||
fi
|
||||
|
||||
# Domain found in the table, remove it!
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} Removing ${domain} from the ${requestedListname}..."
|
||||
fi
|
||||
reload=true
|
||||
# Remove it from the current list
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
|
||||
# Log out
|
||||
LogoutAPI
|
||||
}
|
||||
|
||||
Displaylist() {
|
||||
local count num_pipes domain enabled status nicedate requestedListname
|
||||
local data
|
||||
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
data="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
|
||||
|
||||
if [[ -z $data ]]; then
|
||||
echo -e "Not showing empty list"
|
||||
else
|
||||
echo -e "Displaying ${requestedListname}:"
|
||||
count=1
|
||||
while IFS= read -r line
|
||||
do
|
||||
# Count number of pipes seen in this line
|
||||
# This is necessary because we can only detect the pipe separating the fields
|
||||
# from the end backwards as the domain (which is the first field) may contain
|
||||
# pipe symbols as they are perfectly valid regex filter control characters
|
||||
num_pipes="$(grep -c "^" <<< "$(grep -o "|" <<< "${line}")")"
|
||||
|
||||
# Extract domain and enabled status based on the obtained number of pipe characters
|
||||
domain="$(cut -d'|' -f"-$((num_pipes-1))" <<< "${line}")"
|
||||
enabled="$(cut -d'|' -f"$((num_pipes))" <<< "${line}")"
|
||||
datemod="$(cut -d'|' -f"$((num_pipes+1))" <<< "${line}")"
|
||||
|
||||
# Translate boolean status into human readable string
|
||||
if [[ "${enabled}" -eq 1 ]]; then
|
||||
status="enabled"
|
||||
else
|
||||
status="disabled"
|
||||
fi
|
||||
|
||||
# Get nice representation of numerical date stored in database
|
||||
nicedate=$(date --rfc-2822 -d "@${datemod}")
|
||||
|
||||
echo " ${count}: ${domain} (${status}, last modified ${nicedate})"
|
||||
count=$((count+1))
|
||||
done <<< "${data}"
|
||||
# if either typeId or kindId is empty, we cannot display the list
|
||||
if [[ -z "${typeId}" ]] || [[ -z "${kindId}" ]]; then
|
||||
echo " ${CROSS} Unable to display list. Please specify a list type and kind."
|
||||
exit 1
|
||||
fi
|
||||
exit 0;
|
||||
}
|
||||
|
||||
NukeList() {
|
||||
count=$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
|
||||
listname="$(GetListnameFromTypeId "${typeId}")"
|
||||
if [ "$count" -gt 0 ];then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
|
||||
echo " ${TICK} Removed ${count} domain(s) from the ${listname}"
|
||||
# Authenticate with the API
|
||||
LoginAPI
|
||||
|
||||
# Send the request
|
||||
data=$(GetFTLData "domains/${typeId}/${kindId}")
|
||||
|
||||
# Display the list
|
||||
num=$(echo "${data}" | jq '.domains | length')
|
||||
if [[ "${num}" -gt 0 ]]; then
|
||||
echo -e " ${TICK} Found ${num} domain(s) in the ${kindId} ${typeId}list:"
|
||||
for i in $(seq 0 $((num-1))); do
|
||||
echo -e " - ${COL_BLUE}$(echo "${data}" | jq --compact-output ".domains[$i].domain")${COL_NC}"
|
||||
echo -e " Comment: $(echo "${data}" | jq --compact-output ".domains[$i].comment")"
|
||||
echo -e " Groups: $(echo "${data}" | jq --compact-output ".domains[$i].groups")"
|
||||
echo -e " Added: $(date -d @"$(echo "${data}" | jq --compact-output ".domains[$i].date_added")")"
|
||||
echo -e " Last modified: $(date -d @"$(echo "${data}" | jq --compact-output ".domains[$i].date_modified")")"
|
||||
done
|
||||
else
|
||||
echo " ${INFO} ${listname} already empty. Nothing to do!"
|
||||
echo -e " ${INFO} No domains found in the ${kindId} ${typeId}list"
|
||||
fi
|
||||
exit 0;
|
||||
|
||||
# Log out
|
||||
LogoutAPI
|
||||
|
||||
# Return early without adding/deleting domains
|
||||
exit 0
|
||||
}
|
||||
|
||||
GetComment() {
|
||||
|
@ -272,38 +198,30 @@ GetComment() {
|
|||
|
||||
while (( "$#" )); do
|
||||
case "${1}" in
|
||||
"-w" | "whitelist" ) typeId=0;;
|
||||
"-b" | "blacklist" ) typeId=1;;
|
||||
"--white-regex" | "white-regex" ) typeId=2;;
|
||||
"--white-wild" | "white-wild" ) typeId=2; wildcard=true;;
|
||||
"--wild" | "wildcard" ) typeId=3; wildcard=true;;
|
||||
"--regex" | "regex" ) typeId=3;;
|
||||
"-nr"| "--noreload" ) noReloadRequested=true;;
|
||||
"-d" | "--delmode" ) addmode=false;;
|
||||
"allow" | "allowlist" ) kindId="exact"; typeId="allow"; abbrv="allow";;
|
||||
"deny" | "denylist" ) kindId="exact"; typeId="deny"; abbrv="deny";;
|
||||
"--allow-regex" | "allow-regex" ) kindId="regex"; typeId="allow"; abbrv="--allow-regex";;
|
||||
"--allow-wild" | "allow-wild" ) kindId="regex"; typeId="allow"; wildcard=true; abbrv="--allow-wild";;
|
||||
"--regex" | "regex" ) kindId="regex"; typeId="deny"; abbrv="--regex";;
|
||||
"--wild" | "wildcard" ) kindId="regex"; typeId="deny"; wildcard=true; abbrv="--wild";;
|
||||
"-d" | "remove" | "delete" ) addmode=false;;
|
||||
"-q" | "--quiet" ) verbose=false;;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
"-l" | "--list" ) Displaylist;;
|
||||
"--nuke" ) NukeList;;
|
||||
"--web" ) web=true;;
|
||||
"--comment" ) GetComment "${2}"; shift;;
|
||||
* ) ValidateDomain "${1}";;
|
||||
* ) CreateDomainList "${1}";;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
shift
|
||||
|
||||
if [[ ${domaincount} == 0 ]]; then
|
||||
if [[ ${#domList[@]} == 0 ]]; then
|
||||
helpFunc
|
||||
fi
|
||||
|
||||
ProcessDomainList
|
||||
|
||||
# Used on web interface
|
||||
if $web; then
|
||||
echo "DONE"
|
||||
fi
|
||||
|
||||
if [[ ${reload} == true && ${noReloadRequested} == false ]]; then
|
||||
pihole restartdns reload-lists
|
||||
if ${addmode}; then
|
||||
AddDomain
|
||||
else
|
||||
RemoveDomain
|
||||
fi
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2020 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
#
|
||||
#
|
||||
# The pihole disable command has the option to set a specified time before
|
||||
# blocking is automatically re-enabled.
|
||||
#
|
||||
# Present script is responsible for the sleep & re-enable part of the job and
|
||||
# is automatically terminated if it is still running when pihole is enabled by
|
||||
# other means.
|
||||
#
|
||||
# This ensures that pihole ends up in the correct state after a sequence of
|
||||
# commands suchs as: `pihole disable 30s; pihole enable; pihole disable`
|
||||
|
||||
readonly PI_HOLE_BIN_DIR="/usr/local/bin"
|
||||
|
||||
sleep "${1}"
|
||||
"${PI_HOLE_BIN_DIR}"/pihole enable
|
|
@ -15,31 +15,33 @@ if [[ -f ${coltable} ]]; then
|
|||
source ${coltable}
|
||||
fi
|
||||
|
||||
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
# Determine database location
|
||||
# Obtain DBFILE=... setting from pihole-FTL.db
|
||||
# Constructed to return nothing when
|
||||
# a) the setting is not present in the config file, or
|
||||
# b) the setting is commented out (e.g. "#DBFILE=...")
|
||||
FTLconf="/etc/pihole/pihole-FTL.conf"
|
||||
if [ -e "$FTLconf" ]; then
|
||||
DBFILE="$(sed -n -e 's/^\s*DBFILE\s*=\s*//p' ${FTLconf})"
|
||||
fi
|
||||
# Test for empty string. Use standard path in this case.
|
||||
DBFILE=$(getFTLConfigValue "files.database")
|
||||
if [ -z "$DBFILE" ]; then
|
||||
DBFILE="/etc/pihole/pihole-FTL.db"
|
||||
fi
|
||||
|
||||
|
||||
flushARP(){
|
||||
local output
|
||||
if [[ "${args[1]}" != "quiet" ]]; then
|
||||
echo -ne " ${INFO} Flushing network table ..."
|
||||
fi
|
||||
|
||||
# Stop FTL to prevent database access
|
||||
if ! output=$(pihole-FTL service stop 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to stop FTL"
|
||||
echo " Output: ${output}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Truncate network_addresses table in pihole-FTL.db
|
||||
# This needs to be done before we can truncate the network table due to
|
||||
# foreign key constraints
|
||||
if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
|
||||
if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to truncate network_addresses table"
|
||||
echo " Database location: ${DBFILE}"
|
||||
echo " Output: ${output}"
|
||||
|
@ -47,13 +49,27 @@ flushARP(){
|
|||
fi
|
||||
|
||||
# Truncate network table in pihole-FTL.db
|
||||
if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
|
||||
if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network" 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to truncate network table"
|
||||
echo " Database location: ${DBFILE}"
|
||||
echo " Output: ${output}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Flush ARP cache of the host
|
||||
if ! output=$(ip -s -s neigh flush all 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to flush ARP cache"
|
||||
echo " Output: ${output}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Start FTL again
|
||||
if ! output=$(pihole-FTL service restart 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to restart FTL"
|
||||
echo " Output: ${output}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ "${args[1]}" != "quiet" ]]; then
|
||||
echo -e "${OVER} ${TICK} Flushed network table"
|
||||
fi
|
||||
|
|
|
@ -16,15 +16,12 @@ source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
|
|||
# webInterfaceDir set in basic-install.sh
|
||||
# piholeGitURL set in basic-install.sh
|
||||
# is_repo() sourced from basic-install.sh
|
||||
# setupVars set in basic-install.sh
|
||||
# check_download_exists sourced from basic-install.sh
|
||||
# fully_fetch_repo sourced from basic-install.sh
|
||||
# get_available_branches sourced from basic-install.sh
|
||||
# fetch_checkout_pull_branch sourced from basic-install.sh
|
||||
# checkout_pull_branch sourced from basic-install.sh
|
||||
|
||||
source "${setupVars}"
|
||||
|
||||
warning1() {
|
||||
echo " Please note that changing branches severely alters your Pi-hole subsystems"
|
||||
echo " Features that work on the master branch, may not on a development branch"
|
||||
|
@ -61,12 +58,11 @@ checkout() {
|
|||
echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
|
||||
exit 1;
|
||||
fi
|
||||
if [[ "${INSTALL_WEB_INTERFACE}" == "true" ]]; then
|
||||
if ! is_repo "${webInterfaceDir}" ; then
|
||||
echo -e " ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
|
||||
echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if ! is_repo "${webInterfaceDir}" ; then
|
||||
echo -e " ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
|
||||
echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if [[ -z "${1}" ]]; then
|
||||
|
@ -81,15 +77,13 @@ checkout() {
|
|||
|
||||
if [[ "${1}" == "dev" ]] ; then
|
||||
# Shortcut to check out development branches
|
||||
echo -e " ${INFO} Shortcut \"dev\" detected - checking out development / devel branches..."
|
||||
echo -e " ${INFO} Shortcut \"${COL_YELLOW}dev${COL_NC}\" detected - checking out development branches..."
|
||||
echo ""
|
||||
echo -e " ${INFO} Pi-hole Core"
|
||||
fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "development" || { echo " ${CROSS} Unable to pull Core development branch"; exit 1; }
|
||||
if [[ "${INSTALL_WEB_INTERFACE}" == "true" ]]; then
|
||||
echo ""
|
||||
echo -e " ${INFO} Web interface"
|
||||
fetch_checkout_pull_branch "${webInterfaceDir}" "devel" || { echo " ${CROSS} Unable to pull Web development branch"; exit 1; }
|
||||
fi
|
||||
echo ""
|
||||
echo -e " ${INFO} Web interface"
|
||||
fetch_checkout_pull_branch "${webInterfaceDir}" "development" || { echo " ${CROSS} Unable to pull Web development branch"; exit 1; }
|
||||
#echo -e " ${TICK} Pi-hole Core"
|
||||
|
||||
local path
|
||||
|
@ -98,13 +92,11 @@ checkout() {
|
|||
chmod 644 /etc/pihole/ftlbranch
|
||||
elif [[ "${1}" == "master" ]] ; then
|
||||
# Shortcut to check out master branches
|
||||
echo -e " ${INFO} Shortcut \"master\" detected - checking out master branches..."
|
||||
echo -e " ${INFO} Shortcut \"${COL_YELLOW}master${COL_NC}\" detected - checking out master branches..."
|
||||
echo -e " ${INFO} Pi-hole core"
|
||||
fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "master" || { echo " ${CROSS} Unable to pull Core master branch"; exit 1; }
|
||||
if [[ ${INSTALL_WEB_INTERFACE} == "true" ]]; then
|
||||
echo -e " ${INFO} Web interface"
|
||||
fetch_checkout_pull_branch "${webInterfaceDir}" "master" || { echo " ${CROSS} Unable to pull Web master branch"; exit 1; }
|
||||
fi
|
||||
echo -e " ${INFO} Web interface"
|
||||
fetch_checkout_pull_branch "${webInterfaceDir}" "master" || { echo " ${CROSS} Unable to pull Web master branch"; exit 1; }
|
||||
#echo -e " ${TICK} Web Interface"
|
||||
local path
|
||||
path="master/${binary}"
|
||||
|
@ -131,13 +123,13 @@ checkout() {
|
|||
echo ""
|
||||
# Have the user choose the branch they want
|
||||
if ! (for e in "${corebranches[@]}"; do [[ "$e" == "${2}" ]] && exit 0; done); then
|
||||
echo -e " ${INFO} Requested branch \"${2}\" is not available"
|
||||
echo -e " ${INFO} Requested branch \"${COL_CYAN}${2}${COL_NC}\" is not available"
|
||||
echo -e " ${INFO} Available branches for Core are:"
|
||||
for e in "${corebranches[@]}"; do echo " - $e"; done
|
||||
exit 1
|
||||
fi
|
||||
checkout_pull_branch "${PI_HOLE_FILES_DIR}" "${2}"
|
||||
elif [[ "${1}" == "web" ]] && [[ "${INSTALL_WEB_INTERFACE}" == "true" ]] ; then
|
||||
elif [[ "${1}" == "web" ]] ; then
|
||||
str="Fetching branches from ${webInterfaceGitUrl}"
|
||||
echo -ne " ${INFO} $str"
|
||||
if ! fully_fetch_repo "${webInterfaceDir}" ; then
|
||||
|
@ -158,7 +150,7 @@ checkout() {
|
|||
echo ""
|
||||
# Have the user choose the branch they want
|
||||
if ! (for e in "${webbranches[@]}"; do [[ "$e" == "${2}" ]] && exit 0; done); then
|
||||
echo -e " ${INFO} Requested branch \"${2}\" is not available"
|
||||
echo -e " ${INFO} Requested branch \"${COL_CYAN}${2}${COL_NC}\" is not available"
|
||||
echo -e " ${INFO} Available branches for Web Admin are:"
|
||||
for e in "${webbranches[@]}"; do echo " - $e"; done
|
||||
exit 1
|
||||
|
@ -169,29 +161,71 @@ checkout() {
|
|||
elif [[ "${1}" == "ftl" ]] ; then
|
||||
local path
|
||||
local oldbranch
|
||||
local existing=false
|
||||
path="${2}/${binary}"
|
||||
oldbranch="$(pihole-FTL -b)"
|
||||
|
||||
if check_download_exists "$path"; then
|
||||
echo " ${TICK} Branch ${2} exists"
|
||||
echo "${2}" > /etc/pihole/ftlbranch
|
||||
chmod 644 /etc/pihole/ftlbranch
|
||||
echo -e " ${INFO} Switching to branch: \"${2}\" from \"${oldbranch}\""
|
||||
FTLinstall "${binary}"
|
||||
restart_service pihole-FTL
|
||||
enable_service pihole-FTL
|
||||
# Update local and remote versions via updatechecker
|
||||
/opt/pihole/updatecheck.sh
|
||||
else
|
||||
echo " ${CROSS} Requested branch \"${2}\" is not available"
|
||||
ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep 'heads' | sed 's/refs\/heads\///;s/ //g' | awk '{print $2}') )
|
||||
echo -e " ${INFO} Available branches for FTL are:"
|
||||
for e in "${ftlbranches[@]}"; do echo " - $e"; done
|
||||
# Check if requested branch is available
|
||||
echo -e " ${INFO} Checking for availability of branch ${COL_CYAN}${2}${COL_NC} on GitHub"
|
||||
ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep "refs/heads" | cut -d'/' -f3- -) )
|
||||
# If returned array is empty -> connectivity issue
|
||||
if [[ ${#ftlbranches[@]} -eq 0 ]]; then
|
||||
echo -e " ${CROSS} Unable to fetch branches from GitHub. Please check your Internet connection and try again later."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for e in "${ftlbranches[@]}"; do [[ "$e" == "${2}" ]] && existing=true; done
|
||||
if [[ "${existing}" == false ]]; then
|
||||
echo -e " ${CROSS} Requested branch is not available\n"
|
||||
echo -e " ${INFO} Available branches are:"
|
||||
for e in "${ftlbranches[@]}"; do echo " - $e"; done
|
||||
exit 1
|
||||
fi
|
||||
echo -e " ${TICK} Branch ${2} exists on GitHub"
|
||||
|
||||
echo -e " ${INFO} Checking for ${COL_YELLOW}${binary}${COL_NC} binary on https://ftl.pi-hole.net"
|
||||
|
||||
if check_download_exists "$path"; then
|
||||
echo " ${TICK} Binary exists"
|
||||
echo "${2}" > /etc/pihole/ftlbranch
|
||||
chmod 644 /etc/pihole/ftlbranch
|
||||
echo -e " ${INFO} Switching to branch: ${COL_CYAN}${2}${COL_NC} from ${COL_CYAN}${oldbranch}${COL_NC}"
|
||||
FTLinstall "${binary}"
|
||||
restart_service pihole-FTL
|
||||
enable_service pihole-FTL
|
||||
str="Restarting FTL..."
|
||||
echo -ne " ${INFO} ${str}"
|
||||
# Wait until name resolution is working again after restarting FTL,
|
||||
# so that the updatechecker can run successfully and does not fail
|
||||
# trying to resolve github.com
|
||||
until getent hosts github.com &> /dev/null; do
|
||||
# Append one dot for each second waiting
|
||||
str="${str}."
|
||||
echo -ne " ${OVER} ${INFO} ${str}"
|
||||
sleep 1
|
||||
done
|
||||
echo -e " ${OVER} ${TICK} Restarted FTL service"
|
||||
|
||||
# Update local and remote versions via updatechecker
|
||||
/opt/pihole/updatecheck.sh
|
||||
else
|
||||
if [ $? -eq 1 ]; then
|
||||
# Binary for requested branch is not available, may still be
|
||||
# in the process of being built, or the CI build job failed
|
||||
printf " %b Binary for requested branch is not available, please try again later.\\n" ${CROSS}
|
||||
printf " If the issue persists, please contact Pi-hole Support and ask them to re-generate the binary.\\n"
|
||||
exit 1
|
||||
elif [ $? -eq 2 ]; then
|
||||
printf " %b Unable to download from ftl.pi-hole.net. Please check your Internet connection and try again later.\\n" "${CROSS}"
|
||||
exit 1
|
||||
else
|
||||
printf " %b Unknown checkout error. Please contact Pi-hole Support\\n" "${CROSS}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
else
|
||||
echo -e " ${INFO} Requested option \"${1}\" is not available"
|
||||
echo -e " ${CROSS} Requested option \"${1}\" is not available"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
|
|
@ -49,7 +49,6 @@ FAQ_HARDWARE_REQUIREMENTS="${COL_CYAN}https://docs.pi-hole.net/main/prerequisite
|
|||
FAQ_HARDWARE_REQUIREMENTS_PORTS="${COL_CYAN}https://docs.pi-hole.net/main/prerequisites/#ports${COL_NC}"
|
||||
FAQ_HARDWARE_REQUIREMENTS_FIREWALLD="${COL_CYAN}https://docs.pi-hole.net/main/prerequisites/#firewalld${COL_NC}"
|
||||
FAQ_GATEWAY="${COL_CYAN}https://discourse.pi-hole.net/t/why-is-a-default-gateway-important-for-pi-hole/3546${COL_NC}"
|
||||
FAQ_FTL_COMPATIBILITY="${COL_CYAN}https://github.com/pi-hole/FTL#compatibility-list${COL_NC}"
|
||||
|
||||
# Other URLs we may use
|
||||
FORUMS_URL="${COL_CYAN}https://discourse.pi-hole.net${COL_NC}"
|
||||
|
@ -64,10 +63,6 @@ PIHOLE_SCRIPTS_DIRECTORY="/opt/pihole"
|
|||
BIN_DIRECTORY="/usr/local/bin"
|
||||
RUN_DIRECTORY="/run"
|
||||
LOG_DIRECTORY="/var/log/pihole"
|
||||
WEB_SERVER_LOG_DIRECTORY="/var/log/lighttpd"
|
||||
WEB_SERVER_CONFIG_DIRECTORY="/etc/lighttpd"
|
||||
WEB_SERVER_CONFIG_DIRECTORY_FEDORA="${WEB_SERVER_CONFIG_DIRECTORY}/conf.d"
|
||||
WEB_SERVER_CONFIG_DIRECTORY_DEBIAN="${WEB_SERVER_CONFIG_DIRECTORY}/conf-enabled"
|
||||
HTML_DIRECTORY="/var/www/html"
|
||||
WEB_GIT_DIRECTORY="${HTML_DIRECTORY}/admin"
|
||||
SHM_DIRECTORY="/dev/shm"
|
||||
|
@ -77,49 +72,24 @@ ETC="/etc"
|
|||
# https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
|
||||
PIHOLE_CRON_FILE="${CRON_D_DIRECTORY}/pihole"
|
||||
|
||||
WEB_SERVER_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/lighttpd.conf"
|
||||
WEB_SERVER_CUSTOM_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/external.conf"
|
||||
WEB_SERVER_PIHOLE_CONFIG_FILE_DEBIAN="${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}/15-pihole-admin.conf"
|
||||
WEB_SERVER_PIHOLE_CONFIG_FILE_FEDORA="${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}/pihole-admin.conf"
|
||||
|
||||
PIHOLE_INSTALL_LOG_FILE="${PIHOLE_DIRECTORY}/install.log"
|
||||
PIHOLE_RAW_BLOCKLIST_FILES="${PIHOLE_DIRECTORY}/list.*"
|
||||
PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
|
||||
PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
|
||||
PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
|
||||
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
|
||||
PIHOLE_CUSTOM_HOSTS_FILE="${PIHOLE_DIRECTORY}/custom.list"
|
||||
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole.toml"
|
||||
PIHOLE_DNSMASQ_CONF_FILE="${PIHOLE_DIRECTORY}/dnsmasq.conf"
|
||||
PIHOLE_VERSIONS_FILE="${PIHOLE_DIRECTORY}/versions"
|
||||
|
||||
# Read the value of an FTL config key. The value is printed to stdout.
|
||||
#
|
||||
# Args:
|
||||
# 1. The key to read
|
||||
# 2. The default if the setting or config does not exist
|
||||
get_ftl_conf_value() {
|
||||
local key=$1
|
||||
local default=$2
|
||||
local value
|
||||
|
||||
# Obtain key=... setting from pihole-FTL.conf
|
||||
if [[ -e "$PIHOLE_FTL_CONF_FILE" ]]; then
|
||||
# Constructed to return nothing when
|
||||
# a) the setting is not present in the config file, or
|
||||
# b) the setting is commented out (e.g. "#DBFILE=...")
|
||||
value="$(sed -n -e "s/^\\s*$key=\\s*//p" ${PIHOLE_FTL_CONF_FILE})"
|
||||
fi
|
||||
|
||||
# Test for missing value. Use default value in this case.
|
||||
if [[ -z "$value" ]]; then
|
||||
value="$default"
|
||||
fi
|
||||
|
||||
echo "$value"
|
||||
# Obtain setting from FTL directly
|
||||
pihole-FTL --config "${key}"
|
||||
}
|
||||
|
||||
PIHOLE_GRAVITY_DB_FILE="$(get_ftl_conf_value "GRAVITYDB" "${PIHOLE_DIRECTORY}/gravity.db")"
|
||||
PIHOLE_GRAVITY_DB_FILE="$(get_ftl_conf_value "files.gravity")"
|
||||
|
||||
PIHOLE_FTL_DB_FILE="$(get_ftl_conf_value "DBFILE" "${PIHOLE_DIRECTORY}/pihole-FTL.db")"
|
||||
PIHOLE_FTL_DB_FILE="$(get_ftl_conf_value "files.database")"
|
||||
|
||||
PIHOLE_COMMAND="${BIN_DIRECTORY}/pihole"
|
||||
PIHOLE_COLTABLE_FILE="${BIN_DIRECTORY}/COL_TABLE"
|
||||
|
@ -129,29 +99,23 @@ FTL_PID="${RUN_DIRECTORY}/pihole-FTL.pid"
|
|||
PIHOLE_LOG="${LOG_DIRECTORY}/pihole.log"
|
||||
PIHOLE_LOG_GZIPS="${LOG_DIRECTORY}/pihole.log.[0-9].*"
|
||||
PIHOLE_DEBUG_LOG="${LOG_DIRECTORY}/pihole_debug.log"
|
||||
PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/FTL.log")"
|
||||
|
||||
PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access-pihole.log"
|
||||
PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error-pihole.log"
|
||||
PIHOLE_FTL_LOG="$(get_ftl_conf_value "files.log.ftl")"
|
||||
PIHOLE_WEBSERVER_LOG="$(get_ftl_conf_value "files.log.webserver")"
|
||||
|
||||
RESOLVCONF="${ETC}/resolv.conf"
|
||||
DNSMASQ_CONF="${ETC}/dnsmasq.conf"
|
||||
|
||||
# Store Pi-hole's processes in an array for easy use and parsing
|
||||
PIHOLE_PROCESSES=( "lighttpd" "pihole-FTL" )
|
||||
PIHOLE_PROCESSES=( "pihole-FTL" )
|
||||
|
||||
# Store the required directories in an array so it can be parsed through
|
||||
REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
|
||||
"${WEB_SERVER_CONFIG_FILE}"
|
||||
"${WEB_SERVER_CUSTOM_CONFIG_FILE}"
|
||||
"${WEB_SERVER_PIHOLE_CONFIG_FILE_DEBIAN}"
|
||||
"${WEB_SERVER_PIHOLE_CONFIG_FILE_FEDORA}"
|
||||
"${PIHOLE_INSTALL_LOG_FILE}"
|
||||
"${PIHOLE_RAW_BLOCKLIST_FILES}"
|
||||
"${PIHOLE_LOCAL_HOSTS_FILE}"
|
||||
"${PIHOLE_LOGROTATE_FILE}"
|
||||
"${PIHOLE_SETUP_VARS_FILE}"
|
||||
"${PIHOLE_FTL_CONF_FILE}"
|
||||
"${PIHOLE_DNSMASQ_CONF_FILE}"
|
||||
"${PIHOLE_COMMAND}"
|
||||
"${PIHOLE_COLTABLE_FILE}"
|
||||
"${FTL_PID}"
|
||||
|
@ -159,11 +123,9 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
|
|||
"${PIHOLE_LOG_GZIPS}"
|
||||
"${PIHOLE_DEBUG_LOG}"
|
||||
"${PIHOLE_FTL_LOG}"
|
||||
"${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}"
|
||||
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}"
|
||||
"${PIHOLE_WEBSERVER_LOG}"
|
||||
"${RESOLVCONF}"
|
||||
"${DNSMASQ_CONF}"
|
||||
"${PIHOLE_CUSTOM_HOSTS_FILE}"
|
||||
"${PIHOLE_VERSIONS_FILE}")
|
||||
|
||||
DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net.
|
||||
|
@ -177,20 +139,6 @@ show_disclaimer(){
|
|||
log_write "${DISCLAIMER}"
|
||||
}
|
||||
|
||||
source_setup_variables() {
|
||||
# Display the current test that is running
|
||||
log_write "\\n${COL_PURPLE}*** [ INITIALIZING ]${COL_NC} Sourcing setup variables"
|
||||
# If the variable file exists,
|
||||
if ls "${PIHOLE_SETUP_VARS_FILE}" 1> /dev/null 2>&1; then
|
||||
log_write "${INFO} Sourcing ${PIHOLE_SETUP_VARS_FILE}...";
|
||||
# source it
|
||||
source ${PIHOLE_SETUP_VARS_FILE}
|
||||
else
|
||||
# If it can't, show an error
|
||||
log_write "${PIHOLE_SETUP_VARS_FILE} ${COL_RED}does not exist or cannot be read.${COL_NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
make_temporary_log() {
|
||||
# Create a random temporary file for the log
|
||||
TEMPLOG=$(mktemp /tmp/pihole_temp.XXXXXX)
|
||||
|
@ -230,10 +178,8 @@ initialize_debug() {
|
|||
|
||||
# This is a function for visually displaying the current test that is being run.
|
||||
# Accepts one variable: the name of what is being diagnosed
|
||||
# Colors do not show in the dashboard, but the icons do: [i], [✓], and [✗]
|
||||
echo_current_diagnostic() {
|
||||
# Colors are used for visually distinguishing each test in the output
|
||||
# These colors do not show in the GUI, but the formatting will
|
||||
log_write "\\n${COL_PURPLE}*** [ DIAGNOSING ]:${COL_NC} ${1}"
|
||||
}
|
||||
|
||||
|
@ -302,17 +248,10 @@ compare_local_version_to_git_version() {
|
|||
return 1
|
||||
fi
|
||||
else
|
||||
# There is no git directory so check if the web interface was disabled
|
||||
local setup_vars_web_interface
|
||||
setup_vars_web_interface=$(< ${PIHOLE_SETUP_VARS_FILE} grep ^INSTALL_WEB_INTERFACE | cut -d '=' -f2)
|
||||
if [[ "${pihole_component}" == "Web" ]] && [[ "${setup_vars_web_interface}" == "false" ]]; then
|
||||
log_write "${INFO} ${pihole_component}: Disabled in setupVars.conf via INSTALL_WEB_INTERFACE=false"
|
||||
else
|
||||
# Return an error message
|
||||
log_write "${COL_RED}Directory ${git_dir} doesn't exist${COL_NC}"
|
||||
# and exit with a non zero code
|
||||
return 1
|
||||
fi
|
||||
# Return an error message
|
||||
log_write "${COL_RED}Directory ${git_dir} doesn't exist${COL_NC}"
|
||||
# and exit with a non zero code
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -351,39 +290,6 @@ check_component_versions() {
|
|||
check_ftl_version
|
||||
}
|
||||
|
||||
|
||||
get_program_version() {
|
||||
local program_name="${1}"
|
||||
# Create a local variable so this function can be safely reused
|
||||
local program_version
|
||||
echo_current_diagnostic "${program_name} version"
|
||||
# Evaluate the program we are checking, if it is any of the ones below, show the version
|
||||
case "${program_name}" in
|
||||
"lighttpd") program_version="$(${program_name} -v 2> /dev/null | head -n1 | cut -d '/' -f2 | cut -d ' ' -f1)"
|
||||
;;
|
||||
"php") program_version="$(${program_name} -v 2> /dev/null | head -n1 | cut -d '-' -f1 | cut -d ' ' -f2)"
|
||||
;;
|
||||
# If a match is not found, show an error
|
||||
*) echo "Unrecognized program";
|
||||
esac
|
||||
# If the program does not have a version (the variable is empty)
|
||||
if [[ -z "${program_version}" ]]; then
|
||||
# Display an error
|
||||
log_write "${CROSS} ${COL_RED}${program_name} version could not be detected.${COL_NC}"
|
||||
else
|
||||
# Otherwise, display the version
|
||||
log_write "${INFO} ${program_version}"
|
||||
fi
|
||||
}
|
||||
|
||||
# These are the most critical dependencies of Pi-hole, so we check for them
|
||||
# and their versions, using the functions above.
|
||||
check_critical_program_versions() {
|
||||
# Use the function created earlier and bundle them into one function that checks all the version numbers
|
||||
get_program_version "lighttpd"
|
||||
get_program_version "php"
|
||||
}
|
||||
|
||||
os_check() {
|
||||
# This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
|
||||
# and determines whether or not the script is running on one of those systems
|
||||
|
@ -393,7 +299,7 @@ os_check() {
|
|||
detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
|
||||
detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
|
||||
|
||||
cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
|
||||
cmdResult="$(dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
|
||||
#Get the return code of the previous command (last line)
|
||||
digReturnCode="${cmdResult##*$'\n'}"
|
||||
|
||||
|
@ -403,7 +309,20 @@ os_check() {
|
|||
if [ "${digReturnCode}" -ne 0 ]; then
|
||||
log_write "${INFO} Distro: ${detected_os^}"
|
||||
log_write "${INFO} Version: ${detected_version}"
|
||||
log_write "${CROSS} dig return code: ${COL_RED}${digReturnCode}${COL_NC}"
|
||||
log_write "${CROSS} dig IPv4 return code: ${COL_RED}${digReturnCode}${COL_NC}"
|
||||
log_write "${CROSS} dig response: ${response}"
|
||||
log_write "${INFO} Retrying via IPv6"
|
||||
|
||||
cmdResult="$(dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
|
||||
#Get the return code of the previous command (last line)
|
||||
digReturnCode="${cmdResult##*$'\n'}"
|
||||
|
||||
# Extract dig response
|
||||
response="${cmdResult%%$'\n'*}"
|
||||
fi
|
||||
# If also no success via IPv6
|
||||
if [ "${digReturnCode}" -ne 0 ]; then
|
||||
log_write "${CROSS} dig IPv6 return code: ${COL_RED}${digReturnCode}${COL_NC}"
|
||||
log_write "${CROSS} dig response: ${response}"
|
||||
log_write "${CROSS} Error: ${COL_RED}dig command failed - Unable to check OS${COL_NC}"
|
||||
else
|
||||
|
@ -451,7 +370,7 @@ os_check() {
|
|||
}
|
||||
|
||||
diagnose_operating_system() {
|
||||
# error message in a variable so we can easily modify it later (or re-use it)
|
||||
# error message in a variable so we can easily modify it later (or reuse it)
|
||||
local error_msg="Distribution unknown -- most likely you are on an unsupported platform and may run into issues."
|
||||
# Display the current test that is running
|
||||
echo_current_diagnostic "Operating system"
|
||||
|
@ -551,34 +470,34 @@ check_firewalld() {
|
|||
fi
|
||||
}
|
||||
|
||||
processor_check() {
|
||||
echo_current_diagnostic "Processor"
|
||||
# Store the processor type in a variable
|
||||
PROCESSOR=$(uname -m)
|
||||
# If it does not contain a value,
|
||||
if [[ -z "${PROCESSOR}" ]]; then
|
||||
# we couldn't detect it, so show an error
|
||||
PROCESSOR=$(lscpu | awk '/Architecture/ {print $2}')
|
||||
log_write "${CROSS} ${COL_RED}${PROCESSOR}${COL_NC} has not been tested with FTL, but may still work: (${FAQ_FTL_COMPATIBILITY})"
|
||||
run_and_print_command() {
|
||||
# Run the command passed as an argument
|
||||
local cmd="${1}"
|
||||
# Show the command that is being run
|
||||
log_write "${INFO} ${cmd}"
|
||||
# Run the command and store the output in a variable
|
||||
local output
|
||||
output=$(${cmd} 2>&1)
|
||||
# If the command was successful,
|
||||
if [[ $? -eq 0 ]]; then
|
||||
# show the output
|
||||
log_write "${output}"
|
||||
else
|
||||
# Check if the architecture is currently supported for FTL
|
||||
case "${PROCESSOR}" in
|
||||
"amd64" | "x86_64") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
|
||||
;;
|
||||
"armv6l") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
|
||||
;;
|
||||
"armv6") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
|
||||
;;
|
||||
"armv7l") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
|
||||
;;
|
||||
"aarch64") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
|
||||
;;
|
||||
# Otherwise, show the processor type
|
||||
*) log_write "${INFO} ${PROCESSOR}";
|
||||
esac
|
||||
# otherwise, show an error
|
||||
log_write "${CROSS} ${COL_RED}Command failed${COL_NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
hardware_check() {
|
||||
echo_current_diagnostic "System hardware configuration"
|
||||
# Store the output of the command in a variable
|
||||
run_and_print_command "lshw -short"
|
||||
|
||||
echo_current_diagnostic "Processor details"
|
||||
# Store the output of the command in a variable
|
||||
run_and_print_command "lscpu"
|
||||
}
|
||||
|
||||
disk_usage() {
|
||||
local file_system
|
||||
local hide
|
||||
|
@ -600,18 +519,6 @@ disk_usage() {
|
|||
done
|
||||
}
|
||||
|
||||
parse_setup_vars() {
|
||||
echo_current_diagnostic "Setup variables"
|
||||
# If the file exists,
|
||||
if [[ -r "${PIHOLE_SETUP_VARS_FILE}" ]]; then
|
||||
# parse it
|
||||
parse_file "${PIHOLE_SETUP_VARS_FILE}"
|
||||
else
|
||||
# If not, show an error
|
||||
log_write "${CROSS} ${COL_RED}Could not read ${PIHOLE_SETUP_VARS_FILE}.${COL_NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
parse_locale() {
|
||||
local pihole_locale
|
||||
echo_current_diagnostic "Locale"
|
||||
|
@ -619,33 +526,6 @@ parse_locale() {
|
|||
parse_file "${pihole_locale}"
|
||||
}
|
||||
|
||||
detect_ip_addresses() {
|
||||
# First argument should be a 4 or a 6
|
||||
local protocol=${1}
|
||||
# Use ip to show the addresses for the chosen protocol
|
||||
# Store the values in an array so they can be looped through
|
||||
# Get the lines that are in the file(s) and store them in an array for parsing later
|
||||
mapfile -t ip_addr_list < <(ip -"${protocol}" addr show dev "${PIHOLE_INTERFACE}" | awk -F ' ' '{ for(i=1;i<=NF;i++) if ($i ~ '/^inet/') print $(i+1) }')
|
||||
|
||||
# If there is something in the IP address list,
|
||||
if [[ -n ${ip_addr_list[*]} ]]; then
|
||||
# Local iterator
|
||||
local i
|
||||
# Display the protocol and interface
|
||||
log_write "${TICK} IPv${protocol} address(es) bound to the ${PIHOLE_INTERFACE} interface:"
|
||||
# Since there may be more than one IP address, store them in an array
|
||||
for i in "${!ip_addr_list[@]}"; do
|
||||
log_write " ${ip_addr_list[$i]}"
|
||||
done
|
||||
# Print a blank line just for formatting
|
||||
log_write ""
|
||||
else
|
||||
# If there are no IPs detected, explain that the protocol is not configured
|
||||
log_write "${CROSS} ${COL_RED}No IPv${protocol} address(es) found on the ${PIHOLE_INTERFACE}${COL_NC} interface.\\n"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
ping_ipv4_or_ipv6() {
|
||||
# Give the first argument a readable name (a 4 or a six should be the argument)
|
||||
local protocol="${1}"
|
||||
|
@ -668,23 +548,30 @@ ping_gateway() {
|
|||
ping_ipv4_or_ipv6 "${protocol}"
|
||||
# Check if we are using IPv4 or IPv6
|
||||
# Find the default gateways using IPv4 or IPv6
|
||||
local gateway
|
||||
local gateway gateway_addr gateway_iface
|
||||
|
||||
log_write "${INFO} Default IPv${protocol} gateway(s):"
|
||||
|
||||
while IFS= read -r gateway; do
|
||||
log_write " ${gateway}"
|
||||
done < <(ip -"${protocol}" route | grep default | grep "${PIHOLE_INTERFACE}" | cut -d ' ' -f 3)
|
||||
log_write " $(cut -d ' ' -f 3 <<< "${gateway}")%$(cut -d ' ' -f 5 <<< "${gateway}")"
|
||||
done < <(ip -"${protocol}" route | grep default)
|
||||
|
||||
gateway=$(ip -"${protocol}" route | grep default | grep "${PIHOLE_INTERFACE}" | cut -d ' ' -f 3 | head -n 1)
|
||||
gateway_addr=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 3 | head -n 1)
|
||||
gateway_iface=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 5 | head -n 1)
|
||||
# If there was at least one gateway
|
||||
if [ -n "${gateway}" ]; then
|
||||
if [ -n "${gateway_addr}" ]; then
|
||||
# Append the interface to the gateway address if it is a link-local address
|
||||
if [[ "${gateway_addr}" =~ ^fe80 ]]; then
|
||||
gateway="${gateway_addr}%${gateway_iface}"
|
||||
else
|
||||
gateway="${gateway_addr}"
|
||||
fi
|
||||
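# (Illustrative) For a hypothetical link-local default route via fe80::1 on eth0,
# the ping target becomes "fe80::1%eth0"; a global gateway such as 192.168.1.1 is
# used unchanged.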
# Let the user know we will ping the gateway for a response
|
||||
log_write " * Pinging first gateway ${gateway}..."
|
||||
# Try to quietly ping the gateway 3 times, with a timeout of 3 seconds, using numeric output only,
|
||||
# on the pihole interface, and tail the last three lines of the output
|
||||
# If pinging the gateway is not successful,
|
||||
if ! ${cmd} -c 1 -W 2 -n "${gateway}" -I "${PIHOLE_INTERFACE}" >/dev/null; then
|
||||
if ! ${cmd} -c 1 -W 2 -n "${gateway}" >/dev/null; then
|
||||
# let the user know
|
||||
log_write "${CROSS} ${COL_RED}Gateway did not respond.${COL_NC} ($FAQ_GATEWAY)\\n"
|
||||
# and return an error code
|
||||
|
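The reworked ping_gateway() appends the interface name to link-local gateways before pinging, since fe80:: addresses are only valid per link. A minimal sketch of the same idea for IPv6, assuming iputils ping (the script's ${cmd} wrapper is replaced by a plain ping -6 here):

# Illustrative only: pick the first default IPv6 gateway and make it pingable
gateway_addr=$(ip -6 route | grep default | cut -d ' ' -f 3 | head -n 1)
gateway_iface=$(ip -6 route | grep default | cut -d ' ' -f 5 | head -n 1)
# fe80::/10 addresses need the zone index appended before ping/dig can use them
if [[ "${gateway_addr}" =~ ^fe80 ]]; then
    gateway="${gateway_addr}%${gateway_iface}"
else
    gateway="${gateway_addr}"
fi
ping -6 -c 1 -W 2 -n "${gateway}"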
@@ -737,10 +624,8 @@ compare_port_to_service_assigned() {
|
|||
|
||||
check_required_ports() {
|
||||
echo_current_diagnostic "Ports in use"
|
||||
# Since Pi-hole needs 53, 80, and 4711, check what they are being used by
|
||||
# Since Pi-hole needs various ports, check what they are being used by
|
||||
# so we can detect any issues
|
||||
local resolver="pihole-FTL"
|
||||
local web_server="lighttpd"
|
||||
local ftl="pihole-FTL"
|
||||
# Create an array for these ports in use
|
||||
ports_in_use=()
|
||||
|
@@ -749,6 +634,15 @@ check_required_ports() {
|
|||
ports_in_use+=( "$line" )
|
||||
done < <( ss --listening --numeric --tcp --udp --processes --no-header )
|
||||
|
||||
local ports_configured
|
||||
# Get all configured ports
|
||||
ports_configured="$(pihole-FTL --config "webserver.port")"
|
||||
# Remove all non-digits, split into an array at ","
|
||||
ports_configured="${ports_configured//[!0-9,]/}"
|
||||
mapfile -d "," -t ports_configured < <(echo "${ports_configured}")
|
||||
# Add port 53
|
||||
ports_configured+=("53")
|
||||
|
||||
# Now that we have the values stored,
|
||||
for i in "${!ports_in_use[@]}"; do
|
||||
# loop through them and assign some local variables
|
||||
|
@@ -759,17 +653,13 @@ check_required_ports() {
|
|||
local port_number
|
||||
port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
|
||||
|
||||
# Use a case statement to determine if the right services are using the right ports
|
||||
case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in
|
||||
53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
# Check if the right services are using the right ports
|
||||
if [[ ${ports_configured[*]} =~ $(echo "${port_number}" | rev | cut -d: -f1 | rev) ]]; then
|
||||
compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
else
|
||||
# If it's not a default port that Pi-hole needs, just print it out for the user to see
|
||||
*) log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
|
||||
esac
|
||||
log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
|
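check_required_ports() now derives the expected ports from FTL's webserver.port setting instead of hard-coding 80 and 4711. A small sketch of that parsing step with a hypothetical config value (suffix letters and bracketed addresses are stripped, then port 53 is appended):

ports_raw="80o,443os,[::]:8080"             # hypothetical output of `pihole-FTL --config webserver.port`
ports_raw="${ports_raw//[!0-9,]/}"          # keep only digits and commas -> "80,443,8080"
mapfile -d "," -t ports < <(printf '%s' "${ports_raw}")
ports+=("53")                               # DNS is always expected
printf 'expected port: %s\n' "${ports[@]}"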
@@ -792,8 +682,6 @@ check_networking() {
|
|||
# Runs through several of the functions made earlier; we just clump them
|
||||
# together since they are all related to the networking aspect of things
|
||||
echo_current_diagnostic "Networking"
|
||||
detect_ip_addresses "4"
|
||||
detect_ip_addresses "6"
|
||||
ping_gateway "4"
|
||||
ping_gateway "6"
|
||||
# Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required
|
||||
|
@@ -801,35 +689,6 @@ check_networking() {
|
|||
[ -z "${DOCKER_VERSION}" ] && check_required_ports
|
||||
}
|
||||
|
||||
check_x_headers() {
|
||||
# The X-Headers allow us to determine from the command line if the Web
|
||||
# lighttpd.conf has a directive to show "X-Pi-hole: A black hole for Internet advertisements."
|
||||
# in the header of any Pi-holed domain
|
||||
# Similarly, it will show "X-Pi-hole: The Pi-hole Web interface is working!" if you view the header returned
|
||||
# when accessing the dashboard (i.e curl -I pi.hole/admin/)
|
||||
# server is operating correctly
|
||||
echo_current_diagnostic "Dashboard headers"
|
||||
# Use curl -I to get the header and parse out just the X-Pi-hole one
|
||||
local full_curl_output_dashboard
|
||||
local dashboard
|
||||
full_curl_output_dashboard="$(curl -Is localhost/admin/)"
|
||||
dashboard=$(echo "${full_curl_output_dashboard}" | awk '/X-Pi-hole/' | tr -d '\r')
|
||||
# Store what the X-Header should be in variables for comparison later
|
||||
local dashboard_working
|
||||
dashboard_working="X-Pi-hole: The Pi-hole Web interface is working!"
|
||||
|
||||
# If the X-Header matches what a working system should have,
|
||||
if [[ $dashboard == "$dashboard_working" ]]; then
|
||||
# then we can show a success
|
||||
log_write "$TICK Web interface X-Header: ${COL_GREEN}${dashboard}${COL_NC}"
|
||||
else
|
||||
# Otherwise, it's a failure since the X-Headers either don't exist or have been modified in some way
|
||||
log_write "$CROSS Web interface X-Header: ${COL_RED}X-Header does not match or could not be retrieved.${COL_NC}"
|
||||
|
||||
log_write "${COL_RED}${full_curl_output_dashboard}${COL_NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
dig_at() {
|
||||
# We need to test if Pi-hole can properly resolve domain names
|
||||
# as it is an essential piece of the software
|
||||
|
@@ -863,11 +722,15 @@ dig_at() {
|
|||
local record_type="A"
|
||||
fi
|
||||
|
||||
# Find a random blocked url that has not been whitelisted.
|
||||
# Find a random blocked url that has not been whitelisted and is not ABP style.
|
||||
# This helps emulate queries to different domains that a user might query
|
||||
# It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
|
||||
local random_url
|
||||
random_url=$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
|
||||
random_url=$(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity WHERE domain not like '||%^' ORDER BY RANDOM() LIMIT 1")
|
||||
# Fallback if no non-ABP style domains were found
|
||||
if [ -z "${random_url}" ]; then
|
||||
random_url="flurry.com"
|
||||
fi
|
||||
|
||||
# Next we need to check if Pi-hole can resolve a domain when the query is sent to its IP address
|
||||
# This better emulates how clients will interact with Pi-hole as opposed to above where Pi-hole is
|
||||
|
@@ -902,15 +765,29 @@ dig_at() {
|
|||
# Removes CIDR and everything thereafter (e.g., scope properties)
|
||||
addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")"
|
||||
if [ -n "${addresses}" ]; then
|
||||
while IFS= read -r local_address ; do
|
||||
while IFS= read -r local_address ; do
|
||||
# If ${local_address} is an IPv6 link-local address, append the interface name to it
|
||||
if [[ "${local_address}" =~ ^fe80 ]]; then
|
||||
local_address="${local_address}%${iface}"
|
||||
fi
|
||||
|
||||
# Check if Pi-hole can use itself to block a domain
|
||||
if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" +short "${record_type}"); then
|
||||
# If it can, show success
|
||||
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
|
||||
else
|
||||
# Otherwise, show a failure
|
||||
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
|
||||
fi
|
||||
if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}")"; then
|
||||
# If it can, show success
|
||||
if [[ "${local_dig}" == *"status: NOERROR"* ]]; then
|
||||
local_dig="NOERROR"
|
||||
elif [[ "${local_dig}" == *"status: NXDOMAIN"* ]]; then
|
||||
local_dig="NXDOMAIN"
|
||||
else
|
||||
# Extract the first entry in the answer section from dig's output,
|
||||
# replacing any multiple spaces and tabs with a single space
|
||||
local_dig="$(echo "${local_dig}" | grep -A1 "ANSWER SECTION" | grep -v "ANSWER SECTION" | tr -s " \t" " ")"
|
||||
fi
|
||||
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
|
||||
else
|
||||
# Otherwise, show a failure
|
||||
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
|
||||
fi
|
||||
done <<< "${addresses}"
|
||||
else
|
||||
log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}"
|
||||
|
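dig_at() now keeps dig's full output and reduces it to NOERROR, NXDOMAIN, or the first answer line. A standalone sketch of that classification, assuming dig is installed and a resolver is listening on 127.0.0.1 (flurry.com is the script's own fallback domain):

reply="$(dig +tries=1 +time=2 -4 "flurry.com" @127.0.0.1 A)"
if [[ "${reply}" == *"status: NOERROR"* ]]; then
    echo "NOERROR"
elif [[ "${reply}" == *"status: NXDOMAIN"* ]]; then
    echo "NXDOMAIN"
else
    # any other status: fall back to the first answer line, collapsing runs of whitespace
    echo "${reply}" | grep -A1 "ANSWER SECTION" | grep -v "ANSWER SECTION" | tr -s " \t" " "
fi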
@@ -983,20 +860,6 @@ ftl_full_status(){
|
|||
fi
|
||||
}
|
||||
|
||||
lighttpd_test_configuration(){
|
||||
# let lighttpd test it's own configuration
|
||||
local lighttpd_conf_test
|
||||
echo_current_diagnostic "Lighttpd configuration test"
|
||||
lighttpd_conf_test=$(lighttpd -tt -f /etc/lighttpd/lighttpd.conf)
|
||||
if [ -z "${lighttpd_conf_test}" ]; then
|
||||
# empty output
|
||||
log_write "${TICK} ${COL_GREEN}No error in lighttpd configuration${COL_NC}"
|
||||
else
|
||||
log_write "${CROSS} ${COL_RED}Error in lighttpd configuration${COL_NC}"
|
||||
log_write " ${lighttpd_conf_test}"
|
||||
fi
|
||||
}
|
||||
|
||||
make_array_from_file() {
|
||||
local filename="${1}"
|
||||
# The second argument can put a limit on how many line should be read from the file
|
||||
|
@@ -1004,8 +867,6 @@ make_array_from_file() {
|
|||
local limit=${2}
|
||||
# A local iterator for testing if we are at the limit above
|
||||
local i=0
|
||||
# Set the array to be empty so we can start fresh when the function is used
|
||||
local file_content=()
|
||||
# If the file is a directory
|
||||
if [[ -d "${filename}" ]]; then
|
||||
# do nothing since it cannot be parsed
|
||||
|
@@ -1017,11 +878,14 @@ make_array_from_file() {
|
|||
new_line=$(echo "${line}" | sed -e 's/^\s*#.*$//' -e '/^$/d')
|
||||
# If the line still has content (a non-zero value)
|
||||
if [[ -n "${new_line}" ]]; then
|
||||
# Put it into the array
|
||||
file_content+=("${new_line}")
|
||||
else
|
||||
# Otherwise, it's a blank line or comment, so do nothing
|
||||
:
|
||||
|
||||
# If the string contains "### CHANGED", highlight this part in red
|
||||
if [[ "${new_line}" == *"### CHANGED"* ]]; then
|
||||
new_line="${new_line//### CHANGED/${COL_RED}### CHANGED${COL_NC}}"
|
||||
fi
|
||||
|
||||
# Finally, write this line to the log
|
||||
log_write " ${new_line}"
|
||||
fi
|
||||
# Increment the iterator +1
|
||||
i=$((i+1))
|
||||
|
@@ -1033,12 +897,6 @@ make_array_from_file() {
|
|||
break
|
||||
fi
|
||||
done < "${filename}"
|
||||
# Now the we have made an array of the file's content
|
||||
for each_line in "${file_content[@]}"; do
|
||||
# Print each line
|
||||
# At some point, we may want to check the file line-by-line, so that's the reason for an array
|
||||
log_write " ${each_line}"
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
|
@@ -1060,8 +918,10 @@ parse_file() {
|
|||
# For each line in the file,
|
||||
for file_lines in "${file_info[@]}"; do
|
||||
if [[ -n "${file_lines}" ]]; then
|
||||
# don't include the Web password hash
|
||||
[[ "${file_lines}" =~ ^\#.*$ || ! "${file_lines}" || "${file_lines}" == "WEBPASSWORD="* ]] && continue
|
||||
# skip empty and comment lines
|
||||
[[ "${file_lines}" =~ ^[[:space:]]*\#.*$ || ! "${file_lines}" ]] && continue
|
||||
# remove the password hash from the output (*"pwhash = "*)
|
||||
[[ "${file_lines}" == *"pwhash ="* ]] && file_lines=$(echo "${file_lines}" | sed -e 's/\(pwhash = \).*/\1<removed>/')
|
||||
# otherwise, display the lines of the file
|
||||
log_write " ${file_lines}"
|
||||
fi
|
||||
|
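parse_file() now redacts the web password hash in place instead of dropping the whole line. A quick check of the sed expression used above, with an invented hash string:

echo 'pwhash = $BALLOON-SHA256$v=1$s=1024,t=32$c2FsdA==$aGFzaA==' | sed -e 's/\(pwhash = \).*/\1<removed>/'
# prints: pwhash = <removed>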
@@ -1108,12 +968,6 @@ list_files_in_dir() {
|
|||
if [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then
|
||||
# SHM file - we do not want to see the content, but we want to see the files and their sizes
|
||||
log_write "$(ls -lh "${dir_to_parse}/")"
|
||||
elif [[ "${dir_to_parse}" == "${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}" ]]; then
|
||||
# we want to see all files files in /etc/lighttpd/conf.d
|
||||
log_write "$(ls -lh "${dir_to_parse}/" 2> /dev/null )"
|
||||
elif [[ "${dir_to_parse}" == "${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}" ]]; then
|
||||
# we want to see all files files in /etc/lighttpd/conf.d
|
||||
log_write "$(ls -lh "${dir_to_parse}/"/ 2> /dev/null )"
|
||||
fi
|
||||
|
||||
# Store the files found in an array
|
||||
|
@@ -1126,9 +980,7 @@ list_files_in_dir() {
|
|||
elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_SETUP_VARS_FILE}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}" ]] || \
|
||||
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG_GZIPS}" ]]; then
|
||||
:
|
||||
elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then
|
||||
|
@@ -1143,8 +995,8 @@ list_files_in_dir() {
|
|||
log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
|
||||
# Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing)
|
||||
case "${dir_to_parse}/${each_file}" in
|
||||
# If it's Web server error log, give the first and last 25 lines
|
||||
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}") head_tail_log "${dir_to_parse}/${each_file}" 25
|
||||
# If it's Web server log, give the first and last 25 lines
|
||||
"${PIHOLE_WEBSERVER_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 25
|
||||
;;
|
||||
# Same for the FTL log
|
||||
"${PIHOLE_FTL_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 35
|
||||
|
@@ -1175,11 +1027,7 @@ show_content_of_pihole_files() {
|
|||
# Show the content of the files in each of Pi-hole's folders
|
||||
show_content_of_files_in_dir "${PIHOLE_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${DNSMASQ_D_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}"
|
||||
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}"
|
||||
show_content_of_files_in_dir "${CRON_D_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${WEB_SERVER_LOG_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${LOG_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${SHM_DIRECTORY}"
|
||||
show_content_of_files_in_dir "${ETC}"
|
||||
|
@@ -1224,7 +1072,7 @@ show_db_entries() {
|
|||
IFS=$'\r\n'
|
||||
local entries=()
|
||||
mapfile -t entries < <(\
|
||||
pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
|
||||
pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" \
|
||||
-cmd ".headers on" \
|
||||
-cmd ".mode column" \
|
||||
-cmd ".width ${widths}" \
|
||||
|
@@ -1249,7 +1097,7 @@ show_FTL_db_entries() {
|
|||
IFS=$'\r\n'
|
||||
local entries=()
|
||||
mapfile -t entries < <(\
|
||||
pihole-FTL sqlite3 "${PIHOLE_FTL_DB_FILE}" \
|
||||
pihole-FTL sqlite3 -ni "${PIHOLE_FTL_DB_FILE}" \
|
||||
-cmd ".headers on" \
|
||||
-cmd ".mode column" \
|
||||
-cmd ".width ${widths}" \
|
||||
|
@@ -1315,7 +1163,7 @@ analyze_gravity_list() {
|
|||
fi
|
||||
|
||||
show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
|
||||
gravity_updated_raw="$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
|
||||
gravity_updated_raw="$(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
|
||||
gravity_updated="$(date -d @"${gravity_updated_raw}")"
|
||||
log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
|
||||
log_write ""
|
||||
|
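These dumps all go through the SQLite shell bundled with pihole-FTL; the -cmd dot-commands are what produce the headed, column-aligned output. The same formatting can be previewed with a stock sqlite3 binary against gravity.db (the info table and its 'updated' property are the ones queried above):

sqlite3 /etc/pihole/gravity.db \
    -cmd ".headers on" \
    -cmd ".mode column" \
    "SELECT property, value FROM info"
# Convert the stored 'updated' epoch into a readable date, as the script does
date -d @"$(sqlite3 /etc/pihole/gravity.db "SELECT value FROM info WHERE property = 'updated'")"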
@@ -1323,7 +1171,7 @@ analyze_gravity_list() {
|
|||
OLD_IFS="$IFS"
|
||||
IFS=$'\r\n'
|
||||
local gravity_sample=()
|
||||
mapfile -t gravity_sample < <(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
|
||||
mapfile -t gravity_sample < <(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
|
||||
log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
|
||||
|
||||
for line in "${gravity_sample[@]}"; do
|
||||
|
@@ -1355,7 +1203,7 @@ database_integrity_check(){
|
|||
|
||||
log_write "${INFO} Checking foreign key constraints of ${database} ... (this can take several minutes)"
|
||||
unset result
|
||||
result="$(pihole-FTL sqlite3 "${database}" -cmd ".headers on" -cmd ".mode column" "PRAGMA foreign_key_check" 2>&1 & spinner)"
|
||||
result="$(pihole-FTL sqlite3 -ni "${database}" -cmd ".headers on" -cmd ".mode column" "PRAGMA foreign_key_check" 2>&1 & spinner)"
|
||||
if [[ -z ${result} ]]; then
|
||||
log_write "${TICK} No foreign key errors in ${database}"
|
||||
else
|
||||
|
@@ -1416,10 +1264,10 @@ spinner(){
|
|||
analyze_pihole_log() {
|
||||
echo_current_diagnostic "Pi-hole log"
|
||||
local pihole_log_permissions
|
||||
local logging_enabled
|
||||
local queryLogging
|
||||
|
||||
logging_enabled=$(grep -c "^log-queries" /etc/dnsmasq.d/01-pihole.conf)
|
||||
if [[ "${logging_enabled}" == "0" ]]; then
|
||||
queryLogging="$(get_ftl_conf_value "dns.queryLogging")"
|
||||
if [[ "${queryLogging}" == "false" ]]; then
|
||||
# Inform user that logging has been disabled and pihole.log does not contain queries
|
||||
log_write "${INFO} Query logging is disabled"
|
||||
log_write ""
|
||||
|
@@ -1501,7 +1349,7 @@ upload_to_tricorder() {
|
|||
# If no token was generated
|
||||
else
|
||||
# Show an error and some help instructions
|
||||
# Skip this if being called from web interface and autmatic mode was not chosen (users opt-out to upload)
|
||||
# Skip this if being called from web interface and automatic mode was not chosen (users opt-out to upload)
|
||||
if [[ "${WEBCALL}" ]] && [[ ! "${AUTOMATED}" ]]; then
|
||||
:
|
||||
else
|
||||
|
@@ -1516,15 +1364,12 @@ upload_to_tricorder() {
|
|||
# Run through all the functions we made
|
||||
make_temporary_log
|
||||
initialize_debug
|
||||
# setupVars.conf needs to be sourced before the networking so the values are
|
||||
# available to the other functions
|
||||
source_setup_variables
|
||||
check_component_versions
|
||||
check_critical_program_versions
|
||||
# check_critical_program_versions
|
||||
diagnose_operating_system
|
||||
check_selinux
|
||||
check_firewalld
|
||||
processor_check
|
||||
hardware_check
|
||||
disk_usage
|
||||
check_ip_command
|
||||
check_networking
|
||||
|
@@ -1532,9 +1377,6 @@ check_name_resolution
|
|||
check_dhcp_servers
|
||||
process_status
|
||||
ftl_full_status
|
||||
lighttpd_test_configuration
|
||||
parse_setup_vars
|
||||
check_x_headers
|
||||
analyze_ftl_db
|
||||
analyze_gravity_list
|
||||
show_groups
|
||||
|
|
|
@@ -11,32 +11,39 @@
|
|||
colfile="/opt/pihole/COL_TABLE"
|
||||
source ${colfile}
|
||||
|
||||
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
# In case we're running at the same time as a system logrotate, use a
|
||||
# separate logrotate state file to prevent stepping on each other's
|
||||
# toes.
|
||||
STATEFILE="/var/lib/logrotate/pihole"
|
||||
|
||||
# Determine database location
|
||||
# Obtain DBFILE=... setting from pihole-FTL.db
|
||||
# Constructed to return nothing when
|
||||
# a) the setting is not present in the config file, or
|
||||
# b) the setting is commented out (e.g. "#DBFILE=...")
|
||||
FTLconf="/etc/pihole/pihole-FTL.conf"
|
||||
if [ -e "$FTLconf" ]; then
|
||||
DBFILE="$(sed -n -e 's/^\s*DBFILE\s*=\s*//p' ${FTLconf})"
|
||||
fi
|
||||
# Test for empty string. Use standard path in this case.
|
||||
DBFILE=$(getFTLConfigValue "files.database")
|
||||
if [ -z "$DBFILE" ]; then
|
||||
DBFILE="/etc/pihole/pihole-FTL.db"
|
||||
fi
|
||||
|
||||
if [[ "$@" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Flushing /var/log/pihole/pihole.log ..."
|
||||
# Determine log file location
|
||||
LOGFILE=$(getFTLConfigValue "files.log.dnsmasq")
|
||||
if [ -z "$LOGFILE" ]; then
|
||||
LOGFILE="/var/log/pihole/pihole.log"
|
||||
fi
|
||||
if [[ "$@" == *"once"* ]]; then
|
||||
FTLFILE=$(getFTLConfigValue "files.log.ftl")
|
||||
if [ -z "$FTLFILE" ]; then
|
||||
FTLFILE="/var/log/pihole/FTL.log"
|
||||
fi
|
||||
|
||||
if [[ "$*" == *"once"* ]]; then
|
||||
# Nightly logrotation
|
||||
if command -v /usr/sbin/logrotate >/dev/null; then
|
||||
# Logrotate once
|
||||
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Running logrotate ..."
|
||||
fi
|
||||
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
|
||||
else
|
||||
# Copy pihole.log over to pihole.log.1
|
||||
|
@@ -44,32 +51,72 @@ if [[ "$@" == *"once"* ]]; then
|
|||
# Note that moving the file is not an option, as
|
||||
# dnsmasq would happily continue writing into the
|
||||
# moved file (it will have the same file handler)
|
||||
cp -p /var/log/pihole/pihole.log /var/log/pihole/pihole.log.1
|
||||
echo " " > /var/log/pihole/pihole.log
|
||||
chmod 640 /var/log/pihole/pihole.log
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Rotating ${LOGFILE} ..."
|
||||
fi
|
||||
cp -p "${LOGFILE}" "${LOGFILE}.1"
|
||||
echo " " > "${LOGFILE}"
|
||||
chmod 640 "${LOGFILE}"
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Rotated ${LOGFILE} ..."
|
||||
fi
|
||||
# Copy FTL.log over to FTL.log.1
|
||||
# and empty out FTL.log
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Rotating ${FTLFILE} ..."
|
||||
fi
|
||||
cp -p "${FTLFILE}" "${FTLFILE}.1"
|
||||
echo " " > "${FTLFILE}"
|
||||
chmod 640 "${FTLFILE}"
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Rotated ${FTLFILE} ..."
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Manual flushing
|
||||
if command -v /usr/sbin/logrotate >/dev/null; then
|
||||
# Logrotate twice to move all data out of sight of FTL
|
||||
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate; sleep 3
|
||||
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
|
||||
else
|
||||
# Flush both pihole.log and pihole.log.1 (if existing)
|
||||
echo " " > /var/log/pihole/pihole.log
|
||||
if [ -f /var/log/pihole/pihole.log.1 ]; then
|
||||
echo " " > /var/log/pihole/pihole.log.1
|
||||
chmod 640 /var/log/pihole/pihole.log.1
|
||||
fi
|
||||
|
||||
# Flush both pihole.log and pihole.log.1 (if existing)
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Flushing ${LOGFILE} ..."
|
||||
fi
|
||||
echo " " > "${LOGFILE}"
|
||||
chmod 640 "${LOGFILE}"
|
||||
if [ -f "${LOGFILE}.1" ]; then
|
||||
echo " " > "${LOGFILE}.1"
|
||||
chmod 640 "${LOGFILE}.1"
|
||||
fi
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Flushed ${LOGFILE} ..."
|
||||
fi
|
||||
|
||||
# Flush both FTL.log and FTL.log.1 (if existing)
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Flushing ${FTLFILE} ..."
|
||||
fi
|
||||
echo " " > "${FTLFILE}"
|
||||
chmod 640 "${FTLFILE}"
|
||||
if [ -f "${FTLFILE}.1" ]; then
|
||||
echo " " > "${FTLFILE}.1"
|
||||
chmod 640 "${FTLFILE}.1"
|
||||
fi
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Flushed ${FTLFILE} ..."
|
||||
fi
|
||||
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -ne " ${INFO} Flushing database, DNS resolution temporarily unavailable ..."
|
||||
fi
|
||||
|
||||
# Stop FTL to make sure it doesn't write to the database while we're deleting data
|
||||
service pihole-FTL stop
|
||||
|
||||
# Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)
|
||||
deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
|
||||
deleted=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
|
||||
|
||||
# Restart pihole-FTL to force reloading history
|
||||
sudo pihole restartdns
|
||||
# Restart FTL
|
||||
service pihole-FTL restart
|
||||
if [[ "$*" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Deleted ${deleted} queries from long-term query database"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$@" != *"quiet"* ]]; then
|
||||
echo -e "${OVER} ${TICK} Flushed /var/log/pihole/pihole.log"
|
||||
echo -e " ${TICK} Deleted ${deleted} queries from database"
|
||||
fi
|
||||
|
|
|
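The manual flush above deletes only the most recent 24 hours from FTL's long-term database and reports the affected row count via changes(). The same query against a scratch database, using plain sqlite3 purely for illustration:

db=/tmp/query_storage_demo.db
deleted=$(sqlite3 "${db}" "CREATE TABLE IF NOT EXISTS query_storage (timestamp INTEGER);
    INSERT INTO query_storage VALUES (strftime('%s','now'));
    DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400;
    SELECT changes();")
echo "Deleted ${deleted} queries from the last 24 hours"
rm -f "${db}"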
@@ -1,246 +1,157 @@
|
|||
#!/usr/bin/env bash
|
||||
#!/usr/bin/env sh
|
||||
# shellcheck disable=SC1090
|
||||
|
||||
# Ignore warning about `local` being undefined in POSIX
|
||||
# shellcheck disable=SC3043
|
||||
# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
|
||||
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2018 Pi-hole, LLC (https://pi-hole.net)
|
||||
# (c) 2023 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Query Domain Lists
|
||||
# Search Adlists
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# Globals
|
||||
piholeDir="/etc/pihole"
|
||||
GRAVITYDB="${piholeDir}/gravity.db"
|
||||
options="$*"
|
||||
all=""
|
||||
exact=""
|
||||
matchType="match"
|
||||
# Source pihole-FTL from install script
|
||||
pihole_FTL="${piholeDir}/pihole-FTL.conf"
|
||||
if [[ -f "${pihole_FTL}" ]]; then
|
||||
source "${pihole_FTL}"
|
||||
fi
|
||||
|
||||
# Set this only after sourcing pihole-FTL.conf as the gravity database path may
|
||||
# have changed
|
||||
gravityDBfile="${GRAVITYDB}"
|
||||
PI_HOLE_INSTALL_DIR="/opt/pihole"
|
||||
max_results="20"
|
||||
partial="false"
|
||||
domain=""
|
||||
|
||||
# Source color table
|
||||
colfile="/opt/pihole/COL_TABLE"
|
||||
source "${colfile}"
|
||||
. "${colfile}"
|
||||
|
||||
# Scan an array of files for matching strings
|
||||
scanList(){
|
||||
# Escape full stops
|
||||
local domain="${1}" esc_domain="${1//./\\.}" lists="${2}" list_type="${3:-}"
|
||||
# Source api functions
|
||||
. "${PI_HOLE_INSTALL_DIR}/api.sh"
|
||||
|
||||
# Prevent grep from printing file path
|
||||
cd "$piholeDir" || exit 1
|
||||
|
||||
# Prevent grep -i matching slowly: https://bit.ly/2xFXtUX
|
||||
export LC_CTYPE=C
|
||||
|
||||
# /dev/null forces filename to be printed when only one list has been generated
|
||||
case "${list_type}" in
|
||||
"exact" ) grep -i -E -l "(^|(?<!#)\\s)${esc_domain}($|\\s|#)" ${lists} /dev/null 2>/dev/null;;
|
||||
# Iterate through each regexp and check whether it matches the domainQuery
|
||||
# If it does, print the matching regexp and continue looping
|
||||
# Input 1 - regexps | Input 2 - domainQuery
|
||||
"regex" )
|
||||
for list in ${lists}; do
|
||||
if [[ "${domain}" =~ ${list} ]]; then
|
||||
printf "%b\n" "${list}";
|
||||
fi
|
||||
done;;
|
||||
* ) grep -i "${esc_domain}" ${lists} /dev/null 2>/dev/null;;
|
||||
esac
|
||||
}
|
||||
|
||||
if [[ "${options}" == "-h" ]] || [[ "${options}" == "--help" ]]; then
|
||||
Help() {
|
||||
echo "Usage: pihole -q [option] <domain>
|
||||
Example: 'pihole -q -exact domain.com'
|
||||
Example: 'pihole -q --partial domain.com'
|
||||
Query the adlists for a specified domain
|
||||
|
||||
Options:
|
||||
-exact Search the adlists for exact domain matches
|
||||
-all Return all query matches within the adlists
|
||||
-h, --help Show this help dialog"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Handle valid options
|
||||
[[ "${options}" == *"-all"* ]] && all=true
|
||||
if [[ "${options}" == *"-exact"* ]]; then
|
||||
exact="exact"; matchType="exact ${matchType}"
|
||||
fi
|
||||
|
||||
# Strip valid options, leaving only the domain and invalid options
|
||||
# This allows users to place the options before or after the domain
|
||||
options=$(sed -E 's/ ?-(all|exact) ?//g' <<< "${options}")
|
||||
|
||||
# Handle remaining options
|
||||
# If $options contain non ASCII characters, convert to punycode
|
||||
case "${options}" in
|
||||
"" ) str="No domain specified";;
|
||||
*" "* ) str="Unknown query option specified";;
|
||||
*[![:ascii:]]* ) domainQuery=$(idn2 "${options}");;
|
||||
* ) domainQuery="${options}";;
|
||||
esac
|
||||
|
||||
if [[ -n "${str:-}" ]]; then
|
||||
echo -e "${str}${COL_NC}\\nTry 'pihole -q --help' for more information."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
scanDatabaseTable() {
|
||||
local domain table list_type querystr result extra
|
||||
domain="$(printf "%q" "${1}")"
|
||||
table="${2}"
|
||||
list_type="${3:-}"
|
||||
|
||||
# As underscores are legitimate parts of domains, we escape them when using the LIKE operator.
|
||||
# Underscores are SQLite wildcards matching exactly one character. We obviously want to suppress this
|
||||
# behavior. The "ESCAPE '\'" clause specifies that an underscore preceded by an '\' should be matched
|
||||
# as a literal underscore character. We pretreat the $domain variable accordingly to escape underscores.
|
||||
if [[ "${table}" == "gravity" ]]; then
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";;
|
||||
* ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
else
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${list_type}' AND domain = '${domain}'";;
|
||||
* ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${list_type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Send prepared query to gravity database
|
||||
result="$(pihole-FTL sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
|
||||
if [[ -z "${result}" ]]; then
|
||||
# Return early when there are no matches in this table
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "${table}" == "gravity" ]]; then
|
||||
echo "${result}"
|
||||
return
|
||||
fi
|
||||
|
||||
# Mark domain as having been white-/blacklist matched (global variable)
|
||||
wbMatch=true
|
||||
|
||||
# Print table name
|
||||
echo " ${matchType^} found in ${COL_BOLD}exact ${table}${COL_NC}"
|
||||
|
||||
# Loop over results and print them
|
||||
mapfile -t results <<< "${result}"
|
||||
for result in "${results[@]}"; do
|
||||
domain="${result/|*}"
|
||||
if [[ "${result#*|}" == "0" ]]; then
|
||||
extra=" (disabled)"
|
||||
else
|
||||
extra=""
|
||||
fi
|
||||
echo " ${domain}${extra}"
|
||||
done
|
||||
--partial Search the adlists for partially matching domains
|
||||
--all Return all query matches within the adlists
|
||||
-h, --help Show this help dialog"
|
||||
exit 0
|
||||
}
|
||||
|
||||
scanRegexDatabaseTable() {
|
||||
local domain list list_type
|
||||
domain="${1}"
|
||||
list="${2}"
|
||||
list_type="${3:-}"
|
||||
GenerateOutput() {
|
||||
local data gravity_data lists_data num_gravity num_lists search_type_str
|
||||
local gravity_data_csv lists_data_csv line current_domain url type color
|
||||
data="${1}"
|
||||
|
||||
# Query all regex from the corresponding database tables
|
||||
mapfile -t regexList < <(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${list_type}" 2> /dev/null)
|
||||
# construct a new json for the list results where each object contains the domain and the related type
|
||||
lists_data=$(printf %s "${data}" | jq '.search.domains | [.[] | {domain: .domain, type: .type}]')
|
||||
|
||||
# If we have regexps to process
|
||||
if [[ "${#regexList[@]}" -ne 0 ]]; then
|
||||
# Split regexps over a new line
|
||||
str_regexList=$(printf '%s\n' "${regexList[@]}")
|
||||
# Check domain against regexps
|
||||
mapfile -t regexMatches < <(scanList "${domain}" "${str_regexList}" "regex")
|
||||
# If there were regex matches
|
||||
if [[ "${#regexMatches[@]}" -ne 0 ]]; then
|
||||
# Split matching regexps over a new line
|
||||
str_regexMatches=$(printf '%s\n' "${regexMatches[@]}")
|
||||
# Form a "matched" message
|
||||
str_message="${matchType^} found in ${COL_BOLD}regex ${list}${COL_NC}"
|
||||
# Form a "results" message
|
||||
str_result="${COL_BOLD}${str_regexMatches}${COL_NC}"
|
||||
# If we are displaying more than just the source of the block
|
||||
# Set the wildcard match flag
|
||||
wcMatch=true
|
||||
# Echo the "matched" message, indented by one space
|
||||
echo " ${str_message}"
|
||||
# Echo the "results" message, each line indented by three spaces
|
||||
# shellcheck disable=SC2001
|
||||
echo "${str_result}" | sed 's/^/ /'
|
||||
fi
|
||||
# construct a new json for the gravity results where each object contains the adlist URL and the related domains
|
||||
gravity_data=$(printf %s "${data}" | jq '.search.gravity | group_by(.address,.type) | map({ address: (.[0].address), type: (.[0].type), domains: [.[] | .domain] })')
|
||||
|
||||
# number of objects in each json
|
||||
num_gravity=$(printf %s "${gravity_data}" | jq length)
|
||||
num_lists=$(printf %s "${lists_data}" | jq length)
|
||||
|
||||
if [ "${partial}" = true ]; then
|
||||
search_type_str="partially"
|
||||
else
|
||||
search_type_str="exactly"
|
||||
fi
|
||||
|
||||
# Results from allow/deny list
|
||||
printf "%s\n\n" "Found ${num_lists} domains ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'."
|
||||
if [ "${num_lists}" -gt 0 ]; then
|
||||
# Convert the data to a csv, each line is a "domain,type" string
|
||||
# not using jq's @csv here as it quotes each value individually
|
||||
lists_data_csv=$(printf %s "${lists_data}" | jq --raw-output '.[] | [.domain, .type] | join(",")')
|
||||
|
||||
# Generate output for each csv line, separating line in a domain and type substring at the ','
|
||||
echo "${lists_data_csv}" | while read -r line; do
|
||||
printf "%s\n\n" " - ${COL_GREEN}${line%,*}${COL_NC} (type: exact ${line#*,} domain)"
|
||||
done
|
||||
fi
|
||||
|
||||
# Results from gravity
|
||||
printf "%s\n\n" "Found ${num_gravity} adlists ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'."
|
||||
if [ "${num_gravity}" -gt 0 ]; then
|
||||
# Convert the data to a csv, each line is a "URL,domain,domain,...." string
|
||||
# not using jq's @csv here as it quotes each value individually
|
||||
gravity_data_csv=$(printf %s "${gravity_data}" | jq --raw-output '.[] | [.address, .type, .domains[]] | join(",")')
|
||||
|
||||
# Generate line-by-line output for each csv line
|
||||
echo "${gravity_data_csv}" | while read -r line; do
|
||||
# Get first part of the line, the URL
|
||||
url=${line%%,*}
|
||||
|
||||
# cut off URL, leaving "type,domain,domain,...."
|
||||
line=${line#*,}
|
||||
type=${line%%,*}
|
||||
# type == "block" -> red, type == "allow" -> green
|
||||
if [ "${type}" = "block" ]; then
|
||||
color="${COL_RED}"
|
||||
else
|
||||
color="${COL_GREEN}"
|
||||
fi
|
||||
|
||||
# print adlist URL
|
||||
printf "%s (%s)\n\n" " - ${COL_BLUE}${url}${COL_NC}" "${color}${type}${COL_NC}"
|
||||
|
||||
# cut off type, leaving "domain,domain,...."
|
||||
line=${line#*,}
|
||||
# print each domain and remove it from the string until nothing is left
|
||||
while [ ${#line} -gt 0 ]; do
|
||||
current_domain=${line%%,*}
|
||||
printf ' - %s\n' "${COL_GREEN}${current_domain}${COL_NC}"
|
||||
# we need to remove the current_domain and the comma in two steps because
|
||||
# the last domain won't have a trailing comma and the while loop wouldn't exit
|
||||
line=${line#"${current_domain}"}
|
||||
line=${line#,}
|
||||
done
|
||||
printf "\n\n"
|
||||
done
|
||||
fi
|
||||
|
||||
# If no exact results were found, suggest using partial matching
|
||||
if [ "${num_lists}" -eq 0 ] && [ "${num_gravity}" -eq 0 ] && [ "${partial}" = false ]; then
|
||||
printf "%s\n" "Hint: Try partial matching with"
|
||||
printf "%s\n\n" " ${COL_GREEN}pihole -q --partial ${domain}${COL_NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Scan Whitelist and Blacklist
|
||||
scanDatabaseTable "${domainQuery}" "whitelist" "0"
|
||||
scanDatabaseTable "${domainQuery}" "blacklist" "1"
|
||||
Main() {
|
||||
local data
|
||||
|
||||
# Scan Regex table
|
||||
scanRegexDatabaseTable "${domainQuery}" "whitelist" "2"
|
||||
scanRegexDatabaseTable "${domainQuery}" "blacklist" "3"
|
||||
|
||||
# Query block lists
|
||||
mapfile -t results <<< "$(scanDatabaseTable "${domainQuery}" "gravity")"
|
||||
|
||||
# Handle notices
|
||||
if [[ -z "${wbMatch:-}" ]] && [[ -z "${wcMatch:-}" ]] && [[ -z "${results[*]}" ]]; then
|
||||
echo -e " ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the adlists"
|
||||
exit 0
|
||||
elif [[ -z "${results[*]}" ]]; then
|
||||
# Result found in WL/BL/Wildcards
|
||||
exit 0
|
||||
elif [[ -z "${all}" ]] && [[ "${#results[*]}" -ge 100 ]]; then
|
||||
echo -e " ${INFO} Over 100 ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC}
|
||||
This can be overridden using the -all option"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Print "Exact matches for" title
|
||||
if [[ -n "${exact}" ]]; then
|
||||
plural=""; [[ "${#results[*]}" -gt 1 ]] && plural="es"
|
||||
echo " ${matchType^}${plural} for ${COL_BOLD}${domainQuery}${COL_NC} found in:"
|
||||
fi
|
||||
|
||||
for result in "${results[@]}"; do
|
||||
match="${result/|*/}"
|
||||
extra="${result#*|}"
|
||||
adlistAddress="${extra/|*/}"
|
||||
extra="${extra#*|}"
|
||||
if [[ "${extra}" == "0" ]]; then
|
||||
extra=" (disabled)"
|
||||
else
|
||||
extra=""
|
||||
if [ -z "${domain}" ]; then
|
||||
echo "No domain specified"
|
||||
exit 1
|
||||
fi
|
||||
# domains are lowercased and converted to punycode by FTL since
|
||||
# https://github.com/pi-hole/FTL/pull/1715
|
||||
# no need to do it here
|
||||
|
||||
if [[ -n "${exact}" ]]; then
|
||||
echo " - ${adlistAddress}${extra}"
|
||||
else
|
||||
if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
|
||||
count=""
|
||||
echo " ${matchType^} found in ${COL_BOLD}${adlistAddress}${COL_NC}:"
|
||||
adlistAddress_prev="${adlistAddress}"
|
||||
fi
|
||||
: $((count++))
|
||||
# Authenticate with FTL
|
||||
LoginAPI
|
||||
|
||||
# Print matching domain if $max_count has not been reached
|
||||
[[ -z "${all}" ]] && max_count="50"
|
||||
if [[ -z "${all}" ]] && [[ "${count}" -ge "${max_count}" ]]; then
|
||||
[[ "${count}" -gt "${max_count}" ]] && continue
|
||||
echo " ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}"
|
||||
else
|
||||
echo " ${match}${extra}"
|
||||
fi
|
||||
fi
|
||||
# send query again
|
||||
data=$(GetFTLData "search/${domain}?N=${max_results}&partial=${partial}")
|
||||
|
||||
GenerateOutput "${data}"
|
||||
|
||||
# Delete session
|
||||
LogoutAPI
|
||||
}
|
||||
|
||||
# Process all options (if present)
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
"-h" | "--help") Help ;;
|
||||
"--partial") partial="true" ;;
|
||||
"--all") max_results=10000 ;; # hard-coded FTL limit
|
||||
*) domain=$1 ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
exit 0
|
||||
Main "${domain}"
|
||||
|
|
|
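The rewritten query script leans on jq to reshape FTL's /search response: gravity hits are grouped per adlist address so each URL is printed once with its matching domains. A minimal sketch with a hand-written JSON fragment (the field names mirror what the script expects, not a documented schema):

printf '%s' '{"search":{"gravity":[
  {"address":"https://example.org/list.txt","type":"block","domain":"ads.example.com"},
  {"address":"https://example.org/list.txt","type":"block","domain":"tracker.example.net"}]}}' |
    jq '.search.gravity | group_by(.address,.type) | map({address: .[0].address, type: .[0].type, domains: [.[] | .domain]})'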
@@ -11,7 +11,7 @@
|
|||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# Variables
|
||||
readonly ADMIN_INTERFACE_GIT_URL="https://github.com/pi-hole/AdminLTE.git"
|
||||
readonly ADMIN_INTERFACE_GIT_URL="https://github.com/pi-hole/web.git"
|
||||
readonly ADMIN_INTERFACE_DIR="/var/www/html/admin"
|
||||
readonly PI_HOLE_GIT_URL="https://github.com/pi-hole/pi-hole.git"
|
||||
readonly PI_HOLE_FILES_DIR="/etc/.pihole"
|
||||
|
@@ -104,9 +104,6 @@ main() {
|
|||
web_update=false
|
||||
FTL_update=false
|
||||
|
||||
# shellcheck disable=1090,2154
|
||||
source "${setupVars}"
|
||||
|
||||
# Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
|
||||
package_manager_detect
|
||||
install_dependent_packages "${INSTALLER_DEPS[@]}"
|
||||
|
@@ -128,20 +125,18 @@ main() {
|
|||
echo -e " ${INFO} Pi-hole Core:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
|
||||
fi
|
||||
|
||||
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
|
||||
if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then
|
||||
echo -e "\\n ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
|
||||
echo -e " Please re-run install script from https://pi-hole.net${COL_NC}"
|
||||
exit 1;
|
||||
fi
|
||||
if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then
|
||||
echo -e "\\n ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
|
||||
echo -e " Please re-run install script from https://pi-hole.net${COL_NC}"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if GitCheckUpdateAvail "${ADMIN_INTERFACE_DIR}" ; then
|
||||
web_update=true
|
||||
echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}"
|
||||
else
|
||||
web_update=false
|
||||
echo -e " ${INFO} Web Interface:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
|
||||
fi
|
||||
if GitCheckUpdateAvail "${ADMIN_INTERFACE_DIR}" ; then
|
||||
web_update=true
|
||||
echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}"
|
||||
else
|
||||
web_update=false
|
||||
echo -e " ${INFO} Web Interface:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
|
||||
fi
|
||||
|
||||
local funcOutput
|
||||
|
@@ -149,7 +144,7 @@ main() {
|
|||
local binary
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}" #binary name will be the last line of the output of get_binary_name (it always begins with pihole-FTL)
|
||||
|
||||
if FTLcheckUpdate "${binary}" > /dev/null; then
|
||||
if FTLcheckUpdate "${binary}"; then
|
||||
FTL_update=true
|
||||
echo -e " ${INFO} FTL:\\t\\t${COL_YELLOW}update available${COL_NC}"
|
||||
else
|
||||
|
@@ -160,8 +155,13 @@ main() {
|
|||
2)
|
||||
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_LIGHT_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch."
|
||||
;;
|
||||
3)
|
||||
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, cannot reach download server${COL_NC}"
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, contact support${COL_NC}"
|
||||
exit 1
|
||||
esac
|
||||
FTL_update=false
|
||||
fi
|
||||
|
@@ -221,6 +221,12 @@ main() {
|
|||
echo -e " ${INFO} Local version file information updated."
|
||||
fi
|
||||
|
||||
# if there was only a web update, show the new versions
|
||||
# (on core and FTL updates, this is done as part of the installer run)
|
||||
if [[ "${web_update}" == true && "${FTL_update}" == false && "${core_update}" == false ]]; then
|
||||
"${PI_HOLE_BIN_DIR}"/pihole version
|
||||
fi
|
||||
|
||||
echo ""
|
||||
exit 0
|
||||
}
|
||||
|
|
|
@@ -10,34 +10,34 @@
|
|||
|
||||
function get_local_branch() {
|
||||
# Return active branch
|
||||
cd "${1}" 2> /dev/null || return 1
|
||||
cd "${1}" 2>/dev/null || return 1
|
||||
git rev-parse --abbrev-ref HEAD || return 1
|
||||
}
|
||||
|
||||
function get_local_version() {
|
||||
# Return active version
|
||||
cd "${1}" 2> /dev/null || return 1
|
||||
git describe --tags --always 2> /dev/null || return 1
|
||||
cd "${1}" 2>/dev/null || return 1
|
||||
git describe --tags --always 2>/dev/null || return 1
|
||||
}
|
||||
|
||||
function get_local_hash() {
|
||||
cd "${1}" 2> /dev/null || return 1
|
||||
cd "${1}" 2>/dev/null || return 1
|
||||
git rev-parse --short=8 HEAD || return 1
|
||||
}
|
||||
|
||||
function get_remote_version() {
|
||||
curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2> /dev/null | jq --raw-output .tag_name || return 1
|
||||
# if ${2} is = "master" we need to use the "latest" endpoint, otherwise, we simply return null
|
||||
if [[ "${2}" == "master" ]]; then
|
||||
curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2>/dev/null | jq --raw-output .tag_name || return 1
|
||||
else
|
||||
echo "null"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function get_remote_hash(){
|
||||
git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 0,8);}' || return 1
|
||||
function get_remote_hash() {
|
||||
git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 1,8);}' || return 1
|
||||
}
|
||||
|
||||
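get_remote_version() now only asks GitHub for a release tag when the local branch is master, because other branches have no corresponding release. What that call boils down to (network access and jq assumed):

# Latest release tag of a repository, as used for the master branch
curl -s "https://api.github.com/repos/pi-hole/pi-hole/releases/latest" | jq --raw-output .tag_name
# For any non-master branch the function simply echoes "null" instead of calling out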
# Source the setupvars config file
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/pihole/setupVars.conf
|
||||
|
||||
# Source the utils file for addOrEditKeyValPair()
|
||||
# shellcheck disable=SC1091
|
||||
. /opt/pihole/utils.sh
|
||||
|
@@ -54,18 +54,18 @@ chmod 644 "${VERSION_FILE}"
|
|||
|
||||
# if /pihole.docker.tag file exists, we will use its value later in this script
|
||||
DOCKER_TAG=$(cat /pihole.docker.tag 2>/dev/null)
|
||||
regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$|(^nightly$)|(^dev.*$)'
|
||||
release_regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$'
|
||||
regex=$release_regex'|(^nightly$)|(^dev.*$)'
|
||||
if [[ ! "${DOCKER_TAG}" =~ $regex ]]; then
|
||||
# DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it.
|
||||
unset DOCKER_TAG
|
||||
# DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it.
|
||||
unset DOCKER_TAG
|
||||
fi
|
||||
|
||||
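The docker tag check is now split into a strict release pattern and a wider pattern that also accepts nightly and dev tags; only release-style tags later trigger a remote version lookup. A small test loop for both regexes with invented tags:

release_regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$'
regex=$release_regex'|(^nightly$)|(^dev.*$)'
for tag in 2024.07.0 nightly dev-custom not-a-tag; do
    if [[ "${tag}" =~ $regex ]]; then echo "${tag}: accepted"; else echo "${tag}: rejected"; fi
    [[ "${tag}" =~ $release_regex ]] && echo "${tag}: release tag -> remote version will be checked"
done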
# used in cronjob
|
||||
if [[ "$1" == "reboot" ]]; then
|
||||
sleep 30
|
||||
sleep 30
|
||||
fi
|
||||
|
||||
|
||||
# get Core versions
|
||||
|
||||
CORE_VERSION="$(get_local_version /etc/.pihole)"
|
||||
|
@@ -77,33 +77,28 @@ addOrEditKeyValPair "${VERSION_FILE}" "CORE_BRANCH" "${CORE_BRANCH}"
|
|||
CORE_HASH="$(get_local_hash /etc/.pihole)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "CORE_HASH" "${CORE_HASH}"
|
||||
|
||||
GITHUB_CORE_VERSION="$(get_remote_version pi-hole)"
|
||||
GITHUB_CORE_VERSION="$(get_remote_version pi-hole "${CORE_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_VERSION" "${GITHUB_CORE_VERSION}"
|
||||
|
||||
GITHUB_CORE_HASH="$(get_remote_hash pi-hole "${CORE_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}"
|
||||
|
||||
|
||||
# get Web versions
|
||||
|
||||
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
|
||||
WEB_VERSION="$(get_local_version /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}"
|
||||
|
||||
WEB_VERSION="$(get_local_version /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}"
|
||||
WEB_BRANCH="$(get_local_branch /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}"
|
||||
|
||||
WEB_BRANCH="$(get_local_branch /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}"
|
||||
WEB_HASH="$(get_local_hash /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}"
|
||||
|
||||
WEB_HASH="$(get_local_hash /var/www/html/admin)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}"
|
||||
GITHUB_WEB_VERSION="$(get_remote_version web "${WEB_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_VERSION" "${GITHUB_WEB_VERSION}"
|
||||
|
||||
GITHUB_WEB_VERSION="$(get_remote_version AdminLTE)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_VERSION" "${GITHUB_WEB_VERSION}"
|
||||
|
||||
GITHUB_WEB_HASH="$(get_remote_hash AdminLTE "${WEB_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_HASH" "${GITHUB_WEB_HASH}"
|
||||
|
||||
fi
|
||||
GITHUB_WEB_HASH="$(get_remote_hash web "${WEB_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_HASH" "${GITHUB_WEB_HASH}"
|
||||
|
||||
# get FTL versions
|
||||
|
||||
|
@@ -116,18 +111,23 @@ addOrEditKeyValPair "${VERSION_FILE}" "FTL_BRANCH" "${FTL_BRANCH}"
|
|||
FTL_HASH="$(pihole-FTL --hash)"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "FTL_HASH" "${FTL_HASH}"
|
||||
|
||||
GITHUB_FTL_VERSION="$(get_remote_version FTL)"
|
||||
GITHUB_FTL_VERSION="$(get_remote_version FTL "${FTL_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_VERSION" "${GITHUB_FTL_VERSION}"
|
||||
|
||||
GITHUB_FTL_HASH="$(get_remote_hash FTL "${FTL_BRANCH}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_HASH" "${GITHUB_FTL_HASH}"
|
||||
|
||||
|
||||
# get Docker versions
|
||||
|
||||
if [[ "${DOCKER_TAG}" ]]; then
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "DOCKER_VERSION" "${DOCKER_TAG}"
|
||||
|
||||
GITHUB_DOCKER_VERSION="$(get_remote_version docker-pi-hole)"
|
||||
# Remote version check only if the tag is a valid release version
|
||||
docker_branch=""
|
||||
if [[ "${DOCKER_TAG}" =~ $release_regex ]]; then
|
||||
docker_branch="master"
|
||||
fi
|
||||
|
||||
GITHUB_DOCKER_VERSION="$(get_remote_version docker-pi-hole "${docker_branch}")"
|
||||
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_DOCKER_VERSION" "${GITHUB_DOCKER_VERSION}"
|
||||
fi
|
||||
|
|
|
@@ -25,6 +25,7 @@
|
|||
#
|
||||
# Example usage:
|
||||
# addOrEditKeyValPair "/etc/pihole/setupVars.conf" "BLOCKING_ENABLED" "true"
|
||||
# TODO: We might not actually need this function in v6
|
||||
#######################
|
||||
addOrEditKeyValPair() {
|
||||
local file="${1}"
|
||||
|
@@ -57,7 +58,11 @@ addKey(){
|
|||
# touch file to prevent grep error if file does not exist yet
|
||||
touch "${file}"
|
||||
|
||||
if ! grep -q "^${key}" "${file}"; then
|
||||
# Match key against entire line, using both anchors. We assume
|
||||
# that the file's keys never have bounding whitespace. Anchors
|
||||
# are necessary to ensure the key is considered absent when it
|
||||
# is a substring of another key present in the file.
|
||||
if ! grep -q "^${key}$" "${file}"; then
|
||||
# Key does not exist, add it.
|
||||
echo "${key}" >> "${file}"
|
||||
fi
|
||||
|
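The anchored grep above removes a substring false positive: without the trailing $, a key that is a prefix of an existing key would never be added. A minimal reproduction (temporary file and key names are made up for illustration):

f="$(mktemp)"
echo "DNSMASQ_LISTENING_BEHAVIOUR" > "${f}"
key="DNSMASQ_LISTENING"
grep -q "^${key}" "${f}" && echo "prefix match: key would wrongly be skipped"
grep -q "^${key}$" "${f}" || echo "anchored match: key correctly detected as missing"
rm -f "${f}"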
@@ -76,48 +81,6 @@ removeKey() {
|
|||
sed -i "/^${key}/d" "${file}"
|
||||
}
|
||||
|
||||
|
||||
#######################
|
||||
# returns FTL's current telnet API port based on the setting in /etc/pihole-FTL.conf
|
||||
########################
|
||||
getFTLAPIPort(){
|
||||
local FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
|
||||
local DEFAULT_FTL_PORT=4711
|
||||
local ftl_api_port
|
||||
|
||||
if [ -s "$FTLCONFFILE" ]; then
|
||||
# if FTLPORT is not set in pihole-FTL.conf, use the default port
|
||||
ftl_api_port="$({ grep '^FTLPORT=' "${FTLCONFFILE}" || echo "${DEFAULT_FTL_PORT}"; } | cut -d'=' -f2-)"
|
||||
# Exploit prevention: set the port to the default port if there is malicious (non-numeric)
|
||||
# content set in pihole-FTL.conf
|
||||
expr "${ftl_api_port}" : "[^[:digit:]]" > /dev/null && ftl_api_port="${DEFAULT_FTL_PORT}"
|
||||
else
|
||||
# if there is no pihole-FTL.conf, use the default port
|
||||
ftl_api_port="${DEFAULT_FTL_PORT}"
|
||||
fi
|
||||
|
||||
echo "${ftl_api_port}"
|
||||
}
|
||||
|
||||
#######################
|
||||
# returns path of FTL's PID file
|
||||
#######################
|
||||
getFTLPIDFile() {
|
||||
local FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
|
||||
local DEFAULT_PID_FILE="/run/pihole-FTL.pid"
|
||||
local FTL_PID_FILE
|
||||
|
||||
if [ -s "${FTLCONFFILE}" ]; then
|
||||
# if PIDFILE is not set in pihole-FTL.conf, use the default path
|
||||
FTL_PID_FILE="$({ grep '^PIDFILE=' "${FTLCONFFILE}" || echo "${DEFAULT_PID_FILE}"; } | cut -d'=' -f2-)"
|
||||
else
|
||||
# if there is no pihole-FTL.conf, use the default path
|
||||
FTL_PID_FILE="${DEFAULT_PID_FILE}"
|
||||
fi
|
||||
|
||||
echo "${FTL_PID_FILE}"
|
||||
}
|
||||
|
||||
#######################
|
||||
# returns FTL's PID based on the content of the pihole-FTL.pid file
|
||||
#
|
||||
|
@@ -141,3 +104,30 @@ getFTLPID() {
|
|||
FTL_PID=${FTL_PID:=-1}
|
||||
echo "${FTL_PID}"
|
||||
}
|
||||
|
||||
#######################
|
||||
# returns value from FTL's config file using pihole-FTL --config
|
||||
#
|
||||
# Takes one argument: key
|
||||
# Example getFTLConfigValue dns.piholePTR
|
||||
#######################
|
||||
getFTLConfigValue(){
|
||||
pihole-FTL --config -q "${1}"
|
||||
}
|
||||
|
||||
#######################
|
||||
# sets value in FTL's config file using pihole-FTL --config
|
||||
#
|
||||
# Takes two arguments: key and value
|
||||
# Example setFTLConfigValue dns.piholePTR PI.HOLE
|
||||
#
|
||||
# Note, for complex values such as dns.upstreams, you should wrap the value in single quotes:
|
||||
# setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]'
|
||||
#######################
|
||||
setFTLConfigValue(){
|
||||
pihole-FTL --config "${1}" "${2}" >/dev/null
|
||||
if [[ $? -eq 5 ]]; then
|
||||
echo -e " ${CROSS} ${1} set by environment variable. Please unset it to use this function"
|
||||
exit 5
|
||||
fi
|
||||
}
|
||||
|
|
|
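The two new helpers wrap pihole-FTL --config for reads and writes; exit code 5 signals that the key is pinned by an environment variable. A short usage sketch using the example keys from the comments above:

. /opt/pihole/utils.sh
current="$(getFTLConfigValue dns.piholePTR)"       # read a single key
echo "dns.piholePTR is currently: ${current}"
setFTLConfigValue dns.piholePTR PI.HOLE            # write a scalar value
setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]'   # complex values stay single-quoted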
@@ -8,9 +8,9 @@
|
|||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# Source the setupvars config file
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/pihole/setupVars.conf
|
||||
# Ignore warning about `local` being undefined in POSIX
|
||||
# shellcheck disable=SC3043
|
||||
# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
|
||||
|
||||
# Source the versions file populated by updatechecker.sh
|
||||
cachedVersions="/etc/pihole/versions"
|
||||
|
@@ -25,126 +25,34 @@ else
|
|||
. "$cachedVersions"
|
||||
fi
|
||||
|
||||
getLocalVersion() {
|
||||
case ${1} in
|
||||
"Pi-hole" ) echo "${CORE_VERSION:=N/A}";;
|
||||
"AdminLTE" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_VERSION:=N/A}";;
|
||||
"FTL" ) echo "${FTL_VERSION:=N/A}";;
|
||||
esac
|
||||
}
|
||||
main() {
|
||||
local details
|
||||
details=false
|
||||
|
||||
getLocalHash() {
|
||||
case ${1} in
|
||||
"Pi-hole" ) echo "${CORE_HASH:=N/A}";;
|
||||
"AdminLTE" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_HASH:=N/A}";;
|
||||
"FTL" ) echo "${FTL_HASH:=N/A}";;
|
||||
esac
|
||||
}
|
||||
|
||||
getRemoteHash(){
|
||||
case ${1} in
|
||||
"Pi-hole" ) echo "${GITHUB_CORE_HASH:=N/A}";;
|
||||
"AdminLTE" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${GITHUB_WEB_HASH:=N/A}";;
|
||||
"FTL" ) echo "${GITHUB_FTL_HASH:=N/A}";;
|
||||
esac
|
||||
}
|
||||
|
||||
getRemoteVersion(){
|
||||
case ${1} in
|
||||
"Pi-hole" ) echo "${GITHUB_CORE_VERSION:=N/A}";;
|
||||
"AdminLTE" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${GITHUB_WEB_VERSION:=N/A}";;
|
||||
"FTL" ) echo "${GITHUB_FTL_VERSION:=N/A}";;
|
||||
esac
|
||||
}
|
||||
|
||||
getLocalBranch(){
|
||||
case ${1} in
|
||||
"Pi-hole" ) echo "${CORE_BRANCH:=N/A}";;
|
||||
"AdminLTE" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_BRANCH:=N/A}";;
|
||||
"FTL" ) echo "${FTL_BRANCH:=N/A}";;
|
||||
esac
|
||||
}
|
||||
|
||||
versionOutput() {
|
||||
if [ "$1" = "AdminLTE" ] && [ "${INSTALL_WEB_INTERFACE}" != true ]; then
|
||||
echo " WebAdmin not installed"
|
||||
return 1
|
||||
# Automatically show detailed information if
|
||||
# at least one of the components is not on master branch
|
||||
if [ ! "${CORE_BRANCH}" = "master" ] || [ ! "${WEB_BRANCH}" = "master" ] || [ ! "${FTL_BRANCH}" = "master" ]; then
|
||||
details=true
|
||||
fi
|
||||
|
||||
[ "$2" = "-c" ] || [ "$2" = "--current" ] || [ -z "$2" ] && current=$(getLocalVersion "${1}") && branch=$(getLocalBranch "${1}")
|
||||
[ "$2" = "-l" ] || [ "$2" = "--latest" ] || [ -z "$2" ] && latest=$(getRemoteVersion "${1}")
|
||||
if [ "$2" = "--hash" ]; then
|
||||
[ "$3" = "-c" ] || [ "$3" = "--current" ] || [ -z "$3" ] && curHash=$(getLocalHash "${1}") && branch=$(getLocalBranch "${1}")
|
||||
[ "$3" = "-l" ] || [ "$3" = "--latest" ] || [ -z "$3" ] && latHash=$(getRemoteHash "${1}") && branch=$(getLocalBranch "${1}")
|
||||
fi
|
||||
|
||||
# We do not want to show the branch name when we are on master,
|
||||
# blank out the variable in this case
|
||||
if [ "$branch" = "master" ]; then
|
||||
branch=""
|
||||
if [ "${details}" = true ]; then
|
||||
echo "Core"
|
||||
echo " Version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
|
||||
echo " Branch is ${CORE_BRANCH:=N/A}"
|
||||
echo " Hash is ${CORE_HASH:=N/A} (Latest: ${GITHUB_CORE_HASH:=N/A})"
|
||||
echo "Web"
|
||||
echo " Version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
|
||||
echo " Branch is ${WEB_BRANCH:=N/A}"
|
||||
echo " Hash is ${WEB_HASH:=N/A} (Latest: ${GITHUB_WEB_HASH:=N/A})"
|
||||
echo "FTL"
|
||||
echo " Version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
|
||||
echo " Branch is ${FTL_BRANCH:=N/A}"
|
||||
echo " Hash is ${FTL_HASH:=N/A} (Latest: ${GITHUB_FTL_HASH:=N/A})"
|
||||
else
|
||||
branch="$branch "
|
||||
echo "Core version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
|
||||
echo "Web version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
|
||||
echo "FTL version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
|
||||
fi
|
||||
|
||||
if [ -n "$current" ] && [ -n "$latest" ]; then
|
||||
output="${1} version is $branch$current (Latest: $latest)"
|
||||
elif [ -n "$current" ] && [ -z "$latest" ]; then
|
||||
output="Current ${1} version is $branch$current"
|
||||
elif [ -z "$current" ] && [ -n "$latest" ]; then
|
||||
output="Latest ${1} version is $latest"
|
||||
elif [ -n "$curHash" ] && [ -n "$latHash" ]; then
|
||||
output="Local ${1} hash is $curHash (Remote: $latHash)"
|
||||
elif [ -n "$curHash" ] && [ -z "$latHash" ]; then
|
||||
output="Current local ${1} hash is $curHash"
|
||||
elif [ -z "$curHash" ] && [ -n "$latHash" ]; then
|
||||
output="Latest remote ${1} hash is $latHash"
|
||||
elif [ -z "$curHash" ] && [ -z "$latHash" ]; then
|
||||
output="Hashes for ${1} not available"
|
||||
else
|
||||
errorOutput
|
||||
return 1
|
||||
fi
|
||||
|
||||
[ -n "$output" ] && echo " $output"
|
||||
}
|
||||
|
||||
errorOutput() {
|
||||
echo " Invalid Option! Try 'pihole -v --help' for more information."
|
||||
exit 1
|
||||
}
|
||||
|
||||
defaultOutput() {
|
||||
versionOutput "Pi-hole" "$@"
|
||||
|
||||
if [ "${INSTALL_WEB_INTERFACE}" = true ]; then
|
||||
versionOutput "AdminLTE" "$@"
|
||||
fi
|
||||
|
||||
versionOutput "FTL" "$@"
|
||||
}
|
||||
|
||||
helpFunc() {
|
||||
echo "Usage: pihole -v [repo | option] [option]
|
||||
Example: 'pihole -v -p -l'
|
||||
Show Pi-hole, Admin Console & FTL versions
|
||||
|
||||
Repositories:
|
||||
-p, --pihole Only retrieve info regarding Pi-hole repository
|
||||
-a, --admin Only retrieve info regarding AdminLTE repository
|
||||
-f, --ftl Only retrieve info regarding FTL repository
|
||||
|
||||
Options:
|
||||
-c, --current Return the current version
|
||||
-l, --latest Return the latest version
|
||||
--hash Return the GitHub hash from your local repositories
|
||||
-h, --help Show this help dialog"
|
||||
exit 0
|
||||
}
|
||||
|
||||
case "${1}" in
|
||||
"-p" | "--pihole" ) shift; versionOutput "Pi-hole" "$@";;
|
||||
"-a" | "--admin" ) shift; versionOutput "AdminLTE" "$@";;
|
||||
"-f" | "--ftl" ) shift; versionOutput "FTL" "$@";;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
* ) defaultOutput "$@";;
|
||||
esac
|
||||
main
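For reference, everything the script above echoes comes from variables sourced out of the cached /etc/pihole/versions file at the top, presumably written by Pi-hole's update checker. A minimal sketch of what that file might contain (all values below are illustrative, not taken from this commit):
CORE_VERSION=v5.17.3
CORE_BRANCH=master
CORE_HASH=abc1234
GITHUB_CORE_VERSION=v5.17.3
GITHUB_CORE_HASH=abc1234
WEB_VERSION=v5.21
WEB_BRANCH=master
WEB_HASH=def5678
GITHUB_WEB_VERSION=v5.21
GITHUB_WEB_HASH=def5678
FTL_VERSION=v5.23
FTL_BRANCH=master
FTL_HASH=0123abc
GITHUB_FTL_VERSION=v5.23
GITHUB_FTL_HASH=0123abc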
|
||||
|
|
|
@@ -1,848 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC1090
|
||||
# shellcheck disable=SC2154
|
||||
|
||||
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Web interface settings
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
readonly dnsmasqconfig="/etc/dnsmasq.d/01-pihole.conf"
|
||||
readonly dhcpconfig="/etc/dnsmasq.d/02-pihole-dhcp.conf"
|
||||
readonly FTLconf="/etc/pihole/pihole-FTL.conf"
|
||||
# 03 -> wildcards
|
||||
readonly dhcpstaticconfig="/etc/dnsmasq.d/04-pihole-static-dhcp.conf"
|
||||
readonly dnscustomfile="/etc/pihole/custom.list"
|
||||
readonly dnscustomcnamefile="/etc/dnsmasq.d/05-pihole-custom-cname.conf"
|
||||
|
||||
readonly gravityDBfile="/etc/pihole/gravity.db"
|
||||
|
||||
# Source install script for ${setupVars}, ${PI_HOLE_BIN_DIR} and valid_ip()
|
||||
readonly PI_HOLE_FILES_DIR="/etc/.pihole"
|
||||
# shellcheck disable=SC2034 # used in basic-install to source the script without running it
|
||||
SKIP_INSTALL="true"
|
||||
source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
|
||||
|
||||
utilsfile="/opt/pihole/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
coltable="/opt/pihole/COL_TABLE"
|
||||
if [[ -f ${coltable} ]]; then
|
||||
source ${coltable}
|
||||
fi
|
||||
|
||||
helpFunc() {
|
||||
echo "Usage: pihole -a [options]
|
||||
Example: pihole -a -p password
|
||||
Set options for the Admin Console
|
||||
|
||||
Options:
|
||||
-p, password Set Admin Console password
|
||||
-c, celsius Set Celsius as preferred temperature unit
|
||||
-f, fahrenheit Set Fahrenheit as preferred temperature unit
|
||||
-k, kelvin Set Kelvin as preferred temperature unit
|
||||
-h, --help Show this help dialog
|
||||
-i, interface Specify dnsmasq's interface listening behavior
|
||||
-l, privacylevel Set privacy level (0 = lowest, 3 = highest)
|
||||
-t, teleporter Backup configuration as an archive
|
||||
-t, teleporter myname.tar.gz Backup configuration to archive with name myname.tar.gz as specified"
|
||||
exit 0
|
||||
}
|
||||
|
||||
add_setting() {
|
||||
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
delete_setting() {
|
||||
removeKey "${setupVars}" "${1}"
|
||||
}
|
||||
|
||||
change_setting() {
|
||||
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
addFTLsetting() {
|
||||
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
deleteFTLsetting() {
|
||||
removeKey "${FTLconf}" "${1}"
|
||||
}
|
||||
|
||||
changeFTLsetting() {
|
||||
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
add_dnsmasq_setting() {
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
delete_dnsmasq_setting() {
|
||||
removeKey "${dnsmasqconfig}" "${1}"
|
||||
}
|
||||
|
||||
SetTemperatureUnit() {
|
||||
addOrEditKeyValPair "${setupVars}" "TEMPERATUREUNIT" "${unit}"
|
||||
echo -e " ${TICK} Set temperature unit to ${unit}"
|
||||
}
|
||||
|
||||
HashPassword() {
|
||||
# Compute password hash twice to avoid rainbow table vulnerability
|
||||
return=$(echo -n "${1}" | sha256sum | sed 's/\s.*$//')
|
||||
return=$(echo -n "${return}" | sha256sum | sed 's/\s.*$//')
|
||||
echo "${return}"
|
||||
}
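The double SHA-256 above can be reproduced in any shell, which is useful when verifying a stored WEBPASSWORD by hand. A minimal sketch, using a throwaway example password:
# same two-pass hashing as HashPassword()
pw="correct horse battery staple"
hash="$(echo -n "${pw}" | sha256sum | sed 's/\s.*$//')"
hash="$(echo -n "${hash}" | sha256sum | sed 's/\s.*$//')"
echo "${hash}"   # compare with WEBPASSWORD= in setupVars.conf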
|
||||
|
||||
SetWebPassword() {
|
||||
if [ "${SUDO_USER}" == "www-data" ]; then
|
||||
echo "Security measure: user www-data is not allowed to change webUI password!"
|
||||
echo "Exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${SUDO_USER}" == "lighttpd" ]; then
|
||||
echo "Security measure: user lighttpd is not allowed to change webUI password!"
|
||||
echo "Exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if (( ${#args[2]} > 0 )) ; then
|
||||
readonly PASSWORD="${args[2]}"
|
||||
readonly CONFIRM="${PASSWORD}"
|
||||
else
|
||||
# Prevents a bug where pressing Ctrl+C leaves the terminal hiding typed text.
# So we reset the terminal via stty if the user does press Ctrl+C
|
||||
trap '{ echo -e "\nNo password will be set" ; stty sane ; exit 1; }' INT
|
||||
read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
|
||||
echo ""
|
||||
|
||||
if [ "${PASSWORD}" == "" ]; then
|
||||
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" ""
|
||||
echo -e " ${TICK} Password Removed"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
read -s -r -p "Confirm Password: " CONFIRM
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
|
||||
# We do not wrap this in brackets, otherwise BASH will expand any appropriate syntax
|
||||
hash=$(HashPassword "$PASSWORD")
|
||||
# Save hash to file
|
||||
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" "${hash}"
|
||||
echo -e " ${TICK} New password set"
|
||||
else
|
||||
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
ProcessDNSSettings() {
|
||||
source "${setupVars}"
|
||||
|
||||
removeKey "${dnsmasqconfig}" "server"
|
||||
|
||||
COUNTER=1
|
||||
while true ; do
|
||||
var=PIHOLE_DNS_${COUNTER}
|
||||
if [ -z "${!var}" ]; then
|
||||
break;
|
||||
fi
|
||||
addKey "${dnsmasqconfig}" "server=${!var}"
|
||||
(( COUNTER++ ))
|
||||
done
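As a sketch of what this loop produces: with example values PIHOLE_DNS_1=8.8.8.8 and PIHOLE_DNS_2=8.8.4.4 in setupVars.conf, it appends
server=8.8.8.8
server=8.8.4.4
to 01-pihole.conf and stops at the first unset PIHOLE_DNS_N.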
|
||||
|
||||
# The option LOCAL_DNS_PORT is deprecated
|
||||
# We apply it once more, and then convert it into the current format
|
||||
if [ -n "${LOCAL_DNS_PORT}" ]; then
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "server" "127.0.0.1#${LOCAL_DNS_PORT}"
|
||||
addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_${COUNTER}" "127.0.0.1#${LOCAL_DNS_PORT}"
|
||||
removeKey "${setupVars}" "LOCAL_DNS_PORT"
|
||||
fi
|
||||
|
||||
removeKey "${dnsmasqconfig}" "domain-needed"
|
||||
removeKey "${dnsmasqconfig}" "expand-hosts"
|
||||
|
||||
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
||||
addKey "${dnsmasqconfig}" "domain-needed"
|
||||
addKey "${dnsmasqconfig}" "expand-hosts"
|
||||
fi
|
||||
|
||||
removeKey "${dnsmasqconfig}" "bogus-priv"
|
||||
|
||||
if [[ "${DNS_BOGUS_PRIV}" == true ]]; then
|
||||
addKey "${dnsmasqconfig}" "bogus-priv"
|
||||
fi
|
||||
|
||||
removeKey "${dnsmasqconfig}" "dnssec"
|
||||
removeKey "${dnsmasqconfig}" "trust-anchor"
|
||||
|
||||
if [[ "${DNSSEC}" == true ]]; then
|
||||
echo "dnssec
|
||||
trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D
|
||||
" >> "${dnsmasqconfig}"
|
||||
fi
|
||||
|
||||
removeKey "${dnsmasqconfig}" "host-record"
|
||||
|
||||
if [ -n "${HOSTRECORD}" ]; then
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "host-record" "${HOSTRECORD}"
|
||||
fi
|
||||
|
||||
# Setup interface listening behavior of dnsmasq
|
||||
removeKey "${dnsmasqconfig}" "interface"
|
||||
removeKey "${dnsmasqconfig}" "local-service"
|
||||
removeKey "${dnsmasqconfig}" "except-interface"
|
||||
removeKey "${dnsmasqconfig}" "bind-interfaces"
|
||||
|
||||
if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
|
||||
# Listen on all interfaces, permit all origins
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "except-interface" "nonexisting"
|
||||
elif [[ "${DNSMASQ_LISTENING}" == "local" ]]; then
|
||||
# Listen on all interfaces, but respond only to queries from local subnets
|
||||
addKey "${dnsmasqconfig}" "local-service"
|
||||
else
|
||||
# Options "bind" and "single"
|
||||
# Listen only on one interface
|
||||
# Use eth0 as fallback interface if interface is missing in setupVars.conf
|
||||
if [ -z "${PIHOLE_INTERFACE}" ]; then
|
||||
PIHOLE_INTERFACE="eth0"
|
||||
fi
|
||||
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "interface" "${PIHOLE_INTERFACE}"
|
||||
|
||||
if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
|
||||
# Really bind to interface
|
||||
addKey "${dnsmasqconfig}" "bind-interfaces"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
|
||||
# Convert legacy "conditional forwarding" to rev-server configuration
|
||||
# Remove any existing REV_SERVER settings
|
||||
removeKey "${setupVars}" "REV_SERVER"
|
||||
removeKey "${setupVars}" "REV_SERVER_DOMAIN"
|
||||
removeKey "${setupVars}" "REV_SERVER_TARGET"
|
||||
removeKey "${setupVars}" "REV_SERVER_CIDR"
|
||||
|
||||
REV_SERVER=true
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"
|
||||
|
||||
REV_SERVER_DOMAIN="${CONDITIONAL_FORWARDING_DOMAIN}"
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${REV_SERVER_DOMAIN}"
|
||||
|
||||
REV_SERVER_TARGET="${CONDITIONAL_FORWARDING_IP}"
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${REV_SERVER_TARGET}"
|
||||
|
||||
#Convert CONDITIONAL_FORWARDING_REVERSE if necessary e.g:
|
||||
# 1.1.168.192.in-addr.arpa to 192.168.1.1/32
|
||||
# 1.168.192.in-addr.arpa to 192.168.1.0/24
|
||||
# 168.192.in-addr.arpa to 192.168.0.0/16
|
||||
# 192.in-addr.arpa to 192.0.0.0/8
|
||||
if [[ "${CONDITIONAL_FORWARDING_REVERSE}" == *"in-addr.arpa" ]];then
|
||||
arrRev=("${CONDITIONAL_FORWARDING_REVERSE//./ }")
|
||||
case ${#arrRev[@]} in
|
||||
6 ) REV_SERVER_CIDR="${arrRev[3]}.${arrRev[2]}.${arrRev[1]}.${arrRev[0]}/32";;
|
||||
5 ) REV_SERVER_CIDR="${arrRev[2]}.${arrRev[1]}.${arrRev[0]}.0/24";;
|
||||
4 ) REV_SERVER_CIDR="${arrRev[1]}.${arrRev[0]}.0.0/16";;
|
||||
3 ) REV_SERVER_CIDR="${arrRev[0]}.0.0.0/8";;
|
||||
esac
|
||||
else
|
||||
# Otherwise, use the value of CONDITIONAL_FORWARDING_REVERSE as-is
|
||||
REV_SERVER_CIDR="${CONDITIONAL_FORWARDING_REVERSE}"
|
||||
fi
|
||||
|
||||
# If REV_SERVER_CIDR is not converted by the above, then use the REV_SERVER_TARGET variable to derive it
|
||||
if [ -z "${REV_SERVER_CIDR}" ]; then
|
||||
# Convert existing input to /24 subnet (preserves legacy behavior)
|
||||
# This sed converts "192.168.1.2" to "192.168.1.0/24"
|
||||
# shellcheck disable=2001
|
||||
REV_SERVER_CIDR="$(sed "s+\\.[0-9]*$+\\.0/24+" <<< "${REV_SERVER_TARGET}")"
|
||||
fi
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${REV_SERVER_CIDR}"
|
||||
|
||||
# Remove obsolete settings from setupVars.conf
|
||||
removeKey "${setupVars}" "CONDITIONAL_FORWARDING"
|
||||
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_REVERSE"
|
||||
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_DOMAIN"
|
||||
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_IP"
|
||||
fi
|
||||
|
||||
removeKey "${dnsmasqconfig}" "rev-server"
|
||||
|
||||
if [[ "${REV_SERVER}" == true ]]; then
|
||||
addKey "${dnsmasqconfig}" "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
|
||||
if [ -n "${REV_SERVER_DOMAIN}" ]; then
|
||||
# Forward local domain names to the CF target, too
|
||||
addKey "${dnsmasqconfig}" "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
|
||||
fi
|
||||
|
||||
if [[ "${DNS_FQDN_REQUIRED}" != true ]]; then
|
||||
# Forward unqualified names to the CF target only when the "never
|
||||
# forward non-FQDN" option is unticked
|
||||
addKey "${dnsmasqconfig}" "server=//${REV_SERVER_TARGET}"
|
||||
fi
|
||||
|
||||
fi
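With example values REV_SERVER_CIDR=192.168.1.0/24, REV_SERVER_TARGET=192.168.1.1 and REV_SERVER_DOMAIN=lan, the block above ends up writing lines of this shape to 01-pihole.conf:
rev-server=192.168.1.0/24,192.168.1.1
server=/lan/192.168.1.1
server=//192.168.1.1    # only when DNS_FQDN_REQUIRED is not true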
|
||||
|
||||
# We need to process DHCP settings here as well to account for possible
|
||||
# changes in the non-FQDN forwarding. This cannot be done in 01-pihole.conf
|
||||
# as we don't want to delete all local=/.../ lines so it's much safer to
|
||||
# simply rewrite the entire corresponding config file (which is what the
|
||||
# DHCP settings subroutine is doing)
|
||||
ProcessDHCPSettings
|
||||
}
|
||||
|
||||
SetDNSServers() {
|
||||
# Save setting to file
|
||||
removeKey "${setupVars}" "PIHOLE_DNS"
|
||||
IFS=',' read -r -a array <<< "${args[2]}"
|
||||
for index in "${!array[@]}"
|
||||
do
|
||||
# Replace possible "\#" by "#". This fixes AdminLTE#1427
|
||||
local ip
|
||||
ip="${array[index]//\\#/#}"
|
||||
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_$((index+1))" "${ip}"
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${args[3]}" == "domain-needed" ]]; then
|
||||
addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "true"
|
||||
else
|
||||
addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "false"
|
||||
fi
|
||||
|
||||
if [[ "${args[4]}" == "bogus-priv" ]]; then
|
||||
addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "true"
|
||||
else
|
||||
addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "false"
|
||||
fi
|
||||
|
||||
if [[ "${args[5]}" == "dnssec" ]]; then
|
||||
addOrEditKeyValPair "${setupVars}" "DNSSEC" "true"
|
||||
else
|
||||
addOrEditKeyValPair "${setupVars}" "DNSSEC" "false"
|
||||
fi
|
||||
|
||||
if [[ "${args[6]}" == "rev-server" ]]; then
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${args[7]}"
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${args[8]}"
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${args[9]}"
|
||||
else
|
||||
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "false"
|
||||
fi
|
||||
|
||||
ProcessDNSSettings
|
||||
|
||||
# Restart dnsmasq to load new configuration
|
||||
RestartDNS
|
||||
}
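The arguments above are purely positional (args[2] is the comma-separated server list, args[3] onward are flags). A hypothetical invocation, in the shape the web interface is assumed to use, would be:
pihole -a setdns "8.8.8.8,8.8.4.4" domain-needed bogus-priv dnssec rev-server 192.168.1.0/24 192.168.1.1 lan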
|
||||
|
||||
SetExcludeDomains() {
|
||||
addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_DOMAINS" "${args[2]}"
|
||||
}
|
||||
|
||||
SetExcludeClients() {
|
||||
addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_CLIENTS" "${args[2]}"
|
||||
}
|
||||
|
||||
Poweroff(){
|
||||
nohup bash -c "sleep 5; poweroff" &> /dev/null </dev/null &
|
||||
}
|
||||
|
||||
Reboot() {
|
||||
nohup bash -c "sleep 5; reboot" &> /dev/null </dev/null &
|
||||
}
|
||||
|
||||
RestartDNS() {
|
||||
"${PI_HOLE_BIN_DIR}"/pihole restartdns
|
||||
}
|
||||
|
||||
SetQueryLogOptions() {
|
||||
addOrEditKeyValPair "${setupVars}" "API_QUERY_LOG_SHOW" "${args[2]}"
|
||||
}
|
||||
|
||||
ProcessDHCPSettings() {
|
||||
source "${setupVars}"
|
||||
|
||||
if [[ "${DHCP_ACTIVE}" == "true" ]]; then
|
||||
interface="${PIHOLE_INTERFACE}"
|
||||
|
||||
# Use eth0 as fallback interface
|
||||
if [ -z ${interface} ]; then
|
||||
interface="eth0"
|
||||
fi
|
||||
|
||||
if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
|
||||
PIHOLE_DOMAIN="lan"
|
||||
addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
|
||||
fi
|
||||
|
||||
if [[ "${DHCP_LEASETIME}" == "0" ]]; then
|
||||
leasetime="infinite"
|
||||
elif [[ "${DHCP_LEASETIME}" == "" ]]; then
|
||||
leasetime="24h"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "24"
|
||||
else
|
||||
leasetime="${DHCP_LEASETIME}h"
|
||||
fi
|
||||
|
||||
# Write settings to file
|
||||
echo "###############################################################################
|
||||
# DHCP SERVER CONFIG FILE AUTOMATICALLY POPULATED BY PI-HOLE WEB INTERFACE. #
|
||||
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON CHANGE #
|
||||
###############################################################################
|
||||
dhcp-authoritative
|
||||
dhcp-range=${DHCP_START},${DHCP_END},${leasetime}
|
||||
dhcp-option=option:router,${DHCP_ROUTER}
|
||||
dhcp-leasefile=/etc/pihole/dhcp.leases
|
||||
#quiet-dhcp
|
||||
" > "${dhcpconfig}"
|
||||
chmod 644 "${dhcpconfig}"
|
||||
|
||||
if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
|
||||
echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
|
||||
|
||||
# When there is a Pi-hole domain set and "Never forward non-FQDNs" is
|
||||
# ticked, we add `local=/domain/` to tell FTL that this domain is purely
|
||||
# local and FTL may answer queries from /etc/hosts or DHCP but should
|
||||
# never forward queries on that domain to any upstream servers
|
||||
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
||||
echo "local=/${PIHOLE_DOMAIN}/" >> "${dhcpconfig}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Sourced from setupVars
|
||||
# shellcheck disable=SC2154
|
||||
if [[ "${DHCP_rapid_commit}" == "true" ]]; then
|
||||
echo "dhcp-rapid-commit" >> "${dhcpconfig}"
|
||||
fi
|
||||
|
||||
if [[ "${DHCP_IPv6}" == "true" ]]; then
|
||||
echo "#quiet-dhcp6
|
||||
#enable-ra
|
||||
dhcp-option=option6:dns-server,[::]
|
||||
dhcp-range=::,constructor:${interface},ra-names,ra-stateless,64
|
||||
|
||||
" >> "${dhcpconfig}"
|
||||
fi
|
||||
|
||||
else
|
||||
if [[ -f "${dhcpconfig}" ]]; then
|
||||
rm "${dhcpconfig}" &> /dev/null
|
||||
fi
|
||||
fi
|
||||
}
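As an illustration, with example values DHCP_START=192.168.1.100, DHCP_END=192.168.1.200, DHCP_ROUTER=192.168.1.1, DHCP_LEASETIME=24 and the default PIHOLE_DOMAIN=lan, the function above writes roughly this 02-pihole-dhcp.conf:
dhcp-authoritative
dhcp-range=192.168.1.100,192.168.1.200,24h
dhcp-option=option:router,192.168.1.1
dhcp-leasefile=/etc/pihole/dhcp.leases
#quiet-dhcp
domain=lan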
|
||||
|
||||
EnableDHCP() {
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "true"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_START" "${args[2]}"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_END" "${args[3]}"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_ROUTER" "${args[4]}"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "${args[5]}"
|
||||
addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${args[6]}"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_IPv6" "${args[7]}"
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_rapid_commit" "${args[8]}"
|
||||
|
||||
# Remove possible old setting from file
|
||||
removeKey "${dnsmasqconfig}" "dhcp-"
|
||||
removeKey "${dnsmasqconfig}" "quiet-dhcp"
|
||||
|
||||
# If a DHCP client claims that its name is "wpad", ignore that.
|
||||
# This fixes a security hole; see CERT Vulnerability VU#598349
|
||||
# We also ignore "localhost" as Windows behaves strangely if a
|
||||
# device claims this host name
|
||||
addKey "${dnsmasqconfig}" "dhcp-name-match=set:hostname-ignore,wpad
|
||||
dhcp-name-match=set:hostname-ignore,localhost
|
||||
dhcp-ignore-names=tag:hostname-ignore"
|
||||
|
||||
ProcessDHCPSettings
|
||||
|
||||
RestartDNS
|
||||
}
|
||||
|
||||
DisableDHCP() {
|
||||
addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "false"
|
||||
|
||||
# Remove possible old setting from file
|
||||
removeKey "${dnsmasqconfig}" "dhcp-"
|
||||
removeKey "${dnsmasqconfig}" "quiet-dhcp"
|
||||
|
||||
ProcessDHCPSettings
|
||||
|
||||
RestartDNS
|
||||
}
|
||||
|
||||
SetWebUILayout() {
|
||||
addOrEditKeyValPair "${setupVars}" "WEBUIBOXEDLAYOUT" "${args[2]}"
|
||||
}
|
||||
|
||||
SetWebUITheme() {
|
||||
addOrEditKeyValPair "${setupVars}" "WEBTHEME" "${args[2]}"
|
||||
}
|
||||
|
||||
CheckUrl(){
|
||||
local regex check_url
|
||||
# Check for characters NOT allowed in URLs
|
||||
regex="[^a-zA-Z0-9:/?&%=~._()-;]"
|
||||
|
||||
# This removes the first @ that appears after the scheme and before the domain
|
||||
# \1 is optional schema, \2 is userinfo
|
||||
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$1" )"
|
||||
|
||||
if [[ "${check_url}" =~ ${regex} ]]; then
|
||||
return 1
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
}
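As a worked example of the sed above: the made-up URL https://user@lists.example.com/ads.txt becomes https://userlists.example.com/ads.txt before the character check, i.e. only the first @ (the userinfo separator) is dropped so it cannot trip the disallowed-character regex; any other forbidden character still makes CheckUrl return 1.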
|
||||
|
||||
CustomizeAdLists() {
|
||||
local address
|
||||
address="${args[3]}"
|
||||
local comment
|
||||
comment="${args[4]}"
|
||||
|
||||
if CheckUrl "${address}"; then
|
||||
if [[ "${args[2]}" == "enable" ]]; then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
|
||||
elif [[ "${args[2]}" == "disable" ]]; then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
|
||||
elif [[ "${args[2]}" == "add" ]]; then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
|
||||
elif [[ "${args[2]}" == "del" ]]; then
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
|
||||
else
|
||||
echo "Not permitted"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
echo "Invalid Url"
|
||||
return 1
|
||||
fi
|
||||
}
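Going by the argument indices used above, the Admin Console presumably drives this along the lines of (URL and comment are placeholders):
pihole -a adlist add "https://example.com/hosts.txt" "my test list"
pihole -a adlist disable "https://example.com/hosts.txt"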
|
||||
|
||||
AddDHCPStaticAddress() {
|
||||
mac="${args[2]}"
|
||||
ip="${args[3]}"
|
||||
host="${args[4]}"
|
||||
|
||||
if [[ "${ip}" == "noip" ]]; then
|
||||
# Static host name
|
||||
echo "dhcp-host=${mac},${host}" >> "${dhcpstaticconfig}"
|
||||
elif [[ "${host}" == "nohost" ]]; then
|
||||
# Static IP
|
||||
echo "dhcp-host=${mac},${ip}" >> "${dhcpstaticconfig}"
|
||||
else
|
||||
# Full info given
|
||||
echo "dhcp-host=${mac},${ip},${host}" >> "${dhcpstaticconfig}"
|
||||
fi
|
||||
}
|
||||
|
||||
RemoveDHCPStaticAddress() {
|
||||
mac="${args[2]}"
|
||||
if [[ "$mac" =~ ^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$ ]]; then
|
||||
sed -i "/dhcp-host=${mac}.*/d" "${dhcpstaticconfig}"
|
||||
else
|
||||
echo " ${CROSS} Invalid Mac Passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
SetListeningMode() {
|
||||
source "${setupVars}"
|
||||
|
||||
if [[ "$3" == "-h" ]] || [[ "$3" == "--help" ]]; then
|
||||
echo "Usage: pihole -a -i [interface]
|
||||
Example: 'pihole -a -i local'
|
||||
Specify dnsmasq's network interface listening behavior
|
||||
|
||||
Interfaces:
|
||||
local Only respond to queries from devices that
|
||||
are at most one hop away (local devices)
|
||||
single Respond only on interface ${PIHOLE_INTERFACE}
|
||||
bind Bind only on interface ${PIHOLE_INTERFACE}
|
||||
all Listen on all interfaces, permit all origins"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "${args[2]}" == "all" ]]; then
|
||||
echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
|
||||
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "all"
|
||||
elif [[ "${args[2]}" == "local" ]]; then
|
||||
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
|
||||
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "local"
|
||||
elif [[ "${args[2]}" == "bind" ]]; then
|
||||
echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
|
||||
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "bind"
|
||||
else
|
||||
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
|
||||
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "single"
|
||||
fi
|
||||
|
||||
# Don't restart DNS server yet because other settings
|
||||
# will be applied afterwards if "-web" is set
|
||||
if [[ "${args[3]}" != "-web" ]]; then
|
||||
ProcessDNSSettings
|
||||
# Restart dnsmasq to load new configuration
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
Teleporter() {
|
||||
local filename
|
||||
filename="${args[2]}"
|
||||
if [[ -z "${filename}" ]]; then
|
||||
local datetimestamp
|
||||
local host
|
||||
datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
|
||||
host=$(hostname)
|
||||
host="${host//./_}"
|
||||
filename="pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
|
||||
fi
|
||||
# webroot is sourced from basic-install above
|
||||
php "${webroot}/admin/scripts/pi-hole/php/teleporter.php" > "${filename}"
|
||||
}
|
||||
|
||||
checkDomain()
|
||||
{
|
||||
local domain validDomain
|
||||
# Convert to lowercase
|
||||
domain="${1,,}"
|
||||
validDomain=$(grep -P "^((-|_)*[a-z\\d]((-|_)*[a-z\\d])*(-|_)*)(\\.(-|_)*([a-z\\d]((-|_)*[a-z\\d])*))*$" <<< "${domain}") # Valid chars check
|
||||
validDomain=$(grep -P "^[^\\.]{1,63}(\\.[^\\.]{1,63})*$" <<< "${validDomain}") # Length of each label
|
||||
echo "${validDomain}"
|
||||
}
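A quick sketch of how callers use this (inputs are examples):
checkDomain "Blocked.Example.COM"   # prints: blocked.example.com
checkDomain "bad!domain.com"        # prints nothing -> treated as invalid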
|
||||
|
||||
escapeDots()
|
||||
{
|
||||
# SC suggests the bashism ${variable//search/replace}
|
||||
# shellcheck disable=SC2001
|
||||
escaped=$(echo "$1" | sed 's/\./\\./g')
|
||||
echo "${escaped}"
|
||||
}
|
||||
|
||||
addAudit()
|
||||
{
|
||||
shift # skip "-a"
|
||||
shift # skip "audit"
|
||||
local domains validDomain
|
||||
domains=""
|
||||
for domain in "$@"
|
||||
do
|
||||
# Check domain to be added. Only continue if it is valid
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
# Put a comma between domains when there is
# more than one domain to be added
|
||||
# SQL INSERT allows adding multiple rows at once using the format
|
||||
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
|
||||
if [[ -n "${domains}" ]]; then
|
||||
domains="${domains},"
|
||||
fi
|
||||
domains="${domains}('${domain}')"
|
||||
fi
|
||||
done
|
||||
# Insert only the domain here. The date_added field will be
|
||||
# filled with its default value (date_added = current timestamp)
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
|
||||
}
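For two audited domains, the loop builds a single multi-row statement; with example domains the final command is equivalent to:
pihole-FTL sqlite3 /etc/pihole/gravity.db "INSERT INTO domain_audit (domain) VALUES ('ads.example.com'),('tracker.example.net');"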
|
||||
|
||||
clearAudit()
|
||||
{
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
|
||||
}
|
||||
|
||||
SetPrivacyLevel() {
|
||||
# Set privacy level. Minimum is 0, maximum is 3
|
||||
if [ "${args[2]}" -ge 0 ] && [ "${args[2]}" -le 3 ]; then
|
||||
addOrEditKeyValPair "${FTLconf}" "PRIVACYLEVEL" "${args[2]}"
|
||||
pihole restartdns reload-lists
|
||||
fi
|
||||
}
|
||||
|
||||
AddCustomDNSAddress() {
|
||||
echo -e " ${TICK} Adding custom DNS entry..."
|
||||
|
||||
ip="${args[2]}"
|
||||
host="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
validHost="$(checkDomain "${host}")"
|
||||
if [[ -n "${validHost}" ]]; then
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
echo "${ip} ${validHost}" >> "${dnscustomfile}"
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to load new custom DNS entries only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
RemoveCustomDNSAddress() {
|
||||
echo -e " ${TICK} Removing custom DNS entry..."
|
||||
|
||||
ip="${args[2]}"
|
||||
host="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
validHost="$(checkDomain "${host}")"
|
||||
if [[ -n "${validHost}" ]]; then
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
validHost=$(escapeDots "${validHost}")
|
||||
sed -i "/^${ip} ${validHost}$/Id" "${dnscustomfile}"
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to load new custom DNS entries only if reload is not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
AddCustomCNAMERecord() {
|
||||
echo -e " ${TICK} Adding custom CNAME record..."
|
||||
|
||||
domain="${args[2]}"
|
||||
target="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
validTarget="$(checkDomain "${target}")"
|
||||
if [[ -n "${validTarget}" ]]; then
|
||||
if [ "${validDomain}" = "${validTarget}" ]; then
|
||||
echo " ${CROSS} Domain and target are the same. This would cause a DNS loop."
|
||||
exit 1
|
||||
else
|
||||
echo "cname=${validDomain},${validTarget}" >> "${dnscustomcnamefile}"
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Target Passed!"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
# Restart dnsmasq to load new custom CNAME records only if reload is not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
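With example arguments the appended record has the shape
cname=ads.internal.example.com,blackhole.example.com
in 05-pihole-custom-cname.conf; passing the same name for both domain and target is rejected above to avoid a CNAME loop.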
|
||||
|
||||
RemoveCustomCNAMERecord() {
|
||||
echo -e " ${TICK} Removing custom CNAME record..."
|
||||
|
||||
domain="${args[2]}"
|
||||
target="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
validTarget="$(checkDomain "${target}")"
|
||||
if [[ -n "${validTarget}" ]]; then
|
||||
validDomain=$(escapeDots "${validDomain}")
|
||||
validTarget=$(escapeDots "${validTarget}")
|
||||
sed -i "/^cname=${validDomain},${validTarget}$/Id" "${dnscustomcnamefile}"
|
||||
else
|
||||
echo " ${CROSS} Invalid Target Passed!"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to update removed custom CNAME records only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
SetRateLimit() {
|
||||
local rate_limit_count rate_limit_interval reload
|
||||
rate_limit_count="${args[2]}"
|
||||
rate_limit_interval="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
# Set rate-limit setting if valid
|
||||
if [ "${rate_limit_count}" -ge 0 ] && [ "${rate_limit_interval}" -ge 0 ]; then
|
||||
addOrEditKeyValPair "${FTLconf}" "RATE_LIMIT" "${rate_limit_count}/${rate_limit_interval}"
|
||||
fi
|
||||
|
||||
# Restart FTL to update rate-limit settings only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
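With rate_limit_count=1000 and rate_limit_interval=60 (example values matching FTL's documented default), the call above leaves this line in pihole-FTL.conf:
RATE_LIMIT=1000/60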
|
||||
|
||||
main() {
|
||||
args=("$@")
|
||||
|
||||
case "${args[1]}" in
|
||||
"-p" | "password" ) SetWebPassword;;
|
||||
"-c" | "celsius" ) unit="C"; SetTemperatureUnit;;
|
||||
"-f" | "fahrenheit" ) unit="F"; SetTemperatureUnit;;
|
||||
"-k" | "kelvin" ) unit="K"; SetTemperatureUnit;;
|
||||
"setdns" ) SetDNSServers;;
|
||||
"setexcludedomains" ) SetExcludeDomains;;
|
||||
"setexcludeclients" ) SetExcludeClients;;
|
||||
"poweroff" ) Poweroff;;
|
||||
"reboot" ) Reboot;;
|
||||
"restartdns" ) RestartDNS;;
|
||||
"setquerylog" ) SetQueryLogOptions;;
|
||||
"enabledhcp" ) EnableDHCP;;
|
||||
"disabledhcp" ) DisableDHCP;;
|
||||
"layout" ) SetWebUILayout;;
|
||||
"theme" ) SetWebUITheme;;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
"addstaticdhcp" ) AddDHCPStaticAddress;;
|
||||
"removestaticdhcp" ) RemoveDHCPStaticAddress;;
|
||||
"-i" | "interface" ) SetListeningMode "$@";;
|
||||
"-t" | "teleporter" ) Teleporter;;
|
||||
"adlist" ) CustomizeAdLists;;
|
||||
"audit" ) addAudit "$@";;
|
||||
"clearaudit" ) clearAudit;;
|
||||
"-l" | "privacylevel" ) SetPrivacyLevel;;
|
||||
"addcustomdns" ) AddCustomDNSAddress;;
|
||||
"removecustomdns" ) RemoveCustomDNSAddress;;
|
||||
"addcustomcname" ) AddCustomCNAMERecord;;
|
||||
"removecustomcname" ) RemoveCustomCNAMERecord;;
|
||||
"ratelimit" ) SetRateLimit;;
|
||||
* ) helpFunc;;
|
||||
esac
|
||||
|
||||
shift
|
||||
|
||||
if [[ $# = 0 ]]; then
|
||||
helpFunc
|
||||
fi
|
||||
}
|
|
@@ -3,90 +3,99 @@ BEGIN TRANSACTION;
|
|||
|
||||
CREATE TABLE "group"
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
description TEXT
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
description TEXT
|
||||
);
|
||||
INSERT INTO "group" (id,enabled,name,description) VALUES (0,1,'Default','The default group');
|
||||
|
||||
CREATE TABLE domainlist
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type INTEGER NOT NULL DEFAULT 0,
|
||||
domain TEXT NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT,
|
||||
UNIQUE(domain, type)
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type INTEGER NOT NULL DEFAULT 0,
|
||||
domain TEXT NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT,
|
||||
UNIQUE(domain, type)
|
||||
);
|
||||
|
||||
CREATE TABLE adlist
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
address TEXT UNIQUE NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT,
|
||||
date_updated INTEGER,
|
||||
number INTEGER NOT NULL DEFAULT 0,
|
||||
invalid_domains INTEGER NOT NULL DEFAULT 0,
|
||||
status INTEGER NOT NULL DEFAULT 0
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
address TEXT NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT 1,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT,
|
||||
date_updated INTEGER,
|
||||
number INTEGER NOT NULL DEFAULT 0,
|
||||
invalid_domains INTEGER NOT NULL DEFAULT 0,
|
||||
status INTEGER NOT NULL DEFAULT 0,
|
||||
abp_entries INTEGER NOT NULL DEFAULT 0,
|
||||
type INTEGER NOT NULL DEFAULT 0,
|
||||
UNIQUE(address, type)
|
||||
);
|
||||
|
||||
CREATE TABLE adlist_by_group
|
||||
(
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (adlist_id, group_id)
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (adlist_id, group_id)
|
||||
);
|
||||
|
||||
CREATE TABLE gravity
|
||||
(
|
||||
domain TEXT NOT NULL,
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id)
|
||||
domain TEXT NOT NULL,
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id)
|
||||
);
|
||||
|
||||
CREATE TABLE antigravity
|
||||
(
|
||||
domain TEXT NOT NULL,
|
||||
adlist_id INTEGER NOT NULL REFERENCES adlist (id)
|
||||
);
|
||||
|
||||
CREATE TABLE info
|
||||
(
|
||||
property TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
property TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
);
|
||||
|
||||
INSERT INTO "info" VALUES('version','15');
|
||||
INSERT INTO "info" VALUES('version','18');
|
||||
|
||||
CREATE TABLE domain_audit
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
domain TEXT UNIQUE NOT NULL,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
|
||||
);
|
||||
|
||||
CREATE TABLE domainlist_by_group
|
||||
(
|
||||
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (domainlist_id, group_id)
|
||||
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (domainlist_id, group_id)
|
||||
);
|
||||
|
||||
CREATE TABLE client
|
||||
(
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT NOT NULL UNIQUE,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT NOT NULL UNIQUE,
|
||||
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
|
||||
comment TEXT
|
||||
);
|
||||
|
||||
CREATE TABLE client_by_group
|
||||
(
|
||||
client_id INTEGER NOT NULL REFERENCES client (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (client_id, group_id)
|
||||
client_id INTEGER NOT NULL REFERENCES client (id),
|
||||
group_id INTEGER NOT NULL REFERENCES "group" (id),
|
||||
PRIMARY KEY (client_id, group_id)
|
||||
);
|
||||
|
||||
CREATE TRIGGER tr_adlist_update AFTER UPDATE OF address,enabled,comment ON adlist
|
||||
|
@@ -136,14 +145,21 @@ CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist
|
|||
AND domainlist.type = 3
|
||||
ORDER BY domainlist.id;
|
||||
|
||||
CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
|
||||
CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
|
||||
FROM gravity
|
||||
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
|
||||
LEFT JOIN adlist ON adlist.id = gravity.adlist_id
|
||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
|
||||
|
||||
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
|
||||
CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
|
||||
FROM antigravity
|
||||
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
|
||||
LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
|
||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;
|
||||
|
||||
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id, type
|
||||
FROM adlist
|
||||
WHERE enabled = 1
|
||||
ORDER BY id;
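A simple way to sanity-check the reworked views against a live gravity.db is a manual lookup; the domain below is only an example:
pihole-FTL sqlite3 /etc/pihole/gravity.db "SELECT domain, adlist_id, group_id FROM vw_gravity WHERE domain = 'doubleclick.net';"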
|
||||
|
|
|
@@ -19,8 +19,6 @@ INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
|
|||
DELETE FROM OLD.adlist_by_group WHERE adlist_id NOT IN (SELECT id FROM OLD.adlist);
|
||||
INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
|
||||
|
||||
INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
|
||||
|
||||
INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
|
||||
DELETE FROM OLD.client_by_group WHERE client_id NOT IN (SELECT id FROM OLD.client);
|
||||
INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
|
||||
|
|
|
@@ -1,21 +1,32 @@
|
|||
/var/log/pihole/pihole.log {
|
||||
# su #
|
||||
daily
|
||||
copytruncate
|
||||
rotate 5
|
||||
compress
|
||||
delaycompress
|
||||
notifempty
|
||||
nomail
|
||||
# su #
|
||||
daily
|
||||
copytruncate
|
||||
rotate 5
|
||||
compress
|
||||
delaycompress
|
||||
notifempty
|
||||
nomail
|
||||
}
|
||||
|
||||
/var/log/pihole/FTL.log {
|
||||
# su #
|
||||
weekly
|
||||
copytruncate
|
||||
rotate 3
|
||||
compress
|
||||
delaycompress
|
||||
notifempty
|
||||
nomail
|
||||
# su #
|
||||
weekly
|
||||
copytruncate
|
||||
rotate 3
|
||||
compress
|
||||
delaycompress
|
||||
notifempty
|
||||
nomail
|
||||
}
|
||||
|
||||
/var/log/pihole/webserver.log {
|
||||
# su #
|
||||
weekly
|
||||
copytruncate
|
||||
rotate 3
|
||||
compress
|
||||
delaycompress
|
||||
notifempty
|
||||
nomail
|
||||
}
|
||||
|
|
|
@@ -1,13 +1,13 @@
|
|||
#!/usr/bin/env sh
|
||||
|
||||
# Source utils.sh for getFTLPIDFile()
|
||||
# Source utils.sh for getFTLConfigValue()
|
||||
PI_HOLE_SCRIPT_DIR='/opt/pihole'
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
# shellcheck disable=SC1090
|
||||
. "${utilsfile}"
|
||||
|
||||
# Get file paths
|
||||
FTL_PID_FILE="$(getFTLPIDFile)"
|
||||
FTL_PID_FILE="$(getFTLConfigValue files.pid)"
|
||||
|
||||
# Cleanup
|
||||
rm -f /run/pihole/FTL.sock /dev/shm/FTL-* "${FTL_PID_FILE}"
|
||||
|
|
|
@@ -1,38 +1,34 @@
|
|||
#!/usr/bin/env sh
|
||||
|
||||
# Source utils.sh for getFTLPIDFile()
|
||||
# Source utils.sh for getFTLConfigValue()
|
||||
PI_HOLE_SCRIPT_DIR='/opt/pihole'
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
# shellcheck disable=SC1090
|
||||
. "${utilsfile}"
|
||||
|
||||
# Get file paths
|
||||
FTL_PID_FILE="$(getFTLPIDFile)"
|
||||
FTL_PID_FILE="$(getFTLConfigValue files.pid)"
|
||||
|
||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||
# shellcheck disable=SC2174
|
||||
mkdir -pm 0640 /var/log/pihole
|
||||
chown -R pihole:pihole /etc/pihole /var/log/pihole
|
||||
chmod -R 0640 /var/log/pihole
|
||||
chmod -R 0660 /etc/pihole
|
||||
|
||||
# Logrotate config file needs to be owned by root and must not be writable by group or others
|
||||
chown root:root /etc/pihole/logrotate
|
||||
chmod 0644 /etc/pihole/logrotate
|
||||
|
||||
# allow all users to enter the directories
|
||||
chmod 0755 /etc/pihole /var/log/pihole
|
||||
|
||||
# allow pihole to access subdirs in /etc/pihole (sets execution bit on dirs)
|
||||
# credits https://stackoverflow.com/a/11512211
|
||||
find /etc/pihole -type d -exec chmod 0755 {} \;
|
||||
|
||||
# Touch files to ensure they exist (create if non-existing, preserve if existing)
|
||||
# shellcheck disable=SC2174
|
||||
mkdir -pm 0755 /run/pihole /var/log/pihole
|
||||
[ -f "${FTL_PID_FILE}" ] || install -D -m 644 -o pihole -g pihole /dev/null "${FTL_PID_FILE}"
|
||||
[ -f /var/log/pihole/FTL.log ] || install -m 644 -o pihole -g pihole /dev/null /var/log/pihole/FTL.log
|
||||
[ -f /var/log/pihole/FTL.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/FTL.log
|
||||
[ -f /var/log/pihole/pihole.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/pihole.log
|
||||
[ -f /etc/pihole/dhcp.leases ] || install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases
|
||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||
chown pihole:pihole /run/pihole /etc/pihole /var/log/pihole /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases
|
||||
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
|
||||
chmod -f 0644 /etc/pihole/macvendor.db /etc/pihole/dhcp.leases /var/log/pihole/FTL.log
|
||||
chmod -f 0640 /var/log/pihole/pihole.log
|
||||
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
|
||||
chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
|
||||
# Chmod database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
|
||||
chmod -f 0664 /etc/pihole/pihole-FTL.db
|
||||
|
||||
# Backward compatibility for user-scripts that still expect log files in /var/log instead of /var/log/pihole
|
||||
# Should be removed with Pi-hole v6.0
|
||||
if [ ! -f /var/log/pihole.log ]; then
|
||||
ln -sf /var/log/pihole/pihole.log /var/log/pihole.log
|
||||
chown -h pihole:pihole /var/log/pihole.log
|
||||
fi
|
||||
if [ ! -f /var/log/pihole-FTL.log ]; then
|
||||
ln -sf /var/log/pihole/FTL.log /var/log/pihole-FTL.log
|
||||
chown -h pihole:pihole /var/log/pihole-FTL.log
|
||||
fi
|
||||
|
|
|
@@ -1,2 +0,0 @@
|
|||
#; Pi-hole FTL config file
|
||||
#; Comments should start with #; to avoid issues with PHP and bash reading this file
|
|
@@ -9,7 +9,7 @@
|
|||
# Description: Enable service provided by pihole-FTL daemon
|
||||
### END INIT INFO
|
||||
|
||||
# Source utils.sh for getFTLPIDFile(), getFTLPID()
|
||||
# Source utils.sh for getFTLConfigValue(), getFTLPID()
|
||||
PI_HOLE_SCRIPT_DIR="/opt/pihole"
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
# shellcheck disable=SC1090
|
||||
|
@@ -37,7 +37,7 @@ start() {
|
|||
# Run pre-start script, which pre-creates all expected files with correct permissions
|
||||
sh "${PI_HOLE_SCRIPT_DIR}/pihole-FTL-prestart.sh"
|
||||
|
||||
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN+eip "/usr/bin/pihole-FTL"; then
|
||||
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN,CAP_SYS_TIME+eip "/usr/bin/pihole-FTL"; then
|
||||
su -s /bin/sh -c "/usr/bin/pihole-FTL" pihole
|
||||
else
|
||||
echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system"
|
||||
|
@@ -98,7 +98,7 @@ status() {
|
|||
trap 'cleanup; exit 1' INT HUP TERM ABRT
|
||||
|
||||
# Get FTL's PID file path
|
||||
FTL_PID_FILE="$(getFTLPIDFile)"
|
||||
FTL_PID_FILE="$(getFTLConfigValue files.pid)"
|
||||
|
||||
# Get FTL's current PID
|
||||
FTL_PID="$(getFTLPID "${FTL_PID_FILE}")"
|
||||
|
|
|
@@ -18,7 +18,7 @@ StartLimitIntervalSec=60s
|
|||
[Service]
|
||||
User=pihole
|
||||
PermissionsStartOnly=true
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_NICE CAP_IPC_LOCK CAP_CHOWN
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_NICE CAP_IPC_LOCK CAP_CHOWN CAP_SYS_TIME
|
||||
|
||||
ExecStartPre=/opt/pihole/pihole-FTL-prestart.sh
|
||||
ExecStart=/usr/bin/pihole-FTL -f
|
||||
|
|
|
@@ -1,9 +0,0 @@
|
|||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Allows the WebUI to use Pi-hole commands
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
#
|
|
@@ -1,79 +1,51 @@
|
|||
_pihole() {
|
||||
local cur prev opts opts_admin opts_checkout opts_chronometer opts_debug opts_interface opts_logging opts_privacy opts_query opts_update opts_version
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
prev2="${COMP_WORDS[COMP_CWORD-2]}"
|
||||
local cur prev opts opts_checkout opts_debug opts_logging opts_query opts_update opts_version
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
prev2="${COMP_WORDS[COMP_CWORD-2]}"
|
||||
|
||||
case "${prev}" in
|
||||
"pihole")
|
||||
opts="admin blacklist checkout chronometer debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
;;
|
||||
"whitelist"|"blacklist"|"wildcard"|"regex")
|
||||
opts_lists="\--delmode \--noreload \--quiet \--list \--nuke"
|
||||
COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
|
||||
;;
|
||||
"admin")
|
||||
opts_admin="celsius fahrenheit interface kelvin password privacylevel"
|
||||
COMPREPLY=( $(compgen -W "${opts_admin}" -- ${cur}) )
|
||||
;;
|
||||
"checkout")
|
||||
opts_checkout="core ftl web master dev"
|
||||
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
|
||||
;;
|
||||
"chronometer")
|
||||
opts_chronometer="\--exit \--json \--refresh"
|
||||
COMPREPLY=( $(compgen -W "${opts_chronometer}" -- ${cur}) )
|
||||
;;
|
||||
"debug")
|
||||
opts_debug="-a"
|
||||
COMPREPLY=( $(compgen -W "${opts_debug}" -- ${cur}) )
|
||||
;;
|
||||
"logging")
|
||||
opts_logging="on off 'off noflush'"
|
||||
COMPREPLY=( $(compgen -W "${opts_logging}" -- ${cur}) )
|
||||
;;
|
||||
"query")
|
||||
opts_query="-adlist -all -exact"
|
||||
COMPREPLY=( $(compgen -W "${opts_query}" -- ${cur}) )
|
||||
;;
|
||||
"updatePihole"|"-up")
|
||||
opts_update="--check-only"
|
||||
COMPREPLY=( $(compgen -W "${opts_update}" -- ${cur}) )
|
||||
;;
|
||||
"version")
|
||||
opts_version="\--admin \--current \--ftl \--hash \--latest \--pihole"
|
||||
COMPREPLY=( $(compgen -W "${opts_version}" -- ${cur}) )
|
||||
;;
|
||||
"interface")
|
||||
if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
|
||||
opts_interface="$(cat /proc/net/dev | cut -d: -s -f1)"
|
||||
COMPREPLY=( $(compgen -W "${opts_interface}" -- ${cur}) )
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
"privacylevel")
|
||||
if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
|
||||
opts_privacy="0 1 2 3"
|
||||
COMPREPLY=( $(compgen -W "${opts_privacy}" -- ${cur}) )
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
"core"|"admin"|"ftl")
|
||||
if [[ "$prev2" == "checkout" ]]; then
|
||||
opts_checkout="master dev"
|
||||
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
return 0
|
||||
case "${prev}" in
|
||||
"pihole")
|
||||
opts="allow allow-regex allow-wild deny checkout debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard arpflush api"
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
;;
|
||||
"allow"|"deny"|"wildcard"|"regex"|"allow-regex"|"allow-wild")
|
||||
opts_lists="\not \--delmode \--quiet \--list \--help"
|
||||
COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
|
||||
;;
|
||||
"checkout")
|
||||
opts_checkout="core ftl web master dev"
|
||||
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
|
||||
;;
|
||||
"debug")
|
||||
opts_debug="-a"
|
||||
COMPREPLY=( $(compgen -W "${opts_debug}" -- ${cur}) )
|
||||
;;
|
||||
"logging")
|
||||
opts_logging="on off 'off noflush'"
|
||||
COMPREPLY=( $(compgen -W "${opts_logging}" -- ${cur}) )
|
||||
;;
|
||||
"query")
|
||||
opts_query="--partial --all"
|
||||
COMPREPLY=( $(compgen -W "${opts_query}" -- ${cur}) )
|
||||
;;
|
||||
"updatePihole"|"-up")
|
||||
opts_update="--check-only"
|
||||
COMPREPLY=( $(compgen -W "${opts_update}" -- ${cur}) )
|
||||
;;
|
||||
"core"|"admin"|"ftl")
|
||||
if [[ "$prev2" == "checkout" ]]; then
|
||||
opts_checkout="master dev"
|
||||
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
return 0
|
||||
}
|
||||
complete -F _pihole pihole
|
||||
|
|
|
@@ -1,648 +0,0 @@
|
|||
# Configuration file for dnsmasq.
|
||||
#
|
||||
# Format is one option per line, legal options are the same
|
||||
# as the long options legal on the command line. See
|
||||
# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details.
|
||||
|
||||
# Listen on this specific port instead of the standard DNS port
|
||||
# (53). Setting this to zero completely disables DNS function,
|
||||
# leaving only DHCP and/or TFTP.
|
||||
#port=5353
|
||||
|
||||
# The following two options make you a better netizen, since they
|
||||
# tell dnsmasq to filter out queries which the public DNS cannot
|
||||
# answer, and which load the servers (especially the root servers)
|
||||
# unnecessarily. If you have a dial-on-demand link they also stop
|
||||
# these requests from bringing up the link unnecessarily.
|
||||
|
||||
# Never forward plain names (without a dot or domain part)
|
||||
#domain-needed
|
||||
# Never forward addresses in the non-routed address spaces.
|
||||
#bogus-priv
|
||||
|
||||
# Uncomment these to enable DNSSEC validation and caching:
|
||||
# (Requires dnsmasq to be built with DNSSEC option.)
|
||||
#conf-file=%%PREFIX%%/share/dnsmasq/trust-anchors.conf
|
||||
#dnssec
|
||||
|
||||
# Replies which are not DNSSEC signed may be legitimate, because the domain
|
||||
# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
|
||||
# check that an unsigned reply is OK, by finding a secure proof that a DS
|
||||
# record somewhere between the root and the domain does not exist.
|
||||
# The cost of setting this is that even queries in unsigned domains will need
|
||||
# one or more extra DNS queries to verify.
|
||||
#dnssec-check-unsigned
|
||||
|
||||
# Uncomment this to filter useless windows-originated DNS requests
|
||||
# which can trigger dial-on-demand links needlessly.
|
||||
# Note that (amongst other things) this blocks all SRV requests,
|
||||
# so don't use it if you use e.g. Kerberos, SIP, XMPP or Google-talk.
|
||||
# This option only affects forwarding; SRV records originating from
# dnsmasq (via srv-host= lines) are not suppressed by it.
|
||||
#filterwin2k
|
||||
|
||||
# Change this line if you want dns to get its upstream servers from
|
||||
# somewhere other than /etc/resolv.conf
|
||||
#resolv-file=
|
||||
|
||||
# By default, dnsmasq will send queries to any of the upstream
|
||||
# servers it knows about and tries to favor servers that are known
|
||||
# to be up. Uncommenting this forces dnsmasq to try each query
|
||||
# with each server strictly in the order they appear in
|
||||
# /etc/resolv.conf
|
||||
#strict-order
|
||||
|
||||
# If you don't want dnsmasq to read /etc/resolv.conf or any other
|
||||
# file, getting its servers from this file instead (see below), then
|
||||
# uncomment this.
|
||||
#no-resolv
|
||||
|
||||
# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv
|
||||
# files for changes and re-read them then uncomment this.
|
||||
#no-poll
|
||||
|
||||
# Add other name servers here, with domain specs if they are for
|
||||
# non-public domains.
|
||||
#server=/localnet/192.168.0.1
|
||||
|
||||
# Example of routing PTR queries to nameservers: this will send all
|
||||
# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
|
||||
#server=/3.168.192.in-addr.arpa/10.1.2.3
|
||||
|
||||
# Add local-only domains here, queries in these domains are answered
|
||||
# from /etc/hosts or DHCP only.
|
||||
#local=/localnet/
|
||||
|
||||
# Add domains which you want to force to an IP address here.
|
||||
# The example below send any host in double-click.net to a local
|
||||
# web-server.
|
||||
#address=/double-click.net/127.0.0.1
|
||||
|
||||
# --address (and --server) work with IPv6 addresses too.
|
||||
#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83
|
||||
|
||||
# Add the IPs of all queries to yahoo.com, google.com, and their
|
||||
# subdomains to the vpn and search ipsets:
|
||||
#ipset=/yahoo.com/google.com/vpn,search
|
||||
|
||||
# You can control how dnsmasq talks to a server: this forces
|
||||
# queries to 10.1.2.3 to be routed via eth1
|
||||
# server=10.1.2.3@eth1
|
||||
|
||||
# and this sets the source (ie local) address used to talk to
|
||||
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
|
||||
# IP on the machine, obviously).
|
||||
# server=10.1.2.3@192.168.1.1#55
|
||||
|
||||
# If you want dnsmasq to change uid and gid to something other
|
||||
# than the default, edit the following lines.
|
||||
#user=
|
||||
#group=
|
||||
|
||||
# If you want dnsmasq to listen for DHCP and DNS requests only on
|
||||
# specified interfaces (and the loopback) give the name of the
|
||||
# interface (eg eth0) here.
|
||||
# Repeat the line for more than one interface.
|
||||
#interface=
|
||||
# Or you can specify which interface _not_ to listen on
|
||||
#except-interface=
|
||||
# Or which to listen on by address (remember to include 127.0.0.1 if
|
||||
# you use this.)
|
||||
#listen-address=
|
||||
# If you want dnsmasq to provide only DNS service on an interface,
|
||||
# configure it as shown above, and then use the following line to
|
||||
# disable DHCP and TFTP on it.
|
||||
#no-dhcp-interface=
|
||||
|
||||
# On systems which support it, dnsmasq binds the wildcard address,
|
||||
# even when it is listening on only some interfaces. It then discards
|
||||
# requests that it shouldn't reply to. This has the advantage of
|
||||
# working even when interfaces come and go and change address. If you
|
||||
# want dnsmasq to really bind only the interfaces it is listening on,
|
||||
# uncomment this option. About the only time you may need this is when
|
||||
# running another nameserver on the same machine.
|
||||
#bind-interfaces
|
||||
|
||||
# If you don't want dnsmasq to read /etc/hosts, uncomment the
|
||||
# following line.
|
||||
#no-hosts
|
||||
# or if you want it to read another file, as well as /etc/hosts, use
|
||||
# this.
|
||||
#addn-hosts=/etc/banner_add_hosts
|
||||
|
||||
# Set this (and domain: see below) if you want to have a domain
|
||||
# automatically added to simple names in a hosts-file.
|
||||
#expand-hosts
|
||||
|
||||
# Set the domain for dnsmasq. This is optional, but if it is set, it
|
||||
# does the following things.
|
||||
# 1) Allows DHCP hosts to have fully qualified domain names, as long
|
||||
# as the domain part matches this setting.
|
||||
# 2) Sets the "domain" DHCP option thereby potentially setting the
|
||||
# domain of all systems configured by DHCP
|
||||
# 3) Provides the domain part for "expand-hosts"
|
||||
#domain=thekelleys.org.uk
|
||||
|
||||
# Set a different domain for a particular subnet
|
||||
#domain=wireless.thekelleys.org.uk,192.168.2.0/24
|
||||
|
||||
# Same idea, but range rather than subnet
|
||||
#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200
|
||||
|
||||
# Uncomment this to enable the integrated DHCP server, you need
|
||||
# to supply the range of addresses available for lease and optionally
|
||||
# a lease time. If you have more than one network, you will need to
|
||||
# repeat this for each network on which you want to supply DHCP
|
||||
# service.
|
||||
#dhcp-range=192.168.0.50,192.168.0.150,12h
|
||||
|
||||
# This is an example of a DHCP range where the netmask is given. This
|
||||
# is needed for networks where we reach the dnsmasq DHCP server via a relay
|
||||
# agent. If you don't know what a DHCP relay agent is, you probably
|
||||
# don't need to worry about this.
|
||||
#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
|
||||
|
||||
# This is an example of a DHCP range which sets a tag, so that
|
||||
# some DHCP options may be set only for this network.
|
||||
#dhcp-range=set:red,192.168.0.50,192.168.0.150
|
||||
|
||||
# Use this DHCP range only when the tag "green" is set.
|
||||
#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h
|
||||
|
||||
# Specify a subnet which can't be used for dynamic address allocation,
|
||||
# but is available for hosts with matching --dhcp-host lines. Note that
|
||||
# dhcp-host declarations will be ignored unless there is a dhcp-range
|
||||
# of some type for the subnet in question.
|
||||
# In this case the netmask is implied (it comes from the network
|
||||
# configuration on the machine running dnsmasq), but it is possible to give
|
||||
# an explicit netmask instead.
|
||||
#dhcp-range=192.168.0.0,static
|
||||
|
||||
# Enable DHCPv6. Note that the prefix-length does not need to be specified
|
||||
# and defaults to 64 if missing.
|
||||
#dhcp-range=1234::2, 1234::500, 64, 12h
|
||||
|
||||
# Do Router Advertisements, BUT NOT DHCP for this subnet.
|
||||
#dhcp-range=1234::, ra-only
|
||||
|
||||
# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
|
||||
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
|
||||
# hosts. Use the DHCPv4 lease to derive the name, network segment and
|
||||
# MAC address and assume that the host will also have an
|
||||
# IPv6 address calculated using the SLAAC algorithm.
|
||||
#dhcp-range=1234::, ra-names
|
||||
|
||||
# Do Router Advertisements, BUT NOT DHCP for this subnet.
|
||||
# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.)
|
||||
#dhcp-range=1234::, ra-only, 48h
|
||||
|
||||
# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
|
||||
# so that clients can use SLAAC addresses as well as DHCP ones.
|
||||
#dhcp-range=1234::2, 1234::500, slaac
|
||||
|
||||
# Do Router Advertisements and stateless DHCP for this subnet. Clients will
|
||||
# not get addresses from DHCP, but they will get other configuration information.
|
||||
# They will use SLAAC for addresses.
|
||||
#dhcp-range=1234::, ra-stateless
|
||||
|
||||
# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
|
||||
# from DHCPv4 leases.
|
||||
#dhcp-range=1234::, ra-stateless, ra-names
|
||||
|
||||
# Do router advertisements for all subnets where we're doing DHCPv6
|
||||
# Unless overridden by ra-stateless, ra-names, et al, the router
|
||||
# advertisements will have the M and O bits set, so that the clients
|
||||
# get addresses and configuration from DHCPv6, and the A bit reset, so the
|
||||
# clients don't use SLAAC addresses.
|
||||
#enable-ra
|
||||
|
||||
# Supply parameters for specified hosts using DHCP. There are lots
|
||||
# of valid alternatives, so we will give examples of each. Note that
|
||||
# IP addresses DO NOT have to be in the range given above, they just
|
||||
# need to be on the same network. The order of the parameters in these
|
||||
# do not matter, it's permissible to give name, address and MAC in any
|
||||
# order.
|
||||
|
||||
# Always allocate the host with Ethernet address 11:22:33:44:55:66
|
||||
# The IP address 192.168.0.60
|
||||
#dhcp-host=11:22:33:44:55:66,192.168.0.60
|
||||
|
||||
# Always set the name of the host with hardware address
|
||||
# 11:22:33:44:55:66 to be "fred"
|
||||
#dhcp-host=11:22:33:44:55:66,fred
|
||||
|
||||
# Always give the host with Ethernet address 11:22:33:44:55:66
|
||||
# the name fred and IP address 192.168.0.60 and lease time 45 minutes
|
||||
#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m
|
||||
|
||||
# Give a host with Ethernet address 11:22:33:44:55:66 or
|
||||
# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume
|
||||
# that these two Ethernet interfaces will never be in use at the same
|
||||
# time, and give the IP address to the second, even if it is already
|
||||
# in use by the first. Useful for laptops with wired and wireless
|
||||
# addresses.
|
||||
#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60
|
||||
|
||||
# Give the machine which says its name is "bert" IP address
|
||||
# 192.168.0.70 and an infinite lease
|
||||
#dhcp-host=bert,192.168.0.70,infinite
|
||||
|
||||
# Always give the host with client identifier 01:02:02:04
|
||||
# the IP address 192.168.0.60
|
||||
#dhcp-host=id:01:02:02:04,192.168.0.60
|
||||
|
||||
# Always give the host with client identifier "marjorie"
|
||||
# the IP address 192.168.0.60
|
||||
#dhcp-host=id:marjorie,192.168.0.60
|
||||
|
||||
# Enable the address given for "judge" in /etc/hosts
|
||||
# to be given to a machine presenting the name "judge" when
|
||||
# it asks for a DHCP lease.
|
||||
#dhcp-host=judge
|
||||
|
||||
# Never offer DHCP service to a machine whose Ethernet
|
||||
# address is 11:22:33:44:55:66
|
||||
#dhcp-host=11:22:33:44:55:66,ignore
|
||||
|
||||
# Ignore any client-id presented by the machine with Ethernet
|
||||
# address 11:22:33:44:55:66. This is useful to prevent a machine
|
||||
# being treated differently when running under different OS's or
|
||||
# between PXE boot and OS boot.
|
||||
#dhcp-host=11:22:33:44:55:66,id:*
|
||||
|
||||
# Send extra options which are tagged as "red" to
|
||||
# the machine with Ethernet address 11:22:33:44:55:66
|
||||
#dhcp-host=11:22:33:44:55:66,set:red
|
||||
|
||||
# Send extra options which are tagged as "red" to
|
||||
# any machine with Ethernet address starting 11:22:33:
|
||||
#dhcp-host=11:22:33:*:*:*,set:red
|
||||
|
||||
# Give a fixed IPv6 address and name to client with
|
||||
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
|
||||
# Note that MAC addresses CANNOT be used to identify DHCPv6 clients.
|
||||
# Note also that the [] around the IPv6 address are obligatory.
|
||||
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
|
||||
|
||||
# Ignore any clients which are not specified in dhcp-host lines
|
||||
# or /etc/ethers. Equivalent to ISC "deny unknown-clients".
|
||||
# This relies on the special "known" tag which is set when
|
||||
# a host is matched.
|
||||
#dhcp-ignore=tag:!known
|
||||
|
||||
# Send extra options which are tagged as "red" to any machine whose
|
||||
# DHCP vendorclass string includes the substring "Linux"
|
||||
#dhcp-vendorclass=set:red,Linux
|
||||
|
||||
# Send extra options which are tagged as "red" to any machine one
|
||||
# of whose DHCP userclass strings includes the substring "accounts"
|
||||
#dhcp-userclass=set:red,accounts
|
||||
|
||||
# Send extra options which are tagged as "red" to any machine whose
|
||||
# MAC address matches the pattern.
|
||||
#dhcp-mac=set:red,00:60:8C:*:*:*
|
||||
|
||||
# If this line is uncommented, dnsmasq will read /etc/ethers and act
|
||||
# on the ethernet-address/IP pairs found there just as if they had
|
||||
# been given as --dhcp-host options. Useful if you keep
|
||||
# MAC-address/host mappings there for other purposes.
|
||||
#read-ethers
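# For illustration only (not part of the stock file): an /etc/ethers entry is
# just a MAC address followed by an IP address or hostname, one pair per line,
# e.g.
#   11:22:33:44:55:66 192.168.0.60
# With read-ethers uncommented, dnsmasq treats such a pair like the equivalent
# dhcp-host line.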
|
||||
|
||||
# Send options to hosts which ask for a DHCP lease.
|
||||
# See RFC 2132 for details of available options.
|
||||
# Common options can be given to dnsmasq by name:
|
||||
# run "dnsmasq --help dhcp" to get a list.
|
||||
# Note that all the common settings, such as netmask and
|
||||
# broadcast address, DNS server and default route, are given
|
||||
# sane defaults by dnsmasq. You very likely will not need
|
||||
# any dhcp-options. If you use Windows clients and Samba, there
|
||||
# are some options which are recommended, they are detailed at the
|
||||
# end of this section.
|
||||
|
||||
# Override the default route supplied by dnsmasq, which assumes the
|
||||
# router is the same machine as the one running dnsmasq.
|
||||
#dhcp-option=3,1.2.3.4
|
||||
|
||||
# Do the same thing, but using the option name
|
||||
#dhcp-option=option:router,1.2.3.4
|
||||
|
||||
# Override the default route supplied by dnsmasq and send no default
|
||||
# route at all. Note that this only works for the options sent by
|
||||
# default (1, 3, 6, 12, 28) the same line will send a zero-length option
|
||||
# for all other option numbers.
|
||||
#dhcp-option=3
|
||||
|
||||
# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
|
||||
#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5
|
||||
|
||||
# Send DHCPv6 option. Note [] around IPv6 addresses.
|
||||
#dhcp-option=option6:dns-server,[1234::77],[1234::88]
|
||||
|
||||
# Send DHCPv6 option for nameservers as the machine running
|
||||
# dnsmasq and another.
|
||||
#dhcp-option=option6:dns-server,[::],[1234::88]
|
||||
|
||||
# Ask client to poll for option changes every six hours. (RFC4242)
|
||||
#dhcp-option=option6:information-refresh-time,6h
|
||||
|
||||
# Set the NTP time server address to be the same machine as
|
||||
# is running dnsmasq
|
||||
#dhcp-option=42,0.0.0.0
|
||||
|
||||
# Set the NIS domain name to "welly"
|
||||
#dhcp-option=40,welly
|
||||
|
||||
# Set the default time-to-live to 50
|
||||
#dhcp-option=23,50
|
||||
|
||||
# Set the "all subnets are local" flag
|
||||
#dhcp-option=27,1
|
||||
|
||||
# Send the etherboot magic flag and then etherboot options (a string).
|
||||
#dhcp-option=128,e4:45:74:68:00:00
|
||||
#dhcp-option=129,NIC=eepro100
|
||||
|
||||
# Specify an option which will only be sent to the "red" network
|
||||
# (see dhcp-range for the declaration of the "red" network)
|
||||
# Note that the tag: part must precede the option: part.
|
||||
#dhcp-option = tag:red, option:ntp-server, 192.168.1.1
|
||||
|
||||
# The following DHCP options set up dnsmasq in the same way as is specified
|
||||
# for the ISC dhcpd in
|
||||
# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
|
||||
# adapted for a typical dnsmasq installation where the host running
|
||||
# dnsmasq is also the host running samba.
|
||||
# you may want to uncomment some or all of them if you use
|
||||
# Windows clients and Samba.
|
||||
#dhcp-option=19,0 # option ip-forwarding off
|
||||
#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
|
||||
#dhcp-option=45,0.0.0.0 # netbios datagram distribution server
|
||||
#dhcp-option=46,8 # netbios node type
|
||||
|
||||
# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
|
||||
#dhcp-option=252,"\n"
|
||||
|
||||
# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
|
||||
# probably doesn't support this......
|
||||
#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com
|
||||
|
||||
# Send RFC-3442 classless static routes (note the netmask encoding)
|
||||
#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8
|
||||
|
||||
# Send vendor-class specific options encapsulated in DHCP option 43.
|
||||
# The meaning of the options is defined by the vendor-class so
|
||||
# options are sent only when the client supplied vendor class
|
||||
# matches the class given here. (A substring match is OK, so "MSFT"
|
||||
# matches "MSFT" and "MSFT 5.0"). This example sets the
|
||||
# mtftp address to 0.0.0.0 for PXEClients.
|
||||
#dhcp-option=vendor:PXEClient,1,0.0.0.0
|
||||
|
||||
# Send microsoft-specific option to tell windows to release the DHCP lease
|
||||
# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
|
||||
# value as a four-byte integer - that's what microsoft wants. See
|
||||
# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
|
||||
#dhcp-option=vendor:MSFT,2,1i
|
||||
|
||||
# Send the Encapsulated-vendor-class ID needed by some configurations of
|
||||
# Etherboot to allow it to recognize the DHCP server.
|
||||
#dhcp-option=vendor:Etherboot,60,"Etherboot"
|
||||
|
||||
# Send options to PXELinux. Note that we need to send the options even
|
||||
# though they don't appear in the parameter request list, so we need
|
||||
# to use dhcp-option-force here.
|
||||
# See http://syslinux.zytor.com/pxe.php#special for details.
|
||||
# Magic number - needed before anything else is recognized
|
||||
#dhcp-option-force=208,f1:00:74:7e
|
||||
# Configuration file name
|
||||
#dhcp-option-force=209,configs/common
|
||||
# Path prefix
|
||||
#dhcp-option-force=210,/tftpboot/pxelinux/files/
|
||||
# Reboot time. (Note 'i' to send 32-bit value)
|
||||
#dhcp-option-force=211,30i
|
||||
|
||||
# Set the boot filename for netboot/PXE. You will only need
|
||||
# this if you want to boot machines over the network and you will need
|
||||
# a TFTP server; either dnsmasq's built in TFTP server or an
|
||||
# external one. (See below for how to enable the TFTP server.)
|
||||
#dhcp-boot=pxelinux.0
|
||||
|
||||
# The same as above, but use a custom tftp-server instead of the machine running dnsmasq
|
||||
#dhcp-boot=pxelinux,server.name,192.168.1.100
|
||||
|
||||
# Boot for Etherboot gPXE. The idea is to send two different
|
||||
# filenames, the first loads gPXE, and the second tells gPXE what to
|
||||
# load. The dhcp-match sets the gpxe tag for requests from gPXE.
|
||||
#dhcp-match=set:gpxe,175 # gPXE sends a 175 option.
|
||||
#dhcp-boot=tag:!gpxe,undionly.kpxe
|
||||
#dhcp-boot=mybootimage
|
||||
|
||||
# Encapsulated options for Etherboot gPXE. All the options are
|
||||
# encapsulated within option 175
|
||||
#dhcp-option=encap:175, 1, 5b # priority code
|
||||
#dhcp-option=encap:175, 176, 1b # no-proxydhcp
|
||||
#dhcp-option=encap:175, 177, string # bus-id
|
||||
#dhcp-option=encap:175, 189, 1b # BIOS drive code
|
||||
#dhcp-option=encap:175, 190, user # iSCSI username
|
||||
#dhcp-option=encap:175, 191, pass # iSCSI password
|
||||
|
||||
# Test for the architecture of a netboot client. PXE clients are
|
||||
# supposed to send their architecture as option 93. (See RFC 4578)
|
||||
#dhcp-match=peecees, option:client-arch, 0 #x86-32
|
||||
#dhcp-match=itanics, option:client-arch, 2 #IA64
|
||||
#dhcp-match=hammers, option:client-arch, 6 #x86-64
|
||||
#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64
|
||||
|
||||
# Do real PXE, rather than just booting a single file, this is an
|
||||
# alternative to dhcp-boot.
|
||||
#pxe-prompt="What system shall I netboot?"
|
||||
# or with timeout before first available action is taken:
|
||||
#pxe-prompt="Press F8 for menu.", 60
|
||||
|
||||
# Available boot services for PXE.
|
||||
#pxe-service=x86PC, "Boot from local disk"
|
||||
|
||||
# Loads <tftp-root>/pxelinux.0 from dnsmasq TFTP server.
|
||||
#pxe-service=x86PC, "Install Linux", pxelinux
|
||||
|
||||
# Loads <tftp-root>/pxelinux.0 from TFTP server at 1.2.3.4.
|
||||
# Beware this fails on old PXE ROMS.
|
||||
#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
|
||||
|
||||
# Use bootserver on network, found by multicast or broadcast.
|
||||
#pxe-service=x86PC, "Install windows from RIS server", 1
|
||||
|
||||
# Use bootserver at a known IP address.
|
||||
#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
|
||||
|
||||
# If you have multicast-TFTP available,
|
||||
# information for that can be passed in a similar way using options 1
|
||||
# to 5. See page 19 of
|
||||
# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf
|
||||
|
||||
|
||||
# Enable dnsmasq's built-in TFTP server
|
||||
#enable-tftp
|
||||
|
||||
# Set the root directory for files available via TFTP.
|
||||
#tftp-root=/var/ftpd
|
||||
|
||||
# Make the TFTP server more secure: with this set, only files owned by
|
||||
# the user dnsmasq is running as will be sent over the net.
|
||||
#tftp-secure
|
||||
|
||||
# This option stops dnsmasq from negotiating a larger blocksize for TFTP
|
||||
# transfers. It will slow things down, but may rescue some broken TFTP
|
||||
# clients.
|
||||
#tftp-no-blocksize
|
||||
|
||||
# Set the boot file name only when the "red" tag is set.
|
||||
#dhcp-boot=tag:red,pxelinux.red-net
|
||||
|
||||
# An example of dhcp-boot with an external TFTP server: the name and IP
|
||||
# address of the server are given after the filename.
|
||||
# Can fail with old PXE ROMS. Overridden by --pxe-service.
|
||||
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3
|
||||
|
||||
# If there are multiple external tftp servers having the same name
|
||||
# (using /etc/hosts) then that name can be specified as the
|
||||
# tftp_servername (the third option to dhcp-boot) and in that
|
||||
# case dnsmasq resolves this name and returns the resultant IP
|
||||
# addresses in round robin fashion. This facility can be used to
|
||||
# load balance the tftp load among a set of servers.
|
||||
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
|
||||
|
||||
# Set the limit on DHCP leases, the default is 150
|
||||
#dhcp-lease-max=150
|
||||
|
||||
# The DHCP server needs somewhere on disk to keep its lease database.
|
||||
# This defaults to a sane location, but if you want to change it, use
|
||||
# the line below.
|
||||
#dhcp-leasefile=/var/lib/misc/dnsmasq.leases
|
||||
|
||||
# Set the DHCP server to authoritative mode. In this mode it will barge in
|
||||
# and take over the lease for any client which broadcasts on the network,
|
||||
# whether it has a record of the lease or not. This avoids long timeouts
|
||||
# when a machine wakes up on a new network. DO NOT enable this if there's
|
||||
# the slightest chance that you might end up accidentally configuring a DHCP
|
||||
# server for your campus/company. The ISC server uses
|
||||
# the same option, and this URL provides more information:
|
||||
# http://www.isc.org/files/auth.html
|
||||
#dhcp-authoritative
|
||||
|
||||
# Run an executable when a DHCP lease is created or destroyed.
|
||||
# The arguments sent to the script are "add" or "del",
|
||||
# then the MAC address, the IP address and finally the hostname
|
||||
# if there is one.
|
||||
#dhcp-script=/bin/echo
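# A hedged sketch (not part of the stock configuration): a lease hook that
# receives the arguments in the order described above could be as small as
#   #!/usr/bin/env bash
#   action="$1"; mac="$2"; ip="$3"; host="${4:-unknown}"
#   logger -t dnsmasq-lease "${action} ${mac} ${ip} (${host})"
# saved to a path of your choosing (the name below is made up) and wired up via
#   dhcp-script=/usr/local/bin/lease-log.sh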
|
||||
|
||||
# Set the cachesize here.
|
||||
#cache-size=150
|
||||
|
||||
# If you want to disable negative caching, uncomment this.
|
||||
#no-negcache
|
||||
|
||||
# Normally responses which come from /etc/hosts and the DHCP lease
|
||||
# file have Time-To-Live set as zero, which conventionally means
|
||||
# do not cache further. If you are happy to trade lower load on the
|
||||
# server for potentially stale data, you can set a time-to-live (in
|
||||
# seconds) here.
|
||||
#local-ttl=
|
||||
|
||||
# If you want dnsmasq to detect attempts by Verisign to send queries
|
||||
# to unregistered .com and .net hosts to its sitefinder service and
|
||||
# have dnsmasq instead return the correct NXDOMAIN response, uncomment
|
||||
# this line. You can add similar lines to do the same for other
|
||||
# registries which have implemented wildcard A records.
|
||||
#bogus-nxdomain=64.94.110.11
|
||||
|
||||
# If you want to fix up DNS results from upstream servers, use the
|
||||
# alias option. This only works for IPv4.
|
||||
# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
|
||||
#alias=1.2.3.4,5.6.7.8
|
||||
# and this maps 1.2.3.x to 5.6.7.x
|
||||
#alias=1.2.3.0,5.6.7.0,255.255.255.0
|
||||
# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
|
||||
#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0
|
||||
|
||||
# Change these lines if you want dnsmasq to serve MX records.
|
||||
|
||||
# Return an MX record named "maildomain.com" with target
|
||||
# servermachine.com and preference 50
|
||||
#mx-host=maildomain.com,servermachine.com,50
|
||||
|
||||
# Set the default target for MX records created using the localmx option.
|
||||
#mx-target=servermachine.com
|
||||
|
||||
# Return an MX record pointing to the mx-target for all local
|
||||
# machines.
|
||||
#localmx
|
||||
|
||||
# Return an MX record pointing to itself for all local machines.
|
||||
#selfmx
|
||||
|
||||
# Change the following lines if you want dnsmasq to serve SRV
|
||||
# records. These are useful if you want to serve ldap requests for
|
||||
# Active Directory and other windows-originated DNS requests.
|
||||
# See RFC 2782.
|
||||
# You may add multiple srv-host lines.
|
||||
# The fields are <name>,<target>,<port>,<priority>,<weight>
|
||||
# If the domain part is missing from the name (so that it just has the
|
||||
# service and protocol sections) then the domain given by the domain=
|
||||
# config option is used. (Note that expand-hosts does not need to be
|
||||
# set for this to work.)
|
||||
|
||||
# A SRV record sending LDAP for the example.com domain to
|
||||
# ldapserver.example.com port 389
|
||||
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389
|
||||
|
||||
# A SRV record sending LDAP for the example.com domain to
|
||||
# ldapserver.example.com port 389 (using domain=)
|
||||
#domain=example.com
|
||||
#srv-host=_ldap._tcp,ldapserver.example.com,389
|
||||
|
||||
# Two SRV records for LDAP, each with different priorities
|
||||
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
|
||||
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2
|
||||
|
||||
# A SRV record indicating that there is no LDAP server for the domain
|
||||
# example.com
|
||||
#srv-host=_ldap._tcp.example.com
|
||||
|
||||
# The following line shows how to make dnsmasq serve an arbitrary PTR
|
||||
# record. This is useful for DNS-SD. (Note that the
|
||||
# domain-name expansion done for SRV records _does_not
|
||||
# occur for PTR records.)
|
||||
#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
|
||||
|
||||
# Change the following lines to enable dnsmasq to serve TXT records.
|
||||
# These are used for things like SPF and zeroconf. (Note that the
|
||||
# domain-name expansion done for SRV records _does_not
|
||||
# occur for TXT records.)
|
||||
|
||||
#Example SPF.
|
||||
#txt-record=example.com,"v=spf1 a -all"
|
||||
|
||||
#Example zeroconf
|
||||
#txt-record=_http._tcp.example.com,name=value,paper=A4
|
||||
|
||||
# Provide an alias for a "local" DNS name. Note that this _only_ works
|
||||
# for targets which are names from DHCP or /etc/hosts. Give host
|
||||
# "bert" another name, bertrand
|
||||
#cname=bertrand,bert
|
||||
|
||||
# For debugging purposes, log each DNS query as it passes through
|
||||
# dnsmasq.
|
||||
#log-queries
|
||||
|
||||
# Log lots of extra information about DHCP transactions.
|
||||
#log-dhcp
|
||||
|
||||
# Include another lot of configuration options.
|
||||
#conf-file=/etc/dnsmasq.more.conf
|
||||
#conf-dir=/etc/dnsmasq.d
|
||||
|
||||
# Include all the files in a directory except those ending in .bak
|
||||
#conf-dir=/etc/dnsmasq.d,.bak
|
||||
|
||||
# Include all files in a directory which end in .conf
|
||||
#conf-dir=/etc/dnsmasq.d/*.conf
|
|
@ -1,73 +0,0 @@
|
|||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Lighttpd config for Pi-hole
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
###################################################################################################
|
||||
# IF THIS HEADER EXISTS, THE FILE WILL BE OVERWRITTEN BY PI-HOLE'S UPDATE PROCEDURE. #
|
||||
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON THE NEXT UPDATE UNLESS YOU REMOVE THIS HEADER #
|
||||
# #
|
||||
# ENSURE THAT YOU DO NOT REMOVE THE REQUIRED LINE: #
|
||||
# #
|
||||
# include "/etc/lighttpd/conf-enabled/*.conf" #
|
||||
# #
|
||||
###################################################################################################
|
||||
|
||||
server.modules = (
|
||||
"mod_access",
|
||||
"mod_auth",
|
||||
"mod_expire",
|
||||
"mod_redirect",
|
||||
"mod_setenv",
|
||||
"mod_rewrite"
|
||||
)
|
||||
|
||||
server.document-root = "/var/www/html"
|
||||
server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
|
||||
server.errorlog = "/var/log/lighttpd/error-pihole.log"
|
||||
server.pid-file = "/run/lighttpd.pid"
|
||||
server.username = "www-data"
|
||||
server.groupname = "www-data"
|
||||
# For lighttpd version 1.4.46 or above, the port can be overwritten in `/etc/lighttpd/external.conf` using the := operator
|
||||
# e.g. server.port := 8000
|
||||
server.port = 80
|
||||
|
||||
# Allow streaming response
|
||||
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
|
||||
server.stream-response-body = 1
|
||||
#ssl.read-ahead = "disable"
|
||||
|
||||
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
|
||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||
|
||||
mimetype.assign = (
|
||||
".ico" => "image/x-icon",
|
||||
".jpeg" => "image/jpeg",
|
||||
".jpg" => "image/jpeg",
|
||||
".png" => "image/png",
|
||||
".svg" => "image/svg+xml",
|
||||
".css" => "text/css; charset=utf-8",
|
||||
".html" => "text/html; charset=utf-8",
|
||||
".js" => "text/javascript; charset=utf-8",
|
||||
".json" => "application/json; charset=utf-8",
|
||||
".map" => "application/json; charset=utf-8",
|
||||
".txt" => "text/plain; charset=utf-8",
|
||||
".eot" => "application/vnd.ms-fontobject",
|
||||
".otf" => "font/otf",
|
||||
".ttc" => "font/collection",
|
||||
".ttf" => "font/ttf",
|
||||
".woff" => "font/woff",
|
||||
".woff2" => "font/woff2"
|
||||
)
|
||||
|
||||
# Add user chosen options held in (optional) external file
|
||||
include "external*.conf"
|
||||
|
||||
# default listening port for IPv6 falls back to the IPv4 port
|
||||
include_shell "/usr/share/lighttpd/use-ipv6.pl " + server.port
|
||||
include "/etc/lighttpd/conf-enabled/*.conf"
|
|
@ -1,87 +0,0 @@
|
|||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Lighttpd config for Pi-hole
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
###################################################################################################
|
||||
# IF THIS HEADER EXISTS, THE FILE WILL BE OVERWRITTEN BY PI-HOLE'S UPDATE PROCEDURE. #
|
||||
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON THE NEXT UPDATE UNLESS YOU REMOVE THIS HEADER #
|
||||
# #
|
||||
# ENSURE THAT YOU DO NOT REMOVE THE REQUIRED LINE: #
|
||||
# #
|
||||
# include "/etc/lighttpd/conf.d/pihole-admin.conf" #
|
||||
# #
|
||||
###################################################################################################
|
||||
|
||||
server.modules = (
|
||||
"mod_access",
|
||||
"mod_auth",
|
||||
"mod_expire",
|
||||
"mod_fastcgi",
|
||||
"mod_accesslog",
|
||||
"mod_redirect",
|
||||
"mod_setenv",
|
||||
"mod_rewrite"
|
||||
)
|
||||
|
||||
server.document-root = "/var/www/html"
|
||||
server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
|
||||
server.errorlog = "/var/log/lighttpd/error-pihole.log"
|
||||
server.pid-file = "/run/lighttpd.pid"
|
||||
server.username = "lighttpd"
|
||||
server.groupname = "lighttpd"
|
||||
# For lighttpd version 1.4.46 or above, the port can be overwritten in `/etc/lighttpd/external.conf` using the := operator
|
||||
# e.g. server.port := 8000
|
||||
server.port = 80
|
||||
|
||||
# Allow streaming response
|
||||
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
|
||||
server.stream-response-body = 1
|
||||
#ssl.read-ahead = "disable"
|
||||
|
||||
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
|
||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||
|
||||
mimetype.assign = (
|
||||
".ico" => "image/x-icon",
|
||||
".jpeg" => "image/jpeg",
|
||||
".jpg" => "image/jpeg",
|
||||
".png" => "image/png",
|
||||
".svg" => "image/svg+xml",
|
||||
".css" => "text/css; charset=utf-8",
|
||||
".html" => "text/html; charset=utf-8",
|
||||
".js" => "text/javascript; charset=utf-8",
|
||||
".json" => "application/json; charset=utf-8",
|
||||
".map" => "application/json; charset=utf-8",
|
||||
".txt" => "text/plain; charset=utf-8",
|
||||
".eot" => "application/vnd.ms-fontobject",
|
||||
".otf" => "font/otf",
|
||||
".ttc" => "font/collection",
|
||||
".ttf" => "font/ttf",
|
||||
".woff" => "font/woff",
|
||||
".woff2" => "font/woff2"
|
||||
)
|
||||
|
||||
# Add user chosen options held in (optional) external file
|
||||
include "external*.conf"
|
||||
|
||||
# default listening port for IPv6 falls back to the IPv4 port
|
||||
#include_shell "/usr/share/lighttpd/use-ipv6.pl " + server.port
|
||||
#include_shell "/usr/share/lighttpd/create-mime.assign.pl"
|
||||
#include_shell "/usr/share/lighttpd/include-conf-enabled.pl"
|
||||
|
||||
fastcgi.server = (
|
||||
".php" => (
|
||||
"localhost" => (
|
||||
"socket" => "/tmp/php-fastcgi.socket",
|
||||
"bin-path" => "/usr/bin/php-cgi"
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
include "/etc/lighttpd/conf.d/pihole-admin.conf"
|
|
@ -1,82 +0,0 @@
|
|||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Lighttpd config for Pi-hole
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
###############################################################################
|
||||
# FILE AUTOMATICALLY OVERWRITTEN BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
|
||||
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
|
||||
###############################################################################
|
||||
|
||||
server.errorlog := "/var/log/lighttpd/error-pihole.log"
|
||||
|
||||
$HTTP["url"] =~ "^/admin/" {
|
||||
server.document-root = "/var/www/html"
|
||||
server.stream-response-body = 1
|
||||
accesslog.filename = "/var/log/lighttpd/access-pihole.log"
|
||||
accesslog.format = "%{%s}t|%h|%V|%r|%s|%b"
|
||||
|
||||
fastcgi.server = (
|
||||
".php" => (
|
||||
"localhost" => (
|
||||
"socket" => "/run/lighttpd/pihole-php-fastcgi.socket",
|
||||
"bin-path" => "/usr/bin/php-cgi",
|
||||
"min-procs" => 1,
|
||||
"max-procs" => 1,
|
||||
"bin-environment" => (
|
||||
"PHP_FCGI_CHILDREN" => "4",
|
||||
"PHP_FCGI_MAX_REQUESTS" => "10000",
|
||||
),
|
||||
"bin-copy-environment" => (
|
||||
"PATH", "SHELL", "USER"
|
||||
),
|
||||
"broken-scriptfilename" => "enable",
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
# X-Pi-hole is a response header for debugging using curl -I
|
||||
# X-Frame-Options prevents clickjacking attacks and helps ensure your content is not embedded into other sites via < frame >, < iframe > or < object >.
|
||||
# X-XSS-Protection sets the configuration for the cross-site scripting filters built into most browsers. This is important because it tells the browser to block the response if a malicious script has been inserted from a user input. (deprecated; disabled)
|
||||
# X-Content-Type-Options stops a browser from trying to MIME-sniff the content type and forces it to stick with the declared content-type. This is important because the browser will only load external resources if their content-type matches what is expected, and not malicious hidden code.
|
||||
# Content-Security-Policy tells the browser where resources are allowed to be loaded and if it’s allowed to parse/run inline styles or Javascript. This is important because it prevents content injection attacks, such as Cross Site Scripting (XSS).
|
||||
# X-Permitted-Cross-Domain-Policies is an XML document that grants a web client, such as Adobe Flash Player or Adobe Acrobat (though not necessarily limited to these), permission to handle data across domains.
|
||||
# Referrer-Policy allows control/restriction of the amount of information present in the referral header for links away from your page—the URL path or even if the header is sent at all.
|
||||
setenv.add-response-header = (
|
||||
"X-Pi-hole" => "The Pi-hole Web interface is working!",
|
||||
"X-Frame-Options" => "DENY",
|
||||
"X-XSS-Protection" => "0",
|
||||
"X-Content-Type-Options" => "nosniff",
|
||||
"Content-Security-Policy" => "default-src 'self' 'unsafe-inline';",
|
||||
"X-Permitted-Cross-Domain-Policies" => "none",
|
||||
"Referrer-Policy" => "same-origin"
|
||||
)
|
||||
|
||||
# Block . files from being served, such as .git, .github, .gitignore
|
||||
$HTTP["url"] =~ "^/admin/\." {
|
||||
url.access-deny = ("")
|
||||
}
|
||||
|
||||
# allow teleporter and API qr code iframe on settings page
|
||||
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
|
||||
$HTTP["referer"] =~ "/admin/settings\.php" {
|
||||
setenv.set-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
|
||||
}
|
||||
}
|
||||
}
|
||||
else $HTTP["url"] == "/admin" {
|
||||
url.redirect = ("" => "/admin/")
|
||||
}
|
||||
|
||||
$HTTP["host"] == "pi.hole" {
|
||||
$HTTP["url"] == "/" {
|
||||
url.redirect = ("" => "/admin/")
|
||||
}
|
||||
}
|
||||
|
||||
# (keep this on one line for basic-install.sh filtering during install)
|
||||
server.modules += ( "mod_access", "mod_accesslog", "mod_redirect", "mod_fastcgi", "mod_setenv" )
|
File diff suppressed because it is too large
|
@ -45,11 +45,7 @@ source "${setupVars}"
|
|||
package_manager_detect
|
||||
|
||||
# Uninstall packages used by the Pi-hole
|
||||
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}" "${OS_CHECK_DEPS[@]}")
|
||||
if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
|
||||
# Install the Web dependencies
|
||||
DEPS+=("${PIHOLE_WEB_DEPS[@]}")
|
||||
fi
|
||||
DEPS=("${INSTALLER_COMMON_DEPS[@]}" "${PIHOLE_COMMON_DEPS[@]}" "${OS_CHECK_COMMON_DEPS[@]}" "${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}" "${OS_CHECK_DEPS[@]}")
|
||||
|
||||
# Compatibility
|
||||
if [ -x "$(command -v apt-get)" ]; then
|
||||
|
@ -193,6 +189,18 @@ removeNoPurge() {
|
|||
else
|
||||
service pihole-FTL stop
|
||||
fi
|
||||
${SUDO} rm -f /etc/systemd/system/pihole-FTL.service
|
||||
if [[ -d '/etc/systemd/system/pihole-FTL.service.d' ]]; then
|
||||
read -rp " ${QST} FTL service override directory /etc/systemd/system/pihole-FTL.service.d detected. Do you wish to remove this from your system? [y/N] " answer
|
||||
case $answer in
|
||||
[yY]*)
|
||||
echo -ne " ${INFO} Removing /etc/systemd/system/pihole-FTL.service.d..."
|
||||
${SUDO} rm -R /etc/systemd/system/pihole-FTL.service.d
|
||||
echo -e "${OVER} ${INFO} Removed /etc/systemd/system/pihole-FTL.service.d"
|
||||
;;
|
||||
*) echo -e " ${INFO} Leaving /etc/systemd/system/pihole-FTL.service.d in place.";;
|
||||
esac
|
||||
fi
|
||||
${SUDO} rm -f /etc/init.d/pihole-FTL
|
||||
${SUDO} rm -f /usr/bin/pihole-FTL
|
||||
echo -e "${OVER} ${TICK} Removed pihole-FTL"
|
||||
|
|
806 gravity.sh
File diff suppressed because it is too large
|
@ -1,154 +0,0 @@
|
|||
.TH "Pihole-FTL" "8" "pihole-FTL" "Pi-hole" "November 2020"
|
||||
.SH "NAME"
|
||||
pihole-FTL - Pi-hole : The Faster-Than-Light (FTL) Engine
|
||||
.br
|
||||
.SH "SYNOPSIS"
|
||||
\fBservice pihole-FTL \fR(\fBstart\fR|\fBstop\fR|\fBrestart\fR)
|
||||
.br
|
||||
|
||||
\fBpihole-FTL debug\fR
|
||||
.br
|
||||
\fBpihole-FTL test\fR
|
||||
.br
|
||||
\fBpihole-FTL -v|-vv\fR
|
||||
.br
|
||||
\fBpihole-FTL -t\fR
|
||||
.br
|
||||
\fBpihole-FTL -b\fR
|
||||
.br
|
||||
\fBpihole-FTL -f\fR
|
||||
.br
|
||||
\fBpihole-FTL -h\fR
|
||||
.br
|
||||
\fBpihole-FTL dnsmasq-test\fR
|
||||
.br
|
||||
\fBpihole-FTL regex-test str\fR
|
||||
.br
|
||||
\fBpihole-FTL regex-test str rgx\fR
|
||||
.br
|
||||
\fBpihole-FTL lua\fR
|
||||
.br
|
||||
\fBpihole-FTL luac\fR
|
||||
.br
|
||||
\fBpihole-FTL dhcp-discover\fR
|
||||
.br
|
||||
\fBpihole-FTL --\fR (\fBoptions\fR)
|
||||
.br
|
||||
|
||||
.SH "DESCRIPTION"
|
||||
Pi-hole : The Faster-Than-Light (FTL) Engine is a lightweight, purpose-built daemon used to provide statistics needed for the Pi-hole Web Interface, and its API can be easily integrated into your own projects. Although it is an optional component of the Pi-hole ecosystem, it will be installed by default to provide statistics. As the name implies, FTL does its work \fIvery\fR \fIquickly\fR!
|
||||
.br
|
||||
|
||||
Usage
|
||||
.br
|
||||
|
||||
\fBservice pihole-FTL start\fR
|
||||
.br
|
||||
Start the pihole-FTL daemon
|
||||
.br
|
||||
|
||||
\fBservice pihole-FTL stop\fR
|
||||
.br
|
||||
Stop the pihole-FTL daemon
|
||||
.br
|
||||
|
||||
\fBservice pihole-FTL restart\fR
|
||||
.br
|
||||
If the pihole-FTL daemon is running, stop and then start, otherwise start.
|
||||
.br
|
||||
|
||||
Command line arguments
|
||||
.br
|
||||
|
||||
\fBdebug\fR
|
||||
.br
|
||||
Don't go into daemon mode (stay in foreground) + more verbose logging
|
||||
.br
|
||||
|
||||
\fBtest\fR
|
||||
.br
|
||||
Start FTL and process everything, but shut down immediately afterwards
|
||||
.br
|
||||
|
||||
\fB-v, version\fR
|
||||
.br
|
||||
Don't start FTL, show only version
|
||||
.br
|
||||
|
||||
\fB-vv\fR
|
||||
.br
|
||||
Don't start FTL, show verbose version information of embedded applications
|
||||
.br
|
||||
|
||||
\fB-t, tag\fR
|
||||
.br
|
||||
Don't start FTL, show only git tag
|
||||
.br
|
||||
|
||||
\fB-b, branch\fR
|
||||
.br
|
||||
Don't start FTL, show only git branch FTL was compiled from
|
||||
.br
|
||||
|
||||
\fB-f, no-daemon\fR
|
||||
.br
|
||||
Don't go into background (daemon mode)
|
||||
.br
|
||||
|
||||
\fB-h, help\fR
|
||||
.br
|
||||
Don't start FTL, show help
|
||||
.br
|
||||
|
||||
\fBdnsmasq-test\fR
|
||||
.br
|
||||
Test resolver config file syntax
|
||||
.br
|
||||
|
||||
\fBregex-test str\fR
|
||||
.br
|
||||
Test str against all regular expressions in the database
|
||||
.br
|
||||
|
||||
\fBregex-test str rgx\fR
|
||||
.br
|
||||
Test str against regular expression given by rgx
|
||||
.br
|
||||
|
||||
\fBlua\fR
|
||||
.br
|
||||
Start the embedded Lua interpreter
|
||||
.br
|
||||
|
||||
\fBluac\fR
|
||||
.br
|
||||
Execute the embedded Lua compiler
|
||||
.br
|
||||
|
||||
\fBdhcp-discover\fR
|
||||
.br
|
||||
Discover DHCP servers in the local network
|
||||
.br
|
||||
|
||||
\fB--\fR (options)
|
||||
.br
|
||||
Pass options to internal dnsmasq resolver
|
||||
.br
|
||||
.SH "EXAMPLE"
|
||||
Command line arguments can be arbitrarily combined, e.g:
|
||||
.br
|
||||
|
||||
\fBpihole-FTL debug test\fR
|
||||
.br
|
||||
|
||||
Start ftl in foreground with more verbose logging, process everything and shutdown immediately
|
||||
.br
|
||||
.SH "SEE ALSO"
|
||||
\fBpihole\fR(8)
|
||||
.br
|
||||
\fBFor FTL's config options please see https://docs.pi-hole.net/ftldns/configfile/\fR
|
||||
.br
|
||||
.SH "COLOPHON"
|
||||
|
||||
Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net
|
||||
.br
|
|
@ -52,47 +52,43 @@ pihole restartdns\fR [options]
|
|||
Available commands and options:
|
||||
.br
|
||||
|
||||
\fB-w, whitelist\fR [options] [<domain1> <domain2 ...>]
|
||||
\fBallow, allowlist\fR [options] [<domain1> <domain2 ...>]
|
||||
.br
|
||||
Adds or removes specified domain or domains to the Whitelist
|
||||
Adds or removes specified domain or domains to the Allowlist
|
||||
.br
|
||||
|
||||
\fB-b, blacklist\fR [options] [<domain1> <domain2 ...>]
|
||||
\fBdeny, denylist\fR [options] [<domain1> <domain2 ...>]
|
||||
.br
|
||||
Adds or removes specified domain or domains to the blacklist
|
||||
Adds or removes specified domain or domains to the denylist
|
||||
.br
|
||||
|
||||
\fB--regex, regex\fR [options] [<regex1> <regex2 ...>]
|
||||
.br
|
||||
Add or removes specified regex filter to the regex blacklist
|
||||
Add or removes specified regex filter to the regex denylist
|
||||
.br
|
||||
|
||||
\fB--white-regex\fR [options] [<regex1> <regex2 ...>]
|
||||
\fB--allow-regex\fR [options] [<regex1> <regex2 ...>]
|
||||
.br
|
||||
Add or removes specified regex filter to the regex whitelist
|
||||
Add or removes specified regex filter to the regex allowlist
|
||||
.br
|
||||
|
||||
\fB--wild, wildcard\fR [options] [<domain1> <domain2 ...>]
|
||||
.br
|
||||
Add or removes specified domain to the wildcard blacklist
|
||||
Add or removes specified domain to the wildcard denylist
|
||||
.br
|
||||
|
||||
\fB--white-wild\fR [options] [<domain1> <domain2 ...>]
|
||||
\fB--allow-wild\fR [options] [<domain1> <domain2 ...>]
|
||||
.br
|
||||
Add or removes specified domain to the wildcard whitelist
|
||||
Add or removes specified domain to the wildcard allowlist
|
||||
.br
|
||||
|
||||
(Whitelist/Blacklist manipulation options):
|
||||
(Allow-/denylist manipulation options):
|
||||
.br
|
||||
-d, --delmode Remove domain(s) from the list
|
||||
not, -d, --delmode Remove domain(s) from the list
|
||||
.br
|
||||
-nr, --noreload Update list without refreshing dnsmasq
|
||||
-q, --quiet Make output less verbose
|
||||
.br
|
||||
-q, --quiet Make output less verbose
|
||||
.br
|
||||
-l, --list Display all your listed domains
|
||||
.br
|
||||
--nuke Removes all entries in a list
|
||||
-l, --list Display all your listed domains
|
||||
.br
|
||||
|
||||
\fB-d, debug\fR [-a]
|
||||
|
@ -141,20 +137,6 @@ Available commands and options:
|
|||
(0 = lowest, 3 = highest)
|
||||
.br
|
||||
|
||||
\fB-c, chronometer\fR [options]
|
||||
.br
|
||||
Calculates stats and displays to an LCD
|
||||
.br
|
||||
|
||||
(Chronometer Options):
|
||||
.br
|
||||
-j, --json Output stats as JSON formatted string
|
||||
.br
|
||||
-r, --refresh Set update frequency (in seconds)
|
||||
.br
|
||||
-e, --exit Output stats and exit without refreshing
|
||||
.br
|
||||
|
||||
\fB-g, updateGravity\fR
|
||||
.br
|
||||
Update the list of ad-serving domains
|
||||
|
@ -184,7 +166,7 @@ Available commands and options:
|
|||
Specify whether the Pi-hole log should be used
|
||||
.br
|
||||
|
||||
(Logging options):
|
||||
(Logging options):
|
||||
.br
|
||||
on Enable the Pi-hole log at /var/log/pihole/pihole.log
|
||||
.br
|
||||
|
@ -212,7 +194,7 @@ Available commands and options:
|
|||
.br
|
||||
-p, --pihole Only retrieve info regarding Pi-hole repository
|
||||
.br
|
||||
-a, --admin Only retrieve info regarding AdminLTE
|
||||
-a, --admin Only retrieve info regarding web
|
||||
repository
|
||||
.br
|
||||
-f, --ftl Only retrieve info regarding FTL repository
|
||||
|
@ -293,17 +275,17 @@ Available commands and options:
|
|||
Some usage examples
|
||||
.br
|
||||
|
||||
Whitelist/blacklist manipulation
|
||||
Allow-/denylist manipulation
|
||||
.br
|
||||
|
||||
\fBpihole -w iloveads.example.com\fR
|
||||
\fBpihole allow iloveads.example.com\fR
|
||||
.br
|
||||
Adds "iloveads.example.com" to whitelist
|
||||
Allow "iloveads.example.com"
|
||||
.br
|
||||
|
||||
\fBpihole -b -d noads.example.com\fR
|
||||
\fBpihole deny not noads.example.com\fR
|
||||
.br
|
||||
Removes "noads.example.com" from blacklist
|
||||
Removes "noads.example.com" from denylist
|
||||
.br
|
||||
|
||||
\fBpihole --wild example.com\fR
|
||||
|
@ -339,7 +321,7 @@ Displaying version information
|
|||
|
||||
\fBpihole -v -a -c\fR
|
||||
.br
|
||||
Display the current version of AdminLTE
|
||||
Display the current version of web
|
||||
.br
|
||||
|
||||
Temporarily disabling Pi-hole
|
||||
|
|
346 pihole
|
@ -11,18 +11,21 @@
|
|||
|
||||
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
|
||||
|
||||
# setupVars and PI_HOLE_BIN_DIR are not readonly here because in some functions (checkout),
|
||||
# PI_HOLE_BIN_DIR is not readonly here because in some functions (checkout),
|
||||
# they might get set again when the installer is sourced. This causes an
|
||||
# error due to modifying a readonly variable.
|
||||
setupVars="/etc/pihole/setupVars.conf"
|
||||
PI_HOLE_BIN_DIR="/usr/local/bin"
|
||||
|
||||
readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
|
||||
source "${colfile}"
|
||||
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
readonly utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
# Source api functions
|
||||
readonly apifile="${PI_HOLE_SCRIPT_DIR}/api.sh"
|
||||
source "${apifile}"
|
||||
|
||||
versionsfile="/etc/pihole/versions"
|
||||
if [ -f "${versionsfile}" ]; then
|
||||
# Only source versionsfile if the file exists
|
||||
|
@ -31,10 +34,36 @@ if [ -f "${versionsfile}" ]; then
|
|||
source "${versionsfile}"
|
||||
fi
|
||||
|
||||
webpageFunc() {
|
||||
source "${PI_HOLE_SCRIPT_DIR}/webpage.sh"
|
||||
main "$@"
|
||||
exit 0
|
||||
# TODO: We can probably remove the reliance on this function too, just tell people to pihole-FTL --config webserver.api.password "password"
|
||||
SetWebPassword() {
|
||||
if [ -n "$2" ] ; then
|
||||
readonly PASSWORD="$2"
|
||||
readonly CONFIRM="${PASSWORD}"
|
||||
else
|
||||
# Prevents a bug if the user presses Ctrl+C and it continues to hide the text typed.
|
||||
# So we reset the terminal via stty if the user does press Ctrl+C
|
||||
trap '{ echo -e "\nNot changed" ; stty sane ; exit 1; }' INT
|
||||
read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
|
||||
echo ""
|
||||
|
||||
if [ "${PASSWORD}" == "" ]; then
|
||||
setFTLConfigValue "webserver.api.password" ""
|
||||
echo -e " ${TICK} Password Removed"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
read -s -r -p "Confirm Password: " CONFIRM
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
|
||||
# pihole-FTL will automatically hash the password
|
||||
setFTLConfigValue "webserver.api.password" "${PASSWORD}"
|
||||
echo -e " ${TICK} New password set"
|
||||
else
|
||||
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
listFunc() {
|
||||
|
@ -100,8 +129,7 @@ queryFunc() {
|
|||
}
|
||||
|
||||
chronometerFunc() {
|
||||
shift
|
||||
"${PI_HOLE_SCRIPT_DIR}"/chronometer.sh "$@"
|
||||
echo "Chronometer is gone, use PADD (https://github.com/pi-hole/PADD)"
|
||||
exit 0
|
||||
}
|
||||
|
||||
|
@ -116,8 +144,7 @@ uninstallFunc() {
|
|||
}
|
||||
|
||||
versionFunc() {
|
||||
shift
|
||||
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
|
||||
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh
|
||||
}
|
||||
|
||||
restartDNS() {
|
||||
|
@ -125,7 +152,7 @@ restartDNS() {
|
|||
svcOption="${1:-restart}"
|
||||
|
||||
# get the current path to the pihole-FTL.pid
|
||||
FTL_PID_FILE="$(getFTLPIDFile)"
|
||||
FTL_PID_FILE="$(getFTLConfigValue files.pid)"
|
||||
|
||||
# Determine if we should reload or restart
|
||||
if [[ "${svcOption}" =~ "reload-lists" ]]; then
|
||||
|
@ -182,73 +209,60 @@ restartDNS() {
|
|||
|
||||
piholeEnable() {
|
||||
if [[ "${2}" == "-h" ]] || [[ "${2}" == "--help" ]]; then
|
||||
echo "Usage: pihole disable [time]
|
||||
Example: 'pihole disable', or 'pihole disable 5m'
|
||||
Disable Pi-hole subsystems
|
||||
echo "Usage: pihole enable/disable [time]
|
||||
Example: 'pihole enable', or 'pihole disable 5m'
|
||||
En- or disable Pi-hole subsystems
|
||||
|
||||
Time:
|
||||
#s Disable Pi-hole functionality for # second(s)
|
||||
#m Disable Pi-hole functionality for # minute(s)"
|
||||
#s En-/disable Pi-hole functionality for # second(s)
|
||||
#m En-/disable Pi-hole functionality for # minute(s)"
|
||||
exit 0
|
||||
|
||||
elif [[ "${1}" == "0" ]]; then
|
||||
# Disable Pi-hole
|
||||
if grep -cq "BLOCKING_ENABLED=false" "${setupVars}"; then
|
||||
echo -e " ${INFO} Blocking already disabled, nothing to do"
|
||||
exit 0
|
||||
fi
|
||||
if [[ $# > 1 ]]; then
|
||||
local error=false
|
||||
if [[ "${2}" == *"s" ]]; then
|
||||
tt=${2%"s"}
|
||||
if [[ "${tt}" =~ ^-?[0-9]+$ ]];then
|
||||
local str="Disabling blocking for ${tt} seconds"
|
||||
echo -e " ${INFO} ${str}..."
|
||||
local str="Blocking will be re-enabled in ${tt} seconds"
|
||||
nohup "${PI_HOLE_SCRIPT_DIR}"/pihole-reenable.sh ${tt} </dev/null &>/dev/null &
|
||||
else
|
||||
local error=true
|
||||
fi
|
||||
elif [[ "${2}" == *"m" ]]; then
|
||||
tt=${2%"m"}
|
||||
if [[ "${tt}" =~ ^-?[0-9]+$ ]];then
|
||||
local str="Disabling blocking for ${tt} minutes"
|
||||
echo -e " ${INFO} ${str}..."
|
||||
local str="Blocking will be re-enabled in ${tt} minutes"
|
||||
tt=$((${tt}*60))
|
||||
nohup "${PI_HOLE_SCRIPT_DIR}"/pihole-reenable.sh ${tt} </dev/null &>/dev/null &
|
||||
else
|
||||
local error=true
|
||||
fi
|
||||
elif [[ -n "${2}" ]]; then
|
||||
local error=true
|
||||
else
|
||||
echo -e " ${INFO} Disabling blocking"
|
||||
fi
|
||||
|
||||
if [[ ${error} == true ]];then
|
||||
echo -e " ${COL_LIGHT_RED}Unknown format for delayed reactivation of the blocking!${COL_NC}"
|
||||
echo -e " Try 'pihole disable --help' for more information."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local str="Pi-hole Disabled"
|
||||
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "false"
|
||||
fi
|
||||
else
|
||||
# Enable Pi-hole
|
||||
killall -q pihole-reenable
|
||||
if grep -cq "BLOCKING_ENABLED=true" "${setupVars}"; then
|
||||
echo -e " ${INFO} Blocking already enabled, nothing to do"
|
||||
exit 0
|
||||
fi
|
||||
echo -e " ${INFO} Enabling blocking"
|
||||
local str="Pi-hole Enabled"
|
||||
|
||||
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "true"
|
||||
fi
|
||||
|
||||
restartDNS reload-lists
|
||||
# Get timer
|
||||
local tt="null"
|
||||
if [[ $# -gt 1 ]]; then
|
||||
local error=false
|
||||
if [[ "${2}" == *"s" ]]; then
|
||||
tt=${2%"s"}
|
||||
if [[ ! "${tt}" =~ ^-?[0-9]+$ ]];then
|
||||
local error=true
|
||||
fi
|
||||
elif [[ "${2}" == *"m" ]]; then
|
||||
tt=${2%"m"}
|
||||
if [[ "${tt}" =~ ^-?[0-9]+$ ]];then
|
||||
tt=$((${tt}*60))
|
||||
else
|
||||
local error=true
|
||||
fi
|
||||
elif [[ -n "${2}" ]]; then
|
||||
local error=true
|
||||
fi
|
||||
|
||||
if [[ ${error} == true ]];then
|
||||
echo -e " ${COL_LIGHT_RED}Unknown format for blocking timer!${COL_NC}"
|
||||
echo -e " Try 'pihole disable --help' for more information."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Authenticate with the API
|
||||
LoginAPI
|
||||
|
||||
# Send the request
|
||||
data=$(PostFTLData "dns/blocking" "{ \"blocking\": ${1}, \"timer\": ${tt} }")
|
||||
|
||||
# Check the response
|
||||
local extra=" forever"
|
||||
local timer="$(echo "${data}"| jq --raw-output '.timer' )"
|
||||
if [[ "${timer}" != "null" ]]; then
|
||||
extra=" for ${timer}s"
|
||||
fi
|
||||
local str="Pi-hole $(echo "${data}" | jq --raw-output '.blocking')${extra}"
|
||||
|
||||
# Logout from the API
|
||||
LogoutAPI
|
||||
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
}
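# Rough curl equivalent of the request piholeEnable sends (a sketch only: the
# base URL and the "sid" header name are assumptions here; the script itself
# goes through LoginAPI/PostFTLData instead of calling the API directly):
#   curl -s -X POST "http://pi.hole/api/dns/blocking" -H "sid: ${SID}" \
#        -d '{ "blocking": false, "timer": 300 }' | jq '.blocking, .timer'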
|
||||
|
@ -267,8 +281,7 @@ Options:
|
|||
exit 0
|
||||
elif [[ "${1}" == "off" ]]; then
|
||||
# Disable logging
|
||||
removeKey /etc/dnsmasq.d/01-pihole.conf "log-queries"
|
||||
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "false"
|
||||
setFTLConfigValue dns.queryLogging false
|
||||
if [[ "${2}" != "noflush" ]]; then
|
||||
# Flush logs
|
||||
"${PI_HOLE_BIN_DIR}"/pihole -f
|
||||
|
@ -277,8 +290,7 @@ Options:
|
|||
local str="Logging has been disabled!"
|
||||
elif [[ "${1}" == "on" ]]; then
|
||||
# Enable logging
|
||||
addKey /etc/dnsmasq.d/01-pihole.conf "log-queries"
|
||||
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "true"
|
||||
setFTLConfigValue dns.queryLogging true
|
||||
echo -e " ${INFO} Enabling logging..."
|
||||
local str="Logging has been enabled!"
|
||||
else
|
||||
|
@ -323,28 +335,27 @@ analyze_ports() {
|
|||
|
||||
statusFunc() {
|
||||
# Determine if the pihole-FTL service is listening
|
||||
local pid port ftl_api_port ftl_pid_file
|
||||
local pid port ftl_pid_file block_status
|
||||
|
||||
ftl_pid_file="$(getFTLPIDFile)"
|
||||
ftl_pid_file="$(getFTLConfigValue files.pid)"
|
||||
|
||||
pid="$(getFTLPID ${ftl_pid_file})"
|
||||
|
||||
ftl_api_port="$(getFTLAPIPort)"
|
||||
if [[ "$pid" -eq "-1" ]]; then
|
||||
case "${1}" in
|
||||
"web") echo "-1";;
|
||||
*) echo -e " ${CROSS} DNS service is NOT running";;
|
||||
esac
|
||||
return 0
|
||||
exit 0
|
||||
else
|
||||
#get the DNS port pihole-FTL is listening on by using FTL's telnet API
|
||||
port="$(echo ">dns-port >quit" | nc 127.0.0.1 "$ftl_api_port")"
|
||||
# get the DNS port pihole-FTL is listening on
|
||||
port="$(getFTLConfigValue dns.port)"
|
||||
if [[ "${port}" == "0" ]]; then
|
||||
case "${1}" in
|
||||
"web") echo "-1";;
|
||||
*) echo -e " ${CROSS} DNS service is NOT listening";;
|
||||
esac
|
||||
return 0
|
||||
exit 0
|
||||
else
|
||||
if [[ "${1}" != "web" ]]; then
|
||||
echo -e " ${TICK} FTL is listening on port ${port}"
|
||||
|
@ -354,73 +365,73 @@ statusFunc() {
|
|||
fi
|
||||
|
||||
# Determine if Pi-hole's blocking is enabled
|
||||
if grep -q "BLOCKING_ENABLED=false" /etc/pihole/setupVars.conf; then
|
||||
# A config is commented out
|
||||
case "${1}" in
|
||||
"web") echo 0;;
|
||||
*) echo -e " ${CROSS} Pi-hole blocking is disabled";;
|
||||
esac
|
||||
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
|
||||
# Configs are set
|
||||
block_status=$(getFTLConfigValue dns.blocking.active)
|
||||
if [ ${block_status} == "true" ]; then
|
||||
case "${1}" in
|
||||
"web") echo "$port";;
|
||||
*) echo -e " ${TICK} Pi-hole blocking is enabled";;
|
||||
esac
|
||||
else
|
||||
# No configs were found
|
||||
case "${1}" in
|
||||
"web") echo -2;;
|
||||
*) echo -e " ${INFO} Pi-hole blocking will be enabled";;
|
||||
"web") echo 0;;
|
||||
*) echo -e " ${CROSS} Pi-hole blocking is disabled";;
|
||||
esac
|
||||
# Enable blocking
|
||||
"${PI_HOLE_BIN_DIR}"/pihole enable
|
||||
fi
|
||||
exit 0
|
||||
|
||||
exit 0
|
||||
}
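# Usage note (assuming the dispatcher forwards the second argument, i.e.
# "pihole status web"): the machine-readable mode prints the DNS port while
# blocking is enabled, 0 when blocking is disabled, and -1 when FTL is not
# running or not listening.
#   pihole status web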
|
||||
|
||||
tailFunc() {
|
||||
# Warn user if Pi-hole's logging is disabled
|
||||
local logging_enabled=$(grep -c "^log-queries" /etc/dnsmasq.d/01-pihole.conf)
|
||||
if [[ "${logging_enabled}" == "0" ]]; then
|
||||
# No "log-queries" lines are found.
|
||||
# Commented out lines (such as "#log-queries") are ignored
|
||||
local logging_enabled=$(getFTLConfigValue dns.queryLogging)
|
||||
if [[ "${logging_enabled}" != "true" ]]; then
|
||||
echo " ${CROSS} Warning: Query logging is disabled"
|
||||
fi
|
||||
echo -e " ${INFO} Press Ctrl-C to exit"
|
||||
|
||||
# Get logfile path
|
||||
readonly LOGFILE=$(getFTLConfigValue files.log.dnsmasq)
|
||||
|
||||
# Strip date from each line
|
||||
# Color blocklist/blacklist/wildcard entries as red
|
||||
# Color blocklist/denylist/wildcard entries as red
|
||||
# Color A/AAAA/DHCP strings as white
|
||||
# Color everything else as gray
|
||||
tail -f /var/log/pihole/pihole.log | grep --line-buffered "${1}" | sed -E \
|
||||
tail -f $LOGFILE | grep --line-buffered "${1}" | sed -E \
|
||||
-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
|
||||
-e "s,(.*(blacklisted |gravity blocked ).*),${COL_RED}&${COL_NC}," \
|
||||
-e "s,(.*(denied |gravity blocked ).*),${COL_RED}&${COL_NC}," \
|
||||
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
|
||||
-e "s,.*,${COL_GRAY}&${COL_NC},"
|
||||
exit 0
|
||||
}
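Editor's note: with the log path now taken from files.log.dnsmasq, the optional filter argument to the tail command works as documented in the help text; for example:

    pihole -t                      # follow the full query log
    pihole -t "gravity blocked"    # regex filter: follow only blocked queries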
|
||||
|
||||
piholeCheckoutFunc() {
|
||||
if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then
|
||||
echo "Usage: pihole checkout [repo] [branch]
|
||||
Example: 'pihole checkout master' or 'pihole checkout core dev'
|
||||
Switch Pi-hole subsystems to a different GitHub branch
|
||||
if [ -n "${DOCKER_VERSION}" ]; then
|
||||
unsupportedFunc
|
||||
else
|
||||
if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then
|
||||
echo "Switch Pi-hole subsystems to a different GitHub branch
|
||||
Usage: ${COL_GREEN}pihole checkout${COL_NC} ${COL_YELLOW}shortcut${COL_NC}
|
||||
or ${COL_GREEN}pihole checkout${COL_NC} ${COL_PURPLE}repo${COL_NC} ${COL_CYAN}branch${COL_NC}
|
||||
|
||||
Repositories:
|
||||
core [branch] Change the branch of Pi-hole's core subsystem
|
||||
web [branch] Change the branch of Web Interface subsystem
|
||||
ftl [branch] Change the branch of Pi-hole's FTL subsystem
|
||||
Example: ${COL_GREEN}pihole checkout${COL_NC} ${COL_YELLOW}master${COL_NC}
|
||||
or ${COL_GREEN}pihole checkout${COL_NC} ${COL_PURPLE}ftl ${COL_CYAN}development${COL_NC}
|
||||
|
||||
Branches:
|
||||
master Update subsystems to the latest stable release
|
||||
dev Update subsystems to the latest development release
|
||||
branchname Update subsystems to the specified branchname"
|
||||
exit 0
|
||||
Shortcuts:
|
||||
${COL_YELLOW}master${COL_NC} Update all subsystems to the latest stable release
|
||||
${COL_YELLOW}dev${COL_NC} Update all subsystems to the latest development release
|
||||
|
||||
Individual components:
|
||||
${COL_PURPLE}core${COL_NC} ${COL_CYAN}branch${COL_NC} Change the branch of Pi-hole's core subsystem
|
||||
${COL_PURPLE}web${COL_NC} ${COL_CYAN}branch${COL_NC} Change the branch of the web interface subsystem
|
||||
${COL_PURPLE}ftl${COL_NC} ${COL_CYAN}branch${COL_NC} Change the branch of Pi-hole's FTL subsystem"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh
|
||||
shift
|
||||
checkout "$@"
|
||||
fi
|
||||
|
||||
source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh
|
||||
shift
|
||||
checkout "$@"
|
||||
}
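Editor's note: typical invocations of the reworked checkout command, taken directly from the new help text above:

    pihole checkout master            # all subsystems -> latest stable release
    pihole checkout dev               # all subsystems -> latest development release
    pihole checkout ftl development   # only FTL -> branch "development"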
|
||||
|
||||
tricorderFunc() {
|
||||
|
@ -456,17 +467,17 @@ unsupportedFunc(){
|
|||
|
||||
helpFunc() {
|
||||
echo "Usage: pihole [options]
|
||||
Example: 'pihole -w -h'
|
||||
Example: 'pihole allow -h'
|
||||
Add '-h' after specific commands for more information on usage
|
||||
|
||||
Whitelist/Blacklist Options:
|
||||
-w, whitelist Whitelist domain(s)
|
||||
-b, blacklist Blacklist domain(s)
|
||||
--regex, regex Regex blacklist domains(s)
|
||||
--white-regex Regex whitelist domains(s)
|
||||
--wild, wildcard Wildcard blacklist domain(s)
|
||||
--white-wild Wildcard whitelist domain(s)
|
||||
Add '-h' for more info on whitelist/blacklist usage
|
||||
Domain Options:
|
||||
allow, allowlist Allow domain(s)
|
||||
deny, denylist Deny domain(s)
|
||||
--regex, regex Regex deny domain(s)
|
||||
--allow-regex Regex allow domain(s)
|
||||
--wild, wildcard Wildcard deny domain(s)
|
||||
--allow-wild Wildcard allow domain(s)
|
||||
Add '-h' for more info on allow/deny usage
|
||||
|
||||
Debugging Options:
|
||||
-d, debug Start a debugging session
|
||||
|
@ -477,13 +488,13 @@ Debugging Options:
|
|||
-t, tail [arg] View the live output of the Pi-hole log.
|
||||
Add an optional argument to filter the log
|
||||
(regular expressions are supported)
|
||||
api <endpoint> Query the Pi-hole API at <endpoint>
|
||||
|
||||
|
||||
Options:
|
||||
-a, admin Web interface options
|
||||
Add '-h' for more info on Web Interface usage
|
||||
-c, chronometer Calculates stats and displays to an LCD
|
||||
Add '-h' for more info on chronometer usage
|
||||
setpassword [pwd] Set the password for the web interface
|
||||
Without optional argument, password is read interactively.
|
||||
When specifying a password directly, enclose it in single quotes.
|
||||
-g, updateGravity Update the list of ad-serving domains
|
||||
-h, --help, help Show this help dialog
|
||||
-l, logging Specify whether the Pi-hole log should be used
|
||||
|
@ -493,7 +504,6 @@ Options:
|
|||
-up, updatePihole Update Pi-hole subsystems
|
||||
Add '--check-only' to exit script before update is performed.
|
||||
-v, version Show installed versions of Pi-hole, Web Interface & FTL
|
||||
Add '-h' for more info on version usage
|
||||
uninstall Uninstall Pi-hole from your system
|
||||
status Display the running status of Pi-hole subsystems
|
||||
enable Enable Pi-hole subsystems
|
||||
|
@ -513,59 +523,64 @@ if [[ $# = 0 ]]; then
|
|||
fi
|
||||
|
||||
# functions that do not require sudo power
|
||||
need_root=1
|
||||
case "${1}" in
|
||||
"-h" | "help" | "--help" ) helpFunc;;
|
||||
"-v" | "version" ) versionFunc "$@";;
|
||||
"-v" | "version" ) versionFunc;;
|
||||
"-c" | "chronometer" ) chronometerFunc "$@";;
|
||||
"-q" | "query" ) queryFunc "$@";;
|
||||
"status" ) statusFunc "$2";;
|
||||
|
||||
"tricorder" ) tricorderFunc;;
|
||||
|
||||
# we need to add all arguments that require sudo power to not trigger the * argument
|
||||
"-w" | "whitelist" ) ;;
|
||||
"-b" | "blacklist" ) ;;
|
||||
"--wild" | "wildcard" ) ;;
|
||||
"--regex" | "regex" ) ;;
|
||||
"--white-regex" | "white-regex" ) ;;
|
||||
"--white-wild" | "white-wild" ) ;;
|
||||
"allow" | "allowlist" ) need_root=0;;
|
||||
"deny" | "denylist" ) need_root=0;;
|
||||
"--wild" | "wildcard" ) need_root=0;;
|
||||
"--regex" | "regex" ) need_root=0;;
|
||||
"--allow-regex" | "allow-regex" ) need_root=0;;
|
||||
"--allow-wild" | "allow-wild" ) need_root=0;;
|
||||
"-f" | "flush" ) ;;
|
||||
"-up" | "updatePihole" ) ;;
|
||||
"-r" | "reconfigure" ) ;;
|
||||
"-g" | "updateGravity" ) ;;
|
||||
"-l" | "logging" ) ;;
|
||||
"uninstall" ) ;;
|
||||
"enable" ) ;;
|
||||
"disable" ) ;;
|
||||
"enable" ) need_root=0;;
|
||||
"disable" ) need_root=0;;
|
||||
"-d" | "debug" ) ;;
|
||||
"restartdns" ) ;;
|
||||
"-a" | "admin" ) ;;
|
||||
"-g" | "updateGravity" ) ;;
|
||||
"reloaddns" ) ;;
|
||||
"setpassword" ) ;;
|
||||
"checkout" ) ;;
|
||||
"updatechecker" ) ;;
|
||||
"arpflush" ) ;;
|
||||
"-t" | "tail" ) ;;
|
||||
"api" ) need_root=0;;
|
||||
* ) helpFunc;;
|
||||
esac
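Editor's note: the need_root flag set in the case statement above lets list management and API queries pass the new privilege check, while everything else is still rejected for non-root users. A hedged example of the resulting behaviour:

    # flagged need_root=0: accepted when run as the "pihole" service user
    pihole allow example.com
    pihole deny ads.example.net
    # still root-only: otherwise prints "... requires root privileges, try: sudo pihole -g"
    pihole -g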
|
||||
|
||||
# Must be root to use this tool
|
||||
if [[ ! $EUID -eq 0 ]];then
|
||||
if [[ -x "$(command -v sudo)" ]]; then
|
||||
exec sudo bash "$0" "$@"
|
||||
exit $?
|
||||
else
|
||||
echo -e " ${CROSS} sudo is needed to run pihole commands. Please run this script as root or install sudo."
|
||||
exit 1
|
||||
fi
|
||||
# In the case of alpine running in a container, the USER variable appears to be blank
|
||||
# which prevents the next trap from working correctly. Set it by running whoami
|
||||
if [[ -z ${USER} ]]; then
|
||||
USER=$(whoami)
|
||||
fi
|
||||
|
||||
# Check if the current user is neither root nor pihole and if the command
|
||||
# requires root. If so, exit with an error message.
|
||||
if [[ $EUID -ne 0 && ${USER} != "pihole" && need_root -eq 1 ]];then
|
||||
echo -e " ${CROSS} The Pi-hole command requires root privileges, try:"
|
||||
echo -e " ${COL_GREEN}sudo pihole $*${COL_NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Handle redirecting to specific functions based on arguments
|
||||
case "${1}" in
|
||||
"-w" | "whitelist" ) listFunc "$@";;
|
||||
"-b" | "blacklist" ) listFunc "$@";;
|
||||
"allow" | "allowlist" ) listFunc "$@";;
|
||||
"deny" | "denylist" ) listFunc "$@";;
|
||||
"--wild" | "wildcard" ) listFunc "$@";;
|
||||
"--regex" | "regex" ) listFunc "$@";;
|
||||
"--white-regex" | "white-regex" ) listFunc "$@";;
|
||||
"--white-wild" | "white-wild" ) listFunc "$@";;
|
||||
"--allow-regex" | "allow-regex" ) listFunc "$@";;
|
||||
"--allow-wild" | "allow-wild" ) listFunc "$@";;
|
||||
"-d" | "debug" ) debugFunc "$@";;
|
||||
"-f" | "flush" ) flushFunc "$@";;
|
||||
"-up" | "updatePihole" ) updatePiholeFunc "$@";;
|
||||
|
@ -573,12 +588,15 @@ case "${1}" in
|
|||
"-g" | "updateGravity" ) updateGravityFunc "$@";;
|
||||
"-l" | "logging" ) piholeLogging "$@";;
|
||||
"uninstall" ) uninstallFunc;;
|
||||
"enable" ) piholeEnable 1;;
|
||||
"disable" ) piholeEnable 0 "$2";;
|
||||
"enable" ) piholeEnable true "$2";;
|
||||
"disable" ) piholeEnable false "$2";;
|
||||
"restartdns" ) restartDNS "$2";;
|
||||
"-a" | "admin" ) webpageFunc "$@";;
|
||||
"reloaddns" ) restartDNS "reload";;
|
||||
"setpassword" ) SetWebPassword "$@";;
|
||||
"checkout" ) piholeCheckoutFunc "$@";;
|
||||
"updatechecker" ) shift; updateCheckFunc "$@";;
|
||||
"arpflush" ) arpFunc "$@";;
|
||||
"-t" | "tail" ) tailFunc "$2";;
|
||||
"api" ) apiFunc "$2";;
|
||||
* ) helpFunc;;
|
||||
esac
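Editor's note: enable/disable now hand a boolean plus the optional second argument to piholeEnable, so the timed-disable form should keep working as before (duration syntax shown here is illustrative):

    pihole disable 5m   # pause blocking, re-enable automatically after five minutes
    pihole enable       # resume blocking immediately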
|
||||
|
|
|
@ -1,18 +1,20 @@
|
|||
FROM quay.io/centos/centos:stream9
|
||||
# Disable SELinux
|
||||
RUN echo "SELINUX=disabled" > /etc/selinux/config
|
||||
RUN yum install -y --allowerasing curl git initscripts
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
FROM buildpack-deps:bullseye-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
FROM buildpack-deps:buster-scm
|
||||
FROM buildpack-deps:bookworm-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
|
@ -1,18 +1,18 @@
|
|||
FROM fedora:37
|
||||
FROM fedora:39
|
||||
RUN dnf install -y git initscripts
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
|
@ -1,18 +1,18 @@
|
|||
FROM fedora:36
|
||||
FROM fedora:40
|
||||
RUN dnf install -y git initscripts
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
|
@ -1,18 +1,17 @@
|
|||
FROM buildpack-deps:focal-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
||||
|
|
|
@ -1,18 +1,18 @@
|
|||
FROM buildpack-deps:jammy-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
||||
|
|
|
@ -1,18 +1,18 @@
|
|||
FROM quay.io/centos/centos:stream8
|
||||
RUN yum install -y git initscripts
|
||||
FROM buildpack-deps:lunar-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL true
|
||||
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
18
test/_ubuntu_24.Dockerfile
Normal file
|
@ -0,0 +1,18 @@
|
|||
FROM buildpack-deps:24.04-scm
|
||||
|
||||
ENV GITDIR=/etc/.pihole
|
||||
ENV SCRIPTDIR=/opt/pihole
|
||||
|
||||
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
|
||||
ADD . $GITDIR
|
||||
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN true && \
|
||||
chmod +x $SCRIPTDIR/*
|
||||
|
||||
ENV SKIP_INSTALL=true
|
||||
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
|
||||
|
||||
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
|
|
@ -4,13 +4,6 @@ import testinfra.backend.docker
|
|||
import subprocess
|
||||
from textwrap import dedent
|
||||
|
||||
|
||||
SETUPVARS = {
|
||||
"PIHOLE_INTERFACE": "eth99",
|
||||
"PIHOLE_DNS_1": "4.2.2.1",
|
||||
"PIHOLE_DNS_2": "4.2.2.2",
|
||||
}
|
||||
|
||||
IMAGE = "pytest_pihole:test_container"
|
||||
|
||||
tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
docker-compose == 1.29.2
|
||||
pytest == 7.2.1
|
||||
pytest-xdist == 3.2.0
|
||||
pytest-testinfra == 7.0.0
|
||||
tox == 4.4.6
|
||||
|
||||
pyyaml == 6.0.2
|
||||
pytest == 8.3.3
|
||||
pytest-xdist == 3.6.1
|
||||
pytest-testinfra == 10.1.1
|
||||
tox == 4.20.0
|
||||
pytest-clarity == 1.0.1
|
||||
|
|
|
@ -2,7 +2,6 @@ import pytest
|
|||
from textwrap import dedent
|
||||
import re
|
||||
from .conftest import (
|
||||
SETUPVARS,
|
||||
tick_box,
|
||||
info_box,
|
||||
cross_box,
|
||||
|
@ -13,6 +12,8 @@ from .conftest import (
|
|||
run_script,
|
||||
)
|
||||
|
||||
FTL_BRANCH = "development-v6"
|
||||
|
||||
|
||||
def test_supported_package_manager(host):
|
||||
"""
|
||||
|
@ -32,76 +33,6 @@ def test_supported_package_manager(host):
|
|||
# assert package_manager_detect.rc == 1
|
||||
|
||||
|
||||
def test_setupVars_are_sourced_to_global_scope(host):
|
||||
"""
|
||||
currently update_dialogs sources setupVars with a dot,
|
||||
then various other functions use the variables.
|
||||
This confirms the sourced variables are in scope between functions
|
||||
"""
|
||||
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n"
|
||||
for k, v in SETUPVARS.items():
|
||||
setup_var_file += "{}={}\n".format(k, v)
|
||||
setup_var_file += "EOF\n"
|
||||
host.run(setup_var_file)
|
||||
|
||||
script = dedent(
|
||||
"""\
|
||||
set -e
|
||||
printSetupVars() {
|
||||
# Currently debug test function only
|
||||
echo "Outputting sourced variables"
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
|
||||
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
|
||||
}
|
||||
update_dialogs() {
|
||||
. /etc/pihole/setupVars.conf
|
||||
}
|
||||
update_dialogs
|
||||
printSetupVars
|
||||
"""
|
||||
)
|
||||
|
||||
output = run_script(host, script).stdout
|
||||
|
||||
for k, v in SETUPVARS.items():
|
||||
assert "{}={}".format(k, v) in output
|
||||
|
||||
|
||||
def test_setupVars_saved_to_file(host):
|
||||
"""
|
||||
confirm saved settings are written to a file for future updates to re-use
|
||||
"""
|
||||
# dedent works better with this and padding matching script below
|
||||
set_setup_vars = "\n"
|
||||
for k, v in SETUPVARS.items():
|
||||
set_setup_vars += " {}={}\n".format(k, v)
|
||||
host.run(set_setup_vars)
|
||||
|
||||
script = dedent(
|
||||
"""\
|
||||
set -e
|
||||
echo start
|
||||
TERM=xterm
|
||||
source /opt/pihole/basic-install.sh
|
||||
source /opt/pihole/utils.sh
|
||||
{}
|
||||
mkdir -p /etc/dnsmasq.d
|
||||
version_check_dnsmasq
|
||||
echo "" > /etc/pihole/pihole-FTL.conf
|
||||
finalExports
|
||||
cat /etc/pihole/setupVars.conf
|
||||
""".format(
|
||||
set_setup_vars
|
||||
)
|
||||
)
|
||||
|
||||
output = run_script(host, script).stdout
|
||||
|
||||
for k, v in SETUPVARS.items():
|
||||
assert "{}={}".format(k, v) in output
|
||||
|
||||
|
||||
def test_selinux_not_detected(host):
|
||||
"""
|
||||
confirms installer continues when SELinux configuration file does not exist
|
||||
|
@ -118,21 +49,6 @@ def test_selinux_not_detected(host):
|
|||
assert check_selinux.rc == 0
|
||||
|
||||
|
||||
def test_installPiholeWeb_fresh_install_no_errors(host):
|
||||
"""
|
||||
confirms all web page assets from Core repo are installed on a fresh build
|
||||
"""
|
||||
installWeb = host.run(
|
||||
"""
|
||||
umask 0027
|
||||
source /opt/pihole/basic-install.sh
|
||||
installPiholeWeb
|
||||
"""
|
||||
)
|
||||
expected_stdout = tick_box + " Installing sudoer file"
|
||||
assert expected_stdout in installWeb.stdout
|
||||
|
||||
|
||||
def get_directories_recursive(host, directory):
|
||||
if directory is None:
|
||||
return directory
|
||||
|
@ -150,13 +66,10 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
mock_command("dialog", {"*": ("", "0")}, host)
|
||||
# mock git pull
|
||||
mock_command_passthrough("git", {"pull": ("", "0")}, host)
|
||||
# mock systemctl to not start lighttpd and FTL
|
||||
# mock systemctl to not start FTL
|
||||
mock_command_2(
|
||||
"systemctl",
|
||||
{
|
||||
"enable lighttpd": ("", "0"),
|
||||
"restart lighttpd": ("", "0"),
|
||||
"start lighttpd": ("", "0"),
|
||||
"enable pihole-FTL": ("", "0"),
|
||||
"restart pihole-FTL": ("", "0"),
|
||||
"start pihole-FTL": ("", "0"),
|
||||
|
@ -168,14 +81,8 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
host.run("command -v apt-get > /dev/null && apt-get install -qq man")
|
||||
host.run("command -v dnf > /dev/null && dnf install -y man")
|
||||
host.run("command -v yum > /dev/null && yum install -y man")
|
||||
# create configuration file
|
||||
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n"
|
||||
for k, v in SETUPVARS.items():
|
||||
setup_var_file += "{}={}\n".format(k, v)
|
||||
setup_var_file += "INSTALL_WEB_SERVER=true\n"
|
||||
setup_var_file += "INSTALL_WEB_INTERFACE=true\n"
|
||||
setup_var_file += "EOF\n"
|
||||
host.run(setup_var_file)
|
||||
# Workaround to get FTLv6 installed until it reaches master branch
|
||||
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
|
||||
install = host.run(
|
||||
"""
|
||||
export TERM=xterm
|
||||
|
@ -187,6 +94,7 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
runUnattended=true
|
||||
useUpdateVars=true
|
||||
main
|
||||
/opt/pihole/pihole-FTL-prestart.sh
|
||||
"""
|
||||
)
|
||||
assert 0 == install.rc
|
||||
|
@ -232,34 +140,6 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
check_macvendor = test_cmd.format("r", "/etc/pihole/macvendor.db", piholeuser)
|
||||
actual_rc = host.run(check_macvendor).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# readable and writeable pihole-FTL.conf
|
||||
check_FTLconf = test_cmd.format("r", "/etc/pihole/pihole-FTL.conf", piholeuser)
|
||||
actual_rc = host.run(check_FTLconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_FTLconf = test_cmd.format("w", "/etc/pihole/pihole-FTL.conf", piholeuser)
|
||||
actual_rc = host.run(check_FTLconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# readable setupVars.conf
|
||||
check_setup = test_cmd.format("r", "/etc/pihole/setupVars.conf", piholeuser)
|
||||
actual_rc = host.run(check_setup).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check dnsmasq files
|
||||
# readable /etc/dnsmasq.conf
|
||||
check_dnsmasqconf = test_cmd.format("r", "/etc/dnsmasq.conf", piholeuser)
|
||||
actual_rc = host.run(check_dnsmasqconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# readable /etc/dnsmasq.d/01-pihole.conf
|
||||
check_dnsmasqconf = test_cmd.format("r", "/etc/dnsmasq.d", piholeuser)
|
||||
actual_rc = host.run(check_dnsmasqconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_dnsmasqconf = test_cmd.format("x", "/etc/dnsmasq.d", piholeuser)
|
||||
actual_rc = host.run(check_dnsmasqconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_dnsmasqconf = test_cmd.format(
|
||||
"r", "/etc/dnsmasq.d/01-pihole.conf", piholeuser
|
||||
)
|
||||
actual_rc = host.run(check_dnsmasqconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check readable and executable /etc/init.d/pihole-FTL
|
||||
check_init = test_cmd.format("x", "/etc/init.d/pihole-FTL", piholeuser)
|
||||
actual_rc = host.run(check_init).rc
|
||||
|
@ -267,28 +147,6 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
check_init = test_cmd.format("r", "/etc/init.d/pihole-FTL", piholeuser)
|
||||
actual_rc = host.run(check_init).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check readable /etc/lighttpd/lighttpd.conf
|
||||
check_lighttpd = test_cmd.format("r", "/etc/lighttpd/lighttpd.conf", piholeuser)
|
||||
actual_rc = host.run(check_lighttpd).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check readable /etc/lighttpd/conf*/pihole-admin.conf
|
||||
check_lighttpd = test_cmd.format("r", "/etc/lighttpd/conf.d", piholeuser)
|
||||
if host.run(check_lighttpd).rc == exit_status_success:
|
||||
check_lighttpd = test_cmd.format(
|
||||
"r", "/etc/lighttpd/conf.d/pihole-admin.conf", piholeuser
|
||||
)
|
||||
actual_rc = host.run(check_lighttpd).rc
|
||||
assert exit_status_success == actual_rc
|
||||
else:
|
||||
check_lighttpd = test_cmd.format(
|
||||
"r", "/etc/lighttpd/conf-available", piholeuser
|
||||
)
|
||||
if host.run(check_lighttpd).rc == exit_status_success:
|
||||
check_lighttpd = test_cmd.format(
|
||||
"r", "/etc/lighttpd/conf-available/15-pihole-admin.conf", piholeuser
|
||||
)
|
||||
actual_rc = host.run(check_lighttpd).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check readable and executable manpages
|
||||
if maninstalled is True:
|
||||
check_man = test_cmd.format("x", "/usr/local/share/man", piholeuser)
|
||||
|
@ -314,15 +172,6 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
)
|
||||
actual_rc = host.run(check_man).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_man = test_cmd.format(
|
||||
"r", "/usr/local/share/man/man8/pihole-FTL.8", piholeuser
|
||||
)
|
||||
actual_rc = host.run(check_man).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check not readable sudoers file
|
||||
check_sudo = test_cmd.format("r", "/etc/sudoers.d/pihole", piholeuser)
|
||||
actual_rc = host.run(check_sudo).rc
|
||||
assert exit_status_success != actual_rc
|
||||
# check not readable cron file
|
||||
check_sudo = test_cmd.format("x", "/etc/cron.d/", piholeuser)
|
||||
actual_rc = host.run(check_sudo).rc
|
||||
|
@ -347,231 +196,6 @@ def test_installPihole_fresh_install_readableFiles(host):
|
|||
actual_rc = host.run(check_pihole).rc
|
||||
|
||||
|
||||
@pytest.mark.parametrize("test_webpage", [True])
|
||||
def test_installPihole_fresh_install_readableBlockpage(host, test_webpage):
|
||||
"""
|
||||
confirms all web page assets from Core repo are readable
|
||||
by $LIGHTTPD_USER on a fresh build
|
||||
"""
|
||||
piholeWebpage = [
|
||||
"127.0.0.1",
|
||||
# "pi.hole"
|
||||
]
|
||||
# dialog returns Cancel for user prompt
|
||||
mock_command("dialog", {"*": ("", "0")}, host)
|
||||
|
||||
# mock git pull
|
||||
mock_command_passthrough("git", {"pull": ("", "0")}, host)
|
||||
# mock systemctl to start lighttpd and FTL
|
||||
ligthttpdcommand = dedent(
|
||||
r'''\"\"
|
||||
echo 'starting lighttpd with {}'
|
||||
if [ command -v "apt-get" >/dev/null 2>&1 ]; then
|
||||
LIGHTTPD_USER="www-data"
|
||||
LIGHTTPD_GROUP="www-data"
|
||||
else
|
||||
LIGHTTPD_USER="lighttpd"
|
||||
LIGHTTPD_GROUP="lighttpd"
|
||||
fi
|
||||
mkdir -p "{run}"
|
||||
chown {usergroup} "{run}"
|
||||
mkdir -p "{cache}"
|
||||
chown {usergroup} "/var/cache"
|
||||
chown {usergroup} "{cache}"
|
||||
mkdir -p "{compress}"
|
||||
chown {usergroup} "{compress}"
|
||||
mkdir -p "{uploads}"
|
||||
chown {usergroup} "{uploads}"
|
||||
chmod 0777 /var
|
||||
chmod 0777 /var/cache
|
||||
chmod 0777 "{cache}"
|
||||
find "{run}" -type d -exec chmod 0777 {chmodarg} \;;
|
||||
find "{run}" -type f -exec chmod 0666 {chmodarg} \;;
|
||||
find "{compress}" -type d -exec chmod 0777 {chmodarg} \;;
|
||||
find "{compress}" -type f -exec chmod 0666 {chmodarg} \;;
|
||||
find "{uploads}" -type d -exec chmod 0777 {chmodarg} \;;
|
||||
find "{uploads}" -type f -exec chmod 0666 {chmodarg} \;;
|
||||
/usr/sbin/lighttpd -tt -f '{config}'
|
||||
/usr/sbin/lighttpd -f '{config}'
|
||||
echo \"\"'''.format(
|
||||
"{}",
|
||||
usergroup="${{LIGHTTPD_USER}}:${{LIGHTTPD_GROUP}}",
|
||||
chmodarg="{{}}",
|
||||
config="/etc/lighttpd/lighttpd.conf",
|
||||
run="/run/lighttpd",
|
||||
cache="/var/cache/lighttpd",
|
||||
uploads="/var/cache/lighttpd/uploads",
|
||||
compress="/var/cache/lighttpd/compress",
|
||||
)
|
||||
)
|
||||
FTLcommand = dedent(
|
||||
'''\"\"
|
||||
set -x
|
||||
/etc/init.d/pihole-FTL restart
|
||||
echo \"\"'''
|
||||
)
|
||||
mock_command_run(
|
||||
"systemctl",
|
||||
{
|
||||
"enable lighttpd": ("", "0"),
|
||||
"restart lighttpd": (ligthttpdcommand.format("restart"), "0"),
|
||||
"start lighttpd": (ligthttpdcommand.format("start"), "0"),
|
||||
"enable pihole-FTL": ("", "0"),
|
||||
"restart pihole-FTL": (FTLcommand, "0"),
|
||||
"start pihole-FTL": (FTLcommand, "0"),
|
||||
"*": ('echo "systemctl call with $@"', "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
# create configuration file
|
||||
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n"
|
||||
for k, v in SETUPVARS.items():
|
||||
setup_var_file += "{}={}\n".format(k, v)
|
||||
setup_var_file += "INSTALL_WEB_SERVER=true\n"
|
||||
setup_var_file += "INSTALL_WEB_INTERFACE=true\n"
|
||||
setup_var_file += "EOF\n"
|
||||
host.run(setup_var_file)
|
||||
installWeb = host.run(
|
||||
"""
|
||||
export TERM=xterm
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
umask 0027
|
||||
runUnattended=true
|
||||
useUpdateVars=true
|
||||
source /opt/pihole/basic-install.sh > /dev/null
|
||||
runUnattended=true
|
||||
useUpdateVars=true
|
||||
main
|
||||
echo "LIGHTTPD_USER=${LIGHTTPD_USER}"
|
||||
echo "webroot=${webroot}"
|
||||
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
|
||||
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
|
||||
"""
|
||||
)
|
||||
assert 0 == installWeb.rc
|
||||
piholeuser = "pihole"
|
||||
webuser = ""
|
||||
user = re.findall(r"^\s*LIGHTTPD_USER=.*$", installWeb.stdout, re.MULTILINE)
|
||||
for match in user:
|
||||
webuser = match.replace("LIGHTTPD_USER=", "").strip()
|
||||
webroot = ""
|
||||
user = re.findall(r"^\s*webroot=.*$", installWeb.stdout, re.MULTILINE)
|
||||
for match in user:
|
||||
webroot = match.replace("webroot=", "").strip()
|
||||
if not webroot.strip():
|
||||
webroot = "/var/www/html"
|
||||
installWebInterface = True
|
||||
interface = re.findall(
|
||||
r"^\s*INSTALL_WEB_INTERFACE=.*$", installWeb.stdout, re.MULTILINE
|
||||
)
|
||||
for match in interface:
|
||||
testvalue = match.replace("INSTALL_WEB_INTERFACE=", "").strip().lower()
|
||||
if not testvalue.strip():
|
||||
installWebInterface = testvalue == "true"
|
||||
installWebServer = True
|
||||
server = re.findall(r"^\s*INSTALL_WEB_SERVER=.*$", installWeb.stdout, re.MULTILINE)
|
||||
for match in server:
|
||||
testvalue = match.replace("INSTALL_WEB_SERVER=", "").strip().lower()
|
||||
if not testvalue.strip():
|
||||
installWebServer = testvalue == "true"
|
||||
# if webserver install was not requested
|
||||
# at least pihole must be able to read files
|
||||
if installWebServer is False:
|
||||
webuser = piholeuser
|
||||
exit_status_success = 0
|
||||
test_cmd = 'su --shell /bin/bash --command "test -{0} {1}" -p {2}'
|
||||
# check files that need a running FTL to be created
|
||||
# readable and writeable pihole-FTL.db
|
||||
check_FTLconf = test_cmd.format("r", "/etc/pihole/pihole-FTL.db", piholeuser)
|
||||
actual_rc = host.run(check_FTLconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_FTLconf = test_cmd.format("w", "/etc/pihole/pihole-FTL.db", piholeuser)
|
||||
actual_rc = host.run(check_FTLconf).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check directories above $webroot for read and execute permission
|
||||
check_var = test_cmd.format("r", "/var", webuser)
|
||||
actual_rc = host.run(check_var).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_var = test_cmd.format("x", "/var", webuser)
|
||||
actual_rc = host.run(check_var).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_www = test_cmd.format("r", "/var/www", webuser)
|
||||
actual_rc = host.run(check_www).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_www = test_cmd.format("x", "/var/www", webuser)
|
||||
actual_rc = host.run(check_www).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_html = test_cmd.format("r", "/var/www/html", webuser)
|
||||
actual_rc = host.run(check_html).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_html = test_cmd.format("x", "/var/www/html", webuser)
|
||||
actual_rc = host.run(check_html).rc
|
||||
assert exit_status_success == actual_rc
|
||||
# check directories below $webroot for read and execute permission
|
||||
check_admin = test_cmd.format("r", webroot + "/admin", webuser)
|
||||
actual_rc = host.run(check_admin).rc
|
||||
assert exit_status_success == actual_rc
|
||||
check_admin = test_cmd.format("x", webroot + "/admin", webuser)
|
||||
actual_rc = host.run(check_admin).rc
|
||||
assert exit_status_success == actual_rc
|
||||
directories = get_directories_recursive(host, webroot + "/admin/")
|
||||
for directory in directories:
|
||||
check_pihole = test_cmd.format("r", directory, webuser)
|
||||
actual_rc = host.run(check_pihole).rc
|
||||
check_pihole = test_cmd.format("x", directory, webuser)
|
||||
actual_rc = host.run(check_pihole).rc
|
||||
findfiles = 'find "{}" -maxdepth 1 -type f -exec echo {{}} \\;;'
|
||||
filelist = host.run(findfiles.format(directory))
|
||||
files = list(filter(bool, filelist.stdout.splitlines()))
|
||||
for file in files:
|
||||
check_pihole = test_cmd.format("r", file, webuser)
|
||||
actual_rc = host.run(check_pihole).rc
|
||||
# check web interface files
|
||||
# change nameserver to pi-hole
|
||||
# setting nameserver in /etc/resolv.conf to pi-hole does
|
||||
# not work here because of the way docker uses this file
|
||||
ns = host.run(r"sed -i 's/nameserver.*/nameserver 127.0.0.1/' /etc/resolv.conf")
|
||||
pihole_is_ns = ns.rc == 0
|
||||
|
||||
def is_ip(address):
|
||||
m = re.match(r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})", address)
|
||||
return bool(m)
|
||||
|
||||
if installWebInterface is True:
|
||||
if test_webpage is True:
|
||||
# check webpage for unreadable files
|
||||
noPHPfopen = re.compile(
|
||||
(
|
||||
r"PHP Error(%d+):\s+fopen([^)]+):\s+"
|
||||
+ r"failed to open stream: "
|
||||
+ r"Permission denied in"
|
||||
),
|
||||
re.I,
|
||||
)
|
||||
# using cURL option --dns-servers is not possible
|
||||
status = (
|
||||
'curl -s --head "{}" | '
|
||||
+ "head -n 1 | "
|
||||
+ 'grep "HTTP/1.[01] [23].." > /dev/null'
|
||||
)
|
||||
digcommand = r"dig A +short {} @127.0.0.1 | head -n 1"
|
||||
pagecontent = 'curl --verbose -L "{}"'
|
||||
for page in piholeWebpage:
|
||||
testpage = "http://" + page + "/admin/"
|
||||
resolvesuccess = True
|
||||
if is_ip(page) is False:
|
||||
dig = host.run(digcommand.format(page))
|
||||
testpage = "http://" + dig.stdout.strip() + "/admin/"
|
||||
resolvesuccess = dig.rc == 0
|
||||
if resolvesuccess or pihole_is_ns:
|
||||
# check HTTP status of blockpage
|
||||
actual_rc = host.run(status.format(testpage))
|
||||
assert exit_status_success == actual_rc.rc
|
||||
# check for PHP error
|
||||
actual_output = host.run(pagecontent.format(testpage))
|
||||
assert noPHPfopen.match(actual_output.stdout) is None
|
||||
|
||||
|
||||
def test_update_package_cache_success_no_errors(host):
|
||||
"""
|
||||
confirms package cache was updated without any errors
|
||||
|
@ -605,21 +229,35 @@ def test_update_package_cache_failure_no_errors(host):
|
|||
assert "Error: Unable to update package cache." in updateCache.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_aarch64_no_errors(host):
|
||||
@pytest.mark.parametrize(
|
||||
"arch,detected_string,supported",
|
||||
[
|
||||
("aarch64", "AArch64 (64 Bit ARM)", True),
|
||||
("armv6", "ARMv6", True),
|
||||
("armv7l", "ARMv7 (or newer)", True),
|
||||
("armv7", "ARMv7 (or newer)", True),
|
||||
("armv8a", "ARMv7 (or newer)", True),
|
||||
("x86_64", "x86_64", True),
|
||||
("riscv64", "riscv64", True),
|
||||
("mips", "mips", False),
|
||||
],
|
||||
)
|
||||
def test_FTL_detect_no_errors(host, arch, detected_string, supported):
|
||||
"""
|
||||
confirms only aarch64 package is downloaded for FTL engine
|
||||
confirms only correct package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return aarch64 platform
|
||||
mock_command("uname", {"-m": ("aarch64", "0")}, host)
|
||||
# mock ldd to respond with aarch64 shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
# mock uname to return passed platform
|
||||
mock_command("uname", {"-m": (arch, "0")}, host)
|
||||
# mock readelf to respond with passed CPU architecture
|
||||
mock_command_2(
|
||||
"readelf",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux-aarch64.so.1", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux-aarch64.so.1", "0"),
|
||||
"-A /bin/sh": ("Tag_CPU_arch: " + arch, "0"),
|
||||
"-A /usr/bin/sh": ("Tag_CPU_arch: " + arch, "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
|
@ -630,258 +268,30 @@ def test_FTL_detect_aarch64_no_errors(host):
|
|||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Detected AArch64 (64 Bit ARM) processor"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
if supported:
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Detected " + detected_string + " architecture"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
else:
|
||||
expected_stdout = (
|
||||
"Not able to detect architecture (unknown: " + detected_string + ")"
|
||||
)
|
||||
assert expected_stdout in detectPlatform.stdout
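Editor's note: the parametrized test mocks uname and readelf rather than ldd; on a real ARM system the values the installer now keys on can be inspected the same way (output shown is only an example):

    uname -m                                  # e.g. armv7l
    readelf -A /bin/sh | grep Tag_CPU_arch    # e.g. Tag_CPU_arch: v7 (ARM builds only)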
|
||||
|
||||
|
||||
def test_FTL_detect_armv4t_no_errors(host):
|
||||
def test_FTL_development_binary_installed_and_responsive_no_errors(host):
|
||||
"""
|
||||
confirms only armv4t package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return armv4t platform
|
||||
mock_command("uname", {"-m": ("armv4t", "0")}, host)
|
||||
# mock ldd to respond with armv4t shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux.so.3", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux.so.3", "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (" Detected ARMv4 processor")
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv5te_no_errors(host):
|
||||
"""
|
||||
confirms only armv5te package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return armv5te platform
|
||||
mock_command("uname", {"-m": ("armv5te", "0")}, host)
|
||||
# mock ldd to respond with ld-linux shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux.so.3", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux.so.3", "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (" Detected ARMv5 (or newer) processor")
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv6l_no_errors(host):
|
||||
"""
|
||||
confirms only armv6l package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return armv6l platform
|
||||
mock_command("uname", {"-m": ("armv6l", "0")}, host)
|
||||
# mock ldd to respond with ld-linux-armhf shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (
|
||||
" Detected ARMv6 processor " "(with hard-float support)"
|
||||
)
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv7l_no_errors(host):
|
||||
"""
|
||||
confirms only armv7l package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return armv7l platform
|
||||
mock_command("uname", {"-m": ("armv7l", "0")}, host)
|
||||
# mock ldd to respond with ld-linux-armhf shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (
|
||||
" Detected ARMv7 processor " "(with hard-float support)"
|
||||
)
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv8a_no_errors(host):
|
||||
"""
|
||||
confirms only armv8a package is downloaded for FTL engine
|
||||
"""
|
||||
# mock uname to return armv8a platform
|
||||
mock_command("uname", {"-m": ("armv8a", "0")}, host)
|
||||
# mock ldd to respond with ld-linux-armhf shared library
|
||||
mock_command(
|
||||
"ldd",
|
||||
{
|
||||
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
|
||||
},
|
||||
host,
|
||||
)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Detected ARMv8 (or newer) processor"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_x86_64_no_errors(host):
|
||||
"""
|
||||
confirms only x86_64 package is downloaded for FTL engine
|
||||
"""
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + " FTL Checks..."
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Detected x86_64 processor"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_unknown_no_errors(host):
|
||||
"""confirms only generic package is downloaded for FTL engine"""
|
||||
# mock uname to return generic platform
|
||||
mock_command("uname", {"-m": ("mips", "0")}, host)
|
||||
detectPlatform = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
expected_stdout = "Not able to detect processor (unknown: mips)"
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_download_aarch64_no_errors(host):
|
||||
"""
|
||||
confirms only aarch64 package is downloaded for FTL engine
|
||||
"""
|
||||
# mock dialog answers and ensure installer dependencies
|
||||
mock_command("dialog", {"*": ("", "0")}, host)
|
||||
host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
install_dependent_packages ${INSTALLER_DEPS[@]}
|
||||
"""
|
||||
)
|
||||
download_binary = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
FTLinstall "pihole-FTL-aarch64-linux-gnu"
|
||||
"""
|
||||
)
|
||||
expected_stdout = tick_box + " Downloading and Installing FTL"
|
||||
assert expected_stdout in download_binary.stdout
|
||||
assert "error" not in download_binary.stdout.lower()
|
||||
|
||||
|
||||
def test_FTL_binary_installed_and_responsive_no_errors(host):
|
||||
"""
|
||||
confirms FTL binary is copied and functional in installed location
|
||||
confirms FTL development binary is copied and functional in installed location
|
||||
"""
|
||||
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
|
||||
host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
echo "development" > /etc/pihole/ftlbranch
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
|
|
|
@ -40,6 +40,26 @@ def test_key_addition_works(host):
|
|||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_key_addition_substr(host):
|
||||
"""Confirms addKey adds substring keys (no value) to a file"""
|
||||
host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
addKey "./testoutput" "KEY_ONE"
|
||||
addKey "./testoutput" "KEY_O"
|
||||
addKey "./testoutput" "KEY_TWO"
|
||||
addKey "./testoutput" "Y_TWO"
|
||||
"""
|
||||
)
|
||||
output = host.run(
|
||||
"""
|
||||
cat ./testoutput
|
||||
"""
|
||||
)
|
||||
expected_stdout = "KEY_ONE\nKEY_O\nKEY_TWO\nY_TWO\n"
|
||||
assert expected_stdout == output.stdout
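Editor's note: this test pins down that addKey must match whole lines, so an existing "KEY_ONE" does not suppress adding "KEY_O". A minimal sketch of such a whole-line check (not the actual utils.sh implementation, only the behaviour the test expects):

    addKey() {
      local file="${1}" key="${2}"
      # -x: match the entire line, -F: treat the key as a fixed string
      grep -qxF -- "${key}" "${file}" 2>/dev/null || echo "${key}" >> "${file}"
    }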
|
||||
|
||||
|
||||
def test_key_removal_works(host):
|
||||
"""Confirms removeKey removes a key or key/value pair"""
|
||||
host.run(
|
||||
|
@ -62,64 +82,6 @@ def test_key_removal_works(host):
|
|||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLAPIPort_default(host):
|
||||
"""Confirms getFTLAPIPort returns the default API port"""
|
||||
output = host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLAPIPort
|
||||
"""
|
||||
)
|
||||
expected_stdout = "4711\n"
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLAPIPort_custom(host):
|
||||
"""Confirms getFTLAPIPort returns a custom API port"""
|
||||
host.run(
|
||||
"""
|
||||
echo "FTLPORT=1234" > /etc/pihole/pihole-FTL.conf
|
||||
"""
|
||||
)
|
||||
output = host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLAPIPort
|
||||
"""
|
||||
)
|
||||
expected_stdout = "1234\n"
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLAPIPort_malicious(host):
|
||||
"""Confirms getFTLAPIPort returns 4711 if the setting in pihole-FTL.conf contains non-digits"""
|
||||
host.run(
|
||||
"""
|
||||
echo "FTLPORT=*$ssdfsd" > /etc/pihole/pihole-FTL.conf
|
||||
"""
|
||||
)
|
||||
output = host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLAPIPort
|
||||
"""
|
||||
)
|
||||
expected_stdout = "4711\n"
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLPIDFile_default(host):
|
||||
"""Confirms getFTLPIDFile returns the default PID file path"""
|
||||
output = host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLPIDFile
|
||||
"""
|
||||
)
|
||||
expected_stdout = "/run/pihole-FTL.pid\n"
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLPID_default(host):
|
||||
"""Confirms getFTLPID returns the default value if FTL is not running"""
|
||||
output = host.run(
|
||||
|
@ -132,21 +94,30 @@ def test_getFTLPID_default(host):
|
|||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLPIDFile_and_getFTLPID_custom(host):
|
||||
"""Confirms getFTLPIDFile returns a custom PID file path"""
|
||||
def test_setFTLConfigValue_getFTLConfigValue(host):
|
||||
"""
|
||||
Confirms getFTLConfigValue works (also assumes setFTLConfigValue works)
|
||||
Requires FTL to be installed, so we do that first
|
||||
(taken from test_FTL_development_binary_installed_and_responsive_no_errors)
|
||||
"""
|
||||
host.run(
|
||||
"""
|
||||
tmpfile=$(mktemp)
|
||||
echo "PIDFILE=${tmpfile}" > /etc/pihole/pihole-FTL.conf
|
||||
echo "1234" > ${tmpfile}
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
echo "development-v6" > /etc/pihole/ftlbranch
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
"""
|
||||
)
|
||||
|
||||
output = host.run(
|
||||
"""
|
||||
source /opt/pihole/utils.sh
|
||||
FTL_PID_FILE=$(getFTLPIDFile)
|
||||
getFTLPID "${FTL_PID_FILE}"
|
||||
setFTLConfigValue "dns.upstreams" '["9.9.9.9"]' > /dev/null
|
||||
getFTLConfigValue "dns.upstreams"
|
||||
"""
|
||||
)
|
||||
expected_stdout = "1234\n"
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
assert "[ 9.9.9.9 ]" in output.stdout
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
import pytest
|
||||
from .conftest import (
|
||||
tick_box,
|
||||
info_box,
|
||||
cross_box,
|
||||
mock_command,
|
||||
)
|
||||
|
||||
|
||||
def test_enable_epel_repository_centos(host):
|
||||
"""
|
||||
confirms the EPEL package repository is enabled when installed on CentOS
|
||||
"""
|
||||
package_manager_detect = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
"""
|
||||
)
|
||||
expected_stdout = info_box + (
|
||||
" Enabling EPEL package repository " "(https://fedoraproject.org/wiki/EPEL)"
|
||||
)
|
||||
assert expected_stdout in package_manager_detect.stdout
|
||||
expected_stdout = tick_box + " Installed"
|
||||
assert expected_stdout in package_manager_detect.stdout
|
||||
epel_package = host.package("epel-release")
|
||||
assert epel_package.is_installed
|
|
@ -1,15 +0,0 @@
|
|||
def test_epel_and_remi_not_installed_fedora(host):
|
||||
"""
|
||||
confirms installer does not attempt to install EPEL/REMI repositories
|
||||
on Fedora
|
||||
"""
|
||||
package_manager_detect = host.run(
|
||||
"""
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
"""
|
||||
)
|
||||
assert package_manager_detect.stdout == ""
|
||||
|
||||
epel_package = host.package("epel-release")
|
||||
assert not epel_package.is_installed
|
|
@ -1,8 +0,0 @@
|
|||
[tox]
|
||||
envlist = py3
|
||||
|
||||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
commands = docker buildx build --load --progress plain -f _centos_8.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _centos_9.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
|
||||
|
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _debian_11.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
|
||||
|
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
commands = docker buildx build --load --progress plain -f _debian_10.Dockerfile -t pytest_pihole:test_container ../
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _debian_12.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
commands = docker buildx build --load --progress plain -f _fedora_37.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _fedora_39.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
|
|
@ -1,8 +1,10 @@
|
|||
[tox]
|
||||
envlist = py3
|
||||
|
||||
[testenv:py3]
|
||||
[testenv]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
commands = docker buildx build --load --progress plain -f _fedora_36.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _fedora_40.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _ubuntu_20.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
|
||||
|
|
|
@ -4,5 +4,7 @@ envlist = py3
|
|||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _ubuntu_22.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
|
||||
|
|
10
test/tox.ubuntu_23.ini
Normal file
|
@ -0,0 +1,10 @@
|
|||
[tox]
|
||||
envlist = py3
|
||||
|
||||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _ubuntu_23.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
|
10
test/tox.ubuntu_24.ini
Normal file
|
@ -0,0 +1,10 @@
|
|||
[tox]
|
||||
envlist = py3
|
||||
|
||||
[testenv:py3]
|
||||
allowlist_externals = docker
|
||||
deps = -rrequirements.txt
|
||||
setenv =
|
||||
COLUMNS=120
|
||||
commands = docker buildx build --load --progress plain -f _ubuntu_24.Dockerfile -t pytest_pihole:test_container ../
|
||||
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
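Editor's note: each tox file builds the matching Docker image and then runs pytest inside it; from the test/ directory the new Ubuntu 24.04 environment can be exercised with, for example:

    tox -c tox.ubuntu_24.ini
    # or build the test image by hand, exactly as tox does:
    docker buildx build --load --progress plain -f _ubuntu_24.Dockerfile -t pytest_pihole:test_container ../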
|