Merge branch 'development' into fix-ipv6

Commit 67f5f16f84. 47 changed files with 847 additions and 715 deletions.
.github/release.yml (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
+changelog:
+  exclude:
+    labels:
+      - internal
+    authors:
+      - dependabot
+      - github-actions
.github/workflows/stale.yml (vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
+name: Mark stale issues
+
+on:
+  schedule:
+    - cron: '0 * * * *'
+  workflow_dispatch:
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+
+    steps:
+      - uses: actions/stale@v4
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          days-before-stale: 30
+          days-before-close: 5
+          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
+          stale-issue-label: 'stale'
+          exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
+          exempt-all-issue-assignees: true
+          operations-per-run: 300
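The stale workflow also declares a workflow_dispatch trigger, so the sweep can be started on demand instead of waiting for the hourly cron. A minimal sketch using the GitHub CLI, assuming gh is installed and authenticated with sufficient rights on the pi-hole/pi-hole repository:

    # Queue a manual run of the stale-issue workflow (workflow_dispatch)
    gh workflow run stale.yml --repo pi-hole/pi-hole

    # Follow the run that was just queued
    gh run watch --repo pi-hole/pi-hole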
.github/workflows/sync-back-to-dev.yml (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
+name: Sync Back to Development
+
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  sync-branches:
+    runs-on: ubuntu-latest
+    name: Syncing branches
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Opening pull request
+        id: pull
+        uses: tretuna/sync-branches@1.4.0
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FROM_BRANCH: 'master'
+          TO_BRANCH: 'development'
+      - name: Label the pull request to ignore for release note generation
+        uses: actions-ecosystem/action-add-labels@v1
+        with:
+          labels: internal
+          repo: ${{ github.repository }}
+          number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}
.github/workflows/test.yml (vendored, 5 changed lines)

@@ -4,6 +4,9 @@ on:
   pull_request:
     types: [opened, synchronize, reopened, ready_for_review]
 
+permissions:
+  contents: read
+
 jobs:
   smoke-test:
     if: github.event.pull_request.draft == false
@@ -37,7 +40,7 @@ jobs:
         uses: actions/checkout@v2
       -
         name: Set up Python 3.8
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v3
         with:
           python-version: 3.8
       -
@@ -161,4 +161,4 @@ Some notable features include:
 There are several ways to [access the dashboard](https://discourse.pi-hole.net/t/how-do-i-access-pi-holes-dashboard-admin-interface/3168):
 
 1. `http://pi.hole/admin/` (when using Pi-hole as your DNS server)
-2. `http://<IP_ADDPRESS_OF_YOUR_PI_HOLE>/admin/`
+2. `http://<IP_ADDRESS_OF_YOUR_PI_HOLE>/admin/`
@@ -37,6 +37,6 @@ interface=@INT@
 cache-size=@CACHE_SIZE@
 
 log-queries
-log-facility=/var/log/pihole.log
+log-facility=/var/log/pihole/pihole.log
 
 log-async
@@ -1,5 +1,5 @@
 # Determine if terminal is capable of showing colors
-if [[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]; then
+if ([[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]) || [[ "${WEBCALL}" ]]; then
     # Bold and underline may not show up on all clients
     # If something MUST be emphasized, use both
     COL_BOLD='[1m'
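The added [[ "${WEBCALL}" ]] test keeps colored output enabled even when stdout is not a terminal, which is the case when the debug log is generated from the web interface. A small sketch of the effect, assuming COL_TABLE is installed at /opt/pihole/COL_TABLE (the path the other scripts in this commit reference) and that WEBCALL is set before sourcing it:

    #!/usr/bin/env bash
    # With stdout piped, the -t 1 check fails; setting WEBCALL keeps the
    # colored definitions active anyway.
    WEBCALL=true
    source /opt/pihole/COL_TABLE
    echo -e "${COL_LIGHT_GREEN}still colored when piped${COL_NC}" | cat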
@@ -357,7 +357,7 @@ get_sys_stats() {
     ram_used="${ram_raw[1]}"
     ram_total="${ram_raw[2]}"
 
-    if [[ "$(pihole status web 2> /dev/null)" == "1" ]]; then
+    if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
         ph_status="${COL_LIGHT_GREEN}Active"
     else
         ph_status="${COL_LIGHT_RED}Offline"
@@ -19,13 +19,13 @@ upgrade_gravityDB(){
     auditFile="${piholeDir}/auditlog.list"
 
     # Get database version
-    version="$(sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
+    version="$(pihole-FTL sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
 
     if [[ "$version" == "1" ]]; then
         # This migration script upgrades the gravity.db file by
         # adding the domain_audit table
         echo -e "  ${INFO} Upgrading gravity database from version 1 to 2"
-        sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
         version=2
 
         # Store audit domains in database table
@@ -40,28 +40,28 @@ upgrade_gravityDB(){
         # renaming the regex table to regex_blacklist, and
         # creating a new regex_whitelist table + corresponding linking table and views
         echo -e "  ${INFO} Upgrading gravity database from version 2 to 3"
-        sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
         version=3
     fi
     if [[ "$version" == "3" ]]; then
         # This migration script unifies the formally separated domain
         # lists into a single table with a UNIQUE domain constraint
         echo -e "  ${INFO} Upgrading gravity database from version 3 to 4"
-        sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
         version=4
     fi
     if [[ "$version" == "4" ]]; then
         # This migration script upgrades the gravity and list views
         # implementing necessary changes for per-client blocking
         echo -e "  ${INFO} Upgrading gravity database from version 4 to 5"
-        sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
         version=5
     fi
     if [[ "$version" == "5" ]]; then
         # This migration script upgrades the adlist view
         # to return an ID used in gravity.sh
         echo -e "  ${INFO} Upgrading gravity database from version 5 to 6"
-        sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
         version=6
     fi
     if [[ "$version" == "6" ]]; then
@@ -69,7 +69,7 @@ upgrade_gravityDB(){
         # which is automatically associated to all clients not
         # having their own group assignments
         echo -e "  ${INFO} Upgrading gravity database from version 6 to 7"
-        sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
         version=7
     fi
     if [[ "$version" == "7" ]]; then
@@ -77,21 +77,21 @@ upgrade_gravityDB(){
         # to ensure uniqueness on the group name
         # We also add date_added and date_modified columns
         echo -e "  ${INFO} Upgrading gravity database from version 7 to 8"
-        sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
         version=8
     fi
     if [[ "$version" == "8" ]]; then
         # This migration fixes some issues that were introduced
         # in the previous migration script.
         echo -e "  ${INFO} Upgrading gravity database from version 8 to 9"
-        sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
         version=9
     fi
     if [[ "$version" == "9" ]]; then
         # This migration drops unused tables and creates triggers to remove
         # obsolete groups assignments when the linked items are deleted
         echo -e "  ${INFO} Upgrading gravity database from version 9 to 10"
-        sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
         version=10
     fi
     if [[ "$version" == "10" ]]; then
@@ -101,31 +101,31 @@ upgrade_gravityDB(){
         # to keep the copying process generic (needs the same columns in both the
         # source and the destination databases).
         echo -e "  ${INFO} Upgrading gravity database from version 10 to 11"
-        sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
         version=11
     fi
     if [[ "$version" == "11" ]]; then
         # Rename group 0 from "Unassociated" to "Default"
         echo -e "  ${INFO} Upgrading gravity database from version 11 to 12"
-        sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
         version=12
     fi
     if [[ "$version" == "12" ]]; then
         # Add column date_updated to adlist table
         echo -e "  ${INFO} Upgrading gravity database from version 12 to 13"
-        sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
         version=13
     fi
     if [[ "$version" == "13" ]]; then
         # Add columns number and status to adlist table
         echo -e "  ${INFO} Upgrading gravity database from version 13 to 14"
-        sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
         version=14
     fi
     if [[ "$version" == "14" ]]; then
         # Changes the vw_adlist created in 5_to_6
         echo -e "  ${INFO} Upgrading gravity database from version 14 to 15"
-        sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
+        pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
         version=15
     fi
 }
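Every migration step now goes through the SQLite shell embedded in pihole-FTL rather than a separately installed sqlite3 binary, so the upgrade no longer depends on the sqlite3 package being present. A minimal sketch of a single step, assuming the default gravity database location and that scriptPath points at the directory holding the numbered migration SQL files:

    database="/etc/pihole/gravity.db"

    # Read the current schema version from the info table
    version="$(pihole-FTL sqlite3 "${database}" "SELECT value FROM info WHERE property = 'version';")"

    # Apply exactly one upgrade script if the database is still on version 1
    if [[ "${version}" == "1" ]]; then
        pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
    fi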
@@ -142,18 +142,18 @@ AddDomain() {
     domain="$1"
 
     # Is the domain in the list we want to add it to?
-    num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
+    num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
     if [[ "${num}" -ne 0 ]]; then
-        existingTypeId="$(sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
+        existingTypeId="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
         if [[ "${existingTypeId}" == "${typeId}" ]]; then
             if [[ "${verbose}" == true ]]; then
                 echo -e "  ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
             fi
         else
             existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
-            sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
+            pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
             if [[ "${verbose}" == true ]]; then
                 echo -e "  ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
             fi
@@ -169,10 +169,10 @@ AddDomain() {
     # Insert only the domain here. The enabled and date_added fields will be filled
     # with their default values (enabled = true, date_added = current timestamp)
     if [[ -z "${comment}" ]]; then
-        sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
+        pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
     else
         # also add comment when variable has been set through the "--comment" option
-        sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
+        pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
     fi
 }
 
@@ -181,7 +181,7 @@ RemoveDomain() {
     domain="$1"
 
     # Is the domain in the list we want to remove it from?
-    num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
+    num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
 
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
@@ -198,14 +198,14 @@ RemoveDomain() {
     fi
     reload=true
     # Remove it from the current list
-    sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
+    pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
 }
 
 Displaylist() {
     local count num_pipes domain enabled status nicedate requestedListname
 
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
-    data="$(sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
+    data="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
 
     if [[ -z $data ]]; then
         echo -e "Not showing empty list"
@@ -243,10 +243,10 @@ Displaylist() {
 }
 
 NukeList() {
-    count=$(sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
+    count=$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
     listname="$(GetListnameFromTypeId "${typeId}")"
     if [ "$count" -gt 0 ];then
-        sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
+        pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
         echo "  ${TICK} Removed ${count} domain(s) from the ${listname}"
     else
         echo "  ${INFO} ${listname} already empty. Nothing to do!"
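The same embedded shell can be used to inspect the domainlist table these helpers write to. A short illustration, assuming the default gravity database path; the numeric type filter is only an example value:

    # Show every entry together with its list type and enabled flag
    pihole-FTL sqlite3 /etc/pihole/gravity.db \
        "SELECT type, domain, enabled FROM domainlist ORDER BY type;"

    # Count the entries NukeList would delete for one list type (0 used here purely for illustration)
    pihole-FTL sqlite3 /etc/pihole/gravity.db \
        "SELECT COUNT(1) FROM domainlist WHERE type = 0;"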
@@ -39,7 +39,7 @@ flushARP(){
     # Truncate network_addresses table in pihole-FTL.db
     # This needs to be done before we can truncate the network table due to
     # foreign key constraints
-    if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
+    if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
         echo -e "${OVER}  ${CROSS} Failed to truncate network_addresses table"
         echo "  Database location: ${DBFILE}"
         echo "  Output: ${output}"
@@ -47,7 +47,7 @@ flushARP(){
     fi
 
     # Truncate network table in pihole-FTL.db
-    if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
+    if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
         echo -e "${OVER}  ${CROSS} Failed to truncate network table"
         echo "  Database location: ${DBFILE}"
         echo "  Output: ${output}"
@@ -66,8 +66,8 @@ PIHOLE_DIRECTORY="/etc/pihole"
 PIHOLE_SCRIPTS_DIRECTORY="/opt/pihole"
 BIN_DIRECTORY="/usr/local/bin"
 RUN_DIRECTORY="/run"
-LOG_DIRECTORY="/var/log"
-WEB_SERVER_LOG_DIRECTORY="${LOG_DIRECTORY}/lighttpd"
+LOG_DIRECTORY="/var/log/pihole"
+WEB_SERVER_LOG_DIRECTORY="/var/log/lighttpd"
 WEB_SERVER_CONFIG_DIRECTORY="/etc/lighttpd"
 HTML_DIRECTORY="/var/www/html"
 WEB_GIT_DIRECTORY="${HTML_DIRECTORY}/admin"
@@ -131,8 +131,8 @@ PIHOLE_LOG_GZIPS="${LOG_DIRECTORY}/pihole.log.[0-9].*"
 PIHOLE_DEBUG_LOG="${LOG_DIRECTORY}/pihole_debug.log"
 PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/pihole-FTL.log")"
 
-PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access.log"
-PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error.log"
+PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access-pihole.log"
+PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error-pihole.log"
 
 RESOLVCONF="${ETC}/resolv.conf"
 DNSMASQ_CONF="${ETC}/dnsmasq.conf"
@@ -467,6 +467,9 @@ diagnose_operating_system() {
     # Display the current test that is running
     echo_current_diagnostic "Operating system"
 
+    # If the PIHOLE_DOCKER_TAG variable is set, include this information in the debug output
+    [ -n "${PIHOLE_DOCKER_TAG}" ] && log_write "${INFO} Pi-hole Docker Container: ${PIHOLE_DOCKER_TAG}"
+
     # If there is a /etc/*release file, it's probably a supported operating system, so we can
     if ls /etc/*release 1> /dev/null 2>&1; then
         # display the attributes to the user from the function made earlier
@@ -730,11 +733,11 @@ compare_port_to_service_assigned() {
 
     # If the service is a Pi-hole service, highlight it in green
     if [[ "${service_name}" == "${expected_service}" ]]; then
-        log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
+        log_write "${TICK} ${COL_GREEN}${port}${COL_NC} is in use by ${COL_GREEN}${service_name}${COL_NC}"
     # Otherwise,
     else
         # Show the service name in red since it's non-standard
-        log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
+        log_write "${CROSS} ${COL_RED}${port}${COL_NC} is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
     fi
 }
@@ -750,36 +753,47 @@ check_required_ports() {
     # Sort the addresses and remove duplicates
     while IFS= read -r line; do
         ports_in_use+=( "$line" )
-    done < <( lsof -iTCP -sTCP:LISTEN -P -n +c 10 )
+    done < <( ss --listening --numeric --tcp --udp --processes --no-header )
 
     # Now that we have the values stored,
     for i in "${!ports_in_use[@]}"; do
         # loop through them and assign some local variables
         local service_name
-        service_name=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
+        service_name=$(echo "${ports_in_use[$i]}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}')
         local protocol_type
-        protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $5}')
+        protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
         local port_number
-        port_number="$(echo "${ports_in_use[$i]}" | awk '{print $9}')"
+        port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
 
-        # Skip the line if it's the titles of the columns the lsof command produces
-        if [[ "${service_name}" == COMMAND ]]; then
-            continue
-        fi
         # Use a case statement to determine if the right services are using the right ports
-        case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
-            53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
+        case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in
+            53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}"
                 ;;
-            80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
+            80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}"
                 ;;
-            4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
+            4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
                 ;;
             # If it's not a default port that Pi-hole needs, just print it out for the user to see
-            *) log_write "${port_number} ${service_name} (${protocol_type})";
+            *) log_write "    ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
         esac
     done
 }
 
+ip_command() {
+    # Obtain and log information from "ip XYZ show" commands
+    echo_current_diagnostic "${2}"
+    local entries=()
+    mapfile -t entries < <(ip "${1}" show)
+    for line in "${entries[@]}"; do
+        log_write "   ${line}"
+    done
+}
+
+check_ip_command() {
+    ip_command "addr" "Network interfaces and addresses"
+    ip_command "route" "Network routing table"
+}
+
 check_networking() {
     # Runs through several of the functions made earlier; we just clump them
     # together since they are all related to the networking aspect of things
@@ -788,7 +802,9 @@ check_networking() {
     detect_ip_addresses "6"
     ping_gateway "4"
     ping_gateway "6"
-    check_required_ports
+    # Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required
+    # to resolve the service name listening - and the container should not start if there was a port conflict anyway
+    [ -z "${PIHOLE_DOCKER_TAG}" ] && check_required_ports
 }
 
 check_x_headers() {
@@ -872,7 +888,7 @@ dig_at() {
     # This helps emulate queries to different domains that a user might query
     # It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
     local random_url
-    random_url=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
+    random_url=$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
 
     # Next we need to check if Pi-hole can resolve a domain when the query is sent to it's IP address
     # This better emulates how clients will interact with Pi-hole as opposed to above where Pi-hole is
@@ -890,9 +906,11 @@ dig_at() {
     # Removes all interfaces which are not UP
     # s/^[0-9]*: //g;
     # Removes interface index
+    # s/@.*//g;
+    # Removes everything after @ (if found)
     # s/: <.*//g;
     # Removes everything after the interface name
-    interfaces="$(ip link show | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/: <.*//g;")"
+    interfaces="$(ip link show | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/@.*//g;s/: <.*//g;")"
 
     while IFS= read -r iface ; do
         # Get addresses of current interface
@@ -1186,7 +1204,7 @@ show_db_entries() {
     IFS=$'\r\n'
     local entries=()
     mapfile -t entries < <(\
-        sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
+        pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
             -cmd ".headers on" \
             -cmd ".mode column" \
             -cmd ".width ${widths}" \
@@ -1211,7 +1229,7 @@ show_FTL_db_entries() {
     IFS=$'\r\n'
     local entries=()
     mapfile -t entries < <(\
-        sqlite3 "${PIHOLE_FTL_DB_FILE}" \
+        pihole-FTL sqlite3 "${PIHOLE_FTL_DB_FILE}" \
            -cmd ".headers on" \
            -cmd ".mode column" \
            -cmd ".width ${widths}" \
@@ -1257,7 +1275,7 @@ show_clients() {
 }
 
 show_messages() {
-    show_FTL_db_entries "Pi-hole diagnosis messages" "SELECT id,datetime(timestamp,'unixepoch','localtime') timestamp,type,message,blob1,blob2,blob3,blob4,blob5 FROM message;" "4 19 20 60 20 20 20 20 20"
+    show_FTL_db_entries "Pi-hole diagnosis messages" "SELECT count (message) as count, datetime(max(timestamp),'unixepoch','localtime') as 'last timestamp', type, message, blob1, blob2, blob3, blob4, blob5 FROM message GROUP BY type, message, blob1, blob2, blob3, blob4, blob5;" "6 19 20 60 20 20 20 20 20"
 }
 
 analyze_gravity_list() {
@@ -1268,7 +1286,7 @@ analyze_gravity_list() {
     log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
 
     show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
-    gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
+    gravity_updated_raw="$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
     gravity_updated="$(date -d @"${gravity_updated_raw}")"
     log_write "   Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
     log_write ""
@@ -1276,7 +1294,7 @@ analyze_gravity_list() {
     OLD_IFS="$IFS"
     IFS=$'\r\n'
     local gravity_sample=()
-    mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
+    mapfile -t gravity_sample < <(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
     log_write "   ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
 
     for line in "${gravity_sample[@]}"; do
@@ -1386,9 +1404,9 @@ upload_to_tricorder() {
     log_write "${TICK} ${COL_GREEN}** FINISHED DEBUGGING! **${COL_NC}\\n"
 
     # Provide information on what they should do with their token
     log_write "    * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
 
-    # If pihole -d is running automatically (usually through the dashboard)
+    # If pihole -d is running automatically
     if [[ "${AUTOMATED}" ]]; then
         # let the user know
         log_write "${INFO} Debug script running in automated mode"
@@ -1396,16 +1414,19 @@ upload_to_tricorder() {
         curl_to_tricorder
     # If we're not running in automated mode,
     else
-        echo ""
-        # give the user a choice of uploading it or not
-        # Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
-        read -r -p "[?] Would you like to upload the log? [y/N] " response
-        case ${response} in
-            # If they say yes, run our function for uploading the log
-            [yY][eE][sS]|[yY]) curl_to_tricorder;;
-            # If they choose no, just exit out of the script
-            *) log_write "    * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
-        esac
+        # if not being called from the web interface
+        if [[ ! "${WEBCALL}" ]]; then
+            echo ""
+            # give the user a choice of uploading it or not
+            # Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
+            read -r -p "[?] Would you like to upload the log? [y/N] " response
+            case ${response} in
+                # If they say yes, run our function for uploading the log
+                [yY][eE][sS]|[yY]) curl_to_tricorder;;
+                # If they choose no, just exit out of the script
+                *) log_write "    * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
+            esac
+        fi
     fi
     # Check if tricorder.pi-hole.net is reachable and provide token
     # along with some additional useful information
@@ -1425,8 +1446,13 @@ upload_to_tricorder() {
     # If no token was generated
     else
         # Show an error and some help instructions
-        log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
-        log_write "   * Please try again or contact the Pi-hole team for assistance."
+        # Skip this if being called from web interface and autmatic mode was not chosen (users opt-out to upload)
+        if [[ "${WEBCALL}" ]] && [[ ! "${AUTOMATED}" ]]; then
+            :
+        else
+            log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
+            log_write "   * Please try again or contact the Pi-hole team for assistance."
+        fi
     fi
     # Finally, show where the log file is no matter the outcome of the function so users can look at it
     log_write "   * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n"
@@ -1445,6 +1471,7 @@ check_selinux
 check_firewalld
 processor_check
 disk_usage
+check_ip_command
 check_networking
 check_name_resolution
 check_dhcp_servers
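The reworked check_required_ports() above reads listening sockets from ss rather than lsof, which is why the awk field numbers change: with ss, field 1 is the protocol, field 5 the local address:port, and field 7 the owning process wrapped in users:((...)). A rough sketch of the extraction on a single, made-up ss output line:

    # Example line in the format produced by:
    #   ss --listening --numeric --tcp --udp --processes --no-header
    line='udp UNCONN 0 0 0.0.0.0:53 0.0.0.0:* users:(("pihole-FTL",pid=123,fd=4))'

    echo "${line}" | awk '{print $1}'   # protocol      -> udp
    echo "${line}" | awk '{print $5}'   # address:port  -> 0.0.0.0:53
    echo "${line}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}'   # process -> pihole-FTL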
@@ -31,7 +31,7 @@ if [ -z "$DBFILE" ]; then
 fi
 
 if [[ "$@" != *"quiet"* ]]; then
-    echo -ne "  ${INFO} Flushing /var/log/pihole.log ..."
+    echo -ne "  ${INFO} Flushing /var/log/pihole/pihole.log ..."
 fi
 if [[ "$@" == *"once"* ]]; then
     # Nightly logrotation
@@ -44,9 +44,9 @@ if [[ "$@" == *"once"* ]]; then
         # Note that moving the file is not an option, as
         # dnsmasq would happily continue writing into the
         # moved file (it will have the same file handler)
-        cp -p /var/log/pihole.log /var/log/pihole.log.1
-        echo " " > /var/log/pihole.log
-        chmod 644 /var/log/pihole.log
+        cp -p /var/log/pihole/pihole.log /var/log/pihole/pihole.log.1
+        echo " " > /var/log/pihole/pihole.log
+        chmod 644 /var/log/pihole/pihole.log
     fi
 else
     # Manual flushing
@@ -56,20 +56,20 @@ else
         /usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
     else
         # Flush both pihole.log and pihole.log.1 (if existing)
-        echo " " > /var/log/pihole.log
-        if [ -f /var/log/pihole.log.1 ]; then
-            echo " " > /var/log/pihole.log.1
-            chmod 644 /var/log/pihole.log.1
+        echo " " > /var/log/pihole/pihole.log
+        if [ -f /var/log/pihole/pihole.log.1 ]; then
+            echo " " > /var/log/pihole/pihole.log.1
+            chmod 644 /var/log/pihole/pihole.log.1
         fi
     fi
     # Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)
-    deleted=$(sqlite3 "${DBFILE}" "DELETE FROM queries WHERE timestamp >= strftime('%s','now')-86400; select changes() from queries limit 1")
+    deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
 
     # Restart pihole-FTL to force reloading history
     sudo pihole restartdns
 fi
 
 if [[ "$@" != *"quiet"* ]]; then
-    echo -e "${OVER}  ${TICK} Flushed /var/log/pihole.log"
+    echo -e "${OVER}  ${TICK} Flushed /var/log/pihole/pihole.log"
     echo -e "  ${TICK} Deleted ${deleted} queries from database"
 fi
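Log flushing now also targets FTL's renamed history table: the last 24 hours are deleted from query_storage instead of queries, again through pihole-FTL's embedded SQLite shell. A small sketch of the same idea, assuming the default FTL database path:

    DBFILE="/etc/pihole/pihole-FTL.db"

    # Remove the most recent 24 hours of history and report how many rows were deleted
    deleted=$(pihole-FTL sqlite3 "${DBFILE}" \
        "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; SELECT changes();")
    echo "Deleted ${deleted} queries from the last 24 hours"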
@@ -64,8 +64,8 @@ Example: 'pihole -q -exact domain.com'
 Query the adlists for a specified domain
 
 Options:
-  -exact             Search the block lists for exact domain matches
-  -all               Return all query matches within a block list
+  -exact             Search the adlists for exact domain matches
+  -all               Return all query matches within the adlists
   -h, --help         Show this help dialog"
   exit 0
 fi
@@ -121,7 +121,7 @@ scanDatabaseTable() {
     fi
 
     # Send prepared query to gravity database
-    result="$(sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
+    result="$(pihole-FTL sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
     if [[ -z "${result}" ]]; then
         # Return early when there are no matches in this table
         return
@@ -164,7 +164,7 @@ scanRegexDatabaseTable() {
     type="${3:-}"
 
     # Query all regex from the corresponding database tables
-    mapfile -t regexList < <(sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
+    mapfile -t regexList < <(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
 
     # If we have regexps to process
     if [[ "${#regexList[@]}" -ne 0 ]]; then
@@ -210,7 +210,7 @@ mapfile -t results <<< "$(scanDatabaseTable "${domainQuery}" "gravity")"
 
 # Handle notices
 if [[ -z "${wbMatch:-}" ]] && [[ -z "${wcMatch:-}" ]] && [[ -z "${results[*]}" ]]; then
-    echo -e "  ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the block lists"
+    echo -e "  ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the adlists"
     exit 0
 elif [[ -z "${results[*]}" ]]; then
     # Result found in WL/BL/Wildcards
@@ -233,7 +233,7 @@ for result in "${results[@]}"; do
     adlistAddress="${extra/|*/}"
     extra="${extra#*|}"
     if [[ "${extra}" == "0" ]]; then
-        extra="(disabled)"
+        extra=" (disabled)"
     else
         extra=""
     fi
@@ -241,7 +241,7 @@ for result in "${results[@]}"; do
     if [[ -n "${blockpage}" ]]; then
         echo "0 ${adlistAddress}"
     elif [[ -n "${exact}" ]]; then
-        echo "   - ${adlistAddress} ${extra}"
+        echo "   - ${adlistAddress}${extra}"
     else
         if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
             count=""
@@ -256,7 +256,7 @@ for result in "${results[@]}"; do
         [[ "${count}" -gt "${max_count}" ]] && continue
         echo "      ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}"
     else
-        echo "      ${match} ${extra}"
+        echo "      ${match}${extra}"
     fi
     fi
 done
@@ -41,7 +41,7 @@ GitCheckUpdateAvail() {
     cd "${directory}" || return
 
     # Fetch latest changes in this repo
-    git fetch --tags --quiet origin
+    git fetch --quiet origin
 
     # Check current branch. If it is master, then check for the latest available tag instead of latest commit.
     curBranch=$(git rev-parse --abbrev-ref HEAD)
advanced/Scripts/utils.sh (new executable file, 98 lines)

@@ -0,0 +1,98 @@
+#!/usr/bin/env sh
+# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
+
+# Pi-hole: A black hole for Internet advertisements
+# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
+# Network-wide ad blocking via your own hardware.
+#
+# Script to hold utility functions for use in other scripts
+#
+# This file is copyright under the latest version of the EUPL.
+# Please see LICENSE file for your rights under this license.
+
+# Basic Housekeeping rules
+#  - Functions must be self contained
+#  - Functions should be grouped with other similar functions
+#  - Functions must be documented
+#  - New functions must have a test added for them in test/test_any_utils.py
+
+#######################
+# Takes Three arguments: file, key, and value.
+#
+# Checks the target file for the existence of the key
+#   - If it exists, it changes the value
+#   - If it does not exist, it adds the value
+#
+# Example usage:
+# addOrEditKeyValPair "/etc/pihole/setupVars.conf" "BLOCKING_ENABLED" "true"
+#######################
+addOrEditKeyValPair() {
+  local file="${1}"
+  local key="${2}"
+  local value="${3}"
+
+  if grep -q "^${key}=" "${file}"; then
+    # Key already exists in file, modify the value
+    sed -i "/^${key}=/c\\${key}=${value}" "${file}"
+  else
+    # Key does not already exist, add it and it's value
+    echo "${key}=${value}" >> "${file}"
+  fi
+}
+
+#######################
+# Takes two arguments: file, and key.
+# Adds a key to target file
+#
+# Example usage:
+# addKey "/etc/dnsmasq.d/01-pihole.conf" "log-queries"
+#######################
+addKey(){
+  local file="${1}"
+  local key="${2}"
+
+  if ! grep -q "^${key}" "${file}"; then
+    # Key does not exist, add it.
+    echo "${key}" >> "${file}"
+  fi
+}
+
+#######################
+# Takes two arguments: file, and key.
+# Deletes a key or key/value pair from target file
+#
+# Example usage:
+# removeKey "/etc/pihole/setupVars.conf" "PIHOLE_DNS_1"
+#######################
+removeKey() {
+  local file="${1}"
+  local key="${2}"
+  sed -i "/^${key}/d" "${file}"
+}
+
+#######################
+# returns FTL's current telnet API port
+#######################
+getFTLAPIPort(){
+  local FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
+  local DEFAULT_PORT_FILE="/run/pihole-FTL.port"
+  local DEFAULT_FTL_PORT=4711
+  local PORTFILE
+  local ftl_api_port
+
+  if [ -f "$FTLCONFFILE" ]; then
+    # if PORTFILE is not set in pihole-FTL.conf, use the default path
+    PORTFILE="$( (grep "^PORTFILE=" $FTLCONFFILE || echo "$DEFAULT_PORT_FILE") | cut -d"=" -f2-)"
+  fi
+
+  if [ -s "$PORTFILE" ]; then
+    # -s: FILE exists and has a size greater than zero
+    ftl_api_port=$(cat "${PORTFILE}")
+    # Exploit prevention: unset the variable if there is malicious content
+    # Verify that the value read from the file is numeric
+    expr "$ftl_api_port" : "[^[:digit:]]" > /dev/null && unset ftl_api_port
+  fi
+
+  # echo the port found in the portfile or default to the default port
+  echo "${ftl_api_port:=$DEFAULT_FTL_PORT}"
+}
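The settings script changed below sources these helpers via utilsfile="/opt/pihole/utils.sh". A quick sketch of how the three file-editing functions behave, shown on a throwaway file with the helpers sourced into the current shell:

    source /opt/pihole/utils.sh
    conf="$(mktemp)"

    addOrEditKeyValPair "${conf}" "BLOCKING_ENABLED" "true"    # appends BLOCKING_ENABLED=true
    addOrEditKeyValPair "${conf}" "BLOCKING_ENABLED" "false"   # rewrites the existing line in place
    addKey "${conf}" "log-queries"                             # appends the bare key only if missing
    removeKey "${conf}" "log-queries"                          # deletes the matching line again

    cat "${conf}"   # -> BLOCKING_ENABLED=false
    rm "${conf}"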
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC1090
+# shellcheck disable=SC2154
+
 
 # Pi-hole: A black hole for Internet advertisements
 # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@@ -26,6 +28,9 @@ readonly PI_HOLE_FILES_DIR="/etc/.pihole"
 PH_TEST="true"
 source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
 
+utilsfile="/opt/pihole/utils.sh"
+source "${utilsfile}"
+
 coltable="/opt/pihole/COL_TABLE"
 if [[ -f ${coltable} ]]; then
     source ${coltable}
@@ -37,58 +42,53 @@ Example: pihole -a -p password
 Set options for the Admin Console
 
 Options:
   -p, password        Set Admin Console password
   -c, celsius         Set Celsius as preferred temperature unit
   -f, fahrenheit      Set Fahrenheit as preferred temperature unit
   -k, kelvin          Set Kelvin as preferred temperature unit
   -e, email           Set an administrative contact address for the Block Page
   -h, --help          Show this help dialog
   -i, interface       Specify dnsmasq's interface listening behavior
   -l, privacylevel    Set privacy level (0 = lowest, 3 = highest)
-  -t, teleporter      Backup configuration as an archive"
+  -t, teleporter      Backup configuration as an archive
+  -t, teleporter myname.tar.gz  Backup configuration to archive with name myname.tar.gz as specified"
   exit 0
 }
 
 add_setting() {
-    echo "${1}=${2}" >> "${setupVars}"
+    addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
 }
 
 delete_setting() {
-    sed -i "/^${1}/d" "${setupVars}"
+    removeKey "${setupVars}" "${1}"
 }
 
 change_setting() {
-    delete_setting "${1}"
-    add_setting "${1}" "${2}"
+    addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
 }
 
 addFTLsetting() {
-    echo "${1}=${2}" >> "${FTLconf}"
+    addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
 }
 
 deleteFTLsetting() {
-    sed -i "/^${1}/d" "${FTLconf}"
+    removeKey "${FTLconf}" "${1}"
 }
 
 changeFTLsetting() {
-    deleteFTLsetting "${1}"
-    addFTLsetting "${1}" "${2}"
+    addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
 }
 
 add_dnsmasq_setting() {
-    if [[ "${2}" != "" ]]; then
-        echo "${1}=${2}" >> "${dnsmasqconfig}"
-    else
-        echo "${1}" >> "${dnsmasqconfig}"
-    fi
+    addOrEditKeyValPair "${dnsmasqconfig}" "${1}" "${2}"
 }
 
 delete_dnsmasq_setting() {
-    sed -i "/^${1}/d" "${dnsmasqconfig}"
+    removeKey "${dnsmasqconfig}" "${1}"
 }
 
 SetTemperatureUnit() {
-    change_setting "TEMPERATUREUNIT" "${unit}"
+    addOrEditKeyValPair "${setupVars}" "TEMPERATUREUNIT" "${unit}"
     echo -e "  ${TICK} Set temperature unit to ${unit}"
 }
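With the wrappers above reduced to thin aliases over the shared helpers, editing a setupVars.conf entry becomes a single idempotent call instead of a delete followed by an append. A brief illustration of the new code path, assuming setupVars points at /etc/pihole/setupVars.conf:

    setupVars="/etc/pihole/setupVars.conf"

    # Old flow (change_setting): delete any existing line, then re-append it
    #   sed -i "/^TEMPERATUREUNIT/d" "${setupVars}"
    #   echo "TEMPERATUREUNIT=C" >> "${setupVars}"

    # New flow: one call edits the line in place, or appends it when missing
    addOrEditKeyValPair "${setupVars}" "TEMPERATUREUNIT" "C"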
@ -123,7 +123,7 @@ SetWebPassword() {
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
if [ "${PASSWORD}" == "" ]; then
|
if [ "${PASSWORD}" == "" ]; then
|
||||||
change_setting "WEBPASSWORD" ""
|
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" ""
|
||||||
echo -e " ${TICK} Password Removed"
|
echo -e " ${TICK} Password Removed"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
@ -136,7 +136,7 @@ SetWebPassword() {
|
||||||
# We do not wrap this in brackets, otherwise BASH will expand any appropriate syntax
|
# We do not wrap this in brackets, otherwise BASH will expand any appropriate syntax
|
||||||
hash=$(HashPassword "$PASSWORD")
|
hash=$(HashPassword "$PASSWORD")
|
||||||
# Save hash to file
|
# Save hash to file
|
||||||
change_setting "WEBPASSWORD" "${hash}"
|
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" "${hash}"
|
||||||
echo -e " ${TICK} New password set"
|
echo -e " ${TICK} New password set"
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
|
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
|
||||||
|
@ -147,7 +147,7 @@ SetWebPassword() {
|
||||||
ProcessDNSSettings() {
|
ProcessDNSSettings() {
|
||||||
source "${setupVars}"
|
source "${setupVars}"
|
||||||
|
|
||||||
delete_dnsmasq_setting "server"
|
removeKey "${dnsmasqconfig}" "server"
|
||||||
|
|
||||||
COUNTER=1
|
COUNTER=1
|
||||||
while true ; do
|
while true ; do
|
||||||
|
@ -155,34 +155,34 @@ ProcessDNSSettings() {
|
||||||
if [ -z "${!var}" ]; then
|
if [ -z "${!var}" ]; then
|
||||||
break;
|
break;
|
||||||
fi
|
fi
|
||||||
add_dnsmasq_setting "server" "${!var}"
|
addKey "${dnsmasqconfig}" "server=${!var}"
|
||||||
(( COUNTER++ ))
|
(( COUNTER++ ))
|
||||||
done
|
done
|
||||||
|
|
||||||
# The option LOCAL_DNS_PORT is deprecated
|
# The option LOCAL_DNS_PORT is deprecated
|
||||||
# We apply it once more, and then convert it into the current format
|
# We apply it once more, and then convert it into the current format
|
||||||
if [ -n "${LOCAL_DNS_PORT}" ]; then
|
if [ -n "${LOCAL_DNS_PORT}" ]; then
|
||||||
add_dnsmasq_setting "server" "127.0.0.1#${LOCAL_DNS_PORT}"
|
addOrEditKeyValPair "${dnsmasqconfig}" "server" "127.0.0.1#${LOCAL_DNS_PORT}"
|
||||||
add_setting "PIHOLE_DNS_${COUNTER}" "127.0.0.1#${LOCAL_DNS_PORT}"
|
addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_${COUNTER}" "127.0.0.1#${LOCAL_DNS_PORT}"
|
||||||
delete_setting "LOCAL_DNS_PORT"
|
removeKey "${setupVars}" "LOCAL_DNS_PORT"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
delete_dnsmasq_setting "domain-needed"
|
removeKey "${dnsmasqconfig}" "domain-needed"
|
||||||
delete_dnsmasq_setting "expand-hosts"
|
removeKey "${dnsmasqconfig}" "expand-hosts"
|
||||||
|
|
||||||
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
||||||
add_dnsmasq_setting "domain-needed"
|
addKey "${dnsmasqconfig}" "domain-needed"
|
||||||
add_dnsmasq_setting "expand-hosts"
|
addKey "${dnsmasqconfig}" "expand-hosts"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
delete_dnsmasq_setting "bogus-priv"
|
removeKey "${dnsmasqconfig}" "bogus-priv"
|
||||||
|
|
||||||
if [[ "${DNS_BOGUS_PRIV}" == true ]]; then
|
if [[ "${DNS_BOGUS_PRIV}" == true ]]; then
|
||||||
add_dnsmasq_setting "bogus-priv"
|
addKey "${dnsmasqconfig}" "bogus-priv"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
delete_dnsmasq_setting "dnssec"
|
removeKey "${dnsmasqconfig}" "dnssec"
|
||||||
delete_dnsmasq_setting "trust-anchor="
|
removeKey "${dnsmasqconfig}" "trust-anchor"
|
||||||
|
|
||||||
if [[ "${DNSSEC}" == true ]]; then
|
if [[ "${DNSSEC}" == true ]]; then
|
||||||
echo "dnssec
|
echo "dnssec
|
@@ -190,24 +190,24 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 " >> "${dnsmasqconfig}"
     fi

-    delete_dnsmasq_setting "host-record"
+    removeKey "${dnsmasqconfig}" "host-record"

     if [ -n "${HOSTRECORD}" ]; then
-        add_dnsmasq_setting "host-record" "${HOSTRECORD}"
+        addOrEditKeyValPair "${dnsmasqconfig}" "host-record" "${HOSTRECORD}"
     fi

     # Setup interface listening behavior of dnsmasq
-    delete_dnsmasq_setting "interface"
-    delete_dnsmasq_setting "local-service"
-    delete_dnsmasq_setting "except-interface"
-    delete_dnsmasq_setting "bind-interfaces"
+    removeKey "${dnsmasqconfig}" "interface"
+    removeKey "${dnsmasqconfig}" "local-service"
+    removeKey "${dnsmasqconfig}" "except-interface"
+    removeKey "${dnsmasqconfig}" "bind-interfaces"

     if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
         # Listen on all interfaces, permit all origins
-        add_dnsmasq_setting "except-interface" "nonexisting"
+        addOrEditKeyValPair "${dnsmasqconfig}" "except-interface" "nonexisting"
     elif [[ "${DNSMASQ_LISTENING}" == "local" ]]; then
         # Listen only on all interfaces, but only local subnets
-        add_dnsmasq_setting "local-service"
+        addKey "${dnsmasqconfig}" "local-service"
     else
         # Options "bind" and "single"
         # Listen only on one interface

@@ -216,30 +216,30 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
             PIHOLE_INTERFACE="eth0"
         fi

-        add_dnsmasq_setting "interface" "${PIHOLE_INTERFACE}"
+        addOrEditKeyValPair "${dnsmasqconfig}" "interface" "${PIHOLE_INTERFACE}"

         if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
             # Really bind to interface
-            add_dnsmasq_setting "bind-interfaces"
+            addKey "${dnsmasqconfig}" "bind-interfaces"
         fi
     fi

     if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
         # Convert legacy "conditional forwarding" to rev-server configuration
         # Remove any existing REV_SERVER settings
-        delete_setting "REV_SERVER"
-        delete_setting "REV_SERVER_DOMAIN"
-        delete_setting "REV_SERVER_TARGET"
-        delete_setting "REV_SERVER_CIDR"
+        removeKey "${setupVars}" "REV_SERVER"
+        removeKey "${setupVars}" "REV_SERVER_DOMAIN"
+        removeKey "${setupVars}" "REV_SERVER_TARGET"
+        removeKey "${setupVars}" "REV_SERVER_CIDR"

         REV_SERVER=true
-        add_setting "REV_SERVER" "true"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"

         REV_SERVER_DOMAIN="${CONDITIONAL_FORWARDING_DOMAIN}"
-        add_setting "REV_SERVER_DOMAIN" "${REV_SERVER_DOMAIN}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${REV_SERVER_DOMAIN}"

         REV_SERVER_TARGET="${CONDITIONAL_FORWARDING_IP}"
-        add_setting "REV_SERVER_TARGET" "${REV_SERVER_TARGET}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${REV_SERVER_TARGET}"

         #Convert CONDITIONAL_FORWARDING_REVERSE if necessary e.g:
         # 1.1.168.192.in-addr.arpa to 192.168.1.1/32

@@ -266,28 +266,28 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
             # shellcheck disable=2001
             REV_SERVER_CIDR="$(sed "s+\\.[0-9]*$+\\.0/24+" <<< "${REV_SERVER_TARGET}")"
         fi
-        add_setting "REV_SERVER_CIDR" "${REV_SERVER_CIDR}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${REV_SERVER_CIDR}"

         # Remove obsolete settings from setupVars.conf
-        delete_setting "CONDITIONAL_FORWARDING"
-        delete_setting "CONDITIONAL_FORWARDING_REVERSE"
-        delete_setting "CONDITIONAL_FORWARDING_DOMAIN"
-        delete_setting "CONDITIONAL_FORWARDING_IP"
+        removeKey "${setupVars}" "CONDITIONAL_FORWARDING"
+        removeKey "${setupVars}" "CONDITIONAL_FORWARDING_REVERSE"
+        removeKey "${setupVars}" "CONDITIONAL_FORWARDING_DOMAIN"
+        removeKey "${setupVars}" "CONDITIONAL_FORWARDING_IP"
     fi

-    delete_dnsmasq_setting "rev-server"
+    removeKey "${dnsmasqconfig}" "rev-server"

     if [[ "${REV_SERVER}" == true ]]; then
-        add_dnsmasq_setting "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
+        addKey "${dnsmasqconfig}" "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
         if [ -n "${REV_SERVER_DOMAIN}" ]; then
             # Forward local domain names to the CF target, too
-            add_dnsmasq_setting "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
+            addKey "${dnsmasqconfig}" "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
         fi

         if [[ "${DNS_FQDN_REQUIRED}" != true ]]; then
             # Forward unqualified names to the CF target only when the "never
             # forward non-FQDN" option is unticked
-            add_dnsmasq_setting "server=//${REV_SERVER_TARGET}"
+            addKey "${dnsmasqconfig}" "server=//${REV_SERVER_TARGET}"
         fi

     fi

@@ -302,7 +302,7 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423

 SetDNSServers() {
     # Save setting to file
-    delete_setting "PIHOLE_DNS"
+    removeKey "${setupVars}" "PIHOLE_DNS"
     IFS=',' read -r -a array <<< "${args[2]}"
     for index in "${!array[@]}"
     do

@@ -311,7 +311,7 @@ SetDNSServers() {
         ip="${array[index]//\\#/#}"

         if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
-            add_setting "PIHOLE_DNS_$((index+1))" "${ip}"
+            addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_$((index+1))" "${ip}"
         else
             echo -e " ${CROSS} Invalid IP has been passed"
             exit 1

@@ -319,30 +319,30 @@ SetDNSServers() {
     done

     if [[ "${args[3]}" == "domain-needed" ]]; then
-        change_setting "DNS_FQDN_REQUIRED" "true"
+        addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "true"
     else
-        change_setting "DNS_FQDN_REQUIRED" "false"
+        addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "false"
     fi

     if [[ "${args[4]}" == "bogus-priv" ]]; then
-        change_setting "DNS_BOGUS_PRIV" "true"
+        addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "true"
     else
-        change_setting "DNS_BOGUS_PRIV" "false"
+        addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "false"
     fi

     if [[ "${args[5]}" == "dnssec" ]]; then
-        change_setting "DNSSEC" "true"
+        addOrEditKeyValPair "${setupVars}" "DNSSEC" "true"
     else
-        change_setting "DNSSEC" "false"
+        addOrEditKeyValPair "${setupVars}" "DNSSEC" "false"
     fi

     if [[ "${args[6]}" == "rev-server" ]]; then
-        change_setting "REV_SERVER" "true"
-        change_setting "REV_SERVER_CIDR" "${args[7]}"
-        change_setting "REV_SERVER_TARGET" "${args[8]}"
-        change_setting "REV_SERVER_DOMAIN" "${args[9]}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${args[7]}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${args[8]}"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${args[9]}"
     else
-        change_setting "REV_SERVER" "false"
+        addOrEditKeyValPair "${setupVars}" "REV_SERVER" "false"
     fi

     ProcessDNSSettings
@@ -352,11 +352,11 @@ SetDNSServers() {
 }

 SetExcludeDomains() {
-    change_setting "API_EXCLUDE_DOMAINS" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_DOMAINS" "${args[2]}"
 }

 SetExcludeClients() {
-    change_setting "API_EXCLUDE_CLIENTS" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_CLIENTS" "${args[2]}"
 }

 Poweroff(){

@@ -372,7 +372,7 @@ RestartDNS() {
 }

 SetQueryLogOptions() {
-    change_setting "API_QUERY_LOG_SHOW" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "API_QUERY_LOG_SHOW" "${args[2]}"
 }

 ProcessDHCPSettings() {

@@ -388,19 +388,19 @@ ProcessDHCPSettings() {

     if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
         PIHOLE_DOMAIN="lan"
-        change_setting "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
+        addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
     fi

     if [[ "${DHCP_LEASETIME}" == "0" ]]; then
         leasetime="infinite"
     elif [[ "${DHCP_LEASETIME}" == "" ]]; then
         leasetime="24"
-        change_setting "DHCP_LEASETIME" "${leasetime}"
+        addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "${leasetime}"
     elif [[ "${DHCP_LEASETIME}" == "24h" ]]; then
         #Installation is affected by known bug, introduced in a previous version.
         #This will automatically clean up setupVars.conf and remove the unnecessary "h"
         leasetime="24"
-        change_setting "DHCP_LEASETIME" "${leasetime}"
+        addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "${leasetime}"
     else
         leasetime="${DHCP_LEASETIME}h"
     fi

@@ -440,7 +440,8 @@ dhcp-leasefile=/etc/pihole/dhcp.leases
         echo "#quiet-dhcp6
 #enable-ra
 dhcp-option=option6:dns-server,[::]
-dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,64,3600
+dhcp-range=::,constructor:${interface},ra-names,ra-stateless,64
+
 " >> "${dhcpconfig}"
     fi

@@ -452,24 +453,24 @@ dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,64,3600
 }

 EnableDHCP() {
-    change_setting "DHCP_ACTIVE" "true"
-    change_setting "DHCP_START" "${args[2]}"
-    change_setting "DHCP_END" "${args[3]}"
-    change_setting "DHCP_ROUTER" "${args[4]}"
-    change_setting "DHCP_LEASETIME" "${args[5]}"
-    change_setting "PIHOLE_DOMAIN" "${args[6]}"
-    change_setting "DHCP_IPv6" "${args[7]}"
-    change_setting "DHCP_rapid_commit" "${args[8]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "true"
+    addOrEditKeyValPair "${setupVars}" "DHCP_START" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_END" "${args[3]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_ROUTER" "${args[4]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "${args[5]}"
+    addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${args[6]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_IPv6" "${args[7]}"
+    addOrEditKeyValPair "${setupVars}" "DHCP_rapid_commit" "${args[8]}"

     # Remove possible old setting from file
-    delete_dnsmasq_setting "dhcp-"
-    delete_dnsmasq_setting "quiet-dhcp"
+    removeKey "${dnsmasqconfig}" "dhcp-"
+    removeKey "${dnsmasqconfig}" "quiet-dhcp"

     # If a DHCP client claims that its name is "wpad", ignore that.
     # This fixes a security hole. see CERT Vulnerability VU#598349
     # We also ignore "localhost" as Windows behaves strangely if a
     # device claims this host name
-    add_dnsmasq_setting "dhcp-name-match=set:hostname-ignore,wpad
+    addKey "${dnsmasqconfig}" "dhcp-name-match=set:hostname-ignore,wpad
 dhcp-name-match=set:hostname-ignore,localhost
 dhcp-ignore-names=tag:hostname-ignore"

@@ -479,11 +480,11 @@ dhcp-ignore-names=tag:hostname-ignore"
 }

 DisableDHCP() {
-    change_setting "DHCP_ACTIVE" "false"
+    addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "false"

     # Remove possible old setting from file
-    delete_dnsmasq_setting "dhcp-"
-    delete_dnsmasq_setting "quiet-dhcp"
+    removeKey "${dnsmasqconfig}" "dhcp-"
+    removeKey "${dnsmasqconfig}" "quiet-dhcp"

     ProcessDHCPSettings

@@ -491,11 +492,11 @@ DisableDHCP() {
 }

 SetWebUILayout() {
-    change_setting "WEBUIBOXEDLAYOUT" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "WEBUIBOXEDLAYOUT" "${args[2]}"
 }

 SetWebUITheme() {
-    change_setting "WEBTHEME" "${args[2]}"
+    addOrEditKeyValPair "${setupVars}" "WEBTHEME" "${args[2]}"
 }

 CheckUrl(){
@@ -522,13 +523,13 @@ CustomizeAdLists() {

     if CheckUrl "${address}"; then
         if [[ "${args[2]}" == "enable" ]]; then
-            sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
+            pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
         elif [[ "${args[2]}" == "disable" ]]; then
-            sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
+            pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
         elif [[ "${args[2]}" == "add" ]]; then
-            sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
+            pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
         elif [[ "${args[2]}" == "del" ]]; then
-            sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
+            pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
         else
             echo "Not permitted"
             return 1

@@ -590,10 +591,10 @@ Options:
         exit 0
     fi

-        change_setting "ADMIN_EMAIL" "${args[2]}"
+        addOrEditKeyValPair "${setupVars}" "ADMIN_EMAIL" "${args[2]}"
         echo -e " ${TICK} Setting admin contact to ${args[2]}"
     else
-        change_setting "ADMIN_EMAIL" ""
+        addOrEditKeyValPair "${setupVars}" "ADMIN_EMAIL" ""
         echo -e " ${TICK} Removing admin contact"
     fi
 }

@@ -617,16 +618,16 @@ Interfaces:

     if [[ "${args[2]}" == "all" ]]; then
         echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
-        change_setting "DNSMASQ_LISTENING" "all"
+        addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "all"
     elif [[ "${args[2]}" == "local" ]]; then
         echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
-        change_setting "DNSMASQ_LISTENING" "local"
+        addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "local"
     elif [[ "${args[2]}" == "bind" ]]; then
         echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
-        change_setting "DNSMASQ_LISTENING" "bind"
+        addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "bind"
     else
         echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
-        change_setting "DNSMASQ_LISTENING" "single"
+        addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "single"
     fi

     # Don't restart DNS server yet because other settings

@@ -639,12 +640,18 @@ Interfaces:
 }

 Teleporter() {
-    local datetimestamp
-    local host
-    datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
-    host=$(hostname)
-    host="${host//./_}"
-    php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
+    local filename
+    filename="${args[2]}"
+    if [[ -z "${filename}" ]]; then
+        local datetimestamp
+        local host
+        datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
+        host=$(hostname)
+        host="${host//./_}"
+        filename="pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
+    fi
+    # webroot is sourced from basic-install above
+    php "${webroot}/admin/scripts/pi-hole/php/teleporter.php" > "${filename}"
 }

 checkDomain()
@@ -680,18 +687,18 @@ addAudit()
     done
     # Insert only the domain here. The date_added field will be
     # filled with its default value (date_added = current timestamp)
-    sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
+    pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
 }

 clearAudit()
 {
-    sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
+    pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
 }

 SetPrivacyLevel() {
     # Set privacy level. Minimum is 0, maximum is 3
     if [ "${args[2]}" -ge 0 ] && [ "${args[2]}" -le 3 ]; then
-        changeFTLsetting "PRIVACYLEVEL" "${args[2]}"
+        addOrEditKeyValPair "${FTLconf}" "PRIVACYLEVEL" "${args[2]}"
         pihole restartdns reload-lists
     fi
 }

@@ -732,7 +739,7 @@ RemoveCustomDNSAddress() {
     validHost="$(checkDomain "${host}")"
     if [[ -n "${validHost}" ]]; then
         if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
-            sed -i "/^${ip} ${validHost}$/d" "${dnscustomfile}"
+            sed -i "/^${ip} ${validHost}$/Id" "${dnscustomfile}"
         else
             echo -e " ${CROSS} Invalid IP has been passed"
             exit 1

@@ -785,7 +792,7 @@ RemoveCustomCNAMERecord() {
     if [[ -n "${validDomain}" ]]; then
         validTarget="$(checkDomain "${target}")"
         if [[ -n "${validTarget}" ]]; then
-            sed -i "/cname=${validDomain},${validTarget}$/d" "${dnscustomcnamefile}"
+            sed -i "/cname=${validDomain},${validTarget}$/Id" "${dnscustomcnamefile}"
         else
             echo " ${CROSS} Invalid Target Passed!"
             exit 1

@@ -801,6 +808,23 @@ RemoveCustomCNAMERecord() {
     fi
 }

+SetRateLimit() {
+    local rate_limit_count rate_limit_interval reload
+    rate_limit_count="${args[2]}"
+    rate_limit_interval="${args[3]}"
+    reload="${args[4]}"
+
+    # Set rate-limit setting if valid
+    if [ "${rate_limit_count}" -ge 0 ] && [ "${rate_limit_interval}" -ge 0 ]; then
+        addOrEditKeyValPair "${FTLconf}" "RATE_LIMIT" "${rate_limit_count}/${rate_limit_interval}"
+    fi
+
+    # Restart FTL to update rate-limit settings only if $reload not false
+    if [[ ! $reload == "false" ]]; then
+        RestartDNS
+    fi
+}
+
 main() {
     args=("$@")

@@ -834,6 +858,7 @@ main() {
         "removecustomdns" ) RemoveCustomDNSAddress;;
         "addcustomcname" ) AddCustomCNAMERecord;;
         "removecustomcname" ) RemoveCustomCNAMERecord;;
+        "ratelimit" ) SetRateLimit;;
         * ) helpFunc;;
     esac
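For reference, the new "ratelimit" case dispatches to SetRateLimit() above: it reads a query count, an interval in seconds, and an optional third argument ("false" skips the resolver reload), then writes RATE_LIMIT=<count>/<interval> to pihole-FTL.conf. Assuming the usual `pihole -a` dispatch into this settings script, an invocation would look roughly like the following sketch (values and CLI wording are illustrative, not part of the diff):

    # Illustrative only: allow 1000 queries per client per 60 seconds, then restart FTL
    pihole -a ratelimit 1000 60

    # Illustrative only: write the new limit but skip the restart by passing "false" as the third argument
    pihole -a ratelimit 0 0 false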
@@ -12,14 +12,17 @@ INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
 INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;

 INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
+DELETE FROM OLD.domainlist_by_group WHERE domainlist_id NOT IN (SELECT id FROM OLD.domainlist);
 INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;

 INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
+DELETE FROM OLD.adlist_by_group WHERE adlist_id NOT IN (SELECT id FROM OLD.adlist);
 INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;

 INSERT OR REPLACE INTO info SELECT * FROM OLD.info;

 INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
+DELETE FROM OLD.client_by_group WHERE client_id NOT IN (SELECT id FROM OLD.client);
 INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
@@ -1,4 +1,4 @@
-/var/log/pihole.log {
+/var/log/pihole/pihole.log {
     # su #
     daily
     copytruncate

@@ -9,7 +9,7 @@
     nomail
 }

-/var/log/pihole-FTL.log {
+/var/log/pihole/pihole-FTL.log {
     # su #
     weekly
     copytruncate

2 advanced/Templates/pihole-FTL.conf Normal file
@@ -0,0 +1,2 @@
+#; Pi-hole FTL config file
+#; Comments should start with #; to avoid issues with PHP and bash reading this file

@@ -9,8 +9,55 @@
 # Description: Enable service provided by pihole-FTL daemon
 ### END INIT INFO

+# Global variables
+FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
+DEFAULT_PID_FILE="/run/pihole-FTL.pid"
+DEFAULT_PORT_FILE="/run/pihole-FTL.port"
+FTL_PID=''
+
+# Get the file path of the pihole-FTL.pid file
+getFTLPIDFile() {
+    if [ -s "${FTLCONFFILE}" ]; then
+        # if PIDFILE is not set in pihole-FTL.conf, use the default path
+        FTL_PID_FILE="$({ grep '^PIDFILE=' "${FTLCONFFILE}" || echo "${DEFAULT_PID_FILE}"; } | cut -d'=' -f2-)"
+    else
+        # if there is no pihole-FTL.conf, use the default path
+        FTL_PID_FILE="${DEFAULT_PID_FILE}"
+    fi
+}
+
+# Get the PID of the FTL process based on the content of the pihole-FTL.pid file
+getFTLPID() {
+    if [ -s "${FTL_PID_FILE}" ]; then
+        # -s: FILE exists and has a size greater than zero
+        FTL_PID="$(cat "${FTL_PID_FILE}")"
+        # Exploit prevention: unset the variable if there is malicious content
+        # Verify that the value read from the file is numeric
+        expr "${FTL_PID}" : "[^[:digit:]]" > /dev/null && unset FTL_PID
+    fi
+
+    # If FTL is not running, or the PID file contains malicious stuff, substitute
+    # negative PID to signal this
+    FTL_PID=${FTL_PID:=-1}
+}
+
+# Get the file path of the pihole-FTL.port file
+getFTLPortFile() {
+    if [ -s "${FTLCONFFILE}" ]; then
+        # if PORTFILE is not set in pihole-FTL.conf, use the default path
+        FTL_PORT_FILE="$({ grep '^PORTFILE=' "${FTLCONFFILE}" || echo "${DEFAULT_PORT_FILE}"; } | cut -d'=' -f2-)"
+    else
+        # if there is no pihole-FTL.conf, use the default path
+        FTL_PORT_FILE="${DEFAULT_PORT_FILE}"
+    fi
+}
+
 is_running() {
-    pgrep -xo "pihole-FTL" > /dev/null
+    if [ -d "/proc/${FTL_PID}" ]; then
+        return 0
+    fi
+    return 1
 }

@@ -20,17 +67,33 @@ start() {
         echo "pihole-FTL is already running"
     else
         # Touch files to ensure they exist (create if non-existing, preserve if existing)
-        mkdir -pm 0755 /run/pihole
-        touch /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
+        mkdir -pm 0755 /run/pihole /var/log/pihole
+        [ ! -f "${FTL_PID_FILE}" ] && install -m 644 -o pihole -g pihole /dev/null "${FTL_PID_FILE}"
+        [ ! -f "${FTL_PORT_FILE}" ] && install -m 644 -o pihole -g pihole /dev/null "${FTL_PORT_FILE}"
+        [ ! -f /var/log/pihole/pihole-FTL.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole/pihole-FTL.log
+        [ ! -f /var/log/pihole/pihole.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole/pihole.log
+        [ ! -f /etc/pihole/dhcp.leases ] && install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases
         # Ensure that permissions are set so that pihole-FTL can edit all necessary files
-        chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
-        chmod 0644 /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
+        chown pihole:pihole /run/pihole /etc/pihole /var/log/pihole /var/log/pihole/pihole-FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases
         # Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
-        chmod -f 0644 /etc/pihole/macvendor.db
+        chmod -f 0644 /etc/pihole/macvendor.db /etc/pihole/dhcp.leases /var/log/pihole/pihole-FTL.log /var/log/pihole/pihole.log
         # Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
         chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
         # Chown database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
         chmod -f 0664 /etc/pihole/pihole-FTL.db
+
+        # Backward compatibility for user-scripts that still expect log files in /var/log instead of /var/log/pihole/
+        # Should be removed with Pi-hole v6.0
+        if [ ! -f /var/log/pihole.log ]; then
+            ln -s /var/log/pihole/pihole.log /var/log/pihole.log
+            chown -h pihole:pihole /var/log/pihole.log
+        fi
+        if [ ! -f /var/log/pihole-FTL.log ]; then
+            ln -s /var/log/pihole/pihole-FTL.log /var/log/pihole-FTL.log
+            chown -h pihole:pihole /var/log/pihole-FTL.log
+        fi
+
         if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN+eip "/usr/bin/pihole-FTL"; then
             su -s /bin/sh -c "/usr/bin/pihole-FTL" pihole
         else

@@ -44,7 +107,7 @@ start() {
 # Stop the service
 stop() {
     if is_running; then
-        pkill -xo "pihole-FTL"
+        kill "${FTL_PID}"
         for i in 1 2 3 4 5; do
             if ! is_running; then
                 break

@@ -57,8 +120,7 @@ stop() {

         if is_running; then
             echo "Not stopped; may still be shutting down or shutdown may have failed, killing now"
-            pkill -xo -9 "pihole-FTL"
-            exit 1
+            kill -9 "${FTL_PID}"
         else
             echo "Stopped"
         fi

@@ -66,7 +128,7 @@ stop() {
         echo "Not running"
     fi
     # Cleanup
-    rm -f /run/pihole/FTL.sock /dev/shm/FTL-*
+    rm -f /run/pihole/FTL.sock /dev/shm/FTL-* "${FTL_PID_FILE}" "${FTL_PORT_FILE}"
     echo
 }

@@ -83,6 +145,14 @@ status() {

 ### main logic ###

+# Get file paths
+getFTLPIDFile
+getFTLPortFile
+
+# Get FTL's current PID
+getFTLPID
+
 case "$1" in
     stop)
         stop

@@ -18,7 +18,7 @@
 # early morning. Download any updates from the adlists
 # Squash output to log, then splat the log to stdout on error to allow for
 # standard crontab job error handling.
-59 1 * * 7 root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updateGravity >/var/log/pihole_updateGravity.log || cat /var/log/pihole_updateGravity.log
+59 1 * * 7 root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updateGravity >/var/log/pihole/pihole_updateGravity.log || cat /var/log/pihole/pihole_updateGravity.log

 # Pi-hole: Flush the log daily at 00:00
 # The flush script will use logrotate if available

@@ -164,13 +164,35 @@ ini_set("default_socket_timeout", 3);
 function queryAds($serverName) {
     // Determine the time it takes while querying adlists
     $preQueryTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];

+    // Determine which protocol should be used
+    $protocol = "http";
+    if ((isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] === 'on') ||
+        (isset($_SERVER['REQUEST_SCHEME']) && $_SERVER['REQUEST_SCHEME'] === 'https') ||
+        (isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https')
+    ) {
+        $protocol = "https";
+    }
+
+    // Format the URL
     $queryAdsURL = sprintf(
-        "http://127.0.0.1:%s/admin/scripts/pi-hole/php/queryads.php?domain=%s&bp",
+        "%s://127.0.0.1:%s/admin/scripts/pi-hole/php/queryads.php?domain=%s&bp",
+        $protocol,
         $_SERVER["SERVER_PORT"],
         $serverName
     );
-    $queryAds = file($queryAdsURL, FILE_IGNORE_NEW_LINES);
-    $queryAds = array_values(array_filter(preg_replace("/data:\s+/", "", $queryAds)));
+
+    // Request the file and receive the response
+    $queryAdsFile = file($queryAdsURL, FILE_IGNORE_NEW_LINES);
+
+    // $queryAdsFile must be an array (to avoid PHP 8.0+ error)
+    if (is_array($queryAdsFile)) {
+        $queryAds = array_values(array_filter(preg_replace("/data:\s+/", "", $queryAdsFile)));
+    } else {
+        // if not an array, return an error message
+        return array("0" => "error", "1" => "<br>(".gettype($queryAdsFile).")<br>".print_r($queryAdsFile, true));
+    }
+
     $queryTime = sprintf("%.0f", (microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"]) - $preQueryTime);

     // Exception Handling
@@ -28,14 +28,19 @@ server.modules = (
 server.document-root = "/var/www/html"
 server.error-handler-404 = "/pihole/index.php"
 server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
-server.errorlog = "/var/log/lighttpd/error.log"
+server.errorlog = "/var/log/lighttpd/error-pihole.log"
 server.pid-file = "/run/lighttpd.pid"
 server.username = "www-data"
 server.groupname = "www-data"
 server.port = 80
-accesslog.filename = "/var/log/lighttpd/access.log"
+accesslog.filename = "/var/log/lighttpd/access-pihole.log"
 accesslog.format = "%{%s}t|%V|%r|%s|%b"

+# Allow streaming response
+# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
+server.stream-response-body = 1
+#ssl.read-ahead = "disable"
+
 index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
 url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
 static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )

@@ -29,14 +29,19 @@ server.modules = (
 server.document-root = "/var/www/html"
 server.error-handler-404 = "/pihole/index.php"
 server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
-server.errorlog = "/var/log/lighttpd/error.log"
+server.errorlog = "/var/log/lighttpd/error-pihole.log"
 server.pid-file = "/run/lighttpd.pid"
 server.username = "lighttpd"
 server.groupname = "lighttpd"
 server.port = 80
-accesslog.filename = "/var/log/lighttpd/access.log"
+accesslog.filename = "/var/log/lighttpd/access-pihole.log"
 accesslog.format = "%{%s}t|%V|%r|%s|%b"

+# Allow streaming response
+# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
+server.stream-response-body = 1
+#ssl.read-ahead = "disable"
+
 index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
 url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
 static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
@@ -239,10 +239,10 @@ os_check() {
         printf " If you wish to attempt to continue anyway, you can try one of the following commands to skip this check:\\n"
         printf "\\n"
         printf " e.g: If you are seeing this message on a fresh install, you can run:\\n"
-        printf " %bcurl -sSL https://install.pi-hole.net | PIHOLE_SKIP_OS_CHECK=true sudo -E bash%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}"
+        printf " %bcurl -sSL https://install.pi-hole.net | sudo PIHOLE_SKIP_OS_CHECK=true bash%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}"
         printf "\\n"
         printf " If you are seeing this message after having run pihole -up:\\n"
-        printf " %bPIHOLE_SKIP_OS_CHECK=true sudo -E pihole -r%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}"
+        printf " %bsudo PIHOLE_SKIP_OS_CHECK=true pihole -r%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}"
         printf " (In this case, your previous run of pihole -up will have already updated the local repository)\\n"
         printf "\\n"
         printf " It is possible that the installation will still fail at this stage due to an unsupported configuration.\\n"

@@ -259,6 +259,29 @@ os_check() {
     fi
 }

+# This function waits for dpkg to unlock, which signals that the previous apt-get command has finished.
+test_dpkg_lock() {
+    i=0
+    printf " %b Waiting for package manager to finish (up to 30 seconds)\\n" "${INFO}"
+    # fuser is a program to show which processes use the named files, sockets, or filesystems
+    # So while the lock is held,
+    while fuser /var/lib/dpkg/lock >/dev/null 2>&1
+    do
+        # we wait half a second,
+        sleep 0.5
+        # increase the iterator,
+        ((i=i+1))
+        # exit if waiting for more than 30 seconds
+        if [[ $i -gt 60 ]]; then
+            printf " %b %bError: Could not verify package manager finished and released lock. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${COL_NC}"
+            printf " Attempt to install packages manually and retry.\\n"
+            exit 1;
+        fi
+    done
+    # and then report success once dpkg is unlocked.
+    return 0
+}
+
 # Compatibility
 package_manager_detect() {
     # First check to see if apt-get is installed.

@@ -287,7 +310,7 @@ package_manager_detect() {
     # Packages required to run this install script (stored as an array)
     INSTALLER_DEPS=(git iproute2 whiptail ca-certificates)
     # Packages required to run Pi-hole (stored as an array)
-    PIHOLE_DEPS=(cron curl iputils-ping lsof psmisc sudo unzip idn2 sqlite3 libcap2-bin dns-root-data libcap2 netcat)
+    PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip idn2 libcap2-bin dns-root-data libcap2 netcat-openbsd procps)
     # Packages required for the Web admin interface (stored as an array)
     # It's useful to separate this from Pi-hole, since the two repos are also setup separately
     PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-sqlite3" "${phpVer}-xml" "${phpVer}-intl")

@@ -302,22 +325,6 @@ package_manager_detect() {
     # and config file
     LIGHTTPD_CFG="lighttpd.conf.debian"

-    # This function waits for dpkg to unlock, which signals that the previous apt-get command has finished.
-    test_dpkg_lock() {
-        i=0
-        # fuser is a program to show which processes use the named files, sockets, or filesystems
-        # So while the lock is held,
-        while fuser /var/lib/dpkg/lock >/dev/null 2>&1
-        do
-            # we wait half a second,
-            sleep 0.5
-            # increase the iterator,
-            ((i=i+1))
-        done
-        # and then report success once dpkg is unlocked.
-        return 0
-    }
-
 # If apt-get is not found, check for rpm.
 elif is_command rpm ; then
     # Then check if dnf or yum is the package manager

@@ -332,7 +339,7 @@ package_manager_detect() {
     PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
     OS_CHECK_DEPS=(grep bind-utils)
     INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig ca-certificates)
-    PIHOLE_DEPS=(cronie curl findutils sudo unzip libidn2 psmisc sqlite libcap lsof nmap-ncat)
+    PIHOLE_DEPS=(cronie curl findutils sudo unzip libidn2 psmisc libcap nmap-ncat)
     PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
     LIGHTTPD_USER="lighttpd"
     LIGHTTPD_GROUP="lighttpd"

@@ -939,7 +946,7 @@ setDNS() {
     fi

     # Prompt the user to enter custom upstream servers
-    piholeDNS=$(whiptail --backtitle "Specify Upstream DNS Provider(s)" --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\\n\\nFor example '8.8.8.8, 8.8.4.4'" "${r}" "${c}" "${prePopulate}" 3>&1 1>&2 2>&3) || \
+    piholeDNS=$(whiptail --backtitle "Specify Upstream DNS Provider(s)" --inputbox "Enter your desired upstream DNS provider(s), separated by a comma. If you want to specify a port other than 53, separate it with a hash.\\n\\nFor example '8.8.8.8, 8.8.4.4' or '127.0.0.1#5335'" "${r}" "${c}" "${prePopulate}" 3>&1 1>&2 2>&3) || \
     { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
     # Clean user input and replace whitespace with comma.
     piholeDNS=$(sed 's/[, \t]\+/,/g' <<< "${piholeDNS}")

@@ -1128,8 +1135,11 @@ chooseBlocklists() {
         appendToListsFile "${choice}"
     done
     # Create an empty adList file with appropriate permissions.
-    touch "${adlistFile}"
-    chmod 644 "${adlistFile}"
+    if [ ! -f "${adlistFile}" ]; then
+        install -m 644 /dev/null "${adlistFile}"
+    else
+        chmod 644 "${adlistFile}"
+    fi
 }

 # Accept a string parameter, it must be one of the default lists

@@ -1299,10 +1309,10 @@ installConfigs() {
     echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
     chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"

-    # Install empty file if it does not exist
+    # Install template file if it does not exist
     if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then
         install -d -m 0755 ${PI_HOLE_CONFIG_DIR}
-        if ! install -o pihole -m 664 /dev/null "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then
+        if ! install -T -o pihole -m 664 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.conf" "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then
             printf " %bError: Unable to initialize configuration file %s/pihole-FTL.conf\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}"
             return 1
         fi

@@ -1316,6 +1326,9 @@ installConfigs() {
         fi
     fi

+    # Install pihole-FTL.service
+    install -T -m 0755 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.service" "/etc/init.d/pihole-FTL"
+
     # If the user chose to install the dashboard,
     if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
         # and if the Web server conf directory does not exist,

@@ -1330,8 +1343,9 @@ installConfigs() {
         # and copy in the config file Pi-hole needs
         install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} "${lighttpdConfig}"
         # Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it
-        touch /etc/lighttpd/external.conf
-        chmod 644 /etc/lighttpd/external.conf
+        if [ ! -f /etc/lighttpd/external.conf ]; then
+            install -m 644 /dev/null /etc/lighttpd/external.conf
+        fi
         # If there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config
         if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then
             sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"\/pihole\/custom\.php"/' "${lighttpdConfig}"

@@ -1371,7 +1385,12 @@ install_manpage() {
     # Testing complete, copy the files & update the man db
     install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8
     install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8
-    install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.conf.5 /usr/local/share/man/man5/pihole-FTL.conf.5
+
+    # remove previously installed "pihole-FTL.conf.5" man page
+    if [[ -f "/usr/local/share/man/man5/pihole-FTL.conf.5" ]]; then
+        rm /usr/local/share/man/man5/pihole-FTL.conf.5
+    fi
+
     if mandb -q &>/dev/null; then
         # Updated successfully
         printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}"

@@ -1379,7 +1398,7 @@ install_manpage() {
     else
         # Something is wrong with the system's man installation, clean up
         # our files, (leave everything how we found it).
-        rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5
+        rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8
         printf "%b %b man page db not updated, man pages not installed\\n" "${OVER}" "${CROSS}"
     fi
 }

@@ -1731,7 +1750,7 @@ finalExports() {
     # If the setup variable file exists,
     if [[ -e "${setupVars}" ]]; then
         # update the variables in the file
-        sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;/DNS_FQDN_REQUIRED/d;/DNS_BOGUS_PRIV/d;' "${setupVars}"
+        sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;/DNS_FQDN_REQUIRED/d;/DNS_BOGUS_PRIV/d;/DNSMASQ_LISTENING/d;' "${setupVars}"
     fi
     # echo the information to the user
     {

@@ -1747,6 +1766,7 @@ finalExports() {
     echo "CACHE_SIZE=${CACHE_SIZE}"
     echo "DNS_FQDN_REQUIRED=${DNS_FQDN_REQUIRED:-true}"
     echo "DNS_BOGUS_PRIV=${DNS_BOGUS_PRIV:-true}"
+    echo "DNSMASQ_LISTENING=${DNSMASQ_LISTENING:-local}"
     }>> "${setupVars}"
     chmod 644 "${setupVars}"

@@ -2100,9 +2120,6 @@ FTLinstall() {
     # Move into the temp ftl directory
     pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }

-    # Always replace pihole-FTL.service
-    install -T -m 0755 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.service" "/etc/init.d/pihole-FTL"
-
     local ftlBranch
     local url

@@ -2199,7 +2216,7 @@ get_binary_name() {
     local rev
     rev=$(uname -m | sed "s/[^0-9]//g;")
     local lib
-    lib=$(ldd /bin/ls | grep -E '^\s*/lib' | awk '{ print $1 }')
+    lib=$(ldd "$(which sh)" | grep -E '^\s*/lib' | awk '{ print $1 }')
     if [[ "${lib}" == "/lib/ld-linux-aarch64.so.1" ]]; then
         printf "%b %b Detected AArch64 (64 Bit ARM) processor\\n" "${OVER}" "${TICK}"
         # set the binary to be used

@@ -2596,6 +2613,19 @@ main() {
     # Fixes a problem reported on Ubuntu 18.04 where trying to start
     # the service before enabling causes installer to exit
     enable_service pihole-FTL

+    # If this is an update from a previous Pi-hole installation
+    # we need to move any existing `pihole*` logs from `/var/log` to `/var/log/pihole`
+    # if /var/log/pihole.log is not a symlink (set during FTL startup) move the files
+    # can be removed with Pi-hole v6.0
+    # To be sure FTL is not running when we move the files we explicitly stop it here
+
+    stop_service pihole-FTL &> /dev/null
+
+    if [ -f /var/log/pihole.log ] && [ ! -L /var/log/pihole.log ]; then
+        mv /var/log/pihole*.* /var/log/pihole/ 2>/dev/null
+    fi
+
     restart_service pihole-FTL

     # Download and compile the aggregated block list
|
|
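The get_binary_name() hunk above switches the loader probe from /bin/ls to the system shell, so the detected dynamic loader comes from a binary that is guaranteed to exist even on minimal systems. A standalone sketch of that probe follows; the echoed messages are illustrative and only the two loader paths named in the diff are matched explicitly:

#!/usr/bin/env bash
# Minimal sketch of the loader probe used by get_binary_name() after this change.
lib=$(ldd "$(which sh)" | grep -E '^\s*/lib' | awk '{ print $1 }')
case "${lib}" in
    /lib/ld-linux-aarch64.so.1) echo "AArch64 (64 bit ARM) loader detected" ;;
    /lib/ld-linux-armhf.so.3)   echo "ARM hard-float loader detected" ;;
    *)                          echo "loader: ${lib:-none found}" ;;
esac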
@@ -11,10 +11,9 @@
 source "/opt/pihole/COL_TABLE"

 while true; do
-    read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " yn
-    case ${yn} in
+    read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " answer
+    case ${answer} in
         [Yy]* ) break;;
-        [Nn]* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
         * ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
     esac
 done

@@ -76,8 +75,8 @@ removeAndPurge() {
 for i in "${DEPS[@]}"; do
     if package_check "${i}" > /dev/null; then
         while true; do
-            read -rp " ${QST} Do you wish to remove ${COL_WHITE}${i}${COL_NC} from your system? [Y/N] " yn
-            case ${yn} in
+            read -rp " ${QST} Do you wish to remove ${COL_WHITE}${i}${COL_NC} from your system? [Y/N] " answer
+            case ${answer} in
                 [Yy]* )
                     echo -ne " ${INFO} Removing ${i}...";
                     ${SUDO} "${PKG_REMOVE[@]}" "${i}" &> /dev/null;

@@ -147,6 +146,7 @@ removeNoPurge() {
 ${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null
 ${SUDO} rm -f /etc/dnsmasq.d/06-rfc6761.conf &> /dev/null
 ${SUDO} rm -rf /var/log/*pihole* &> /dev/null
+${SUDO} rm -rf /var/log/pihole/*pihole* &> /dev/null
 ${SUDO} rm -rf /etc/pihole/ &> /dev/null
 ${SUDO} rm -rf /etc/.pihole/ &> /dev/null
 ${SUDO} rm -rf /opt/pihole/ &> /dev/null

@@ -215,8 +215,8 @@ while true; do
     echo -n "${i} "
 done
 echo "${COL_NC}"
-read -rp " ${QST} Do you wish to go through each dependency for removal? (Choosing No will leave all dependencies installed) [Y/n] " yn
-case ${yn} in
+read -rp " ${QST} Do you wish to go through each dependency for removal? (Choosing No will leave all dependencies installed) [Y/n] " answer
+case ${answer} in
     [Yy]* ) removeAndPurge; break;;
     [Nn]* ) removeNoPurge; break;;
     * ) removeAndPurge; break;;
gravity.sh (94 changed lines)

@@ -73,9 +73,9 @@ if [[ -r "${piholeDir}/pihole.conf" ]]; then
     echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}"
 fi

-# Generate new sqlite3 file from schema template
+# Generate new SQLite3 file from schema template
 generate_gravity_database() {
-    if ! sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
+    if ! pihole-FTL sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
         echo -e " ${CROSS} Unable to create ${gravityDBfile}"
         return 1
     fi
@@ -85,12 +85,12 @@ generate_gravity_database() {

 # Copy data from old to new database file and swap them
 gravity_swap_databases() {
-    local str copyGravity
+    local str copyGravity oldAvail
     str="Building tree"
     echo -ne " ${INFO} ${str}..."

     # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
-    output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
+    output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -102,22 +102,6 @@ gravity_swap_databases() {
     str="Swapping databases"
     echo -ne " ${INFO} ${str}..."

-    # Gravity copying SQL script
-    copyGravity="$(cat "${gravityDBcopy}")"
-    if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
-        # Replace default gravity script location by custom location
-        copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
-    fi
-
-    output=$( { sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
-    status="$?"
-
-    if [[ "${status}" -ne 0 ]]; then
-        echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
-        return 1
-    fi
-    echo -e "${OVER} ${TICK} ${str}"
-
     # Swap databases and remove or conditionally rename old database
     # Number of available blocks on disk
     availableBlocks=$(stat -f --format "%a" "${gravityDIR}")
@@ -125,18 +109,24 @@ gravity_swap_databases() {
     gravityBlocks=$(stat --format "%b" ${gravityDBfile})
     # Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
     # Better be safe than sorry...
+    oldAvail=false
     if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then
-        echo -e " ${TICK} The old database remains available."
+        oldAvail=true
         mv "${gravityDBfile}" "${gravityOLDfile}"
     else
         rm "${gravityDBfile}"
     fi
     mv "${gravityTEMPfile}" "${gravityDBfile}"
+    echo -e "${OVER} ${TICK} ${str}"
+
+    if $oldAvail; then
+        echo -e " ${TICK} The old database remains available."
+    fi
 }

 # Update timestamp when the gravity table was last updated successfully
 update_gravity_timestamp() {
-    output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -177,7 +167,7 @@ database_table_from_file() {

     # Get MAX(id) from domainlist when INSERTing into this table
     if [[ "${table}" == "domainlist" ]]; then
-        rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
+        rowid="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
         if [[ -z "$rowid" ]]; then
             rowid=0
         fi
@@ -207,7 +197,7 @@ database_table_from_file() {
     # Store domains in database table specified by ${table}
     # Use printf as .mode and .import need to be on separate lines
     # see https://unix.stackexchange.com/a/445615/83260
-    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -227,7 +217,7 @@ database_table_from_file() {

 # Update timestamp of last update of this list. We store this in the "old" database as all values in the new database will later be overwritten
 database_adlist_updated() {
-    output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -238,7 +228,7 @@ database_adlist_updated() {

 # Check if a column with name ${2} exists in gravity table with name ${1}
 gravity_column_exists() {
-    output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     if [[ "${output}" == "1" ]]; then
         return 0 # Bash 0 is success
     fi
@@ -253,7 +243,7 @@ database_adlist_number() {
         return;
     fi

-    output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_source_lines}" "${num_invalid}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_source_lines}" "${num_invalid}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -269,7 +259,7 @@ database_adlist_status() {
         return;
     fi

-    output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -386,9 +376,9 @@ gravity_DownloadBlocklists() {
     fi

     # Retrieve source URLs from gravity database
-    # We source only enabled adlists, sqlite3 stores boolean values as 0 (false) or 1 (true)
-    mapfile -t sources <<< "$(sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
-    mapfile -t sourceIDs <<< "$(sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
+    # We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true)
+    mapfile -t sources <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
+    mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"

     # Parse source domains from $sources
     mapfile -t sourceDomains <<< "$(
@@ -402,14 +392,12 @@ gravity_DownloadBlocklists() {
     )"

     local str="Pulling blocklist source list into range"
+    echo -e "${OVER} ${TICK} ${str}"

-    if [[ -n "${sources[*]}" ]] && [[ -n "${sourceDomains[*]}" ]]; then
-        echo -e "${OVER} ${TICK} ${str}"
-    else
-        echo -e "${OVER} ${CROSS} ${str}"
+    if [[ -z "${sources[*]}" ]] || [[ -z "${sourceDomains[*]}" ]]; then
         echo -e " ${INFO} No source list found, or it is empty"
         echo ""
-        return 1
+        unset sources
     fi

     local url domain agent cmd_ext str target compression
@@ -419,7 +407,7 @@ gravity_DownloadBlocklists() {
     str="Preparing new gravity database"
     echo -ne " ${INFO} ${str}..."
     rm "${gravityTEMPfile}" > /dev/null 2>&1
-    output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
+    output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -477,9 +465,28 @@ gravity_DownloadBlocklists() {
         echo ""
     done

+    str="Creating new gravity databases"
+    echo -ne " ${INFO} ${str}..."
+
+    # Gravity copying SQL script
+    copyGravity="$(cat "${gravityDBcopy}")"
+    if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
+        # Replace default gravity script location by custom location
+        copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
+    fi
+
+    output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
+    status="$?"
+
+    if [[ "${status}" -ne 0 ]]; then
+        echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
+        return 1
+    fi
+    echo -e "${OVER} ${TICK} ${str}"
+
     str="Storing downloaded domains in new gravity database"
     echo -ne " ${INFO} ${str}..."
-    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
@@ -520,8 +527,9 @@ parseList() {
     # This sed does the following things:
     # 1. Remove all domains containing invalid characters. Valid are: a-z, A-Z, 0-9, dot (.), minus (-), underscore (_)
     # 2. Append ,adlistID to every line
-    # 3. Ensures there is a newline on the last line
-    sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
+    # 3. Remove trailing period (see https://github.com/pi-hole/pi-hole/issues/4701)
+    # 4. Ensures there is a newline on the last line
+    sed -e "/[^a-zA-Z0-9.\_-]/d;s/\.$//;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
     # Find (up to) five domains containing invalid characters (see above)
     incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"

@@ -784,12 +792,12 @@ gravity_Table_Count() {
     local table="${1}"
     local str="${2}"
     local num
-    num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
+    num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
     if [[ "${table}" == "vw_gravity" ]]; then
         local unique
-        unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
+        unique="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
         echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
-        sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
+        pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
     else
         echo -e " ${INFO} Number of ${str}: ${num}"
     fi
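The parseList hunk above adds an s/\.$// step so trailing dots are stripped before the adlist ID is appended. A quick standalone demonstration of the filtering and rewriting part of that sed; the adlist ID 5 and the sample domains are invented for the example, and the final /.$/a\\ piece of the real command, which only guarantees a trailing newline, is left out here:

#!/usr/bin/env bash
# Demonstrates the domain filtering/rewriting done by parseList() above.
adlistID=5                             # example ID; gravity supplies the real one
src="$(mktemp)"
printf 'example.com\nads.example.net.\nbad_domain!\nunder_score.org\n' > "${src}"
sed -e "/[^a-zA-Z0-9.\_-]/d;s/\.$//;s/$/,${adlistID}/" "${src}"
# Prints:
#   example.com,5
#   ads.example.net,5    (trailing dot removed)
#   under_score.org,5    ("bad_domain!" dropped because of the "!")
rm -f "${src}"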
@@ -144,7 +144,9 @@ Command line arguments can be arbitrarily combined, e.g:
 Start ftl in foreground with more verbose logging, process everything and shutdown immediately
 .br
 .SH "SEE ALSO"
-\fBpihole\fR(8), \fBpihole-FTL.conf\fR(5)
+\fBpihole\fR(8)
+.br
+\fBFor FTL's config options please see https://docs.pi-hole.net/ftldns/configfile/\fR
 .br
 .SH "COLOPHON"
@@ -1,313 +0,0 @@
-.TH "pihole-FTL.conf" "5" "pihole-FTL.conf" "pihole-FTL.conf" "November 2020"
-.SH "NAME"
-
-pihole-FTL.conf - FTL's config file
-.br
-.SH "DESCRIPTION"
-
-/etc/pihole/pihole-FTL.conf will be read by \fBpihole-FTL(8)\fR on startup.
-.br
-For each setting the option shown first is the default.
-.br
-
-\fBBLOCKINGMODE=IP|IP-AAAA-NODATA|NODATA|NXDOMAIN|NULL\fR
-.br
-How should FTL reply to blocked queries?
-
-IP - Pi-hole's IPs for blocked domains
-
-IP-AAAA-NODATA - Pi-hole's IP + NODATA-IPv6 for blocked domains
-
-NODATA - Using NODATA for blocked domains
-
-NXDOMAIN - NXDOMAIN for blocked domains
-
-NULL - Null IPs for blocked domains
-.br
-
-\fBCNAME_DEEP_INSPECT=true|false\fR
-.br
-Use this option to disable deep CNAME inspection. This might be beneficial for very low-end devices.
-.br
-
-\fBBLOCK_ESNI=true|false\fR
-.br
-Block requests to _esni.* sub-domains.
-.br
-
-\fBMAXLOGAGE=24.0\fR
-.br
-Up to how many hours of queries should be imported from the database and logs?
-.br
-Maximum is 744 (31 days)
-.br
-
-\fBPRIVACYLEVEL=0|1|2|3|4\fR
-.br
-Privacy level used to collect Pi-hole statistics.
-.br
-0 - show everything
-.br
-1 - hide domains
-.br
-2 - hide domains and clients
-.br
-3 - anonymous mode (hide everything)
-.br
-4 - disable all statistics
-.br
-
-\fBIGNORE_LOCALHOST=no|yes\fR
-.br
-Should FTL ignore queries coming from the local machine?
-.br
-
-\fBAAAA_QUERY_ANALYSIS=yes|no\fR
-.br
-Should FTL analyze AAAA queries?
-.br
-
-\fBANALYZE_ONLY_A_AND_AAAA=false|true\fR
-.br
-Should FTL only analyze A and AAAA queries?
-.br
-
-\fBSOCKET_LISTENING=localonly|all\fR
-.br
-Listen only for local socket connections on the API port or permit all connections.
-.br
-
-\fBFTLPORT=4711\fR
-.br
-On which port should FTL be listening?
-.br
-
-\fBRESOLVE_IPV6=yes|no\fR
-.br
-Should FTL try to resolve IPv6 addresses to hostnames?
-.br
-
-\fBRESOLVE_IPV4=yes|no\fR
-.br
-Should FTL try to resolve IPv4 addresses to hostnames?
-.br
-
-\fBDELAY_STARTUP=0\fR
-.br
-Time in seconds (between 0 and 300) to delay FTL startup.
-.br
-
-\fBNICE=-10\fR
-.br
-Set the niceness of the Pi-hole FTL process.
-.br
-Can be disabled altogether by setting a value of -999.
-.br
-
-\fBNAMES_FROM_NETDB=true|false\fR
-.br
-Control whether FTL should use a fallback option and try to obtain client names from checking the network table.
-.br
-E.g. IPv6 clients without a hostname will be compared via MAC address to known clients.
-.br
-
-\fB\fBREFRESH_HOSTNAMES=IPV4|ALL|NONE\fR
-.br
-Change how (and if) hourly PTR requests are made to check for changes in client and upstream server hostnames:
-.br
-IPV4 - Do the hourly PTR lookups only for IPv4 addresses resolving issues in networks with many short-lived PE IPv6 addresses.
-.br
-ALL - Do the hourly PTR lookups for all addresses. This can create a lot of PTR queries in networks with many IPv6 addresses.
-.br
-NONE - Don't do hourly PTR lookups. Look up hostnames once (when first seeing a client) and never again. Future hostname changes may be missed.
-.br
-
-\fBMAXNETAGE=365\fR
-.br
-IP addresses (and associated host names) older than the specified number of days are removed.
-.br
-This avoids dead entries in the network overview table.
-.br
-
-\fBEDNS0_ECS=true|false\fR
-.br
-Should we overwrite the query source when client information is provided through EDNS0 client subnet (ECS) information?
-.br
-
-\fBPARSE_ARP_CACHE=true|false\fR
-.br
-Parse ARP cache to fill network overview table.
-.br
-
-\fBDBIMPORT=yes|no\fR
-.br
-Should FTL load information from the database on startup to be aware of the most recent history?
-.br
-
-\fBMAXDBDAYS=365\fR
-.br
-How long should queries be stored in the database? Setting this to 0 disables the database
-.br
-
-\fBDBINTERVAL=1.0\fR
-.br
-How often do we store queries in FTL's database [minutes]?
-.br
-Accepts value between 0.1 (6 sec) and 1440 (1 day)
-.br
-
-\fBDBFILE=/etc/pihole/pihole-FTL.db\fR
-.br
-Specify path and filename of FTL's SQLite long-term database.
-.br
-Setting this to DBFILE= disables the database altogether
-.br
-
-\fBLOGFILE=/var/log/pihole-FTL.log\fR
-.br
-The location of FTL's log file.
-.br
-
-\fBPIDFILE=/run/pihole-FTL.pid\fR
-.br
-The file which contains the PID of FTL's main process.
-.br
-
-\fBPORTFILE=/run/pihole-FTL.port\fR
-.br
-Specify path and filename where the FTL process will write its API port number.
-.br
-
-\fBSOCKETFILE=/run/pihole/FTL.sock\fR
-.br
-The file containing the socket FTL's API is listening on.
-.br
-
-\fBSETUPVARSFILE=/etc/pihole/setupVars.conf\fR
-.br
-The config file of Pi-hole containing, e.g., the current blocking status (do not change).
-.br
-
-\fBMACVENDORDB=/etc/pihole/macvendor.db\fR
-.br
-The database containing MAC -> Vendor information for the network table.
-.br
-
-\fBGRAVITYDB=/etc/pihole/gravity.db\fR
-.br
-Specify path and filename of FTL's SQLite3 gravity database. This database contains all domains relevant for Pi-hole's DNS blocking.
-.br
-
-\fBDEBUG_ALL=false|true\fR
-.br
-Enable all debug flags. If this is set to true, all other debug config options are ignored.
-.br
-
-\fBDEBUG_DATABASE=false|true\fR
-.br
-Print debugging information about database actions such as SQL statements and performance.
-.br
-
-\fBDEBUG_NETWORKING=false|true\fR
-.br
-Prints a list of the detected network interfaces on the startup of FTL.
-.br
-
-\fBDEBUG_LOCKS=false|true\fR
-.br
-Print information about shared memory locks.
-.br
-Messages will be generated when waiting, obtaining, and releasing a lock.
-.br
-
-\fBDEBUG_QUERIES=false|true\fR
-.br
-Print extensive DNS query information (domains, types, replies, etc.).
-.br
-
-\fBDEBUG_FLAGS=false|true\fR
-.br
-Print flags of queries received by the DNS hooks.
-.br
-Only effective when \fBDEBUG_QUERIES\fR is enabled as well.
-
-\fBDEBUG_SHMEM=false|true\fR
-.br
-Print information about shared memory buffers.
-.br
-Messages are either about creating or enlarging shmem objects or string injections.
-.br
-
-\fBDEBUG_GC=false|true\fR
-.br
-Print information about garbage collection (GC):
-.br
-What is to be removed, how many have been removed and how long did GC take.
-.br
-
-\fBDEBUG_ARP=false|true\fR
-.br
-Print information about ARP table processing:
-.br
-How long did parsing take, whether read MAC addresses are valid, and if the macvendor.db file exists.
-.br
-
-\fBDEBUG_REGEX=false|true\fR
-.br
-Controls if FTL should print extended details about regex matching.
-.br
-
-\fBDEBUG_API=false|true\fR
-.br
-Print extra debugging information during telnet API calls.
-.br
-Currently only used to send extra information when getting all queries.
-.br
-
-\fBDEBUG_OVERTIME=false|true\fR
-.br
-Print information about overTime memory operations, such as initializing or moving overTime slots.
-.br
-
-\fBDEBUG_EXTBLOCKED=false|true\fR
-.br
-Print information about why FTL decided that certain queries were recognized as being externally blocked.
-.br
-
-\fBDEBUG_CAPS=false|true\fR
-.br
-Print information about POSIX capabilities granted to the FTL process.
-.br
-The current capabilities are printed on receipt of SIGHUP i.e. after executing `killall -HUP pihole-FTL`.
-.br
-
-\fBDEBUG_DNSMASQ_LINES=false|true\fR
-.br
-Print file and line causing a dnsmasq event into FTL's log files.
-.br
-This is handy to implement additional hooks missing from FTL.
-.br
-
-\fBDEBUG_VECTORS=false|true\fR
-.br
-FTL uses dynamically allocated vectors for various tasks.
-.br
-This config option enables extensive debugging information such as information about allocation, referencing, deletion, and appending.
-.br
-
-\fBDEBUG_RESOLVER=false|true\fR
-.br
-Extensive information about hostname resolution like which DNS servers are used in the first and second hostname resolving tries.
-.br
-
-.SH "SEE ALSO"
-
-\fBpihole\fR(8), \fBpihole-FTL\fR(8)
-.br
-.SH "COLOPHON"
-
-Pi-hole : The Faster-Than-Light (FTL) Engine is a lightweight, purpose-built daemon used to provide statistics needed for the Pi-hole Web Interface, and its API can be easily integrated into your own projects. Although it is an optional component of the Pi-hole ecosystem, it will be installed by default to provide statistics. As the name implies, FTL does its work \fIvery quickly\fR!
-.br
-
-Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net
-.br
@@ -187,12 +187,12 @@ Available commands and options:

 (Logging options):
 .br
-on Enable the Pi-hole log at /var/log/pihole.log
+on Enable the Pi-hole log at /var/log/pihole/pihole.log
 .br
 off Disable and flush the Pi-hole log at
-/var/log/pihole.log
+/var/log/pihole/pihole.log
 .br
-off noflush Disable the Pi-hole log at /var/log/pihole.log
+off noflush Disable the Pi-hole log at /var/log/pihole/pihole.log
 .br

 \fB-up, updatePihole\fR [--check-only]
pihole (86 changed lines)

@@ -21,6 +21,9 @@ readonly FTL_PID_FILE="/run/pihole-FTL.pid"
 readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
 source "${colfile}"

+utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
+source "${utilsfile}"
+
 webpageFunc() {
     source "${PI_HOLE_SCRIPT_DIR}/webpage.sh"
     main "$@"
@@ -223,8 +226,7 @@ Time:
         fi

         local str="Pi-hole Disabled"
-        sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
-        echo "BLOCKING_ENABLED=false" >> "${setupVars}"
+        addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "false"
     fi
 else
     # Enable Pi-hole
@@ -236,8 +238,7 @@ Time:
     echo -e " ${INFO} Enabling blocking"
     local str="Pi-hole Enabled"

-    sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
-    echo "BLOCKING_ENABLED=true" >> "${setupVars}"
+    addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "true"
 fi

 restartDNS reload-lists
@@ -253,14 +254,14 @@ Example: 'pihole logging on'
 Specify whether the Pi-hole log should be used

 Options:
-  on Enable the Pi-hole log at /var/log/pihole.log
-  off Disable and flush the Pi-hole log at /var/log/pihole.log
-  off noflush Disable the Pi-hole log at /var/log/pihole.log"
+  on Enable the Pi-hole log at /var/log/pihole/pihole.log
+  off Disable and flush the Pi-hole log at /var/log/pihole/pihole.log
+  off noflush Disable the Pi-hole log at /var/log/pihole/pihole.log"
     exit 0
 elif [[ "${1}" == "off" ]]; then
     # Disable logging
-    sed -i 's/^log-queries/#log-queries/' /etc/dnsmasq.d/01-pihole.conf
-    sed -i 's/^QUERY_LOGGING=true/QUERY_LOGGING=false/' /etc/pihole/setupVars.conf
+    removeKey /etc/dnsmasq.d/01-pihole.conf "log-queries"
+    addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "false"
     if [[ "${2}" != "noflush" ]]; then
         # Flush logs
         "${PI_HOLE_BIN_DIR}"/pihole -f
@@ -269,8 +270,8 @@ Options:
     local str="Logging has been disabled!"
 elif [[ "${1}" == "on" ]]; then
     # Enable logging
-    sed -i 's/^#log-queries/log-queries/' /etc/dnsmasq.d/01-pihole.conf
-    sed -i 's/^QUERY_LOGGING=false/QUERY_LOGGING=true/' /etc/pihole/setupVars.conf
+    addKey /etc/dnsmasq.d/01-pihole.conf "log-queries"
+    addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "true"
     echo -e " ${INFO} Enabling logging..."
     local str="Logging has been enabled!"
 else
@@ -283,26 +284,29 @@ Options:
 }

 analyze_ports() {
+    local lv4 lv6 port=${1}
     # FTL is listening at least on at least one port when this
     # function is getting called
     # Check individual address family/protocol combinations
     # For a healthy Pi-hole, they should all be up (nothing printed)
-    if grep -q "IPv4.*UDP" <<< "${1}"; then
+    lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})"
+    if grep -q "udp " <<< "${lv4}"; then
         echo -e " ${TICK} UDP (IPv4)"
     else
         echo -e " ${CROSS} UDP (IPv4)"
     fi
-    if grep -q "IPv4.*TCP" <<< "${1}"; then
+    if grep -q "tcp " <<< "${lv4}"; then
         echo -e " ${TICK} TCP (IPv4)"
     else
         echo -e " ${CROSS} TCP (IPv4)"
     fi
-    if grep -q "IPv6.*UDP" <<< "${1}"; then
+    lv6="$(ss --ipv6 --listening --numeric --tcp --udp src :${port})"
+    if grep -q "udp " <<< "${lv6}"; then
         echo -e " ${TICK} UDP (IPv6)"
     else
         echo -e " ${CROSS} UDP (IPv6)"
     fi
-    if grep -q "IPv6.*TCP" <<< "${1}"; then
+    if grep -q "tcp " <<< "${lv6}"; then
         echo -e " ${TICK} TCP (IPv6)"
     else
         echo -e " ${CROSS} TCP (IPv6)"
@@ -312,9 +316,10 @@ analyze_ports() {

 statusFunc() {
     # Determine if there is pihole-FTL service is listening
-    local listening pid port
+    local pid port ftl_api_port

     pid="$(getFTLPID)"
+    ftl_api_port="$(getFTLAPIPort)"
     if [[ "$pid" -eq "-1" ]]; then
         case "${1}" in
             "web") echo "-1";;
@@ -322,9 +327,8 @@ statusFunc() {
         esac
         return 0
     else
-        #get the port pihole-FTL is listening on by using FTL's telnet API
-        port="$(echo ">dns-port >quit" | nc 127.0.0.1 4711)"
-        listening="$(lsof -Pni:${port})"
+        #get the DNS port pihole-FTL is listening on by using FTL's telnet API
+        port="$(echo ">dns-port >quit" | nc 127.0.0.1 "$ftl_api_port")"
         if [[ "${port}" == "0" ]]; then
             case "${1}" in
                 "web") echo "-1";;
@@ -334,7 +338,7 @@ statusFunc() {
         else
             if [[ "${1}" != "web" ]]; then
                 echo -e " ${TICK} FTL is listening on port ${port}"
-                analyze_ports "${listening}"
+                analyze_ports "${port}"
             fi
         fi
     fi
@@ -361,7 +365,7 @@ statusFunc() {
         # Enable blocking
         "${PI_HOLE_BIN_DIR}"/pihole enable
     fi
+    exit 0
 }

 tailFunc() {
@@ -378,7 +382,7 @@ tailFunc() {
     # Color blocklist/blacklist/wildcard entries as red
     # Color A/AAAA/DHCP strings as white
     # Color everything else as gray
-    tail -f /var/log/pihole.log | grep --line-buffered "${1}" | sed -E \
+    tail -f /var/log/pihole/pihole.log | grep --line-buffered "${1}" | sed -E \
         -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
         -e "s,(.*(blacklisted |gravity blocked ).*),${COL_RED}&${COL_NC}," \
         -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
@@ -492,8 +496,38 @@ if [[ $# = 0 ]]; then
     helpFunc
 fi

+# functions that do not require sudo power
 case "${1}" in
     "-h" | "help" | "--help" ) helpFunc;;
+    "-v" | "version" ) versionFunc "$@";;
+    "-c" | "chronometer" ) chronometerFunc "$@";;
+    "-q" | "query" ) queryFunc "$@";;
+    "status" ) statusFunc "$2";;
+    "-t" | "tail" ) tailFunc "$2";;
+    "tricorder" ) tricorderFunc;;
+
+    # we need to add all arguments that require sudo power to not trigger the * argument
+    "-w" | "whitelist" ) ;;
+    "-b" | "blacklist" ) ;;
+    "--wild" | "wildcard" ) ;;
+    "--regex" | "regex" ) ;;
+    "--white-regex" | "white-regex" ) ;;
+    "--white-wild" | "white-wild" ) ;;
+    "-f" | "flush" ) ;;
+    "-up" | "updatePihole" ) ;;
+    "-r" | "reconfigure" ) ;;
+    "-g" | "updateGravity" ) ;;
+    "-l" | "logging" ) ;;
+    "uninstall" ) ;;
+    "enable" ) ;;
+    "disable" ) ;;
+    "-d" | "debug" ) ;;
+    "restartdns" ) ;;
+    "-a" | "admin" ) ;;
+    "checkout" ) ;;
+    "updatechecker" ) ;;
+    "arpflush" ) ;;
+    * ) helpFunc;;
 esac

 # Must be root to use this tool
@@ -520,21 +554,13 @@ case "${1}" in
     "-up" | "updatePihole" ) updatePiholeFunc "$@";;
     "-r" | "reconfigure" ) reconfigurePiholeFunc;;
     "-g" | "updateGravity" ) updateGravityFunc "$@";;
-    "-c" | "chronometer" ) chronometerFunc "$@";;
-    "-h" | "help" ) helpFunc;;
-    "-v" | "version" ) versionFunc "$@";;
-    "-q" | "query" ) queryFunc "$@";;
     "-l" | "logging" ) piholeLogging "$@";;
     "uninstall" ) uninstallFunc;;
     "enable" ) piholeEnable 1;;
     "disable" ) piholeEnable 0 "$2";;
-    "status" ) statusFunc "$2";;
     "restartdns" ) restartDNS "$2";;
     "-a" | "admin" ) webpageFunc "$@";;
-    "-t" | "tail" ) tailFunc "$2";;
     "checkout" ) piholeCheckoutFunc "$@";;
-    "tricorder" ) tricorderFunc;;
     "updatechecker" ) updateCheckFunc "$@";;
     "arpflush" ) arpFunc "$@";;
-    * ) helpFunc;;
 esac
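analyze_ports now queries ss itself instead of parsing a pre-collected lsof listing, keying off the Netid column (udp/tcp) that ss prints when both protocols are requested. A standalone version of one of those checks, using port 53 purely as an example value:

#!/usr/bin/env bash
# Standalone version of the IPv4/UDP check from analyze_ports() above.
port=53   # example only; the script passes in the DNS port reported by FTL
lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})"
if grep -q "udp " <<< "${lv4}"; then
    echo "something is listening on UDP port ${port} (IPv4)"
else
    echo "nothing is listening on UDP port ${port} (IPv4)"
fi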
@@ -1,4 +1,4 @@
-FROM centos:8
+FROM quay.io/centos/centos:stream8
 RUN yum install -y git

 ENV GITDIR /etc/.pihole

@@ -1,4 +1,4 @@
-FROM buildpack-deps:hirsute-scm
+FROM buildpack-deps:impish-scm

 ENV GITDIR /etc/.pihole
 ENV SCRIPTDIR /opt/pihole
@@ -351,10 +351,6 @@ def test_installPihole_fresh_install_readableFiles(host):
         'r', '/usr/local/share/man/man8/pihole-FTL.8', piholeuser)
     actual_rc = host.run(check_man).rc
     assert exit_status_success == actual_rc
-    check_man = test_cmd.format(
-        'r', '/usr/local/share/man/man5/pihole-FTL.conf.5', piholeuser)
-    actual_rc = host.run(check_man).rc
-    assert exit_status_success == actual_rc
     # check not readable sudoers file
     check_sudo = test_cmd.format(
         'r', '/etc/sudoers.d/pihole', piholeuser)
@@ -679,17 +675,10 @@ def test_FTL_detect_aarch64_no_errors(host):
     '''
     # mock uname to return aarch64 platform
     mock_command('uname', {'-m': ('aarch64', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
     # mock ldd to respond with aarch64 shared library
-    mock_command(
-        'ldd',
-        {
-            '/bin/ls': (
-                '/lib/ld-linux-aarch64.so.1',
-                '0'
-            )
-        },
-        host
-    )
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux-aarch64.so.1', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -712,8 +701,10 @@ def test_FTL_detect_armv4t_no_errors(host):
     '''
     # mock uname to return armv4t platform
     mock_command('uname', {'-m': ('armv4t', '0')}, host)
-    # mock ldd to respond with ld-linux shared library
-    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
+    # mock ldd to respond with armv4t shared library
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux.so.3', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -736,8 +727,10 @@ def test_FTL_detect_armv5te_no_errors(host):
     '''
     # mock uname to return armv5te platform
     mock_command('uname', {'-m': ('armv5te', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
     # mock ldd to respond with ld-linux shared library
-    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, host)
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux.so.3', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -761,7 +754,9 @@ def test_FTL_detect_armv6l_no_errors(host):
     # mock uname to return armv6l platform
     mock_command('uname', {'-m': ('armv6l', '0')}, host)
     # mock ldd to respond with ld-linux-armhf shared library
-    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux-armhf.so.3', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -786,7 +781,9 @@ def test_FTL_detect_armv7l_no_errors(host):
     # mock uname to return armv7l platform
     mock_command('uname', {'-m': ('armv7l', '0')}, host)
     # mock ldd to respond with ld-linux-armhf shared library
-    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux-armhf.so.3', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -810,8 +807,10 @@ def test_FTL_detect_armv8a_no_errors(host):
     '''
     # mock uname to return armv8a platform
     mock_command('uname', {'-m': ('armv8a', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
     # mock ldd to respond with ld-linux-armhf shared library
-    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, host)
+    mock_command('ldd', {'/bin/sh': ('/lib/ld-linux-armhf.so.3', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -832,6 +831,8 @@ def test_FTL_detect_x86_64_no_errors(host):
     '''
     confirms only x86_64 package is downloaded for FTL engine
     '''
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -852,6 +853,8 @@ def test_FTL_detect_unknown_no_errors(host):
     ''' confirms only generic package is downloaded for FTL engine '''
     # mock uname to return generic platform
     mock_command('uname', {'-m': ('mips', '0')}, host)
+    # mock `which sh` to return `/bin/sh`
+    mock_command('which', {'sh': ('/bin/sh', '0')}, host)
     detectPlatform = host.run('''
     source /opt/pihole/basic-install.sh
     create_pihole_user
@@ -902,23 +905,6 @@ def test_FTL_binary_installed_and_responsive_no_errors(host):
     assert expected_stdout in installed_binary.stdout


-# def test_FTL_support_files_installed(host):
-#     '''
-#     confirms FTL support files are installed
-#     '''
-#     support_files = host.run('''
-#     source /opt/pihole/basic-install.sh
-#     FTLdetect
-#     stat -c '%a %n' /var/log/pihole-FTL.log
-#     stat -c '%a %n' /run/pihole-FTL.port
-#     stat -c '%a %n' /run/pihole-FTL.pid
-#     ls -lac /run
-#     ''')
-#     assert '644 /run/pihole-FTL.port' in support_files.stdout
-#     assert '644 /run/pihole-FTL.pid' in support_files.stdout
-#     assert '644 /var/log/pihole-FTL.log' in support_files.stdout
-
-
 def test_IPv6_only_link_local(host):
     '''
     confirms IPv6 blocking is disabled for Link-local address
test/test_any_utils.py (new file, 74 lines)

@@ -0,0 +1,74 @@
+def test_key_val_replacement_works(host):
+    ''' Confirms addOrEditKeyValPair either adds or replaces a key value pair in a given file '''
+    host.run('''
+    source /opt/pihole/utils.sh
+    addOrEditKeyValPair "./testoutput" "KEY_ONE" "value1"
+    addOrEditKeyValPair "./testoutput" "KEY_TWO" "value2"
+    addOrEditKeyValPair "./testoutput" "KEY_ONE" "value3"
+    addOrEditKeyValPair "./testoutput" "KEY_FOUR" "value4"
+    ''')
+    output = host.run('''
+    cat ./testoutput
+    ''')
+    expected_stdout = 'KEY_ONE=value3\nKEY_TWO=value2\nKEY_FOUR=value4\n'
+    assert expected_stdout == output.stdout
+
+
+def test_key_addition_works(host):
+    ''' Confirms addKey adds a key (no value) to a file without duplicating it '''
+    host.run('''
+    source /opt/pihole/utils.sh
+    addKey "./testoutput" "KEY_ONE"
+    addKey "./testoutput" "KEY_ONE"
+    addKey "./testoutput" "KEY_TWO"
+    addKey "./testoutput" "KEY_TWO"
+    addKey "./testoutput" "KEY_THREE"
+    addKey "./testoutput" "KEY_THREE"
+    ''')
+    output = host.run('''
+    cat ./testoutput
+    ''')
+    expected_stdout = 'KEY_ONE\nKEY_TWO\nKEY_THREE\n'
+    assert expected_stdout == output.stdout
+
+
+def test_key_removal_works(host):
+    ''' Confirms removeKey removes a key or key/value pair '''
+    host.run('''
+    source /opt/pihole/utils.sh
+    addOrEditKeyValPair "./testoutput" "KEY_ONE" "value1"
+    addOrEditKeyValPair "./testoutput" "KEY_TWO" "value2"
+    addOrEditKeyValPair "./testoutput" "KEY_THREE" "value3"
+    addKey "./testoutput" "KEY_FOUR"
+    removeKey "./testoutput" "KEY_TWO"
+    removeKey "./testoutput" "KEY_FOUR"
+    ''')
+    output = host.run('''
+    cat ./testoutput
+    ''')
+    expected_stdout = 'KEY_ONE=value1\nKEY_THREE=value3\n'
+    assert expected_stdout == output.stdout
+
+
+def test_getFTLAPIPort_default(host):
+    ''' Confirms getFTLAPIPort returns the default API port '''
+    output = host.run('''
+    source /opt/pihole/utils.sh
+    getFTLAPIPort
+    ''')
+    expected_stdout = '4711\n'
+    assert expected_stdout == output.stdout
+
+
+def test_getFTLAPIPort_custom(host):
+    ''' Confirms getFTLAPIPort returns a custom API port in a custom PORTFILE location '''
+    host.run('''
+    echo "PORTFILE=/tmp/port.file" > /etc/pihole/pihole-FTL.conf
+    echo "1234" > /tmp/port.file
+    ''')
+    output = host.run('''
+    source /opt/pihole/utils.sh
+    getFTLAPIPort
+    ''')
+    expected_stdout = '1234\n'
+    assert expected_stdout == output.stdout
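These tests exercise helper functions that live in the new /opt/pihole/utils.sh, whose implementation is not part of this diff. The sketch below is a hypothetical set of bodies that would satisfy the assertions above; the real helpers shipped with the commit may differ in detail:

#!/usr/bin/env bash
# Hypothetical utils.sh sketch; the function names come from the tests above,
# the bodies are illustrative implementations only.

addOrEditKeyValPair() {
    # usage: addOrEditKeyValPair FILE KEY VALUE  -> writes or updates "KEY=VALUE"
    local file="${1}" key="${2}" value="${3}"
    if grep -q "^${key}=" "${file}" 2>/dev/null; then
        sed -i "s|^${key}=.*|${key}=${value}|" "${file}"   # replace the existing pair
    else
        echo "${key}=${value}" >> "${file}"                # append a new pair
    fi
}

addKey() {
    # usage: addKey FILE KEY  -> adds a bare key once, never duplicates it
    local file="${1}" key="${2}"
    grep -qx "${key}" "${file}" 2>/dev/null || echo "${key}" >> "${file}"
}

removeKey() {
    # usage: removeKey FILE KEY  -> drops both "KEY" and "KEY=..." lines
    local file="${1}" key="${2}"
    sed -i "/^${key}\(=\|$\)/d" "${file}"
}

getFTLAPIPort() {
    # Reads PORTFILE from pihole-FTL.conf, prints the port stored in that file,
    # and falls back to FTL's default API port 4711.
    local conf="/etc/pihole/pihole-FTL.conf" portfile port
    portfile="$(sed -n 's/^PORTFILE=//p' "${conf}" 2>/dev/null)"
    port="$(cat "${portfile:-/run/pihole-FTL.port}" 2>/dev/null)"
    echo "${port:-4711}"
}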
@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _centos_7.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _centos_8.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _debian_10.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _debian_11.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _debian_9.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _fedora_33.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _fedora_34.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _ubuntu_16.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _ubuntu_18.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _ubuntu_20.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -5,4 +5,4 @@ envlist = py38
 whitelist_externals = docker
 deps = -rrequirements.txt
 commands = docker build -f _ubuntu_21.Dockerfile -t pytest_pihole:test_container ../
-pytest {posargs:-vv -n auto} ./test_automated_install.py
+pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
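
Each per-distro tox file builds the matching Docker image and then runs the listed test modules inside it; the {posargs:-vv -n auto} placeholder means extra pytest arguments can be passed through on the command line, with "-vv -n auto" used only when none are given. For example (the ini filename below is illustrative; substitute whichever distro config you are running):

# Run only the key/value replacement test against the Debian 11 environment
tox -c _debian_11.ini -- -vv -k test_key_val_replacement_works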