Mirror of https://github.com/pi-hole/pi-hole.git (synced 2024-11-15 02:42:58 +00:00)

Merge branch 'development' into development
Commit 462457fe7f: 32 changed files with 592 additions and 333 deletions
.github/FUNDING.yml (vendored): 4 deletions
@@ -1,4 +0,0 @@
-# These are supported funding model platforms
-
-patreon: pihole
-custom: https://pi-hole.net/donate
.github/ISSUE_TEMPLATE.md (vendored): 4 changes
@@ -9,11 +9,11 @@
 `{Replace this with a number from 1 to 10. 1 being not familiar, and 10 being very familiar}`

 ---
-**Expected behaviour:**
+**Expected behavior:**

 `{A detailed description of what you expect to see}`

-**Actual behaviour:**
+**Actual behavior:**

 `{A detailed description and/or screenshots of what you do see}`

.gitignore (vendored): 2 changes
@@ -15,7 +15,7 @@ __pycache__
 # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
 # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

-# All idea files, with execptions
+# All idea files, with exceptions
 .idea
 !.idea/codeStyles/*
 !.idea/codeStyleSettings.xml
@@ -3,7 +3,7 @@ services:
 - docker
 language: python
 python:
-- "2.7"
+- "3.6"
 install:
 - pip install -r requirements.txt

@@ -1,7 +1,7 @@
-# Determine if terminal is capable of showing colours
+# Determine if terminal is capable of showing colors
 if [[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]; then
 # Bold and underline may not show up on all clients
-# If something MUST be emphasised, use both
+# If something MUST be emphasized, use both
 COL_BOLD='[1m'
 COL_ULINE='[4m'

@@ -72,7 +72,7 @@ printFunc() {

 # Remove excess characters from main text
 if [[ "$text_main_len" -gt "$text_main_max_len" ]]; then
-# Trim text without colours
+# Trim text without colors
 text_main_trim="${text_main_nocol:0:$text_main_max_len}"
 # Replace with trimmed text
 text_main="${text_main/$text_main_nocol/$text_main_trim}"

@@ -88,7 +88,7 @@ printFunc() {

 [[ "$spc_num" -le 0 ]] && spc_num="0"
 spc=$(printf "%${spc_num}s")
-#spc="${spc// /.}" # Debug: Visualise spaces
+#spc="${spc// /.}" # Debug: Visualize spaces

 printf "%s%s$spc" "$title" "$text_main"

@@ -131,7 +131,7 @@ get_init_stats() {
 printf "%s%02d:%02d:%02d\\n" "$days" "$hrs" "$mins" "$secs"
 }

-# Set Colour Codes
+# Set Color Codes
 coltable="/opt/pihole/COL_TABLE"
 if [[ -f "${coltable}" ]]; then
 source ${coltable}

@@ -269,7 +269,7 @@ get_sys_stats() {
 scr_lines="${scr_size[0]}"
 scr_cols="${scr_size[1]}"

-# Determine Chronometer size behaviour
+# Determine Chronometer size behavior
 if [[ "$scr_cols" -ge 58 ]]; then
 chrono_width="large"
 elif [[ "$scr_cols" -gt 40 ]]; then

@@ -308,7 +308,7 @@ get_sys_stats() {
 [[ "${cpu_freq}" == *".0"* ]] && cpu_freq="${cpu_freq/.0/}"
 fi

-# Determine colour for temperature
+# Determine color for temperature
 if [[ -n "$temp_file" ]]; then
 if [[ "$temp_unit" == "C" ]]; then
 cpu_temp=$(printf "%.0fc\\n" "$(calcFunc "$(< $temp_file) / 1000")")

@@ -87,4 +87,21 @@ upgrade_gravityDB(){
 sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
 version=9
 fi
+if [[ "$version" == "9" ]]; then
+# This migration drops unused tables and creates triggers to remove
+# obsolete groups assignments when the linked items are deleted
+echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
+sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+version=10
+fi
+if [[ "$version" == "10" ]]; then
+# This adds timestamp and an optional comment field to the client table
+# These fields are only temporary and will be replaces by the columns
+# defined in gravity.db.sql during gravity swapping. We add them here
+# to keep the copying process generic (needs the same columns in both the
+# source and the destination databases).
+echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
+sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+version=11
+fi
 }
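upgrade_gravityDB() walks the schema forward one version at a time, so a database at version 8 passes through 9 and 10 on its way to 11. A quick sketch (not part of the commit) to see where a given database stands before these upgrades run:

    #!/usr/bin/env bash
    # Report the stored gravity schema version; the migration runner keys off
    # the 'version' property in the info table.
    database="/etc/pihole/gravity.db"   # default path; adjust if overridden
    version="$(sqlite3 "${database}" "SELECT value FROM info WHERE property = 'version';")"
    echo "gravity.db is at schema version ${version}; this commit targets 11"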
advanced/Scripts/database_migration/gravity/10_to_11.sql (new file): 16 lines
@@ -0,0 +1,16 @@
+.timeout 30000
+
+BEGIN TRANSACTION;
+
+ALTER TABLE client ADD COLUMN date_added INTEGER;
+ALTER TABLE client ADD COLUMN date_modified INTEGER;
+ALTER TABLE client ADD COLUMN comment TEXT;
+
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+BEGIN
+UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+END;
+
+UPDATE info SET value = 11 WHERE property = 'version';
+
+COMMIT;
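SQLite's ALTER TABLE cannot add columns with non-constant defaults, so date_added/date_modified arrive here without defaults and date_modified is maintained by the trigger instead. An illustrative check that the trigger fires (the IP below is a made-up example):

    # Any UPDATE on client should refresh date_modified automatically.
    sqlite3 /etc/pihole/gravity.db "UPDATE client SET comment = 'office printer' WHERE ip = '192.168.1.42';"
    sqlite3 /etc/pihole/gravity.db "SELECT ip, datetime(date_modified,'unixepoch','localtime') FROM client;"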
advanced/Scripts/database_migration/gravity/9_to_10.sql (new file): 29 lines
@@ -0,0 +1,29 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+
+DROP TABLE IF EXISTS whitelist;
+DROP TABLE IF EXISTS blacklist;
+DROP TABLE IF EXISTS regex_whitelist;
+DROP TABLE IF EXISTS regex_blacklist;
+
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+BEGIN
+DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+END;
+
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
+BEGIN
+DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+END;
+
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+BEGIN
+DELETE FROM client_by_group WHERE client_id = OLD.id;
+END;
+
+UPDATE info SET value = 10 WHERE property = 'version';
+
+COMMIT;
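The dropped whitelist/blacklist/regex_* tables are superseded by the consolidated domainlist table (see gravity.db.sql further down), and the new delete triggers keep the *_by_group mapping tables free of orphaned rows. Illustrative effect (id 5 is a made-up example):

    # Removing a list entry also clears its group assignments via tr_adlist_delete.
    sqlite3 /etc/pihole/gravity.db "DELETE FROM adlist WHERE id = 5;"
    sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM adlist_by_group WHERE adlist_id = 5;"   # expect 0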
@@ -36,13 +36,6 @@ flushARP(){
 echo -ne " ${INFO} Flushing network table ..."
 fi

-# Flush ARP cache to avoid re-adding of dead entries
-if ! output=$(ip neigh flush all 2>&1); then
-echo -e "${OVER} ${CROSS} Failed to clear ARP cache"
-echo " Output: ${output}"
-return 1
-fi
-
 # Truncate network_addresses table in pihole-FTL.db
 # This needs to be done before we can truncate the network table due to
 # foreign key contraints
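Clearing the kernel neighbour cache is no longer part of the network-table flush; only the database tables are truncated. The removed step can still be run by hand if stale entries bother you:

    # Equivalent of the deleted block, run manually:
    sudo ip neigh flush all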
@@ -36,7 +36,7 @@ warning1() {
 return 0
 ;;
 *)
-echo -e "\\n ${INFO} Branch change has been cancelled"
+echo -e "\\n ${INFO} Branch change has been canceled"
 return 1
 ;;
 esac

@@ -84,7 +84,7 @@ checkout() {
 echo -e " ${INFO} Shortcut \"dev\" detected - checking out development / devel branches..."
 echo ""
 echo -e " ${INFO} Pi-hole Core"
-fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "development" || { echo " ${CROSS} Unable to pull Core developement branch"; exit 1; }
+fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "development" || { echo " ${CROSS} Unable to pull Core development branch"; exit 1; }
 if [[ "${INSTALL_WEB_INTERFACE}" == "true" ]]; then
 echo ""
 echo -e " ${INFO} Web interface"
@@ -138,7 +138,7 @@ PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/pihole-FTL.log"
 PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access.log"
 PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error.log"

-# An array of operating system "pretty names" that we officialy support
+# An array of operating system "pretty names" that we officially support
 # We can loop through the array at any time to see if it matches a value
 #SUPPORTED_OS=("Raspbian" "Ubuntu" "Fedora" "Debian" "CentOS")

@@ -300,7 +300,7 @@ compare_local_version_to_git_version() {
 if [[ "${remote_branch}" == "master" ]]; then
 # so the color of the text is green
 log_write "${INFO} Branch: ${COL_GREEN}${remote_branch}${COL_NC}"
-# If it is any other branch, they are in a developement branch
+# If it is any other branch, they are in a development branch
 else
 # So show that in yellow, signifying it's something to take a look at, but not a critical error
 log_write "${INFO} Branch: ${COL_YELLOW}${remote_branch:-Detached}${COL_NC} (${FAQ_CHECKOUT_COMMAND})"

@@ -357,7 +357,7 @@ check_component_versions() {

 get_program_version() {
 local program_name="${1}"
-# Create a loval variable so this function can be safely reused
+# Create a local variable so this function can be safely reused
 local program_version
 echo_current_diagnostic "${program_name} version"
 # Evalutate the program we are checking, if it is any of the ones below, show the version
@@ -662,19 +662,21 @@ ping_internet() {
 }

 compare_port_to_service_assigned() {
-local service_name="${1}"
-# The programs we use may change at some point, so they are in a varible here
-local resolver="pihole-FTL"
-local web_server="lighttpd"
-local ftl="pihole-FTL"
+local service_name
+local expected_service
+local port
+
+service_name="${2}"
+expected_service="${1}"
+port="${3}"

 # If the service is a Pi-hole service, highlight it in green
-if [[ "${service_name}" == "${resolver}" ]] || [[ "${service_name}" == "${web_server}" ]] || [[ "${service_name}" == "${ftl}" ]]; then
-log_write "[${COL_GREEN}${port_number}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
+if [[ "${service_name}" == "${expected_service}" ]]; then
+log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
 # Otherwise,
 else
 # Show the service name in red since it's non-standard
-log_write "[${COL_RED}${port_number}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
+log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
 fi
 }

@@ -708,11 +710,11 @@ check_required_ports() {
 fi
 # Use a case statement to determine if the right services are using the right ports
 case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
-53) compare_port_to_service_assigned "${resolver}"
+53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
 ;;
-80) compare_port_to_service_assigned "${web_server}"
+80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
 ;;
-4711) compare_port_to_service_assigned "${ftl}"
+4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
 ;;
 # If it's not a default port that Pi-hole needs, just print it out for the user to see
 *) log_write "${port_number} ${service_name} (${protocol_type})";
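With the rewrite, the caller now passes the expected service, the service actually bound, and the port, so the check reduces to a single equality test instead of hard-coded service names inside the function. A standalone sketch of the new three-argument contract (log_write stubbed with echo; values illustrative):

    log_write() { echo "$*"; }   # stand-in for the debug logger
    # $1 = expected service, $2 = service found on the port, $3 = port number
    compare_port_to_service_assigned "pihole-FTL" "pihole-FTL" 53   # green: expected owner
    compare_port_to_service_assigned "lighttpd" "nginx" 80          # red: unexpected service on 80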
@@ -745,7 +747,7 @@ check_x_headers() {
 # Do it for the dashboard as well, as the header is different than above
 local dashboard
 dashboard=$(curl -Is localhost/admin/ | awk '/X-Pi-hole/' | tr -d '\r')
-# Store what the X-Header shoud be in variables for comparision later
+# Store what the X-Header shoud be in variables for comparison later
 local block_page_working
 block_page_working="X-Pi-hole: A black hole for Internet advertisements."
 local dashboard_working

@@ -816,7 +818,7 @@ dig_at() {

 # First, do a dig on localhost to see if Pi-hole can use itself to block a domain
 if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @${local_address} +short "${record_type}"); then
-# If it can, show sucess
+# If it can, show success
 log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} via ${COL_CYAN}localhost$COL_NC (${local_address})"
 else
 # Otherwise, show a failure

@@ -967,7 +969,7 @@ check_name_resolution() {
 # This function can check a directory exists
 # Pi-hole has files in several places, so we will reuse this function
 dir_check() {
-# Set the first argument passed to tihs function as a named variable for better readability
+# Set the first argument passed to this function as a named variable for better readability
 local directory="${1}"
 # Display the current test that is running
 echo_current_diagnostic "contents of ${COL_CYAN}${directory}${COL_NC}"

@@ -985,14 +987,14 @@ dir_check() {
 }

 list_files_in_dir() {
-# Set the first argument passed to tihs function as a named variable for better readability
+# Set the first argument passed to this function as a named variable for better readability
 local dir_to_parse="${1}"
 # Store the files found in an array
 mapfile -t files_found < <(ls "${dir_to_parse}")
 # For each file in the array,
 for each_file in "${files_found[@]}"; do
 if [[ -d "${dir_to_parse}/${each_file}" ]]; then
-# If it's a directoy, do nothing
+# If it's a directory, do nothing
 :
 elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
 [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
@@ -1105,7 +1107,7 @@ show_db_entries() {
 }

 show_groups() {
-show_db_entries "Groups" "SELECT * FROM \"group\"" "4 4 30 50"
+show_db_entries "Groups" "SELECT id,name,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 50 7 19 19 50"
 }

 show_adlists() {

@@ -1113,18 +1115,14 @@ show_adlists() {
 show_db_entries "Adlist groups" "SELECT * FROM adlist_by_group" "4 4"
 }

-show_whitelist() {
-show_db_entries "Exact whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM whitelist" "4 100 7 19 19 50"
-show_db_entries "Exact whitelist groups" "SELECT * FROM whitelist_by_group" "4 4"
-show_db_entries "Regex whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_whitelist" "4 100 7 19 19 50"
-show_db_entries "Regex whitelist groups" "SELECT * FROM regex_whitelist_by_group" "4 4"
+show_domainlist() {
+show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,type,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist" "4 4 100 7 19 19 50"
+show_db_entries "Domainlist groups" "SELECT * FROM domainlist_by_group" "10 10"
 }

-show_blacklist() {
-show_db_entries "Exact blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM blacklist" "4 100 7 19 19 50"
-show_db_entries "Exact blacklist groups" "SELECT * FROM blacklist_by_group" "4 4"
-show_db_entries "Regex blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_blacklist" "4 100 7 19 19 50"
-show_db_entries "Regex blacklist groups" "SELECT * FROM regex_blacklist_by_group" "4 4"
+show_clients() {
+show_db_entries "Clients" "SELECT id,ip,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM client" "4 100 19 19 50"
+show_db_entries "Client groups" "SELECT * FROM client_by_group" "10 10"
 }

 analyze_gravity_list() {
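The four separate list dumps collapse into one domainlist report because the schema change below folds all four lists into a single table with a type column. The same data can be inspected directly (default path assumed, values illustrative):

    # type encodes the old four lists: 0/1 exact white-/blacklist, 2/3 regex white-/blacklist.
    sqlite3 /etc/pihole/gravity.db "SELECT id,type,domain,enabled FROM domainlist WHERE type = 3;"
    sqlite3 /etc/pihole/gravity.db "SELECT id,ip,comment FROM client;"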
@@ -1134,16 +1132,17 @@ analyze_gravity_list() {
 gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
 log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"

-local gravity_size
-gravity_size=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT COUNT(*) FROM vw_gravity")
-log_write " Size (excluding blacklist): ${COL_CYAN}${gravity_size}${COL_NC} entries"
+show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
+gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
+gravity_updated="$(date -d @"${gravity_updated_raw}")"
+log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
 log_write ""

 OLD_IFS="$IFS"
 IFS=$'\r\n'
 local gravity_sample=()
 mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
-log_write " ${COL_CYAN}----- First 10 Domains -----${COL_NC}"
+log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"

 for line in "${gravity_sample[@]}"; do
 log_write " ${line}"
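The debug log now reports when gravity last finished instead of a raw row count. The 'updated' property is stored as a Unix timestamp, which GNU date renders human-readable with its @epoch syntax (the value below is illustrative):

    # Render an epoch value in local time, exactly as the script does:
    date -d @1571700000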
@@ -1191,7 +1190,7 @@ analyze_pihole_log() {
 # So first check if there are domains in the log that should be obfuscated
 if [[ -n ${line_to_obfuscate} ]]; then
 # If there are, we need to use awk to replace only the domain name (the 6th field in the log)
-# so we substitue the domain for the placeholder value
+# so we substitute the domain for the placeholder value
 obfuscated_line=$(echo "${line_to_obfuscate}" | awk -v placeholder="${OBFUSCATED_PLACEHOLDER}" '{sub($6,placeholder); print $0}')
 log_write " ${obfuscated_line}"
 else

@@ -1239,7 +1238,7 @@ upload_to_tricorder() {
 log_write " * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
 log_write " * For more information, see: ${TRICORDER_CONTEST}"
 log_write " * If available, we'll use openssl to upload the log, otherwise it will fall back to netcat."
-# If pihole -d is running automatically (usually throught the dashboard)
+# If pihole -d is running automatically (usually through the dashboard)
 if [[ "${AUTOMATED}" ]]; then
 # let the user know
 log_write "${INFO} Debug script running in automated mode"

@@ -1301,9 +1300,9 @@ parse_setup_vars
 check_x_headers
 analyze_gravity_list
 show_groups
+show_domainlist
+show_clients
 show_adlists
-show_whitelist
-show_blacklist
 show_content_of_pihole_files
 parse_locale
 analyze_pihole_log

@@ -33,15 +33,17 @@ scanList(){
 export LC_CTYPE=C

 # /dev/null forces filename to be printed when only one list has been generated
-# shellcheck disable=SC2086
 case "${type}" in
 "exact" ) grep -i -E -l "(^|(?<!#)\\s)${esc_domain}($|\\s|#)" ${lists} /dev/null 2>/dev/null;;
-# Create array of regexps
 # Iterate through each regexp and check whether it matches the domainQuery
 # If it does, print the matching regexp and continue looping
 # Input 1 - regexps | Input 2 - domainQuery
-"regex" ) awk 'NR==FNR{regexps[$0];next}{for (r in regexps)if($0 ~ r)print r}' \
-<(echo "${lists}") <(echo "${domain}") 2>/dev/null;;
+"regex" )
+for list in ${lists}; do
+if [[ "${domain}" =~ ${list} ]]; then
+printf "%b\n" "${list}";
+fi
+done;;
 * ) grep -i "${esc_domain}" ${lists} /dev/null 2>/dev/null;;
 esac
 }

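The awk matcher gives way to bash's own regex operator, so each stored regexp is now evaluated with `[[ ... =~ ... ]]` ERE semantics. A self-contained sketch of that loop with sample values (the regexps are made-up examples):

    domain="ads.example.com"
    regexps=$'(^|\\.)example\\.com$\n^tracker[0-9]*\\.'
    while IFS= read -r regexp; do
        if [[ "${domain}" =~ ${regexp} ]]; then
            printf '%b\n' "${regexp}"   # print each matching regexp, keep looping
        fi
    done <<< "${regexps}"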
@@ -20,7 +20,7 @@ getInitSys() {
 elif [ -f /etc/init.d/cron ] && [ ! -h /etc/init.d/cron ]; then
 SYSTEMD=0
 else
-echo "Unrecognised init system"
+echo "Unrecognized init system"
 return 1
 fi
 }
@@ -198,6 +198,14 @@ main() {
 ${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
 echo -e "${basicError}" && exit 1
 fi
+
+if [[ "${FTL_update}" == true || "${core_update}" == true || "${web_update}" == true ]]; then
+# Force an update of the updatechecker
+/opt/pihole/updatecheck.sh
+/opt/pihole/updatecheck.sh x remote
+echo -e " ${INFO} Local version file information updated."
+fi
+
 echo ""
 exit 0
 }
@@ -179,7 +179,6 @@ ProcessDNSSettings() {

 if [[ "${DNSSEC}" == true ]]; then
 echo "dnssec
-trust-anchor=.,19036,8,2,49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5
 trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D
 " >> "${dnsmasqconfig}"
 fi
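The dropped trust-anchor is the retired 2010 root KSK (key tag 19036); after the root KSK rollover only KSK-2017 (key tag 20326) is required. With DNSSEC enabled, validation can be spot-checked against the local resolver (illustrative):

    # A validated answer from a signed zone carries the 'ad' flag:
    dig +dnssec pi-hole.net @127.0.0.1 | grep -E '^;; flags'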
@@ -402,22 +401,38 @@ SetWebUILayout() {
 change_setting "WEBUIBOXEDLAYOUT" "${args[2]}"
 }

+CheckUrl(){
+local regex
+# Check for characters NOT allowed in URLs
+regex="[^a-zA-Z0-9:/?&%=~._-]"
+if [[ "${1}" =~ ${regex} ]]; then
+return 1
+else
+return 0
+fi
+}
+
 CustomizeAdLists() {
 local address
 address="${args[3]}"
 local comment
 comment="${args[4]}"

-if [[ "${args[2]}" == "enable" ]]; then
-sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
-elif [[ "${args[2]}" == "disable" ]]; then
-sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
-elif [[ "${args[2]}" == "add" ]]; then
-sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
-elif [[ "${args[2]}" == "del" ]]; then
-sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
+if CheckUrl "${address}"; then
+if [[ "${args[2]}" == "enable" ]]; then
+sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
+elif [[ "${args[2]}" == "disable" ]]; then
+sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
+elif [[ "${args[2]}" == "add" ]]; then
+sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
+elif [[ "${args[2]}" == "del" ]]; then
+sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
+else
+echo "Not permitted"
+return 1
+fi
 else
-echo "Not permitted"
+echo "Invalid Url"
 return 1
 fi
 }
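CheckUrl rejects any character outside a conservative URL alphabet before the address is spliced into the unparameterized sqlite3 statements above, which closes off quote/semicolon injection. Illustrative behavior (URLs are made-up examples):

    # Quotes and semicolons fall outside [a-zA-Z0-9:/?&%=~._-]:
    CheckUrl "https://example.com/hosts.txt" && echo "accepted"
    CheckUrl "https://example.com/x'; DROP TABLE adlist;--" || echo "rejected"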
@@ -502,6 +517,13 @@ Options:
 fi

 if [[ -n "${args[2]}" ]]; then
+
+# Sanitize email address in case of security issues
+if [[ ! "${args[2]}" =~ ^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}$ ]]; then
+echo -e " ${CROSS} Invalid email address"
+exit 0
+fi
+
 change_setting "ADMIN_EMAIL" "${args[2]}"
 echo -e " ${TICK} Setting admin contact to ${args[2]}"
 else
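The pattern is a pragmatic filter rather than a full RFC 5322 validator; notably it requires a dotted domain and caps the TLD at four letters. Illustrative behavior:

    regex='^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}$'
    [[ "admin@example.com" =~ $regex ]] && echo "accepted"
    [[ "root@localhost" =~ $regex ]] || echo "rejected"    # no dotted domain part
    [[ "a@b.technology" =~ $regex ]] || echo "rejected"    # TLD longer than 4 letters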
@@ -1,16 +1,21 @@
-PRAGMA FOREIGN_KEYS=ON;
+PRAGMA foreign_keys=OFF;
+BEGIN TRANSACTION;

 CREATE TABLE "group"
 (
 id INTEGER PRIMARY KEY AUTOINCREMENT,
 enabled BOOLEAN NOT NULL DEFAULT 1,
-name TEXT NOT NULL,
+name TEXT UNIQUE NOT NULL,
+date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
 description TEXT
 );
+INSERT INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');

-CREATE TABLE whitelist
+CREATE TABLE domainlist
 (
 id INTEGER PRIMARY KEY AUTOINCREMENT,
+type INTEGER NOT NULL DEFAULT 0,
 domain TEXT UNIQUE NOT NULL,
 enabled BOOLEAN NOT NULL DEFAULT 1,
 date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
@@ -18,47 +23,6 @@ CREATE TABLE whitelist
 comment TEXT
 );

-CREATE TABLE whitelist_by_group
-(
-whitelist_id INTEGER NOT NULL REFERENCES whitelist (id),
-group_id INTEGER NOT NULL REFERENCES "group" (id),
-PRIMARY KEY (whitelist_id, group_id)
-);
-
-CREATE TABLE blacklist
-(
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-domain TEXT UNIQUE NOT NULL,
-enabled BOOLEAN NOT NULL DEFAULT 1,
-date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-comment TEXT
-);
-
-CREATE TABLE blacklist_by_group
-(
-blacklist_id INTEGER NOT NULL REFERENCES blacklist (id),
-group_id INTEGER NOT NULL REFERENCES "group" (id),
-PRIMARY KEY (blacklist_id, group_id)
-);
-
-CREATE TABLE regex
-(
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-domain TEXT UNIQUE NOT NULL,
-enabled BOOLEAN NOT NULL DEFAULT 1,
-date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-comment TEXT
-);
-
-CREATE TABLE regex_by_group
-(
-regex_id INTEGER NOT NULL REFERENCES regex (id),
-group_id INTEGER NOT NULL REFERENCES "group" (id),
-PRIMARY KEY (regex_id, group_id)
-);
-
 CREATE TABLE adlist
 (
 id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -78,7 +42,8 @@ CREATE TABLE adlist_by_group

 CREATE TABLE gravity
 (
-domain TEXT PRIMARY KEY
+domain TEXT NOT NULL,
+adlist_id INTEGER NOT NULL REFERENCES adlist (id)
 );

 CREATE TABLE info
@@ -87,56 +52,137 @@ CREATE TABLE info
 value TEXT NOT NULL
 );

-INSERT INTO info VALUES("version","1");
+INSERT INTO "info" VALUES('version','11');

-CREATE VIEW vw_whitelist AS SELECT DISTINCT domain
-FROM whitelist
-LEFT JOIN whitelist_by_group ON whitelist_by_group.whitelist_id = whitelist.id
-LEFT JOIN "group" ON "group".id = whitelist_by_group.group_id
-WHERE whitelist.enabled = 1 AND (whitelist_by_group.group_id IS NULL OR "group".enabled = 1)
-ORDER BY whitelist.id;
+CREATE TABLE domain_audit
+(
+id INTEGER PRIMARY KEY AUTOINCREMENT,
+domain TEXT UNIQUE NOT NULL,
+date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
+);

-CREATE TRIGGER tr_whitelist_update AFTER UPDATE ON whitelist
-BEGIN
-UPDATE whitelist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
-END;
+CREATE TABLE domainlist_by_group
+(
+domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
+group_id INTEGER NOT NULL REFERENCES "group" (id),
+PRIMARY KEY (domainlist_id, group_id)
+);

-CREATE VIEW vw_blacklist AS SELECT DISTINCT domain
-FROM blacklist
-LEFT JOIN blacklist_by_group ON blacklist_by_group.blacklist_id = blacklist.id
-LEFT JOIN "group" ON "group".id = blacklist_by_group.group_id
-WHERE blacklist.enabled = 1 AND (blacklist_by_group.group_id IS NULL OR "group".enabled = 1)
-ORDER BY blacklist.id;
+CREATE TABLE client
+(
+id INTEGER PRIMARY KEY AUTOINCREMENT,
+ip TEXT NOL NULL UNIQUE,
+date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+comment TEXT
+);

-CREATE TRIGGER tr_blacklist_update AFTER UPDATE ON blacklist
-BEGIN
-UPDATE blacklist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
-END;
+CREATE TABLE client_by_group
+(
+client_id INTEGER NOT NULL REFERENCES client (id),
+group_id INTEGER NOT NULL REFERENCES "group" (id),
+PRIMARY KEY (client_id, group_id)
+);

-CREATE VIEW vw_regex AS SELECT DISTINCT domain
-FROM regex
-LEFT JOIN regex_by_group ON regex_by_group.regex_id = regex.id
-LEFT JOIN "group" ON "group".id = regex_by_group.group_id
-WHERE regex.enabled = 1 AND (regex_by_group.group_id IS NULL OR "group".enabled = 1)
-ORDER BY regex.id;
-
-CREATE TRIGGER tr_regex_update AFTER UPDATE ON regex
-BEGIN
-UPDATE regex SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
-END;
-
-CREATE VIEW vw_adlist AS SELECT DISTINCT address
-FROM adlist
-LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
-LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
-WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
-ORDER BY adlist.id;

 CREATE TRIGGER tr_adlist_update AFTER UPDATE ON adlist
 BEGIN
 UPDATE adlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE address = NEW.address;
 END;

-CREATE VIEW vw_gravity AS SELECT domain
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+BEGIN
+UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE ip = NEW.ip;
+END;
+
+CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
+BEGIN
+UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+END;
+
+CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+FROM domainlist
+LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+AND domainlist.type = 0
+ORDER BY domainlist.id;
+
+CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+FROM domainlist
+LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+AND domainlist.type = 1
+ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+FROM domainlist
+LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+AND domainlist.type = 2
+ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+FROM domainlist
+LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+AND domainlist.type = 3
+ORDER BY domainlist.id;
+
+CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
 FROM gravity
-WHERE domain NOT IN (SELECT domain from vw_whitelist);
+LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
+LEFT JOIN adlist ON adlist.id = gravity.adlist_id
+LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
+
+CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
+FROM adlist
+LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
+LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ORDER BY adlist.id;
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
+BEGIN
+INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
+END;
+
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+BEGIN
+INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+END;
+
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
+BEGIN
+INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
+END;
+
+CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
+BEGIN
+UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+END;
+
+CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
+BEGIN
+INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
+END;
+
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+BEGIN
+DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+END;
+
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
+BEGIN
+DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+END;
+
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+BEGIN
+DELETE FROM client_by_group WHERE client_id = OLD.id;
+END;
+
+COMMIT;

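With the type column and per-type views in place, consumers select one view per list kind, and vw_gravity now also carries the owning adlist's group for per-client filtering. Illustrative reads against the rebuilt schema (default path assumed):

    sqlite3 /etc/pihole/gravity.db "SELECT domain FROM vw_regex_blacklist;"
    sqlite3 /etc/pihole/gravity.db "SELECT domain, group_id FROM vw_gravity LIMIT 5;"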
advanced/Templates/gravity_copy.sql (new file): 42 lines
@@ -0,0 +1,42 @@
+.timeout 30000
+
+ATTACH DATABASE '/etc/pihole/gravity.db' AS OLD;
+
+BEGIN TRANSACTION;
+
+DROP TRIGGER tr_domainlist_add;
+DROP TRIGGER tr_client_add;
+DROP TRIGGER tr_adlist_add;
+
+INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
+INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
+
+INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
+INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
+
+INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
+INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
+
+INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
+
+INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
+INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
+
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
+BEGIN
+INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
+END;
+
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+BEGIN
+INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+END;
+
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
+BEGIN
+INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
+END;
+
+
+COMMIT;
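This is the swap-in copy the 10-to-11 migration comment alludes to: a fresh database is built from gravity.db.sql, the live database is ATTACHed as OLD, and user data is bulk-copied across. The *_add triggers are dropped first so INSERT OR REPLACE does not create default group assignments mid-copy, then recreated. A sketch of the intended invocation (the temp-file path is an assumption for illustration):

    # Run the copy script against the freshly created database:
    sqlite3 /etc/pihole/gravity.db_temp < /etc/.pihole/advanced/Templates/gravity_copy.sql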
@@ -14,7 +14,7 @@
 #bpOutput.add:before { content: "Info"; }
 #bpOutput.add:after { content: "The domain is being whitelisted..."; }
 #bpOutput.error:before, .unhandled:before { content: "Error"; }
-#bpOutput.unhandled:after { content: "An unhandled exception occured. This may happen when your browser is unable to load jQuery, or when the webserver is denying access to the Pi-hole API."; }
+#bpOutput.unhandled:after { content: "An unhandled exception occurred. This may happen when your browser is unable to load jQuery, or when the webserver is denying access to the Pi-hole API."; }
 #bpOutput.success:before { content: "Success"; }
 #bpOutput.success:after { content: "Website has been whitelisted! You may need to flush your DNS cache"; }

@@ -325,7 +325,7 @@ main {
 box-shadow: inset 0 3px 5px rgba(0,0,0,0.125);
 }

-/* Input border colour */
+/* Input border color */
 .buttons *:not([disabled]):hover, .buttons input:focus {
 border-color: rgba(0,0,0,0.25);
 }
@@ -46,7 +46,7 @@
 #resolv-file=

 # By default, dnsmasq will send queries to any of the upstream
-# servers it knows about and tries to favour servers to are known
+# servers it knows about and tries to favor servers to are known
 # to be up. Uncommenting this forces dnsmasq to try each query
 # with each server strictly in the order they appear in
 # /etc/resolv.conf

@@ -189,7 +189,7 @@
 # add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
 # hosts. Use the DHCPv4 lease to derive the name, network segment and
 # MAC address and assume that the host will also have an
-# IPv6 address calculated using the SLAAC alogrithm.
+# IPv6 address calculated using the SLAAC algorithm.
 #dhcp-range=1234::, ra-names

 # Do Router Advertisements, BUT NOT DHCP for this subnet.

@@ -210,7 +210,7 @@
 #dhcp-range=1234::, ra-stateless, ra-names

 # Do router advertisements for all subnets where we're doing DHCPv6
-# Unless overriden by ra-stateless, ra-names, et al, the router
+# Unless overridden by ra-stateless, ra-names, et al, the router
 # advertisements will have the M and O bits set, so that the clients
 # get addresses and configuration from DHCPv6, and the A bit reset, so the
 # clients don't use SLAAC addresses.

@@ -281,7 +281,7 @@
 # Give a fixed IPv6 address and name to client with
 # DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
 # Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
-# Note also the they [] around the IPv6 address are obilgatory.
+# Note also the they [] around the IPv6 address are obligatory.
 #dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]

 # Ignore any clients which are not specified in dhcp-host lines

@@ -404,14 +404,14 @@
 #dhcp-option=vendor:MSFT,2,1i

 # Send the Encapsulated-vendor-class ID needed by some configurations of
-# Etherboot to allow is to recognise the DHCP server.
+# Etherboot to allow is to recognize the DHCP server.
 #dhcp-option=vendor:Etherboot,60,"Etherboot"

 # Send options to PXELinux. Note that we need to send the options even
 # though they don't appear in the parameter request list, so we need
 # to use dhcp-option-force here.
 # See http://syslinux.zytor.com/pxe.php#special for details.
-# Magic number - needed before anything else is recognised
+# Magic number - needed before anything else is recognized
 #dhcp-option-force=208,f1:00:74:7e
 # Configuration file name
 #dhcp-option-force=209,configs/common

|
@ -6,7 +6,7 @@
|
||||||
* This file is copyright under the latest version of the EUPL.
|
* This file is copyright under the latest version of the EUPL.
|
||||||
* Please see LICENSE file for your rights under this license. */
|
* Please see LICENSE file for your rights under this license. */
|
||||||
|
|
||||||
// Sanitise HTTP_HOST output
|
// Sanitize HTTP_HOST output
|
||||||
$serverName = htmlspecialchars($_SERVER["HTTP_HOST"]);
|
$serverName = htmlspecialchars($_SERVER["HTTP_HOST"]);
|
||||||
// Remove external ipv6 brackets if any
|
// Remove external ipv6 brackets if any
|
||||||
$serverName = preg_replace('/^\[(.*)\]$/', '${1}', $serverName);
|
$serverName = preg_replace('/^\[(.*)\]$/', '${1}', $serverName);
|
||||||
|
@ -68,7 +68,7 @@ if ($serverName === "pi.hole") {
|
||||||
// Unset variables so as to not be included in $landPage
|
// Unset variables so as to not be included in $landPage
|
||||||
unset($serverName, $svPasswd, $svEmail, $authorizedHosts, $validExtTypes, $currentUrlExt, $viewPort);
|
unset($serverName, $svPasswd, $svEmail, $authorizedHosts, $validExtTypes, $currentUrlExt, $viewPort);
|
||||||
|
|
||||||
// Render splash/landing page when directly browsing via IP or authorised hostname
|
// Render splash/landing page when directly browsing via IP or authorized hostname
|
||||||
exit($renderPage);
|
exit($renderPage);
|
||||||
} elseif ($currentUrlExt === "js") {
|
} elseif ($currentUrlExt === "js") {
|
||||||
// Serve Pi-hole Javascript for blocked domains requesting JS
|
// Serve Pi-hole Javascript for blocked domains requesting JS
|
||||||
|
@ -96,12 +96,6 @@ if ($serverName === "pi.hole") {
|
||||||
// Define admin email address text based off $svEmail presence
|
// Define admin email address text based off $svEmail presence
|
||||||
$bpAskAdmin = !empty($svEmail) ? '<a href="mailto:'.$svEmail.'?subject=Site Blocked: '.$serverName.'"></a>' : "<span/>";
|
$bpAskAdmin = !empty($svEmail) ? '<a href="mailto:'.$svEmail.'?subject=Site Blocked: '.$serverName.'"></a>' : "<span/>";
|
||||||
|
|
||||||
// Determine if at least one block list has been generated
|
|
||||||
$blocklistglob = glob("/etc/pihole/list.0.*.domains");
|
|
||||||
if ($blocklistglob === array()) {
|
|
||||||
die("[ERROR] There are no domain lists generated lists within <code>/etc/pihole/</code>! Please update gravity by running <code>pihole -g</code>, or repair Pi-hole using <code>pihole -r</code>.");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get possible non-standard location of FTL's database
|
// Get possible non-standard location of FTL's database
|
||||||
$FTLsettings = parse_ini_file("/etc/pihole/pihole-FTL.conf");
|
$FTLsettings = parse_ini_file("/etc/pihole/pihole-FTL.conf");
|
||||||
if (isset($FTLsettings["GRAVITYDB"])) {
|
if (isset($FTLsettings["GRAVITYDB"])) {
|
||||||
|
@ -215,7 +209,7 @@ $phVersion = exec("cd /etc/.pihole/ && git describe --long --tags");
|
||||||
if (explode("-", $phVersion)[1] != "0")
|
if (explode("-", $phVersion)[1] != "0")
|
||||||
$execTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];
|
$execTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];
|
||||||
|
|
||||||
// Please Note: Text is added via CSS to allow an admin to provide a localised
|
// Please Note: Text is added via CSS to allow an admin to provide a localized
|
||||||
// language without the need to edit this file
|
// language without the need to edit this file
|
||||||
|
|
||||||
setHeader();
|
setHeader();
|
||||||
|
|
|
@@ -184,7 +184,7 @@ if is_command apt-get ; then
 # A variable to store the command used to update the package cache
 UPDATE_PKG_CACHE="${PKG_MANAGER} update"
 # An array for something...
-PKG_INSTALL=("${PKG_MANAGER}" --yes --no-install-recommends install)
+PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install)
 # grep -c will return 1 retVal on 0 matches, block this throwing the set -e with an OR TRUE
 PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true"
 # Some distros vary slightly so these fixes for dependencies may apply

@@ -244,10 +244,10 @@ if is_command apt-get ; then
 # These programs are stored in an array so they can be looped through later
 INSTALLER_DEPS=(dhcpcd5 git "${iproute_pkg}" whiptail)
 # Pi-hole itself has several dependencies that also need to be installed
-PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data resolvconf libcap2)
+PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data libcap2)
 # The Web dashboard has some that also need to be installed
 # It's useful to separate the two since our repos are also setup as "Core" code and "Web" code
-PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "${phpVer}-intl")
+PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "php-intl")
 # The Web server user,
 LIGHTTPD_USER="www-data"
 # group,

@@ -286,7 +286,7 @@ elif is_command rpm ; then
 PKG_INSTALL=("${PKG_MANAGER}" install -y)
 PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
 INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig)
-PIHOLE_DEPS=(bind-utils cronie curl findutils nmap-ncat sudo unzip wget libidn2 psmisc sqlite libcap)
+PIHOLE_DEPS=(bind-utils cronie curl findutils nmap-ncat sudo unzip libidn2 psmisc sqlite libcap)
 PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
 LIGHTTPD_USER="lighttpd"
 LIGHTTPD_GROUP="lighttpd"

@@ -428,7 +428,7 @@ make_repo() {
 git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $?
 # Move into the directory that was passed as an argument
 pushd "${directory}" &> /dev/null || return 1
-# Check current branch. If it is master, then reset to the latest availible tag.
+# Check current branch. If it is master, then reset to the latest available tag.
 # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
 curBranch=$(git rev-parse --abbrev-ref HEAD)
 if [[ "${curBranch}" == "master" ]]; then #If we're calling make_repo() then it should always be master, we may not need to check.

@@ -456,7 +456,7 @@ update_repo() {
 # Again, it's useful to store these in variables in case we need to reuse or change the message;
 # we only need to make one change here
 local str="Update repo in ${1}"
 # Move into the directory that was passed as an argument
 pushd "${directory}" &> /dev/null || return 1
 # Let the user know what's happening
 printf " %b %s..." "${INFO}" "${str}"

@@ -465,7 +465,7 @@ update_repo() {
 git clean --quiet --force -d || true # Okay for already clean directory
 # Pull the latest commits
 git pull --quiet &> /dev/null || return $?
-# Check current branch. If it is master, then reset to the latest availible tag.
+# Check current branch. If it is master, then reset to the latest available tag.
 # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
 curBranch=$(git rev-parse --abbrev-ref HEAD)
 if [[ "${curBranch}" == "master" ]]; then

@@ -528,7 +528,7 @@ resetRepo() {
 printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
 # Return to where we came from
 popd &> /dev/null || return 1
 # Returning success anyway?
 return 0
 }

@@ -818,13 +818,13 @@ It is also possible to use a DHCP reservation, but if you are going to do that,

 # Ask for the IPv4 address
 IPV4_ADDRESS=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 address" --inputbox "Enter your desired IPv4 address" "${r}" "${c}" "${IPV4_ADDRESS}" 3>&1 1>&2 2>&3) || \
-# Cancelling IPv4 settings window
+# Canceling IPv4 settings window
 { ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
 printf " %b Your static IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"

 # Ask for the gateway
 IPv4gw=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 gateway (router)" --inputbox "Enter your desired IPv4 default gateway" "${r}" "${c}" "${IPv4gw}" 3>&1 1>&2 2>&3) || \
-# Cancelling gateway settings window
+# Canceling gateway settings window
 { ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
 printf " %b Your static IPv4 gateway: %s\\n" "${INFO}" "${IPv4gw}"
@@ -854,7 +854,7 @@ setDHCPCD() {
 echo "interface ${PIHOLE_INTERFACE}
 static ip_address=${IPV4_ADDRESS}
 static routers=${IPv4gw}
-static domain_name_servers=127.0.0.1" | tee -a /etc/dhcpcd.conf >/dev/null
+static domain_name_servers=${PIHOLE_DNS_1} ${PIHOLE_DNS_2}" | tee -a /etc/dhcpcd.conf >/dev/null
 # Then use the ip command to immediately set the new address
 ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}"
 # Also give a warning that the user may need to reboot their system
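Note: with the replacement line in place, setDHCPCD() now writes the user's chosen upstream resolvers instead of hardcoding 127.0.0.1. A runnable sketch of the same heredoc-style append, using invented values and a throwaway file rather than /etc/dhcpcd.conf:

```
# Sketch with invented example values; mirrors the setDHCPCD() echo above
PIHOLE_INTERFACE="eth0"; IPV4_ADDRESS="192.168.1.2/24"; IPv4gw="192.168.1.1"
PIHOLE_DNS_1="8.8.8.8"; PIHOLE_DNS_2="8.8.4.4"
echo "interface ${PIHOLE_INTERFACE}
static ip_address=${IPV4_ADDRESS}
static routers=${IPv4gw}
static domain_name_servers=${PIHOLE_DNS_1} ${PIHOLE_DNS_2}" | tee -a /tmp/demo_dhcpcd.conf >/dev/null
cat /tmp/demo_dhcpcd.conf
```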
@@ -1211,8 +1211,7 @@ chooseBlocklists() {
 MalwareDom "MalwareDomains" on
 Cameleon "Cameleon" on
 DisconTrack "Disconnect.me Tracking" on
-DisconAd "Disconnect.me Ads" on
-HostsFile "Hosts-file.net Ads" on)
+DisconAd "Disconnect.me Ads" on)

 # In a variable, show the choices available; exit if Cancel is selected
 choices=$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty) || { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; rm "${adlistFile}" ;exit 1; }

@@ -1231,10 +1230,9 @@ appendToListsFile() {
 case $1 in
 StevenBlack ) echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}";;
 MalwareDom ) echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}";;
-Cameleon ) echo "http://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
+Cameleon ) echo "https://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
 DisconTrack ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}";;
 DisconAd ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}";;
-HostsFile ) echo "https://hosts-file.net/ad_servers.txt" >> "${adlistFile}";;
 esac
 }
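Note: each tag the user leaves checked is passed through appendToListsFile, so with all five remaining defaults selected the adlist file ends up as one URL per line. A sketch reproducing that result into a made-up temp file:

```
# Reproduce the five default entries into a throwaway file (path is invented)
adlistFile="/tmp/demo.adlist"
for url in \
    "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" \
    "https://mirror1.malwaredomains.com/files/justdomains" \
    "https://sysctl.org/cameleon/hosts" \
    "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" \
    "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt"; do
    echo "${url}" >> "${adlistFile}"
done
```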
@@ -2227,15 +2225,6 @@ FTLinstall() {
 local str="Downloading and Installing FTL"
 printf " %b %s..." "${INFO}" "${str}"

-# Find the latest version tag for FTL
-latesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep "Location" | awk -F '/' '{print $NF}')
-# Tags should always start with v, check for that.
-if [[ ! "${latesttag}" == v* ]]; then
-printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
-printf " %bError: Unable to get latest release location from GitHub%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-return 1
-fi
-
 # Move into the temp ftl directory
 pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }

@@ -2256,7 +2245,7 @@ FTLinstall() {

 # Determine which version of FTL to download
 if [[ "${ftlBranch}" == "master" ]];then
-url="https://github.com/pi-hole/FTL/releases/download/${latesttag%$'\r'}"
+url="https://github.com/pi-hole/ftl/releases/latest/download"
 else
 url="https://ftl.pi-hole.net/${ftlBranch}"
 fi
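Note: `releases/latest/download/<asset>` is GitHub's stable convenience URL; it redirects to the newest tagged asset, which is why the tag lookup removed above is no longer needed for the master branch. A hedged sketch (the asset name is an assumed example, not taken from this diff):

```
# Follow GitHub's redirect straight to the newest tagged asset
curl -sSL -o /tmp/pihole-FTL \
    "https://github.com/pi-hole/FTL/releases/latest/download/pihole-FTL-linux-x86_64"
```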
@@ -2467,17 +2456,14 @@ FTLcheckUpdate() {
 if [[ ${ftlLoc} ]]; then
 local FTLversion
 FTLversion=$(/usr/bin/pihole-FTL tag)
-local FTLreleaseData
 local FTLlatesttag

-if ! FTLreleaseData=$(curl -sI https://github.com/pi-hole/FTL/releases/latest); then
+if ! FTLlatesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep --color=never -i Location | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
 # There was an issue while retrieving the latest version
 printf " %b Failed to retrieve latest FTL release metadata" "${CROSS}"
 return 3
 fi

-FTLlatesttag=$(grep 'Location' <<< "${FTLreleaseData}" | awk -F '/' '{print $NF}' | tr -d '\r\n')
-
 if [[ "${FTLversion}" != "${FTLlatesttag}" ]]; then
 return 0
 else
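Note: the new one-liner folds the old two-step lookup into a single pipeline over the redirect headers. A standalone sketch of what each stage contributes (the tag shown in the comment is an invented example):

```
# curl -sI fetches only the response headers, which include something like
#   location: https://github.com/pi-hole/FTL/releases/tag/v5.0   (invented tag)
# awk keeps the last "/"-separated field and tr strips the trailing CR
latest="$(curl -sI https://github.com/pi-hole/FTL/releases/latest \
    | grep --color=never -i Location \
    | awk -F / '{print $NF}' \
    | tr -d '[:cntrl:]')"
echo "Latest FTL tag: ${latest}"
```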
@@ -14,8 +14,8 @@ while true; do
 read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " yn
 case ${yn} in
 [Yy]* ) break;;
-[Nn]* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been cancelled${COL_NC}"; exit 0;;
-* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been cancelled${COL_NC}"; exit 0;;
+[Nn]* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
+* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
 esac
 done

@@ -52,7 +52,7 @@ if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
 DEPS+=("${PIHOLE_WEB_DEPS[@]}")
 fi

-# Compatability
+# Compatibility
 if [ -x "$(command -v apt-get)" ]; then
 # Debian Family
 PKG_REMOVE=("${PKG_MANAGER}" -y remove --purge)
292 gravity.sh
@@ -36,7 +36,9 @@ VPNList="/etc/openvpn/ipp.txt"

 piholeGitDir="/etc/.pihole"
 gravityDBfile="${piholeDir}/gravity.db"
+gravityTEMPfile="${piholeDir}/gravity_temp.db"
 gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
+gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"
 optimize_database=false

 domainsExtension="domains"

@@ -80,12 +82,45 @@ fi

 # Generate new sqlite3 file from schema template
 generate_gravity_database() {
-sqlite3 "${gravityDBfile}" < "${gravityDBschema}"
+sqlite3 "${1}" < "${gravityDBschema}"
 }

+# Copy data from old to new database file and swap them
+gravity_swap_databases() {
+local str
+str="Building tree"
+echo -ne " ${INFO} ${str}..."
+
+# The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
+output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
+status="$?"
+
+if [[ "${status}" -ne 0 ]]; then
+echo -e "\\n ${CROSS} Unable to build gravity tree in ${gravityTEMPfile}\\n ${output}"
+return 1
+fi
+echo -e "${OVER} ${TICK} ${str}"
+
+str="Swapping databases"
+echo -ne " ${INFO} ${str}..."
+
+output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBcopy}"; } 2>&1 )
+status="$?"
+
+if [[ "${status}" -ne 0 ]]; then
+echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
+return 1
+fi
+echo -e "${OVER} ${TICK} ${str}"
+
+# Swap databases and remove old database
+rm "${gravityDBfile}"
+mv "${gravityTEMPfile}" "${gravityDBfile}"
+}
+
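Note: building into gravity_temp.db and swapping only at the end means a failed run never corrupts the live database. A minimal standalone sketch of the same build-then-swap pattern, using invented paths and a reduced schema:

```
live="/tmp/demo_gravity.db"
temp="/tmp/demo_gravity_temp.db"

# Build the replacement database off to the side
sqlite3 "${temp}" "CREATE TABLE gravity (domain TEXT, adlist_id INTEGER);"
sqlite3 "${temp}" "INSERT INTO gravity VALUES ('ads.example.com', 1);"
# Index after the bulk insert; NOT unique, since duplicates across adlists are expected
sqlite3 "${temp}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"

# Swap: readers see either the old or the new file, never a half-built one
rm -f "${live}"
mv "${temp}" "${live}"
sqlite3 "${live}" "SELECT COUNT(*) FROM gravity;"
```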
+# Update timestamp when the gravity table was last updated successfully
 update_gravity_timestamp() {
-# Update timestamp when the gravity table was last updated successfully
-output=$( { sqlite3 "${gravityDBfile}" <<< "INSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%s', 'now') as int));"; } 2>&1 )
+output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
 status="$?"

 if [[ "${status}" -ne 0 ]]; then
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
database_truncate_table() {
|
|
||||||
local table
|
|
||||||
table="${1}"
|
|
||||||
|
|
||||||
output=$( { sqlite3 "${gravityDBfile}" <<< "DELETE FROM ${table};"; } 2>&1 )
|
|
||||||
status="$?"
|
|
||||||
|
|
||||||
if [[ "${status}" -ne 0 ]]; then
|
|
||||||
echo -e "\\n ${CROSS} Unable to truncate ${table} database ${gravityDBfile}\\n ${output}"
|
|
||||||
gravity_Cleanup "error"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Import domains from file and store them in the specified database table
|
# Import domains from file and store them in the specified database table
|
||||||
database_table_from_file() {
|
database_table_from_file() {
|
||||||
# Define locals
|
# Define locals
|
||||||
local table source backup_path backup_file arg
|
local table source backup_path backup_file tmpFile type
|
||||||
table="${1}"
|
table="${1}"
|
||||||
source="${2}"
|
source="${2}"
|
||||||
arg="${3}"
|
|
||||||
backup_path="${piholeDir}/migration_backup"
|
backup_path="${piholeDir}/migration_backup"
|
||||||
backup_file="${backup_path}/$(basename "${2}")"
|
backup_file="${backup_path}/$(basename "${2}")"
|
||||||
|
|
||||||
# Truncate table only if not gravity (we add multiple times to this table)
|
|
||||||
if [[ "${table}" != "gravity" ]]; then
|
|
||||||
database_truncate_table "${table}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
local tmpFile
|
|
||||||
tmpFile="$(mktemp -p "/tmp" --suffix=".gravity")"
|
tmpFile="$(mktemp -p "/tmp" --suffix=".gravity")"
|
||||||
|
|
||||||
local timestamp
|
local timestamp
|
||||||
timestamp="$(date --utc +'%s')"
|
timestamp="$(date --utc +'%s')"
|
||||||
local inputfile
|
|
||||||
# Apply format for white-, blacklist, regex, and adlist tables
|
|
||||||
# Read file line by line
|
|
||||||
local rowid
|
local rowid
|
||||||
declare -i rowid
|
declare -i rowid
|
||||||
rowid=1
|
rowid=1
|
||||||
|
|
||||||
if [[ "${table}" == "gravity" ]]; then
|
# Special handling for domains to be imported into the common domainlist table
|
||||||
#Append ,${arg} to every line and then remove blank lines before import
|
if [[ "${table}" == "whitelist" ]]; then
|
||||||
sed -e "s/$/,${arg}/" "${source}" > "${tmpFile}"
|
type="0"
|
||||||
sed -i '/^$/d' "${tmpFile}"
|
table="domainlist"
|
||||||
else
|
elif [[ "${table}" == "blacklist" ]]; then
|
||||||
grep -v '^ *#' < "${source}" | while IFS= read -r domain
|
type="1"
|
||||||
do
|
table="domainlist"
|
||||||
# Only add non-empty lines
|
elif [[ "${table}" == "regex" ]]; then
|
||||||
if [[ -n "${domain}" ]]; then
|
type="3"
|
||||||
if [[ "${table}" == "domain_audit" ]]; then
|
table="domainlist"
|
||||||
# domain_audit table format (no enable or modified fields)
|
|
||||||
echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
|
|
||||||
else
|
|
||||||
# White-, black-, and regexlist format
|
|
||||||
echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
|
|
||||||
fi
|
|
||||||
rowid+=1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
fi
|
fi
|
||||||
inputfile="${tmpFile}"
|
|
||||||
|
|
||||||
# Remove possible duplicates found in lower-quality adlists
|
# Get MAX(id) from domainlist when INSERTing into this table
|
||||||
sort -u -o "${inputfile}" "${inputfile}"
|
if [[ "${table}" == "domainlist" ]]; then
|
||||||
|
rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
|
||||||
|
if [[ -z "$rowid" ]]; then
|
||||||
|
rowid=0
|
||||||
|
fi
|
||||||
|
rowid+=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Loop over all domains in ${source} file
|
||||||
|
# Read file line by line
|
||||||
|
grep -v '^ *#' < "${source}" | while IFS= read -r domain
|
||||||
|
do
|
||||||
|
# Only add non-empty lines
|
||||||
|
if [[ -n "${domain}" ]]; then
|
||||||
|
if [[ "${table}" == "domain_audit" ]]; then
|
||||||
|
# domain_audit table format (no enable or modified fields)
|
||||||
|
echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
|
||||||
|
elif [[ "${table}" == "adlist" ]]; then
|
||||||
|
# Adlist table format
|
||||||
|
echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
|
||||||
|
else
|
||||||
|
# White-, black-, and regexlist table format
|
||||||
|
echo "${rowid},${type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
|
||||||
|
fi
|
||||||
|
rowid+=1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
# Store domains in database table specified by ${table}
|
# Store domains in database table specified by ${table}
|
||||||
# Use printf as .mode and .import need to be on separate lines
|
# Use printf as .mode and .import need to be on separate lines
|
||||||
# see https://unix.stackexchange.com/a/445615/83260
|
# see https://unix.stackexchange.com/a/445615/83260
|
||||||
output=$( { printf ".timeout 10000\\n.mode csv\\n.import \"%s\" %s\\n" "${inputfile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||||
status="$?"
|
status="$?"
|
||||||
|
|
||||||
if [[ "${status}" -ne 0 ]]; then
|
if [[ "${status}" -ne 0 ]]; then
|
||||||
echo -e "\\n ${CROSS} Unable to fill table ${table} in database ${gravityDBfile}\\n ${output}"
|
echo -e "\\n ${CROSS} Unable to fill table ${table}${type} in database ${gravityDBfile}\\n ${output}"
|
||||||
gravity_Cleanup "error"
|
gravity_Cleanup "error"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Delete tmpfile
|
|
||||||
rm "${tmpFile}" > /dev/null 2>&1 || \
|
|
||||||
echo -e " ${CROSS} Unable to remove ${tmpFile}"
|
|
||||||
|
|
||||||
# Move source file to backup directory, create directory if not existing
|
# Move source file to backup directory, create directory if not existing
|
||||||
mkdir -p "${backup_path}"
|
mkdir -p "${backup_path}"
|
||||||
mv "${source}" "${backup_file}" 2> /dev/null || \
|
mv "${source}" "${backup_file}" 2> /dev/null || \
|
||||||
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
||||||
|
|
||||||
|
# Delete tmpFile
|
||||||
|
rm "${tmpFile}" > /dev/null 2>&1 || \
|
||||||
|
echo -e " ${CROSS} Unable to remove ${tmpFile}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Migrate pre-v5.0 list files to database-based Pi-hole versions
|
# Migrate pre-v5.0 list files to database-based Pi-hole versions
|
||||||
|
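Note: every migrated row is plain CSV, so the round-trip is easy to demonstrate in isolation. A sketch of one whitelist (type 0) row and the `.import` call that consumes it; paths, domain, and timestamp are invented, and the CREATE TABLE is a simplified stand-in for the real gravity schema:

```
db="/tmp/demo_gravity.db"
sqlite3 "${db}" "CREATE TABLE domainlist (id INTEGER, type INTEGER, domain TEXT, enabled INTEGER, date_added INTEGER, date_modified INTEGER, comment TEXT);"

# id=1, type=0 (whitelist), enabled=1, added/modified at an invented timestamp
echo '1,0,"allowed.example.com",1,1577836800,1577836800,"Migrated from /etc/pihole/whitelist.txt"' > /tmp/demo.csv

# .mode and .import must arrive as separate lines on sqlite3's stdin, hence printf
printf ".timeout 30000\n.mode csv\n.import \"%s\" domainlist\n" /tmp/demo.csv \
    | sqlite3 "${db}"
sqlite3 "${db}" "SELECT domain, type FROM domainlist;"
```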
@@ -188,7 +215,10 @@ migrate_to_database() {
 if [ ! -e "${gravityDBfile}" ]; then
 # Create new database file - note that this will be created in version 1
 echo -e " ${INFO} Creating new gravity database"
-generate_gravity_database
+generate_gravity_database "${gravityDBfile}"

+# Check if gravity database needs to be updated
+upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
+
 # Migrate list files to new database
 if [ -e "${adListFile}" ]; then

@@ -241,7 +271,7 @@ gravity_CheckDNSResolutionAvailable() {
 fi

 # If the /etc/resolv.conf contains resolvers other than 127.0.0.1 then the local dnsmasq will not be queried and pi.hole is NXDOMAIN.
-# This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventualy fails
+# This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventually fails
 # So we check the output of the last command and if it failed, attempt to use dig +short as a fallback
 if timeout 4 dig +short "${lookupDomain}" &> /dev/null; then
 if [[ -n "${secs:-}" ]]; then
@@ -306,16 +336,25 @@ gravity_DownloadBlocklists() {
 return 1
 fi

-local url domain agent cmd_ext str
+local url domain agent cmd_ext str target
 echo ""

-# Flush gravity table once before looping over sources
-str="Flushing gravity table"
+# Prepare new gravity database
+str="Preparing new gravity database"
 echo -ne " ${INFO} ${str}..."
-if database_truncate_table "gravity"; then
+rm "${gravityTEMPfile}" > /dev/null 2>&1
+output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
+status="$?"
+
+if [[ "${status}" -ne 0 ]]; then
+echo -e "\\n ${CROSS} Unable to create new database ${gravityTEMPfile}\\n ${output}"
+gravity_Cleanup "error"
+else
 echo -e "${OVER} ${TICK} ${str}"
 fi

+target="$(mktemp -p "/tmp" --suffix=".gravity")"
+
 # Loop through $sources and download each one
 for ((i = 0; i < "${#sources[@]}"; i++)); do
 url="${sources[$i]}"
@@ -335,15 +374,89 @@ gravity_DownloadBlocklists() {
 esac

 echo -e " ${INFO} Target: ${url}"
-gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}"
+local regex
+# Check for characters NOT allowed in URLs
+regex="[^a-zA-Z0-9:/?&%=~._-]"
+if [[ "${url}" =~ ${regex} ]]; then
+echo -e " ${CROSS} Invalid Target"
+else
+gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}"
+fi
 echo ""
 done

+str="Storing downloaded domains in new gravity database"
+echo -ne " ${INFO} ${str}..."
+output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+status="$?"
+
+if [[ "${status}" -ne 0 ]]; then
+echo -e "\\n ${CROSS} Unable to fill gravity table in database ${gravityTEMPfile}\\n ${output}"
+gravity_Cleanup "error"
+else
+echo -e "${OVER} ${TICK} ${str}"
+fi
+
+if [[ "${status}" -eq 0 && -n "${output}" ]]; then
+echo -e " Encountered non-critical SQL warnings. Please check the suitability of the lists you're using!\\n\\n SQL warnings:"
+local warning file line lineno
+while IFS= read -r line; do
+echo " - ${line}"
+warning="$(grep -oh "^[^:]*:[0-9]*" <<< "${line}")"
+file="${warning%:*}"
+lineno="${warning#*:}"
+if [[ -n "${file}" && -n "${lineno}" ]]; then
+echo -n " Line contains: "
+awk "NR==${lineno}" < "${file}"
+fi
+done <<< "${output}"
+echo ""
+fi
+
+rm "${target}" > /dev/null 2>&1 || \
+echo -e " ${CROSS} Unable to remove ${target}"
+
 gravity_Blackbody=true
 }

+total_num=0
+parseList() {
+local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
+# This sed does the following things:
+# 1. Remove all domains containing invalid characters. Valid are: a-z, A-Z, 0-9, dot (.), minus (-), underscore (_)
+# 2. Append ,adlistID to every line
+# 3. Ensures there is a newline on the last line
+sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
+# Find (up to) five domains containing invalid characters (see above)
+incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
+
+local num_lines num_target_lines num_correct_lines num_invalid
+# Get number of lines in source file
+num_lines="$(grep -c "^" "${src}")"
+# Get number of lines in destination file
+num_target_lines="$(grep -c "^" "${target}")"
+num_correct_lines="$(( num_target_lines-total_num ))"
+total_num="$num_target_lines"
+num_invalid="$(( num_lines-num_correct_lines ))"
+if [[ "${num_invalid}" -eq 0 ]]; then
+echo " ${INFO} Received ${num_lines} domains"
+else
+echo " ${INFO} Received ${num_lines} domains, ${num_invalid} domains invalid!"
+fi
+
+# Display sample of invalid lines if we found some
+if [[ -n "${incorrect_lines}" ]]; then
+echo " Sample of invalid domains:"
+while IFS= read -r line; do
+echo " - ${line}"
+done <<< "${incorrect_lines}"
+fi
+}
+
 # Download specified URL and perform checks on HTTP status and file content
 gravity_DownloadBlocklistFromUrl() {
-local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" heisenbergCompensator="" patternBuffer str httpCode success=""
+local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}"
+local heisenbergCompensator="" patternBuffer str httpCode success=""

 # Create temp file to store content on disk instead of RAM
 patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb")
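Note: parseList is pure stream filtering, so its effect is easy to preview on a made-up list. The sed below is the same expression with a literal adlist ID of 7 substituted in:

```
# Invented input: one valid domain, one invalid line, one more valid domain
printf 'ads.example.com\nbad domain!.com\ntracker.example.net\n' > /tmp/demo.list

# Drop lines containing invalid characters, then append ",7" to the survivors
sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,7/;/.$/a\\" /tmp/demo.list
# Expected output:
#   ads.example.com,7
#   tracker.example.net,7
```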
@@ -424,20 +537,14 @@ gravity_DownloadBlocklistFromUrl() {
 # Determine if the blocklist was downloaded and saved correctly
 if [[ "${success}" == true ]]; then
 if [[ "${httpCode}" == "304" ]]; then
-# Add domains to database table
-str="Adding adlist with ID ${adlistID} to database table"
-echo -ne " ${INFO} ${str}..."
-database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-echo -e "${OVER} ${TICK} ${str}"
+# Add domains to database table file
+parseList "${adlistID}" "${saveLocation}" "${target}"
 # Check if $patternbuffer is a non-zero length file
 elif [[ -s "${patternBuffer}" ]]; then
 # Determine if blocklist is non-standard and parse as appropriate
 gravity_ParseFileIntoDomains "${patternBuffer}" "${saveLocation}"
-# Add domains to database table
-str="Adding adlist with ID ${adlistID} to database table"
-echo -ne " ${INFO} ${str}..."
-database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-echo -e "${OVER} ${TICK} ${str}"
+# Add domains to database table file
+parseList "${adlistID}" "${saveLocation}" "${target}"
 else
 # Fall back to previously cached list if $patternBuffer is empty
 echo -e " ${INFO} Received empty file: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"

@@ -446,11 +553,8 @@ gravity_DownloadBlocklistFromUrl() {
 # Determine if cached list has read permission
 if [[ -r "${saveLocation}" ]]; then
 echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
-# Add domains to database table
-str="Adding to database table"
-echo -ne " ${INFO} ${str}..."
-database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-echo -e "${OVER} ${TICK} ${str}"
+# Add domains to database table file
+parseList "${adlistID}" "${saveLocation}" "${target}"
 else
 echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
 fi
@@ -464,7 +568,7 @@ gravity_ParseFileIntoDomains() {
 # Determine if we are parsing a consolidated list
 #if [[ "${source}" == "${piholeDir}/${matterAndLight}" ]]; then
 # Remove comments and print only the domain name
-# Most of the lists downloaded are already in hosts file format but the spacing/formating is not contigious
+# Most of the lists downloaded are already in hosts file format but the spacing/formatting is not contiguous
 # This helps with that and makes it easier to read
 # It also helps with debugging so each stage of the script can be researched more in depth
 # 1) Remove carriage returns
@@ -535,6 +639,7 @@ gravity_Table_Count() {
 local unique
 unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
 echo -e " ${INFO} Number of ${str}: ${num} (${unique} unique domains)"
+sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
 else
 echo -e " ${INFO} Number of ${str}: ${num}"
 fi
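Note: caching the count in the info table lets later callers read it back without re-running COUNT(DISTINCT) over the whole gravity table. A sketch of the read side, assuming the default database path; the value shown is invented:

```
# Read the cached unique-domain count back out of the info table
sqlite3 /etc/pihole/gravity.db \
    "SELECT value FROM info WHERE property = 'gravity_count';"
# -> 112042   (invented example value)
```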
@@ -644,7 +749,7 @@ gravity_Cleanup() {
 dnsWasOffline=true
 fi

-# Print Pi-hole status if an error occured
+# Print Pi-hole status if an error occurred
 if [[ -n "${error}" ]]; then
 "${PIHOLE_COMMAND}" status
 exit 1

@@ -686,10 +791,6 @@ fi
 # Move possibly existing legacy files to the gravity database
 migrate_to_database

-# Ensure proper permissions are set for the newly created database
-chown pihole:pihole "${gravityDBfile}"
-chmod g+w "${piholeDir}" "${gravityDBfile}"
-
 if [[ "${forceDelete:-}" == true ]]; then
 str="Deleting existing list cache"
 echo -ne "${INFO} ${str}..."

@@ -704,15 +805,26 @@ gravity_DownloadBlocklists

 # Create local.list
 gravity_generateLocalList
-gravity_ShowCount

+# Migrate rest of the data from old to new database
+gravity_swap_databases
+
+# Update gravity timestamp
 update_gravity_timestamp

-gravity_Cleanup
-echo ""
+# Ensure proper permissions are set for the database
+chown pihole:pihole "${gravityDBfile}"
+chmod g+w "${piholeDir}" "${gravityDBfile}"
+
+# Compute numbers to be displayed
+gravity_ShowCount
+
 # Determine if DNS has been restarted by this instance of gravity
 if [[ -z "${dnsWasOffline:-}" ]]; then
 "${PIHOLE_COMMAND}" restartdns reload
 fi
+
+gravity_Cleanup
+echo ""

 "${PIHOLE_COMMAND}" status
10 pihole
@@ -302,12 +302,12 @@ tailFunc() {
 source /etc/pihole/setupVars.conf

 # Strip date from each line
-# Colour blocklist/blacklist/wildcard entries as red
-# Colour A/AAAA/DHCP strings as white
-# Colour everything else as gray
+# Color blocklist/blacklist/wildcard entries as red
+# Color A/AAAA/DHCP strings as white
+# Color everything else as gray
 tail -f /var/log/pihole.log | sed -E \
--e "s,($(date +'%b %d ')| dnsmasq[.*[0-9]]),,g" \
--e "s,(.*(gravity |black |regex | config ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
+-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
+-e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
 -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
 -e "s,.*,${COL_GRAY}&${COL_NC},"
 exit 0
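Note: the old `dnsmasq[.*[0-9]]` was a malformed bracket expression; the new `dnsmasq\[[0-9]*\]` matches a literal `[`, any digits, and a literal `]`. A standalone check with an invented log line:

```
line='Dec 12 00:00:00 dnsmasq[1234]: query[A] ads.example.com from 192.168.1.10'
# Strip the " dnsmasq[pid]" token exactly as the corrected expression does
echo "${line}" | sed -E 's, dnsmasq\[[0-9]*\],,g'
# -> Dec 12 00:00:00: query[A] ads.example.com from 192.168.1.10
```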
@@ -7,11 +7,11 @@ From command line all you need to do is:
 - `pip install tox`
 - `tox`

-Tox handles setting up a virtual environment for python dependancies, installing dependancies, building the docker images used by tests, and finally running tests. It's an easy way to have travis-ci like build behavior locally.
+Tox handles setting up a virtual environment for python dependencies, installing dependencies, building the docker images used by tests, and finally running tests. It's an easy way to have travis-ci like build behavior locally.

 ## Alternative py.test method of running tests

-You're responsible for setting up your virtual env and dependancies in this situation.
+You're responsible for setting up your virtual env and dependencies in this situation.

 ```
 py.test -vv -n auto -m "build_stage"
@@ -14,9 +14,9 @@ SETUPVARS = {
 'PIHOLE_DNS_2': '4.2.2.2'
 }

-tick_box = "[\x1b[1;32m\xe2\x9c\x93\x1b[0m]".decode("utf-8")
-cross_box = "[\x1b[1;31m\xe2\x9c\x97\x1b[0m]".decode("utf-8")
-info_box = "[i]".decode("utf-8")
+tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
+cross_box = "[\x1b[1;31m\u2717\x1b[0m]"
+info_box = "[i]"


 @pytest.fixture

@@ -38,9 +38,7 @@ def Pihole(Docker):
 return out

 funcType = type(Docker.run)
-Docker.run = funcType(run_bash,
-Docker,
-testinfra.backend.docker.DockerBackend)
+Docker.run = funcType(run_bash, Docker)
 return Docker
@@ -106,7 +104,7 @@ def mock_command(script, args, container):
 #!/bin/bash -e
 echo "\$0 \$@" >> /var/log/{script}
 case "\$1" in'''.format(script=script))
-for k, v in args.iteritems():
+for k, v in args.items():
 case = dedent('''
 {arg})
 echo {res}

@@ -133,7 +131,7 @@ def mock_command_2(script, args, container):
 #!/bin/bash -e
 echo "\$0 \$@" >> /var/log/{script}
 case "\$1 \$2" in'''.format(script=script))
-for k, v in args.iteritems():
+for k, v in args.items():
 case = dedent('''
 \"{arg}\")
 echo \"{res}\"
@@ -18,6 +18,6 @@ run_local = testinfra.get_backend(
 def test_build_pihole_image(image, tag):
 build_cmd = run_local('docker build -f {} -t {} .'.format(image, tag))
 if build_cmd.rc != 0:
-print build_cmd.stdout
-print build_cmd.stderr
+print(build_cmd.stdout)
+print(build_cmd.stderr)
 assert build_cmd.rc == 0
@@ -1,6 +1,6 @@
 from textwrap import dedent
 import re
-from conftest import (
+from .conftest import (
 SETUPVARS,
 tick_box,
 info_box,

@@ -34,7 +34,7 @@ def test_setupVars_are_sourced_to_global_scope(Pihole):
 This confirms the sourced variables are in scope between functions
 '''
 setup_var_file = 'cat <<EOF> /etc/pihole/setupVars.conf\n'
-for k, v in SETUPVARS.iteritems():
+for k, v in SETUPVARS.items():
 setup_var_file += "{}={}\n".format(k, v)
 setup_var_file += "EOF\n"
 Pihole.run(setup_var_file)

@@ -59,7 +59,7 @@ def test_setupVars_are_sourced_to_global_scope(Pihole):

 output = run_script(Pihole, script).stdout

-for k, v in SETUPVARS.iteritems():
+for k, v in SETUPVARS.items():
 assert "{}={}".format(k, v) in output


@@ -69,7 +69,7 @@ def test_setupVars_saved_to_file(Pihole):
 '''
 # dedent works better with this and padding matching script below
 set_setup_vars = '\n'
-for k, v in SETUPVARS.iteritems():
+for k, v in SETUPVARS.items():
 set_setup_vars += " {}={}\n".format(k, v)
 Pihole.run(set_setup_vars).stdout

@@ -88,7 +88,7 @@ def test_setupVars_saved_to_file(Pihole):

 output = run_script(Pihole, script).stdout

-for k, v in SETUPVARS.iteritems():
+for k, v in SETUPVARS.items():
 assert "{}={}".format(k, v) in output
@@ -195,12 +195,12 @@ def test_configureFirewall_IPTables_enabled_rules_exist_no_errors(Pihole):
 expected_stdout = 'Installing new IPTables firewall rulesets'
 assert expected_stdout in configureFirewall.stdout
 firewall_calls = Pihole.run('cat /var/log/iptables').stdout
-# General call type occurances
+# General call type occurrences
 assert len(re.findall(r'iptables -S', firewall_calls)) == 1
 assert len(re.findall(r'iptables -C', firewall_calls)) == 4
 assert len(re.findall(r'iptables -I', firewall_calls)) == 0

-# Specific port call occurances
+# Specific port call occurrences
 assert len(re.findall(r'tcp --dport 80', firewall_calls)) == 1
 assert len(re.findall(r'tcp --dport 53', firewall_calls)) == 1
 assert len(re.findall(r'udp --dport 53', firewall_calls)) == 1

@@ -242,12 +242,12 @@ def test_configureFirewall_IPTables_enabled_not_exist_no_errors(Pihole):
 expected_stdout = 'Installing new IPTables firewall rulesets'
 assert expected_stdout in configureFirewall.stdout
 firewall_calls = Pihole.run('cat /var/log/iptables').stdout
-# General call type occurances
+# General call type occurrences
 assert len(re.findall(r'iptables -S', firewall_calls)) == 1
 assert len(re.findall(r'iptables -C', firewall_calls)) == 4
 assert len(re.findall(r'iptables -I', firewall_calls)) == 4

-# Specific port call occurances
+# Specific port call occurrences
 assert len(re.findall(r'tcp --dport 80', firewall_calls)) == 2
 assert len(re.findall(r'tcp --dport 53', firewall_calls)) == 2
 assert len(re.findall(r'udp --dport 53', firewall_calls)) == 2
@@ -1,10 +1,9 @@
 import pytest
-from conftest import (
+from .conftest import (
 tick_box,
 info_box,
 cross_box,
 mock_command,
-mock_command_2,
 )
@@ -14,5 +14,5 @@ def test_scripts_pass_shellcheck():
 "shellcheck -x \"$file\" -e SC1090,SC1091; "
 "done;")
 results = run_local(shellcheck)
-print results.stdout
+print(results.stdout)
 assert '' == results.stdout
2 tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py27
+envlist = py36

 [testenv]
 whitelist_externals = docker