diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index 3a75dc12..00000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-# These are supported funding model platforms
-
-patreon: pihole
-custom: https://pi-hole.net/donate
diff --git a/advanced/Scripts/database_migration/gravity-db.sh b/advanced/Scripts/database_migration/gravity-db.sh
index 184b3a4a..8a669429 100644
--- a/advanced/Scripts/database_migration/gravity-db.sh
+++ b/advanced/Scripts/database_migration/gravity-db.sh
@@ -87,4 +87,21 @@ upgrade_gravityDB(){
sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
version=9
fi
+ if [[ "$version" == "9" ]]; then
+ # This migration drops unused tables and creates triggers to remove
+ # obsolete groups assignments when the linked items are deleted
+ echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
+ sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+ version=10
+ fi
+ if [[ "$version" == "10" ]]; then
+ # This adds timestamp and an optional comment field to the client table
+		# These fields are only temporary and will be replaced by the columns
+ # defined in gravity.db.sql during gravity swapping. We add them here
+ # to keep the copying process generic (needs the same columns in both the
+ # source and the destination databases).
+ echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
+ sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+ version=11
+ fi
}
diff --git a/advanced/Scripts/database_migration/gravity/10_to_11.sql b/advanced/Scripts/database_migration/gravity/10_to_11.sql
new file mode 100644
index 00000000..b073f83b
--- /dev/null
+++ b/advanced/Scripts/database_migration/gravity/10_to_11.sql
@@ -0,0 +1,16 @@
+.timeout 30000
+
+BEGIN TRANSACTION;
+
+ALTER TABLE client ADD COLUMN date_added INTEGER;
+ALTER TABLE client ADD COLUMN date_modified INTEGER;
+ALTER TABLE client ADD COLUMN comment TEXT;
+
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+ BEGIN
+ UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+ END;
+
+UPDATE info SET value = 11 WHERE property = 'version';
+
+COMMIT;
diff --git a/advanced/Scripts/database_migration/gravity/9_to_10.sql b/advanced/Scripts/database_migration/gravity/9_to_10.sql
new file mode 100644
index 00000000..a5636a23
--- /dev/null
+++ b/advanced/Scripts/database_migration/gravity/9_to_10.sql
@@ -0,0 +1,29 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+
+DROP TABLE IF EXISTS whitelist;
+DROP TABLE IF EXISTS blacklist;
+DROP TABLE IF EXISTS regex_whitelist;
+DROP TABLE IF EXISTS regex_blacklist;
+
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+ BEGIN
+ DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+ END;
+
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
+ BEGIN
+ DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+ END;
+
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+ BEGIN
+ DELETE FROM client_by_group WHERE client_id = OLD.id;
+ END;
+
+UPDATE info SET value = 10 WHERE property = 'version';
+
+COMMIT;
diff --git a/advanced/Scripts/piholeDebug.sh b/advanced/Scripts/piholeDebug.sh
index 84e34416..28d34ab6 100755
--- a/advanced/Scripts/piholeDebug.sh
+++ b/advanced/Scripts/piholeDebug.sh
@@ -662,19 +662,21 @@ ping_internet() {
}
compare_port_to_service_assigned() {
- local service_name="${1}"
- # The programs we use may change at some point, so they are in a varible here
- local resolver="pihole-FTL"
- local web_server="lighttpd"
- local ftl="pihole-FTL"
+ local service_name
+ local expected_service
+ local port
+
+ service_name="${2}"
+ expected_service="${1}"
+ port="${3}"
# If the service is a Pi-hole service, highlight it in green
- if [[ "${service_name}" == "${resolver}" ]] || [[ "${service_name}" == "${web_server}" ]] || [[ "${service_name}" == "${ftl}" ]]; then
- log_write "[${COL_GREEN}${port_number}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
+ if [[ "${service_name}" == "${expected_service}" ]]; then
+ log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
# Otherwise,
else
# Show the service name in red since it's non-standard
- log_write "[${COL_RED}${port_number}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
+ log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
fi
}
@@ -708,11 +710,11 @@ check_required_ports() {
fi
# Use a case statement to determine if the right services are using the right ports
case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
- 53) compare_port_to_service_assigned "${resolver}"
+ 53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
;;
- 80) compare_port_to_service_assigned "${web_server}"
+ 80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
;;
- 4711) compare_port_to_service_assigned "${ftl}"
+ 4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
;;
# If it's not a default port that Pi-hole needs, just print it out for the user to see
*) log_write "${port_number} ${service_name} (${protocol_type})";
@@ -1105,7 +1107,7 @@ show_db_entries() {
}
show_groups() {
- show_db_entries "Groups" "SELECT * FROM \"group\"" "4 4 30 50"
+ show_db_entries "Groups" "SELECT id,name,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 50 7 19 19 50"
}
show_adlists() {
@@ -1113,18 +1115,14 @@ show_adlists() {
show_db_entries "Adlist groups" "SELECT * FROM adlist_by_group" "4 4"
}
-show_whitelist() {
- show_db_entries "Exact whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM whitelist" "4 100 7 19 19 50"
- show_db_entries "Exact whitelist groups" "SELECT * FROM whitelist_by_group" "4 4"
- show_db_entries "Regex whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_whitelist" "4 100 7 19 19 50"
- show_db_entries "Regex whitelist groups" "SELECT * FROM regex_whitelist_by_group" "4 4"
+show_domainlist() {
+ show_db_entries "Domainlist (0/1 = exact/regex whitelist, 2/3 = exact/regex blacklist)" "SELECT id,type,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist" "4 4 100 7 19 19 50"
+ show_db_entries "Domainlist groups" "SELECT * FROM domainlist_by_group" "10 10"
}
-show_blacklist() {
- show_db_entries "Exact blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM blacklist" "4 100 7 19 19 50"
- show_db_entries "Exact blacklist groups" "SELECT * FROM blacklist_by_group" "4 4"
- show_db_entries "Regex blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_blacklist" "4 100 7 19 19 50"
- show_db_entries "Regex blacklist groups" "SELECT * FROM regex_blacklist_by_group" "4 4"
+show_clients() {
+ show_db_entries "Clients" "SELECT id,ip,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM client" "4 100 19 19 50"
+ show_db_entries "Client groups" "SELECT * FROM client_by_group" "10 10"
}
analyze_gravity_list() {
@@ -1134,16 +1132,17 @@ analyze_gravity_list() {
gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
- local gravity_size
- gravity_size=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT COUNT(*) FROM vw_gravity")
- log_write " Size (excluding blacklist): ${COL_CYAN}${gravity_size}${COL_NC} entries"
+ show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
+ gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
+ gravity_updated="$(date -d @"${gravity_updated_raw}")"
+ log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
log_write ""
OLD_IFS="$IFS"
IFS=$'\r\n'
local gravity_sample=()
mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
- log_write " ${COL_CYAN}----- First 10 Domains -----${COL_NC}"
+ log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
for line in "${gravity_sample[@]}"; do
log_write " ${line}"
@@ -1301,9 +1300,9 @@ parse_setup_vars
check_x_headers
analyze_gravity_list
show_groups
+show_domainlist
+show_clients
show_adlists
-show_whitelist
-show_blacklist
show_content_of_pihole_files
parse_locale
analyze_pihole_log
diff --git a/advanced/Scripts/update.sh b/advanced/Scripts/update.sh
index e45be5cf..f833fc2f 100755
--- a/advanced/Scripts/update.sh
+++ b/advanced/Scripts/update.sh
@@ -198,6 +198,14 @@ main() {
${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
echo -e "${basicError}" && exit 1
fi
+
+ if [[ "${FTL_update}" == true || "${core_update}" == true || "${web_update}" == true ]]; then
+ # Force an update of the updatechecker
+ /opt/pihole/updatecheck.sh
+ /opt/pihole/updatecheck.sh x remote
+ echo -e " ${INFO} Local version file information updated."
+ fi
+
echo ""
exit 0
}
diff --git a/advanced/Templates/gravity.db.sql b/advanced/Templates/gravity.db.sql
index d0c744f4..e543bd19 100644
--- a/advanced/Templates/gravity.db.sql
+++ b/advanced/Templates/gravity.db.sql
@@ -1,16 +1,21 @@
-PRAGMA FOREIGN_KEYS=ON;
+PRAGMA foreign_keys=OFF;
+BEGIN TRANSACTION;
CREATE TABLE "group"
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
enabled BOOLEAN NOT NULL DEFAULT 1,
- name TEXT NOT NULL,
+ name TEXT UNIQUE NOT NULL,
+ date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+ date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
description TEXT
);
+INSERT INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
-CREATE TABLE whitelist
+CREATE TABLE domainlist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
+ type INTEGER NOT NULL DEFAULT 0,
domain TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
@@ -18,47 +23,6 @@ CREATE TABLE whitelist
comment TEXT
);
-CREATE TABLE whitelist_by_group
-(
- whitelist_id INTEGER NOT NULL REFERENCES whitelist (id),
- group_id INTEGER NOT NULL REFERENCES "group" (id),
- PRIMARY KEY (whitelist_id, group_id)
-);
-
-CREATE TABLE blacklist
-(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- domain TEXT UNIQUE NOT NULL,
- enabled BOOLEAN NOT NULL DEFAULT 1,
- date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
- date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
- comment TEXT
-);
-
-CREATE TABLE blacklist_by_group
-(
- blacklist_id INTEGER NOT NULL REFERENCES blacklist (id),
- group_id INTEGER NOT NULL REFERENCES "group" (id),
- PRIMARY KEY (blacklist_id, group_id)
-);
-
-CREATE TABLE regex
-(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- domain TEXT UNIQUE NOT NULL,
- enabled BOOLEAN NOT NULL DEFAULT 1,
- date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
- date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
- comment TEXT
-);
-
-CREATE TABLE regex_by_group
-(
- regex_id INTEGER NOT NULL REFERENCES regex (id),
- group_id INTEGER NOT NULL REFERENCES "group" (id),
- PRIMARY KEY (regex_id, group_id)
-);
-
CREATE TABLE adlist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -78,7 +42,8 @@ CREATE TABLE adlist_by_group
CREATE TABLE gravity
(
- domain TEXT PRIMARY KEY
+ domain TEXT NOT NULL,
+ adlist_id INTEGER NOT NULL REFERENCES adlist (id)
);
CREATE TABLE info
@@ -87,56 +52,137 @@ CREATE TABLE info
value TEXT NOT NULL
);
-INSERT INTO info VALUES("version","1");
+INSERT INTO "info" VALUES('version','11');
-CREATE VIEW vw_whitelist AS SELECT DISTINCT domain
- FROM whitelist
- LEFT JOIN whitelist_by_group ON whitelist_by_group.whitelist_id = whitelist.id
- LEFT JOIN "group" ON "group".id = whitelist_by_group.group_id
- WHERE whitelist.enabled = 1 AND (whitelist_by_group.group_id IS NULL OR "group".enabled = 1)
- ORDER BY whitelist.id;
+CREATE TABLE domain_audit
+(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ domain TEXT UNIQUE NOT NULL,
+ date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
+);
-CREATE TRIGGER tr_whitelist_update AFTER UPDATE ON whitelist
- BEGIN
- UPDATE whitelist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
- END;
+CREATE TABLE domainlist_by_group
+(
+ domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
+ group_id INTEGER NOT NULL REFERENCES "group" (id),
+ PRIMARY KEY (domainlist_id, group_id)
+);
-CREATE VIEW vw_blacklist AS SELECT DISTINCT domain
- FROM blacklist
- LEFT JOIN blacklist_by_group ON blacklist_by_group.blacklist_id = blacklist.id
- LEFT JOIN "group" ON "group".id = blacklist_by_group.group_id
- WHERE blacklist.enabled = 1 AND (blacklist_by_group.group_id IS NULL OR "group".enabled = 1)
- ORDER BY blacklist.id;
+CREATE TABLE client
+(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+	ip TEXT NOT NULL UNIQUE,
+ date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+ date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
+ comment TEXT
+);
-CREATE TRIGGER tr_blacklist_update AFTER UPDATE ON blacklist
- BEGIN
- UPDATE blacklist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
- END;
-
-CREATE VIEW vw_regex AS SELECT DISTINCT domain
- FROM regex
- LEFT JOIN regex_by_group ON regex_by_group.regex_id = regex.id
- LEFT JOIN "group" ON "group".id = regex_by_group.group_id
- WHERE regex.enabled = 1 AND (regex_by_group.group_id IS NULL OR "group".enabled = 1)
- ORDER BY regex.id;
-
-CREATE TRIGGER tr_regex_update AFTER UPDATE ON regex
- BEGIN
- UPDATE regex SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
- END;
-
-CREATE VIEW vw_adlist AS SELECT DISTINCT address
- FROM adlist
- LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
- LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
- WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
- ORDER BY adlist.id;
+CREATE TABLE client_by_group
+(
+ client_id INTEGER NOT NULL REFERENCES client (id),
+ group_id INTEGER NOT NULL REFERENCES "group" (id),
+ PRIMARY KEY (client_id, group_id)
+);
CREATE TRIGGER tr_adlist_update AFTER UPDATE ON adlist
BEGIN
UPDATE adlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE address = NEW.address;
END;
-CREATE VIEW vw_gravity AS SELECT domain
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+ BEGIN
+ UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE ip = NEW.ip;
+ END;
+
+CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
+ BEGIN
+ UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+ END;
+
+CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+ FROM domainlist
+ LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+ LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+ WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ AND domainlist.type = 0
+ ORDER BY domainlist.id;
+
+CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+ FROM domainlist
+ LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+ LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+ WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ AND domainlist.type = 1
+ ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+ FROM domainlist
+ LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+ LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+ WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ AND domainlist.type = 2
+ ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+ FROM domainlist
+ LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+ LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+ WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ AND domainlist.type = 3
+ ORDER BY domainlist.id;
+
+CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
FROM gravity
- WHERE domain NOT IN (SELECT domain from vw_whitelist);
+ LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
+ LEFT JOIN adlist ON adlist.id = gravity.adlist_id
+ LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+ WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
+
+CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
+ FROM adlist
+ LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
+ LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+ WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
+ ORDER BY adlist.id;
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
+ BEGIN
+ INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+ BEGIN
+ INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
+ BEGIN
+ INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
+ BEGIN
+ UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+ END;
+
+CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
+ BEGIN
+ INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
+ END;
+
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+ BEGIN
+ DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+ END;
+
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
+ BEGIN
+ DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+ END;
+
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+ BEGIN
+ DELETE FROM client_by_group WHERE client_id = OLD.id;
+ END;
+
+COMMIT;
diff --git a/advanced/Templates/gravity_copy.sql b/advanced/Templates/gravity_copy.sql
new file mode 100644
index 00000000..4a2a9b22
--- /dev/null
+++ b/advanced/Templates/gravity_copy.sql
@@ -0,0 +1,42 @@
+.timeout 30000
+
+ATTACH DATABASE '/etc/pihole/gravity.db' AS OLD;
+
+BEGIN TRANSACTION;
+
+DROP TRIGGER tr_domainlist_add;
+DROP TRIGGER tr_client_add;
+DROP TRIGGER tr_adlist_add;
+
+INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
+INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
+
+INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
+INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
+
+INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
+INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
+
+INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
+
+INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
+INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
+
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
+ BEGIN
+ INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+ BEGIN
+ INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
+ BEGIN
+ INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
+ END;
+
+
+COMMIT;
diff --git a/advanced/index.php b/advanced/index.php
index 62e45091..b0c4a7c3 100644
--- a/advanced/index.php
+++ b/advanced/index.php
@@ -96,12 +96,6 @@ if ($serverName === "pi.hole") {
// Define admin email address text based off $svEmail presence
$bpAskAdmin = !empty($svEmail) ? '' : "";
-// Determine if at least one block list has been generated
-$blocklistglob = glob("/etc/pihole/list.0.*.domains");
-if ($blocklistglob === array()) {
- die("[ERROR] There are no domain lists generated lists within /etc/pihole/
! Please update gravity by running pihole -g
, or repair Pi-hole using pihole -r
.");
-}
-
// Get possible non-standard location of FTL's database
$FTLsettings = parse_ini_file("/etc/pihole/pihole-FTL.conf");
if (isset($FTLsettings["GRAVITYDB"])) {
diff --git a/automated install/basic-install.sh b/automated install/basic-install.sh
index 6b0927de..65c72b40 100755
--- a/automated install/basic-install.sh
+++ b/automated install/basic-install.sh
@@ -247,7 +247,7 @@ if is_command apt-get ; then
PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data resolvconf libcap2)
# The Web dashboard has some that also need to be installed
# It's useful to separate the two since our repos are also setup as "Core" code and "Web" code
- PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "${phpVer}-intl")
+ PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "php-intl")
# The Web server user,
LIGHTTPD_USER="www-data"
# group,
@@ -427,11 +427,11 @@ make_repo() {
# Clone the repo and return the return code from this command
git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $?
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
- chmod -R a+rX "${directory}"
+ chmod -R a+rX "${directory}"
# Move into the directory that was passed as an argument
pushd "${directory}" &> /dev/null || return 1
# Check current branch. If it is master, then reset to the latest availible tag.
- # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
+ # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
curBranch=$(git rev-parse --abbrev-ref HEAD)
if [[ "${curBranch}" == "master" ]]; then #If we're calling make_repo() then it should always be master, we may not need to check.
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
@@ -457,7 +457,7 @@ update_repo() {
# Again, it's useful to store these in variables in case we need to reuse or change the message;
# we only need to make one change here
local str="Update repo in ${1}"
- # Move into the directory that was passed as an argument
+ # Move into the directory that was passed as an argument
pushd "${directory}" &> /dev/null || return 1
# Let the user know what's happening
printf " %b %s..." "${INFO}" "${str}"
@@ -467,7 +467,7 @@ update_repo() {
# Pull the latest commits
git pull --quiet &> /dev/null || return $?
# Check current branch. If it is master, then reset to the latest availible tag.
- # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
+ # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
curBranch=$(git rev-parse --abbrev-ref HEAD)
if [[ "${curBranch}" == "master" ]]; then
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
@@ -529,7 +529,7 @@ resetRepo() {
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
# Return to where we came from
popd &> /dev/null || return 1
- # Returning success anyway?
+ # Returning success anyway?
return 0
}
@@ -1232,7 +1232,7 @@ appendToListsFile() {
case $1 in
StevenBlack ) echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}";;
MalwareDom ) echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}";;
- Cameleon ) echo "http://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
+ Cameleon ) echo "https://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
DisconTrack ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}";;
DisconAd ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}";;
HostsFile ) echo "https://hosts-file.net/ad_servers.txt" >> "${adlistFile}";;
@@ -2228,15 +2228,6 @@ FTLinstall() {
local str="Downloading and Installing FTL"
printf " %b %s..." "${INFO}" "${str}"
- # Find the latest version tag for FTL
- latesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep "Location" | awk -F '/' '{print $NF}')
- # Tags should always start with v, check for that.
- if [[ ! "${latesttag}" == v* ]]; then
- printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
- printf " %bError: Unable to get latest release location from GitHub%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
- return 1
- fi
-
# Move into the temp ftl directory
pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }
@@ -2257,7 +2248,7 @@ FTLinstall() {
# Determine which version of FTL to download
if [[ "${ftlBranch}" == "master" ]];then
- url="https://github.com/pi-hole/FTL/releases/download/${latesttag%$'\r'}"
+ url="https://github.com/pi-hole/ftl/releases/latest/download"
else
url="https://ftl.pi-hole.net/${ftlBranch}"
fi
@@ -2468,17 +2459,14 @@ FTLcheckUpdate() {
if [[ ${ftlLoc} ]]; then
local FTLversion
FTLversion=$(/usr/bin/pihole-FTL tag)
- local FTLreleaseData
local FTLlatesttag
- if ! FTLreleaseData=$(curl -sI https://github.com/pi-hole/FTL/releases/latest); then
+ if ! FTLlatesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep --color=never -i Location | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
# There was an issue while retrieving the latest version
printf " %b Failed to retrieve latest FTL release metadata" "${CROSS}"
return 3
fi
- FTLlatesttag=$(grep 'Location' <<< "${FTLreleaseData}" | awk -F '/' '{print $NF}' | tr -d '\r\n')
-
if [[ "${FTLversion}" != "${FTLlatesttag}" ]]; then
return 0
else
diff --git a/gravity.sh b/gravity.sh
index 659263b5..c421e832 100755
--- a/gravity.sh
+++ b/gravity.sh
@@ -36,7 +36,9 @@ VPNList="/etc/openvpn/ipp.txt"
piholeGitDir="/etc/.pihole"
gravityDBfile="${piholeDir}/gravity.db"
+gravityTEMPfile="${piholeDir}/gravity_temp.db"
gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
+gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"
optimize_database=false
domainsExtension="domains"
@@ -80,12 +82,45 @@ fi
# Generate new sqlite3 file from schema template
generate_gravity_database() {
- sqlite3 "${gravityDBfile}" < "${gravityDBschema}"
+ sqlite3 "${1}" < "${gravityDBschema}"
}
+# Copy data from old to new database file and swap them
+gravity_swap_databases() {
+ local str
+ str="Building tree"
+ echo -ne " ${INFO} ${str}..."
+
+  # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
+ output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
+ status="$?"
+
+ if [[ "${status}" -ne 0 ]]; then
+ echo -e "\\n ${CROSS} Unable to build gravity tree in ${gravityTEMPfile}\\n ${output}"
+ return 1
+ fi
+ echo -e "${OVER} ${TICK} ${str}"
+
+ str="Swapping databases"
+ echo -ne " ${INFO} ${str}..."
+
+ output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBcopy}"; } 2>&1 )
+ status="$?"
+
+ if [[ "${status}" -ne 0 ]]; then
+ echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
+ return 1
+ fi
+ echo -e "${OVER} ${TICK} ${str}"
+
+ # Swap databases and remove old database
+ rm "${gravityDBfile}"
+ mv "${gravityTEMPfile}" "${gravityDBfile}"
+}
+
+# Update timestamp when the gravity table was last updated successfully
update_gravity_timestamp() {
- # Update timestamp when the gravity table was last updated successfully
- output=$( { sqlite3 "${gravityDBfile}" <<< "INSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%s', 'now') as int));"; } 2>&1 )
+ output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
@@ -95,91 +130,83 @@ update_gravity_timestamp() {
return 0
}
-database_truncate_table() {
- local table
- table="${1}"
-
- output=$( { sqlite3 "${gravityDBfile}" <<< "DELETE FROM ${table};"; } 2>&1 )
- status="$?"
-
- if [[ "${status}" -ne 0 ]]; then
- echo -e "\\n ${CROSS} Unable to truncate ${table} database ${gravityDBfile}\\n ${output}"
- gravity_Cleanup "error"
- return 1
- fi
- return 0
-}
-
# Import domains from file and store them in the specified database table
database_table_from_file() {
# Define locals
- local table source backup_path backup_file arg
+ local table source backup_path backup_file tmpFile type
table="${1}"
source="${2}"
- arg="${3}"
backup_path="${piholeDir}/migration_backup"
backup_file="${backup_path}/$(basename "${2}")"
-
- # Truncate table only if not gravity (we add multiple times to this table)
- if [[ "${table}" != "gravity" ]]; then
- database_truncate_table "${table}"
- fi
-
- local tmpFile
tmpFile="$(mktemp -p "/tmp" --suffix=".gravity")"
+
local timestamp
timestamp="$(date --utc +'%s')"
- local inputfile
- # Apply format for white-, blacklist, regex, and adlist tables
- # Read file line by line
+
local rowid
declare -i rowid
rowid=1
- if [[ "${table}" == "gravity" ]]; then
- #Append ,${arg} to every line and then remove blank lines before import
- sed -e "s/$/,${arg}/" "${source}" > "${tmpFile}"
- sed -i '/^$/d' "${tmpFile}"
- else
- grep -v '^ *#' < "${source}" | while IFS= read -r domain
- do
- # Only add non-empty lines
- if [[ -n "${domain}" ]]; then
- if [[ "${table}" == "domain_audit" ]]; then
- # domain_audit table format (no enable or modified fields)
- echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
- else
- # White-, black-, and regexlist format
- echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
- fi
- rowid+=1
- fi
- done
+ # Special handling for domains to be imported into the common domainlist table
+ if [[ "${table}" == "whitelist" ]]; then
+ type="0"
+ table="domainlist"
+ elif [[ "${table}" == "blacklist" ]]; then
+ type="1"
+ table="domainlist"
+ elif [[ "${table}" == "regex" ]]; then
+ type="3"
+ table="domainlist"
fi
- inputfile="${tmpFile}"
- # Remove possible duplicates found in lower-quality adlists
- sort -u -o "${inputfile}" "${inputfile}"
+ # Get MAX(id) from domainlist when INSERTing into this table
+ if [[ "${table}" == "domainlist" ]]; then
+ rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
+ if [[ -z "$rowid" ]]; then
+ rowid=0
+ fi
+ rowid+=1
+ fi
+
+ # Loop over all domains in ${source} file
+ # Read file line by line
+ grep -v '^ *#' < "${source}" | while IFS= read -r domain
+ do
+ # Only add non-empty lines
+ if [[ -n "${domain}" ]]; then
+ if [[ "${table}" == "domain_audit" ]]; then
+ # domain_audit table format (no enable or modified fields)
+ echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
+ elif [[ "${table}" == "adlist" ]]; then
+ # Adlist table format
+ echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
+ else
+ # White-, black-, and regexlist table format
+ echo "${rowid},${type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
+ fi
+ rowid+=1
+ fi
+ done
# Store domains in database table specified by ${table}
# Use printf as .mode and .import need to be on separate lines
# see https://unix.stackexchange.com/a/445615/83260
- output=$( { printf ".timeout 10000\\n.mode csv\\n.import \"%s\" %s\\n" "${inputfile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+ output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
- echo -e "\\n ${CROSS} Unable to fill table ${table} in database ${gravityDBfile}\\n ${output}"
+ echo -e "\\n ${CROSS} Unable to fill table ${table}${type} in database ${gravityDBfile}\\n ${output}"
gravity_Cleanup "error"
fi
- # Delete tmpfile
- rm "${tmpFile}" > /dev/null 2>&1 || \
- echo -e " ${CROSS} Unable to remove ${tmpFile}"
-
# Move source file to backup directory, create directory if not existing
mkdir -p "${backup_path}"
mv "${source}" "${backup_file}" 2> /dev/null || \
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
+
+ # Delete tmpFile
+ rm "${tmpFile}" > /dev/null 2>&1 || \
+ echo -e " ${CROSS} Unable to remove ${tmpFile}"
}
# Migrate pre-v5.0 list files to database-based Pi-hole versions
@@ -188,7 +215,10 @@ migrate_to_database() {
if [ ! -e "${gravityDBfile}" ]; then
# Create new database file - note that this will be created in version 1
echo -e " ${INFO} Creating new gravity database"
- generate_gravity_database
+ generate_gravity_database "${gravityDBfile}"
+
+ # Check if gravity database needs to be updated
+ upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
# Migrate list files to new database
if [ -e "${adListFile}" ]; then
@@ -306,16 +336,25 @@ gravity_DownloadBlocklists() {
return 1
fi
- local url domain agent cmd_ext str
+ local url domain agent cmd_ext str target
echo ""
- # Flush gravity table once before looping over sources
- str="Flushing gravity table"
+ # Prepare new gravity database
+ str="Preparing new gravity database"
echo -ne " ${INFO} ${str}..."
- if database_truncate_table "gravity"; then
+ rm "${gravityTEMPfile}" > /dev/null 2>&1
+ output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
+ status="$?"
+
+ if [[ "${status}" -ne 0 ]]; then
+ echo -e "\\n ${CROSS} Unable to create new database ${gravityTEMPfile}\\n ${output}"
+ gravity_Cleanup "error"
+ else
echo -e "${OVER} ${TICK} ${str}"
fi
+ target="$(mktemp -p "/tmp" --suffix=".gravity")"
+
# Loop through $sources and download each one
for ((i = 0; i < "${#sources[@]}"; i++)); do
url="${sources[$i]}"
@@ -335,15 +374,82 @@ gravity_DownloadBlocklists() {
esac
echo -e " ${INFO} Target: ${url}"
- gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}"
+ gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}"
echo ""
done
+
+ str="Storing downloaded domains in new gravity database"
+ echo -ne " ${INFO} ${str}..."
+ output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+ status="$?"
+
+ if [[ "${status}" -ne 0 ]]; then
+ echo -e "\\n ${CROSS} Unable to fill gravity table in database ${gravityTEMPfile}\\n ${output}"
+ gravity_Cleanup "error"
+ else
+ echo -e "${OVER} ${TICK} ${str}"
+ fi
+
+ if [[ "${status}" -eq 0 && -n "${output}" ]]; then
+ echo -e " Encountered non-critical SQL warnings. Please check the suitability of the lists you're using!\\n\\n SQL warnings:"
+ local warning file line lineno
+ while IFS= read -r line; do
+ echo " - ${line}"
+ warning="$(grep -oh "^[^:]*:[0-9]*" <<< "${line}")"
+ file="${warning%:*}"
+ lineno="${warning#*:}"
+ if [[ -n "${file}" && -n "${lineno}" ]]; then
+ echo -n " Line contains: "
+ awk "NR==${lineno}" < "${file}"
+ fi
+ done <<< "${output}"
+ echo ""
+ fi
+
+ rm "${target}" > /dev/null 2>&1 || \
+ echo -e " ${CROSS} Unable to remove ${target}"
+
gravity_Blackbody=true
}
+total_num=0
+parseList() {
+ local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
+ # This sed does the following things:
+ # 1. Remove all domains containing invalid characters. Valid are: a-z, A-Z, 0-9, dot (.), minus (-), underscore (_)
+ # 2. Append ,adlistID to every line
+ # 3. Ensures there is a newline on the last line
+ sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
+ # Find (up to) five domains containing invalid characters (see above)
+ incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
+
+ local num_lines num_target_lines num_correct_lines num_invalid
+ # Get number of lines in source file
+ num_lines="$(grep -c "^" "${src}")"
+ # Get number of lines in destination file
+ num_target_lines="$(grep -c "^" "${target}")"
+ num_correct_lines="$(( num_target_lines-total_num ))"
+ total_num="$num_target_lines"
+ num_invalid="$(( num_lines-num_correct_lines ))"
+ if [[ "${num_invalid}" -eq 0 ]]; then
+ echo " ${INFO} Received ${num_lines} domains"
+ else
+ echo " ${INFO} Received ${num_lines} domains, ${num_invalid} domains invalid!"
+ fi
+
+ # Display sample of invalid lines if we found some
+ if [[ -n "${incorrect_lines}" ]]; then
+ echo " Sample of invalid domains:"
+ while IFS= read -r line; do
+ echo " - ${line}"
+ done <<< "${incorrect_lines}"
+ fi
+}
+
# Download specified URL and perform checks on HTTP status and file content
gravity_DownloadBlocklistFromUrl() {
- local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" heisenbergCompensator="" patternBuffer str httpCode success=""
+ local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}"
+ local heisenbergCompensator="" patternBuffer str httpCode success=""
# Create temp file to store content on disk instead of RAM
patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb")
@@ -424,20 +530,14 @@ gravity_DownloadBlocklistFromUrl() {
# Determine if the blocklist was downloaded and saved correctly
if [[ "${success}" == true ]]; then
if [[ "${httpCode}" == "304" ]]; then
- # Add domains to database table
- str="Adding adlist with ID ${adlistID} to database table"
- echo -ne " ${INFO} ${str}..."
- database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
- echo -e "${OVER} ${TICK} ${str}"
+ # Append this list's domains to the temp file imported into the gravity table
+ parseList "${adlistID}" "${saveLocation}" "${target}"
# Check if $patternbuffer is a non-zero length file
elif [[ -s "${patternBuffer}" ]]; then
# Determine if blocklist is non-standard and parse as appropriate
gravity_ParseFileIntoDomains "${patternBuffer}" "${saveLocation}"
- # Add domains to database table
- str="Adding adlist with ID ${adlistID} to database table"
- echo -ne " ${INFO} ${str}..."
- database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
- echo -e "${OVER} ${TICK} ${str}"
+ # Append this list's domains to the temp file imported into the gravity table
+ parseList "${adlistID}" "${saveLocation}" "${target}"
else
# Fall back to previously cached list if $patternBuffer is empty
echo -e " ${INFO} Received empty file: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
@@ -446,11 +546,8 @@ gravity_DownloadBlocklistFromUrl() {
# Determine if cached list has read permission
if [[ -r "${saveLocation}" ]]; then
echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
- # Add domains to database table
- str="Adding to database table"
- echo -ne " ${INFO} ${str}..."
- database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
- echo -e "${OVER} ${TICK} ${str}"
+ # Append this list's domains to the temp file imported into the gravity table
+ parseList "${adlistID}" "${saveLocation}" "${target}"
else
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
fi
@@ -535,6 +632,7 @@ gravity_Table_Count() {
local unique
unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
echo -e " ${INFO} Number of ${str}: ${num} (${unique} unique domains)"
+ sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
else
echo -e " ${INFO} Number of ${str}: ${num}"
fi
@@ -686,10 +784,6 @@ fi
# Move possibly existing legacy files to the gravity database
migrate_to_database
-# Ensure proper permissions are set for the newly created database
-chown pihole:pihole "${gravityDBfile}"
-chmod g+w "${piholeDir}" "${gravityDBfile}"
-
if [[ "${forceDelete:-}" == true ]]; then
str="Deleting existing list cache"
echo -ne "${INFO} ${str}..."
@@ -704,15 +798,26 @@ gravity_DownloadBlocklists
# Create local.list
gravity_generateLocalList
-gravity_ShowCount
+# Migrate rest of the data from old to new database
+gravity_swap_databases
+
+# Update gravity timestamp
update_gravity_timestamp
-gravity_Cleanup
-echo ""
+# Ensure proper permissions are set for the database
+chown pihole:pihole "${gravityDBfile}"
+chmod g+w "${piholeDir}" "${gravityDBfile}"
+
+# Compute numbers to be displayed
+gravity_ShowCount
# Determine if DNS has been restarted by this instance of gravity
if [[ -z "${dnsWasOffline:-}" ]]; then
"${PIHOLE_COMMAND}" restartdns reload
fi
+
+gravity_Cleanup
+echo ""
+
"${PIHOLE_COMMAND}" status
diff --git a/pihole b/pihole
index cc7e1b7c..6e72b4a3 100755
--- a/pihole
+++ b/pihole
@@ -306,8 +306,8 @@ tailFunc() {
# Colour A/AAAA/DHCP strings as white
# Colour everything else as gray
tail -f /var/log/pihole.log | sed -E \
- -e "s,($(date +'%b %d ')| dnsmasq[.*[0-9]]),,g" \
- -e "s,(.*(gravity |black |regex | config ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
+ -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
+ -e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
-e "s,.*,${COL_GRAY}&${COL_NC},"
exit 0