Mirror of https://github.com/pi-hole/pi-hole.git, synced 2024-11-22 06:03:43 +00:00

Commit f193edd428

Merge branch 'development' into development-v6-merge-development (resolved conflicts)

Signed-off-by: Adam Warner <me@adamwarner.co.uk>

6 changed files with 446 additions and 415 deletions
.github/workflows/stale.yml (vendored): 11 changed lines
@@ -23,14 +23,17 @@ jobs:
 days-before-stale: 30
 days-before-close: 5
 stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
-stale-issue-label: $stale_label
+stale-issue-label: '${{ env.stale_label }}'
 exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
 exempt-all-issue-assignees: true
 operations-per-run: 300
 close-issue-reason: 'not_planned'

-remove_stale: # trigger "stale" removal immediately when stale issues are commented on
-if: github.event_name == 'issue_comment'
+remove_stale:
+# trigger "stale" removal immediately when stale issues are commented on
+# we need to explicitly check that the trigger does not run on comment on a PR as
+# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only
+if: ${{ !github.event.issue.pull_request && github.event_name != 'schedule' }}
 permissions:
 contents: read # for actions/checkout
 issues: write # to edit issues label

@@ -39,7 +42,7 @@ jobs:
 - name: Checkout
 uses: actions/checkout@v4.1.1
 - name: Remove 'stale' label
-run: gh issue edit ${{ github.event.issue.number }} --remove-label $stale_label
+run: gh issue edit ${{ github.event.issue.number }} --remove-label ${{ env.stale_label }}
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -39,7 +39,6 @@ Options:
 exit 0
 }

-
 GenerateOutput() {
 local data gravity_data lists_data num_gravity num_lists search_type_str
 local gravity_data_csv lists_data_csv line current_domain

@@ -107,13 +106,13 @@ Main(){
 local data

 if [ -z "${domain}" ]; then
-echo "No domain specified"; exit 1
+echo "No domain specified"
+exit 1
 fi
 # domains are lowercased and converted to punycode by FTL since
 # https://github.com/pi-hole/FTL/pull/1715
 # no need to do it here

-
 # Test if the authentication endpoint is available
 TestAPIAvailability
@@ -34,7 +34,6 @@ function get_remote_version() {
 fi
 }

-
 function get_remote_hash() {
 git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 1,8);}' || return 1
 }

@@ -66,7 +65,6 @@ if [[ "$1" == "reboot" ]]; then
 sleep 30
 fi

-
 # get Core versions

 CORE_VERSION="$(get_local_version /etc/.pihole)"

@@ -84,7 +82,6 @@ addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_VERSION" "${GITHUB_CORE_VERSI
 GITHUB_CORE_HASH="$(get_remote_hash pi-hole "${CORE_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}"

-
 # get Web versions

 WEB_VERSION="$(get_local_version /var/www/html/admin)"

@@ -119,7 +116,6 @@ addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_VERSION" "${GITHUB_FTL_VERSION
 GITHUB_FTL_HASH="$(get_remote_hash FTL "${FTL_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_HASH" "${GITHUB_FTL_HASH}"

-
 # get Docker versions

 if [[ "${DOCKER_TAG}" ]]; then
@@ -39,9 +39,9 @@ export PATH+=':/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
 : "${DIALOG_CANCEL:=1}"
 : "${DIALOG_ESC:=255}"

-
 # List of supported DNS servers
-DNS_SERVERS=$(cat << EOM
+DNS_SERVERS=$(
+cat <<EOM
 Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
 OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
 Level3;4.2.2.1;4.2.2.2;;
@@ -62,7 +62,6 @@ coltable="/opt/pihole/COL_TABLE"
 # Root of the web server
 webroot="/var/www/html"

-
 # We clone (or update) two git repositories during the install. This helps to make sure that we always have the latest versions of the relevant files.
 # web is used to set up the Web admin interface.
 # Pi-hole contains various setup scripts and files which are critical to the installation.
@@ -176,7 +175,10 @@ os_check() {
 detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')

 # Test via IPv4
-cmdResult="$(dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+cmdResult="$(
+dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1
+echo $?
+)"
 # Gets the return code of the previous command (last line)
 digReturnCode="${cmdResult##*$'\n'}"
@@ -197,7 +199,10 @@ os_check() {
 if [ "$valid_response" = false ]; then
 unset valid_response

-cmdResult="$(dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+cmdResult="$(
+dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1
+echo $?
+)"
 # Gets the return code of the previous command (last line)
 digReturnCode="${cmdResult##*$'\n'}"
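The two dig hunks above reformat the same idiom: the command's output and its exit status are captured in a single command substitution, and the status is then peeled off as the last line. A minimal standalone sketch of that idiom (the ls call is a stand-in for the installer's dig query):

    #!/usr/bin/env bash
    # Capture a command's combined output and its exit code in one substitution.
    cmdResult="$(
        ls /etc/os-release /does-not-exist 2>&1   # stand-in for the dig lookup
        echo $?
    )"
    # The exit code is the last line of the captured text...
    returnCode="${cmdResult##*$'\n'}"
    # ...and everything before that last line is the command's output.
    response="${cmdResult%$'\n'*}"
    echo "return code: ${returnCode}"
    echo "output: ${response}"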
@@ -217,8 +222,7 @@ os_check() {

 if [ "$valid_response" = true ]; then
 IFS=" " read -r -a supportedOS < <(echo "${response}" | tr -d '"')
-for distro_and_versions in "${supportedOS[@]}"
-do
+for distro_and_versions in "${supportedOS[@]}"; do
 distro_part="${distro_and_versions%%=*}"
 versions_part="${distro_and_versions##*=}"
@@ -226,8 +230,7 @@ os_check() {
 if [[ "${detected_os^^}" =~ ${distro_part^^} ]]; then
 valid_os=true
 IFS="," read -r -a supportedVer <<<"${versions_part}"
-for version in "${supportedVer[@]}"
-do
+for version in "${supportedVer[@]}"; do
 if [[ "${detected_version}" =~ $version ]]; then
 valid_version=true
 break
@@ -292,8 +295,7 @@ test_dpkg_lock() {
 printf " %b Waiting for package manager to finish (up to 30 seconds)\\n" "${INFO}"
 # fuser is a program to show which processes use the named files, sockets, or filesystems
 # So while the lock is held,
-while fuser /var/lib/dpkg/lock >/dev/null 2>&1
-do
+while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
 # we wait half a second,
 sleep 0.5
 # increase the iterator,
@@ -302,7 +304,7 @@ test_dpkg_lock() {
 if [[ $i -gt 60 ]]; then
 printf " %b %bError: Could not verify package manager finished and released lock. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${COL_NC}"
 printf " Attempt to install packages manually and retry.\\n"
-exit 1;
+exit 1
 fi
 done
 # and then report success once dpkg is unlocked.
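For context, the loop reformatted in test_dpkg_lock above polls fuser until no process holds dpkg's lock file, and gives up after roughly 30 seconds. The same logic as a standalone sketch:

    #!/usr/bin/env bash
    i=0
    # fuser exits 0 while some process still holds the dpkg lock file
    while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
        # wait half a second and count the attempts
        sleep 0.5
        ((i = i + 1))
        if [[ $i -gt 60 ]]; then
            printf "Error: could not verify that the package manager released the lock\n" >&2
            exit 1
        fi
    done
    printf "dpkg lock is free\n"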
@@ -475,13 +477,19 @@ getGitFiles() {
 # Show that we're checking it
 printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
 # Update the repo, returning an error message on failure
-update_repo "${directory}" || { printf "\\n %b: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
+update_repo "${directory}" || {
+printf "\\n %b: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+exit 1
+}
 # If it's not a .git repo,
 else
 # Show an error
 printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
 # Attempt to make the repository, showing an error on failure
-make_repo "${directory}" "${remoteRepo}" || { printf "\\n %bError: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
+make_repo "${directory}" "${remoteRepo}" || {
+printf "\\n %bError: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+exit 1
+}
 fi
 echo ""
 # Success via one of the two branches, as the commands would exit if they failed.
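The getGitFiles hunk reformats a recurring error-handling idiom in the installer: a command followed by "|| { ...; exit 1; }", where the brace group runs only if the command fails. A small sketch of the pattern, with a hypothetical helper standing in for update_repo/make_repo:

    #!/usr/bin/env bash
    prepare_repo() {
        # hypothetical helper; the installer uses update_repo or make_repo here
        git -C "${1}" pull --quiet
    }

    # The brace group runs only when prepare_repo fails
    prepare_repo /etc/.pihole || {
        printf "Error: could not update the local repository\n" >&2
        exit 1
    }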
@@ -699,9 +707,9 @@ valid_ip() {

 # Regex matching one IPv4 component, i.e. an integer from 0 to 255.
 # See https://tools.ietf.org/html/rfc1340
-local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)";
+local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)"
 # Regex matching an optional port (starting with '#') range of 1-65536
-local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
+local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"
 # Build a full IPv4 regex from the above subexpressions
 local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$"
@@ -721,7 +729,7 @@ valid_ip6() {
 # Regex matching an IPv6 CIDR, i.e. 1 to 128
 local v6cidr="(\\/([1-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])){0,1}"
 # Regex matching an optional port (starting with '#') range of 1-65536
-local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
+local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"
 # Build a full IPv6 regex from the above subexpressions
 local regex="^(((${ipv6elem}))*((:${ipv6elem}))*::((${ipv6elem}))*((:${ipv6elem}))*|((${ipv6elem}))((:${ipv6elem})){7})${v6cidr}${portelem}$"
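Both regex hunks above only drop trailing semicolons; the expressions themselves validate an address with an optional '#port' suffix. A condensed sketch of how the IPv4 variant is typically applied with bash's =~ operator:

    #!/usr/bin/env bash
    valid_ip() {
        # One IPv4 octet (0-255) and an optional '#port' suffix, as in the installer
        local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)"
        local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"
        local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$"
        [[ ${1} =~ ${regex} ]]
    }

    valid_ip "127.0.0.1#5335" && echo "valid"
    valid_ip "256.1.2.3" || echo "invalid"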
@@ -745,8 +753,7 @@ setDNS() {
 # and set the new one to newline
 IFS=$'\n'
 # Put the DNS Servers into an array
-for DNSServer in ${DNS_SERVERS}
-do
+for DNSServer in ${DNS_SERVERS}; do
 DNSName="$(cut -d';' -f1 <<<"${DNSServer}")"
 DNSChooseOptions[DNSServerCount]="${DNSName}"
 ((DNSServerCount = DNSServerCount + 1))
@@ -773,8 +780,7 @@ setDNS() {
 esac

 # Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider
-if [[ "${DNSchoices}" == "Custom" ]]
-then
+if [[ "${DNSchoices}" == "Custom" ]]; then
 # Loop until we have a valid DNS setting
 until [[ "${DNSSettingsCorrect}" = True ]]; do
 # Signal value, to be used if the user inputs an invalid IP address
@@ -870,11 +876,9 @@ If you want to specify a port other than 53, separate it with a hash.\
 OIFS=$IFS
 # and set the new one to newline
 IFS=$'\n'
-for DNSServer in ${DNS_SERVERS}
-do
+for DNSServer in ${DNS_SERVERS}; do
 DNSName="$(cut -d';' -f1 <<<"${DNSServer}")"
-if [[ "${DNSchoices}" == "${DNSName}" ]]
-then
+if [[ "${DNSchoices}" == "${DNSName}" ]]; then
 PIHOLE_DNS_1="$(cut -d';' -f2 <<<"${DNSServer}")"
 PIHOLE_DNS_2="$(cut -d';' -f3 <<<"${DNSServer}")"
 break
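Both setDNS loops iterate over the semicolon-delimited DNS_SERVERS list defined near the top of the installer (provider name, then IPv4 and IPv6 addresses). A reduced, self-contained sketch of that parsing, using two of the entries shown above:

    #!/usr/bin/env bash
    # Two entries copied from the DNS_SERVERS list; fields are separated by ';'
    DNS_SERVERS="$(printf '%s\n' \
        'Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844' \
        'OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53')"

    # Split on newlines only, so the provider names may contain spaces
    OIFS=$IFS
    IFS=$'\n'
    for DNSServer in ${DNS_SERVERS}; do
        DNSName="$(cut -d';' -f1 <<<"${DNSServer}")"
        PIHOLE_DNS_1="$(cut -d';' -f2 <<<"${DNSServer}")"
        PIHOLE_DNS_2="$(cut -d';' -f3 <<<"${DNSServer}")"
        echo "${DNSName}: ${PIHOLE_DNS_1} / ${PIHOLE_DNS_2}"
    done
    IFS=$OIFS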
@@ -989,7 +993,7 @@ installDefaultBlocklists() {
 # In unattended setup, could be useful to use userdefined blocklist.
 # If this file exists, we avoid overriding it.
 if [[ -f "${adlistFile}" ]]; then
-return;
+return
 fi
 echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >>"${adlistFile}"
 }
@@ -1033,7 +1037,6 @@ remove_old_pihole_lighttpd_configs() {
 local confavailable="/etc/lighttpd/conf-available/15-pihole-admin.conf"
 local confenabled="/etc/lighttpd/conf-enabled/15-pihole-admin.conf"

-
 if [[ -f "${lighttpdConfig}" ]]; then
 sed -i '/include "\/etc\/lighttpd\/conf.d\/pihole-admin.conf"/d' "${lighttpdConfig}"
 fi
@@ -1364,9 +1367,9 @@ install_dependent_packages() {
 # Running apt-get install with minimal output can cause some issues with
 # requiring user input (e.g password for phpmyadmin see #218)
 printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
-printf '%*s\n' "${c}" '' | tr " " -;
+printf '%*s\n' "${c}" '' | tr " " -
 "${PKG_INSTALL[@]}" "${installArray[@]}"
-printf '%*s\n' "${c}" '' | tr " " -;
+printf '%*s\n' "${c}" '' | tr " " -
 return
 fi
 printf "\\n"
@@ -1387,9 +1390,9 @@ install_dependent_packages() {
 # If there's anything to install, install everything in the list.
 if [[ "${#installArray[@]}" -gt 0 ]]; then
 printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
-printf '%*s\n' "${c}" '' | tr " " -;
+printf '%*s\n' "${c}" '' | tr " " -
 "${PKG_INSTALL[@]}" "${installArray[@]}"
-printf '%*s\n' "${c}" '' | tr " " -;
+printf '%*s\n' "${c}" '' | tr " " -
 return
 fi
 printf "\\n"
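The separator lines that lose their trailing semicolons in these two hunks are drawn by printf padding an empty string to ${c} spaces and tr turning each space into a dash. For example:

    #!/usr/bin/env bash
    c=40                                  # width of the separator; 40 here for illustration
    printf '%*s\n' "${c}" '' | tr " " -   # prints a line of 40 '-' characters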
@@ -1592,7 +1595,7 @@ checkSelinux() {
 ;;
 esac
 else
-echo -e " ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}";
+echo -e " ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}"
 fi
 # Exit the installer if any SELinux checks toggled the flag
 if [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -z "${PIHOLE_SELINUX}" ]]; then
@@ -1601,8 +1604,8 @@ checkSelinux() {
 printf " This check can be skipped by setting the environment variable %bPIHOLE_SELINUX%b to %btrue%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" "${COL_LIGHT_RED}" "${COL_NC}"
 printf " e.g: export PIHOLE_SELINUX=true\\n"
 printf " By setting this variable to true you acknowledge there may be issues with Pi-hole during or after the install\\n"
-printf "\\n %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}";
-exit 1;
+printf "\\n %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+exit 1
 elif [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -n "${PIHOLE_SELINUX}" ]]; then
 printf " %b %bSELinux Enforcing detected%b. PIHOLE_SELINUX env variable set - installer will continue\\n" "${INFO}" "${COL_LIGHT_RED}" "${COL_NC}"
 fi
@@ -1626,7 +1629,6 @@ displayFinalMessage() {
 # Store a message in a variable and display it
 additional="View the web interface at http://pi.hole/admin:${WEBPORT} or http://${IPV4_ADDRESS%/*}:${WEBPORT}/admin\\n\\nYour Admin Webpage login password is ${pwstring}"

-
 # Final completion message to user
 dialog --no-shadow --keep-tite \
 --title "Installation Complete!" \
@@ -1777,26 +1779,30 @@ clone_or_update_repos() {
 if [[ "${reconfigure}" == true ]]; then
 printf " %b Performing reconfiguration, skipping download of local repos\\n" "${INFO}"
 # Reset the Core repo
-resetRepo ${PI_HOLE_LOCAL_REPO} || \
-{ printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \
-exit 1; \
+resetRepo ${PI_HOLE_LOCAL_REPO} ||
+{
+printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"
+exit 1
 }
 # Reset the Web repo
-resetRepo ${webInterfaceDir} || \
-{ printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceDir}" "${COL_NC}"; \
-exit 1; \
+resetRepo ${webInterfaceDir} ||
+{
+printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceDir}" "${COL_NC}"
+exit 1
 }
 # Otherwise, a repair is happening
 else
 # so get git files for Core
-getGitFiles ${PI_HOLE_LOCAL_REPO} ${piholeGitUrl} || \
-{ printf " %b Unable to clone %s into %s, unable to continue%b\\n" "${COL_LIGHT_RED}" "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \
-exit 1; \
+getGitFiles ${PI_HOLE_LOCAL_REPO} ${piholeGitUrl} ||
+{
+printf " %b Unable to clone %s into %s, unable to continue%b\\n" "${COL_LIGHT_RED}" "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"
+exit 1
 }
 # get the Web git files
-getGitFiles ${webInterfaceDir} ${webInterfaceGitUrl} || \
-{ printf " %b Unable to clone %s into ${webInterfaceDir}, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceGitUrl}" "${COL_NC}"; \
-exit 1; \
+getGitFiles ${webInterfaceDir} ${webInterfaceGitUrl} ||
+{
+printf " %b Unable to clone %s into ${webInterfaceDir}, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceGitUrl}" "${COL_NC}"
+exit 1
 }
 fi
 }
@@ -1810,7 +1816,10 @@ FTLinstall() {
 printf " %b %s..." "${INFO}" "${str}"

 # Move into the temp ftl directory
-pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }
+pushd "$(mktemp -d)" >/dev/null || {
+printf "Unable to make temporary directory for FTL binary download\\n"
+return 1
+}
 local tempdir
 tempdir="$(pwd)"
 local ftlBranch
@@ -1850,7 +1859,10 @@ FTLinstall() {
 install -T -m 0755 "${binary}" /usr/bin/pihole-FTL

 # Move back into the original directory the user was in
-popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+popd >/dev/null || {
+printf "Unable to return to original directory after FTL binary download.\\n"
+return 1
+}

 # Installed the FTL service
 printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
@@ -1861,7 +1873,10 @@ FTLinstall() {
 return 0
 else
 # Otherwise, the hash download failed, so print and exit.
-popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+popd >/dev/null || {
+printf "Unable to return to original directory after FTL binary download.\\n"
+return 1
+}
 printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
 printf " %b Error: Download of %s/%s failed (checksum error)%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}"
@@ -1871,7 +1886,10 @@ FTLinstall() {
 fi
 else
 # Otherwise, the download failed, so print and exit.
-popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+popd >/dev/null || {
+printf "Unable to return to original directory after FTL binary download.\\n"
+return 1
+}
 printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
 # The URL could not be found
 printf " %b Error: URL %s/%s not found%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}"
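The FTLinstall hunks above all reformat the same guarded directory change: pushd into a fresh temporary directory before downloading, popd back afterwards, and bail out with a message if either step fails. A condensed sketch (exit is used here where the function uses return 1):

    #!/usr/bin/env bash
    # Change into a fresh temporary directory, or stop with a message
    pushd "$(mktemp -d)" >/dev/null || {
        printf "Unable to make temporary directory for download\n" >&2
        exit 1
    }

    # ... download and verify files inside the temporary directory ...

    # Return to wherever the user started from
    popd >/dev/null || {
        printf "Unable to return to original directory after download\n" >&2
        exit 1
    }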
@@ -1884,7 +1902,7 @@ FTLinstall() {

 remove_dir() {
 # Delete dir
-rm -r "${1}" > /dev/null 2>&1 || \
+rm -r "${1}" >/dev/null 2>&1 ||
 echo -e " ${CROSS} Unable to remove ${1}"
 }
@@ -2239,7 +2257,7 @@ main() {
 # Check for and disable systemd-resolved-DNSStubListener before reloading resolved
 # DNSStubListener needs to remain in place for installer to download needed files,
 # so this change needs to be made after installation is complete,
-# but before starting or restarting the ftl service
+# but before starting or resttarting the ftl service
 disable_resolved_stublistener

 printf " %b Restarting services...\\n" "${INFO}"
gravity.sh: 69 changed lines
@@ -117,7 +117,7 @@ gravity_swap_databases() {

 # Update timestamp when the gravity table was last updated successfully
 update_gravity_timestamp() {
-output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1 )
+output=$({ printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 -ni -ni "${gravityTEMPfile}"; } 2>&1)
 status="$?"

 if [[ "${status}" -ne 0 ]]; then
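update_gravity_timestamp records the time of the last successful run by piping SQL into the bundled SQLite engine and capturing any error output together with the exit status. A simplified sketch of the same flow using the stock sqlite3 client; the database path and the info table schema are illustrative stand-ins (Pi-hole itself calls pihole-FTL sqlite3 against its gravity database):

    #!/usr/bin/env bash
    gravityTEMPfile="$(mktemp)"   # illustrative path; the real script targets the new gravity database
    # Minimal stand-in for the info table queried by gravity.sh
    sqlite3 "${gravityTEMPfile}" "CREATE TABLE IF NOT EXISTS info (property TEXT PRIMARY KEY, value TEXT);"

    # Pipe the statement in, capturing stderr and stdout together; %%s prints a literal %s for strftime
    output=$({ printf ".timeout 30000\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityTEMPfile}"; } 2>&1)
    status="$?"

    if [[ "${status}" -ne 0 ]]; then
        echo "Unable to update gravity timestamp: ${output}" >&2
    else
        sqlite3 "${gravityTEMPfile}" "SELECT property, value FROM info;"
    fi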
@@ -171,8 +171,7 @@ database_table_from_file() {

 # Loop over all domains in ${src} file
 # Read file line by line
-grep -v '^ *#' < "${src}" | while IFS= read -r domain
-do
+grep -v '^ *#' <"${src}" | while IFS= read -r domain; do
 # Only add non-empty lines
 if [[ -n "${domain}" ]]; then
 if [[ "${table}" == "domain_audit" ]]; then
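The loop joined onto one line here is a standard way to stream a file line by line while skipping full-line comments. A self-contained sketch, with a small sample file created inline:

    #!/usr/bin/env bash
    src="$(mktemp)"
    printf '%s\n' '# a comment' 'example.com' '' 'ads.example.net' >"${src}"

    # Strip comment lines, then read the remainder line by line
    grep -v '^ *#' <"${src}" | while IFS= read -r domain; do
        # Only process non-empty lines
        if [[ -n "${domain}" ]]; then
            echo "would insert: ${domain}"
        fi
    done

    rm -f "${src}"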
@@ -202,11 +201,11 @@ database_table_from_file() {

 # Move source file to backup directory, create directory if not existing
 mkdir -p "${backup_path}"
-mv "${src}" "${backup_file}" 2> /dev/null || \
+mv "${src}" "${backup_file}" 2>/dev/null ||
 echo -e " ${CROSS} Unable to backup ${src} to ${backup_path}"

 # Delete tmpFile
-rm "${tmpFile}" > /dev/null 2>&1 || \
+rm "${tmpFile}" >/dev/null 2>&1 ||
 echo -e " ${CROSS} Unable to remove ${tmpFile}"
 }
@@ -224,7 +223,7 @@ gravity_column_exists() {
 database_adlist_number() {
 # Only try to set number of domains when this field exists in the gravity database
 if ! gravity_column_exists "adlist" "number"; then
-return;
+return
 fi

 output=$({ printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${2}" "${3}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
@@ -240,7 +239,7 @@ database_adlist_number() {
 database_adlist_status() {
 # Only try to set the status when this field exists in the gravity database
 if ! gravity_column_exists "adlist" "status"; then
-return;
+return
 fi

 output=$({ printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
@@ -361,8 +360,8 @@ gravity_DownloadBlocklists() {

 # Retrieve source URLs from gravity database
 # We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true)
-mapfile -t sources <<< "$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
-mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
+mapfile -t sources <<<"$(pihole-FTL sqlite3 -ni -ni "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2>/dev/null)"
+mapfile -t sourceIDs <<<"$(pihole-FTL sqlite3 -ni -ni "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2>/dev/null)"
 mapfile -t sourceTypes <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT type FROM vw_adlist;" 2>/dev/null)"

 # Parse source domains from $sources
@@ -521,19 +520,23 @@ gravity_DownloadBlocklistFromUrl() {
 # Check if this IP matches any IP of the system
 if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<<"$(ip a)") -gt 0 ]]; then
 blocked=true
-fi;;
+fi
+;;
 "NXDOMAIN")
 if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
 blocked=true
-fi;;
+fi
+;;
 "NODATA")
 if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
 blocked=true
-fi;;
+fi
+;;
 "NULL" | *)
 if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
 blocked=true
-fi;;
+fi
+;;
 esac

 if [[ "${blocked}" == true ]]; then
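The case statement above decides whether the machine's own blocking would get in the way of downloading a list, by querying the domain and inspecting the answer for the configured blocking mode (matching IP, NXDOMAIN, NODATA, or NULL/0.0.0.0). A trimmed sketch of the NXDOMAIN and NULL branches; the domain and the BLOCKINGMODE variable are illustrative stand-ins for however the script obtains them:

    #!/usr/bin/env bash
    domain="blocklist.example.com"   # illustrative
    BLOCKINGMODE="NULL"              # illustrative; the real script reads the configured blocking mode
    blocked=false

    case "${BLOCKINGMODE}" in
        "NXDOMAIN")
            if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
                blocked=true
            fi
            ;;
        "NULL" | *)
            if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
                blocked=true
            fi
            ;;
    esac

    [[ "${blocked}" == true ]] && echo "${domain} appears to be blocked locally"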
@@ -545,11 +548,12 @@ gravity_DownloadBlocklistFromUrl() {
 fi
 ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
 if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
-port=443;
-else port=80
+port=443
+else
+port=80
 fi
 bad_list=$(pihole -q -adlist "${domain}" | head -n1 | awk -F 'Match found in ' '{print $2}')
-echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by ${bad_list%:}. Using DNS on ${PIHOLE_DNS_1} to download ${url}";
+echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by ${bad_list%:}. Using DNS on ${PIHOLE_DNS_1} to download ${url}"
 echo -ne " ${INFO} ${str} Pending..."
 cmd_ext="--resolve $domain:$port:$ip"
 fi
@@ -561,16 +565,24 @@ gravity_DownloadBlocklistFromUrl() {
 # Did we "download" a local file?
 "file"*)
 if [[ -s "${listCurlBuffer}" ]]; then
-echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true
+echo -e "${OVER} ${TICK} ${str} Retrieval successful"
+success=true
 else
 echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
-fi;;
+fi
+;;
 # Did we "download" a remote file?
 *)
 # Determine "Status:" output based on HTTP response
 case "${httpCode}" in
-"200") echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true;;
-"304") echo -e "${OVER} ${TICK} ${str} No changes detected"; success=true;;
+"200")
+echo -e "${OVER} ${TICK} ${str} Retrieval successful"
+success=true
+;;
+"304")
+echo -e "${OVER} ${TICK} ${str} No changes detected"
+success=true
+;;
 "000") echo -e "${OVER} ${CROSS} ${str} Connection Refused" ;;
 "403") echo -e "${OVER} ${CROSS} ${str} Forbidden" ;;
 "404") echo -e "${OVER} ${CROSS} ${str} Not found" ;;
@@ -581,7 +593,8 @@ gravity_DownloadBlocklistFromUrl() {
 "521") echo -e "${OVER} ${CROSS} ${str} Web Server Is Down (Cloudflare)" ;;
 "522") echo -e "${OVER} ${CROSS} ${str} Connection Timed Out (Cloudflare)" ;;
 *) echo -e "${OVER} ${CROSS} ${str} ${url} (${httpCode})" ;;
-esac;;
+esac
+;;
 esac

 local done="false"
@@ -662,12 +675,12 @@ gravity_Table_Count() {
 local table="${1}"
 local str="${2}"
 local num
-num="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM ${table};")"
+num="$(pihole-FTL sqlite3 -ni -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM ${table};")"
 if [[ "${table}" == "gravity" ]]; then
 local unique
-unique="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")"
+unique="$(pihole-FTL sqlite3 -ni -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")"
 echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
-pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
+pihole-FTL sqlite3 -ni -ni "${gravityTEMPfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
 else
 echo -e " ${INFO} Number of ${str}: ${num}"
 fi
@@ -722,7 +735,7 @@ gravity_Cleanup() {
 for file in "${piholeDir}"/*."${domainsExtension}"; do
 # If list is not in active array, then remove it
 if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
-rm -f "${file}" 2> /dev/null || \
+rm -f "${file}" 2>/dev/null ||
 echo -e " ${CROSS} Failed to remove ${file##*/}"
 fi
 done
@@ -804,7 +817,8 @@ repairSelector() {
 case "$1" in
 "recover") recover_database=true ;;
 "recreate") recreate_database=true ;;
-*) echo "Usage: pihole -g -r {recover,recreate}
+*)
+echo "Usage: pihole -g -r {recover,recreate}
 Attempt to repair gravity database

 Available options:
@@ -823,7 +837,8 @@ Available options:
 and create a new file from scratch. If you still
 have the migration backup created when migrating
 to Pi-hole v5.0, Pi-hole will import these files."
-exit 0;;
+exit 0
+;;
 esac
 }