diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 465ee182c497..79809179cf13 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -27,6 +27,7 @@ https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options. - [ ] New feature (non-breaking change which adds functionality) - [ ] Performance enhancement (non-breaking change which improves efficiency) - [ ] Code cleanup (non-breaking change which makes code smaller or more readable) +- [ ] Quality assurance (non-breaking change which makes the code more robust against bugs) - [ ] Breaking change (fix or feature that would cause existing functionality to change) - [ ] Library ABI change (libzfs, libzfs\_core, libnvpair, libuutil and libzfsbootenv) - [ ] Documentation (a change to man pages or other documentation) diff --git a/.github/workflows/checkstyle.yaml b/.github/workflows/checkstyle.yaml index b34ca1302873..a01a4fe8587c 100644 --- a/.github/workflows/checkstyle.yaml +++ b/.github/workflows/checkstyle.yaml @@ -19,7 +19,7 @@ jobs: run: | # for x in lxd core20 snapd; do sudo snap remove $x; done sudo apt-get purge -y snapd google-chrome-stable firefox - ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu22 + ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22 sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck sudo python -m pipx install --quiet flake8 # confirm that the tools are installed diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml new file mode 100644 index 000000000000..6193c8afeae9 --- /dev/null +++ b/.github/workflows/labels.yml @@ -0,0 +1,49 @@ +name: labels + +on: + pull_request_target: + types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ] + +permissions: + pull-requests: write + +jobs: + open: + runs-on: ubuntu-latest + if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }} + steps: + - env: + 
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit $ISSUE --add-label "Status: Work in Progress" + + push: + runs-on: ubuntu-latest + if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }} + steps: + - env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale" + + draft: + runs-on: ubuntu-latest + if: ${{ github.event.action == 'converted_to_draft' }} + steps: + - env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress" + + rfr: + runs-on: ubuntu-latest + if: ${{ github.event.action == 'ready_for_review' }} + steps: + - env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed" diff --git a/.github/workflows/scripts/generate-ci-type.py b/.github/workflows/scripts/generate-ci-type.py index 943aae254469..b49255e8381d 100755 --- a/.github/workflows/scripts/generate-ci-type.py +++ b/.github/workflows/scripts/generate-ci-type.py @@ -29,6 +29,7 @@ Patterns of files that are considered to trigger full CI. 
""" FULL_RUN_REGEX = list(map(re.compile, [ + r'\.github/workflows/scripts/.*', r'cmd.*', r'configs/.*', r'META', diff --git a/.github/workflows/scripts/qemu-1-setup.sh b/.github/workflows/scripts/qemu-1-setup.sh index f838da34efff..de29ad1f57b6 100755 --- a/.github/workflows/scripts/qemu-1-setup.sh +++ b/.github/workflows/scripts/qemu-1-setup.sh @@ -10,36 +10,12 @@ set -eu export DEBIAN_FRONTEND="noninteractive" sudo apt-get -y update sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \ - ksmtuned virt-manager linux-modules-extra-$(uname -r) zfsutils-linux + virt-manager linux-modules-extra-$(uname -r) zfsutils-linux # generate ssh keys rm -f ~/.ssh/id_ed25519 ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N "" -# we expect RAM shortage -cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null -# /etc/ksmtuned.conf - Configuration file for ksmtuned -# https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm -KSM_MONITOR_INTERVAL=60 - -# Millisecond sleep between ksm scans for 16Gb server. -# Smaller servers sleep more, bigger sleep less. 
-KSM_SLEEP_MSEC=30 - -KSM_NPAGES_BOOST=0 -KSM_NPAGES_DECAY=0 -KSM_NPAGES_MIN=1000 -KSM_NPAGES_MAX=25000 - -KSM_THRES_COEF=80 -KSM_THRES_CONST=8192 - -LOGFILE=/var/log/ksmtuned.log -DEBUG=1 -EOF -sudo systemctl restart ksm -sudo systemctl restart ksmtuned - # not needed sudo systemctl stop docker.socket sudo systemctl stop multipathd.socket @@ -65,16 +41,14 @@ $DISK sync sleep 1 -# swap with same size as RAM +# swap with same size as RAM (16GiB) sudo mkswap $DISK-part1 sudo swapon $DISK-part1 -# 60GB data disk +# JBOD 2xdisk for OpenZFS storage (test vm's) SSD1="$DISK-part2" - -# 10GB data disk on ext4 -sudo fallocate -l 10G /test.ssd1 -SSD2=$(sudo losetup -b 4096 -f /test.ssd1 --show) +sudo fallocate -l 12G /test.ssd2 +SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show) # adjust zfs module parameter and create pool exec 1>/dev/null @@ -83,11 +57,11 @@ ARC_MAX=$((1024*1024*512)) echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq -sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 \ - -O relatime=off -O atime=off -O xattr=sa -O compression=lz4 \ - -O mountpoint=/mnt/tests +sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \ + -O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \ + -O redundant_metadata=none -O mountpoint=/mnt/tests # no need for some scheduler for i in /sys/block/s*/queue/scheduler; do - echo "none" | sudo tee $i > /dev/null + echo "none" | sudo tee $i done diff --git a/.github/workflows/scripts/qemu-2-start.sh b/.github/workflows/scripts/qemu-2-start.sh index 39ac92107b71..28da6700e541 100755 --- a/.github/workflows/scripts/qemu-2-start.sh +++ b/.github/workflows/scripts/qemu-2-start.sh @@ -14,13 +14,13 @@ OSv=$OS # compressed with .zst extension REPO="https://github.com/mcmilk/openzfs-freebsd-images" -FREEBSD="$REPO/releases/download/v2024-10-05" 
+FREEBSD="$REPO/releases/download/v2025-04-13" URLzs="" # Ubuntu mirrors -#UBMIRROR="https://cloud-images.ubuntu.com" +UBMIRROR="https://cloud-images.ubuntu.com" #UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images" -UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images" +#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images" # default nic model for vm's NIC="virtio" @@ -34,11 +34,20 @@ case "$OS" in OSNAME="AlmaLinux 9" URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2" ;; + almalinux10) + OSNAME="AlmaLinux 10" + OSv="almalinux9" + URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2" + ;; archlinux) OSNAME="Archlinux" URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2" - # dns sometimes fails with that url :/ - echo "89.187.191.12 geo.mirror.pkgbuild.com" | sudo tee /etc/hosts > /dev/null + ;; + centos-stream10) + OSNAME="CentOS Stream 10" + # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo + OSv="centos-stream9" + URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2" ;; centos-stream9) OSNAME="CentOS Stream 9" @@ -52,22 +61,15 @@ case "$OS" in OSNAME="Debian 12" URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2" ;; - fedora40) - OSNAME="Fedora 40" - OSv="fedora-unknown" - URL="https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2" - ;; fedora41) OSNAME="Fedora 41" OSv="fedora-unknown" URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2" ;; - freebsd13-3r) - OSNAME="FreeBSD 13.3-RELEASE" - OSv="freebsd13.0" - URLzs="$FREEBSD/amd64-freebsd-13.3-RELEASE.qcow2.zst" - BASH="/usr/local/bin/bash" - NIC="rtl8139" + 
fedora42) + OSNAME="Fedora 42" + OSv="fedora-unknown" + URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2" ;; freebsd13-4r) OSNAME="FreeBSD 13.4-RELEASE" @@ -76,11 +78,12 @@ case "$OS" in BASH="/usr/local/bin/bash" NIC="rtl8139" ;; - freebsd14-0r) - OSNAME="FreeBSD 14.0-RELEASE" - OSv="freebsd14.0" - URLzs="$FREEBSD/amd64-freebsd-14.0-RELEASE.qcow2.zst" + freebsd13-5r) + OSNAME="FreeBSD 13.5-RELEASE" + OSv="freebsd13.0" + URLzs="$FREEBSD/amd64-freebsd-13.5-RELEASE.qcow2.zst" BASH="/usr/local/bin/bash" + NIC="rtl8139" ;; freebsd14-1r) OSNAME="FreeBSD 14.1-RELEASE" @@ -88,16 +91,23 @@ case "$OS" in URLzs="$FREEBSD/amd64-freebsd-14.1-RELEASE.qcow2.zst" BASH="/usr/local/bin/bash" ;; - freebsd13-4s) - OSNAME="FreeBSD 13.4-STABLE" + freebsd14-2r) + OSNAME="FreeBSD 14.2-RELEASE" + OSv="freebsd14.0" + URLzs="$FREEBSD/amd64-freebsd-14.2-RELEASE.qcow2.zst" + BASH="/usr/local/bin/bash" + ;; + freebsd13-5s) + OSNAME="FreeBSD 13.5-STABLE" OSv="freebsd13.0" - URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst" + URLzs="$FREEBSD/amd64-freebsd-13.5-STABLE.qcow2.zst" BASH="/usr/local/bin/bash" + NIC="rtl8139" ;; - freebsd14-1s) - OSNAME="FreeBSD 14.1-STABLE" + freebsd14-2s) + OSNAME="FreeBSD 14.2-STABLE" OSv="freebsd14.0" - URLzs="$FREEBSD/amd64-freebsd-14.1-STABLE.qcow2.zst" + URLzs="$FREEBSD/amd64-freebsd-14.2-STABLE.qcow2.zst" BASH="/usr/local/bin/bash" ;; freebsd15-0c) @@ -112,11 +122,6 @@ case "$OS" in MIRROR="http://opensuse-mirror-gce-us.susecloud.net" URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2" ;; - ubuntu20) - OSNAME="Ubuntu 20.04" - OSv="ubuntu20.04" - URL="$UBMIRROR/focal/current/focal-server-cloudimg-amd64.img" - ;; ubuntu22) OSNAME="Ubuntu 22.04" OSv="ubuntu22.04" @@ -140,7 +145,7 @@ echo "ENV=$ENV" >> $ENV # result path echo 'RESPATH="/var/tmp/test_results"' >> $ENV -# FreeBSD 13 has problems with: e1000+virtio +# FreeBSD 13 has problems with: 
e1000 and virtio echo "NIC=$NIC" >> $ENV # freebsd15 -> used in zfs-qemu.yml @@ -152,6 +157,14 @@ echo "OSv=\"$OSv\"" >> $ENV # FreeBSD 15 (Current) -> used for summary echo "OSNAME=\"$OSNAME\"" >> $ENV +# default vm count for testing +VMs=2 +echo "VMs=\"$VMs\"" >> $ENV + +# default cpu count for testing VMs +CPU=2 +echo "CPU=\"$CPU\"" >> $ENV + sudo mkdir -p "/mnt/tests" sudo chown -R $(whoami) /mnt/tests @@ -213,6 +226,22 @@ sudo virt-install \ --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \ --import --noautoconsole >/dev/null +# enable KSM on Linux +if [ ${OS:0:7} != "freebsd" ]; then + sudo virsh dommemstat --domain "openzfs" --period 5 + sudo virsh node-memory-tune 100 50 1 + echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null +fi + +# Give the VMs hostnames so we don't have to refer to them with +# hardcoded IP addresses. +# +# vm0: Initial VM we install dependencies and build ZFS on. +# vm1..2: Testing VMs +for ((i=0; i<=VMs; i++)); do + echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts +done + # in case the directory isn't there already mkdir -p $HOME/.ssh diff --git a/.github/workflows/scripts/qemu-3-deps-vm.sh b/.github/workflows/scripts/qemu-3-deps-vm.sh new file mode 100755 index 000000000000..a581b13c2f58 --- /dev/null +++ b/.github/workflows/scripts/qemu-3-deps-vm.sh @@ -0,0 +1,260 @@ +#!/usr/bin/env bash + +###################################################################### +# 3) install dependencies for compiling and loading +# +# $1: OS name (like 'fedora41') +# $2: (optional) Experimental Fedora kernel version, like "6.14" to +# install instead of Fedora defaults.
+###################################################################### + +set -eu + +function archlinux() { + echo "##[group]Running pacman -Syu" + sudo btrfs filesystem resize max / + sudo pacman -Syu --noconfirm + echo "##[endgroup]" + + echo "##[group]Install Development Tools" + sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \ + fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \ + parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \ + samba sysstat rng-tools rsync wget xxhash + echo "##[endgroup]" +} + +function debian() { + export DEBIAN_FRONTEND="noninteractive" + + echo "##[group]Running apt-get update+upgrade" + sudo apt-get update -y + sudo apt-get upgrade -y + echo "##[endgroup]" + + echo "##[group]Install Development Tools" + sudo apt-get install -y \ + acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \ + fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \ + libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \ + libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \ + libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \ + lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \ + python3-cffi python3-dev python3-distlib python3-packaging \ + python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \ + rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev + echo "##[endgroup]" +} + +function freebsd() { + export ASSUME_ALWAYS_YES="YES" + + echo "##[group]Install Development Tools" + sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \ + gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \ + pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash + sudo pkg install -xy \ + '^samba4[[:digit:]]+$' \ + '^py3[[:digit:]]+-cffi$' \ + '^py3[[:digit:]]+-sysctl$' \ + 
'^py3[[:digit:]]+-setuptools$' \ + '^py3[[:digit:]]+-packaging$' + echo "##[endgroup]" +} + +# common packages for: almalinux, centos, redhat +function rhel() { + echo "##[group]Running dnf update" + echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf + sudo dnf clean all + sudo dnf update -y --setopt=fastestmirror=1 --refresh + echo "##[endgroup]" + + echo "##[group]Install Development Tools" + + # Alma wants "Development Tools", Fedora 41 wants "development-tools" + if ! sudo dnf group install -y "Development Tools" ; then + echo "Trying 'development-tools' instead of 'Development Tools'" + sudo dnf group install -y development-tools + fi + + sudo dnf install -y \ + acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \ + gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \ + libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \ + ncompress libselinux-devel libtirpc-devel libtool libudev-devel \ + libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \ + parted perf python3 python3-cffi python3-devel python3-packaging \ + kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \ + rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \ + zlib-devel + echo "##[endgroup]" +} + +function tumbleweed() { + echo "##[group]Running zypper is TODO!" + sleep 23456 + echo "##[endgroup]" +} + +# $1: Kernel version to install (like '6.14rc7') +function install_fedora_experimental_kernel { + + our_version="$1" + sudo dnf -y copr enable @kernel-vanilla/stable + sudo dnf -y copr enable @kernel-vanilla/mainline + all="$(sudo dnf list --showduplicates kernel-*)" + echo "Available versions:" + echo "$all" + + # You can have a bunch of minor variants of the version we want '6.14'. + # Pick the newest variant (sorted by version number). 
+ specific_version=$(echo "$all" | grep $our_version | awk '{print $2}' | sort -V | tail -n 1) + list="$(echo "$all" | grep $specific_version | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')" + sudo dnf install -y $list + sudo dnf -y copr disable @kernel-vanilla/stable + sudo dnf -y copr disable @kernel-vanilla/mainline +} + +# Install dependencies +case "$1" in + almalinux8) + echo "##[group]Enable epel and powertools repositories" + sudo dnf config-manager -y --set-enabled powertools + sudo dnf install -y epel-release + echo "##[endgroup]" + rhel + echo "##[group]Install kernel-abi-whitelists" + sudo dnf install -y kernel-abi-whitelists + echo "##[endgroup]" + ;; + almalinux9|almalinux10|centos-stream9|centos-stream10) + echo "##[group]Enable epel and crb repositories" + sudo dnf config-manager -y --set-enabled crb + sudo dnf install -y epel-release + echo "##[endgroup]" + rhel + echo "##[group]Install kernel-abi-stablelists" + sudo dnf install -y kernel-abi-stablelists + echo "##[endgroup]" + ;; + archlinux) + archlinux + ;; + debian*) + echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections + debian + echo "##[group]Install Debian specific" + sudo apt-get install -yq linux-perf dh-sequence-dkms + echo "##[endgroup]" + ;; + fedora*) + rhel + sudo dnf install -y libunwind-devel + + # Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script' + sudo dnf install -y util-linux-script || true + + # Optional: Install an experimental kernel ($2 = kernel version) + if [ -n "${2:-}" ] ; then + install_fedora_experimental_kernel "$2" + fi + ;; + freebsd*) + freebsd + ;; + tumbleweed) + tumbleweed + ;; + ubuntu*) + debian + echo "##[group]Install Ubuntu specific" + sudo apt-get install -yq linux-tools-common libtirpc-dev \ + linux-modules-extra-$(uname -r) + sudo apt-get install -yq dh-sequence-dkms + echo "##[endgroup]" + echo "##[group]Delete Ubuntu OpenZFS modules" + for i 
in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done + echo "##[endgroup]" + ;; +esac + +# This script is used for checkstyle + zloop deps also. +# Install only the needed packages and exit - when used this way. +test -z "${ONLY_DEPS:-}" || exit 0 + +# Start services +echo "##[group]Enable services" +case "$1" in + freebsd*) + # add virtio things + echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf + for i in balloon blk console random scsi; do + echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf + done + echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab + sudo -E mount /dev/fd + sudo -E touch /etc/zfs/exports + sudo -E sysrc mountd_flags="/etc/zfs/exports" + echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null + sudo -E service nfsd enable + sudo -E service qemu-guest-agent enable + sudo -E service samba_server enable + ;; + debian*|ubuntu*) + sudo -E systemctl enable nfs-kernel-server + sudo -E systemctl enable qemu-guest-agent + sudo -E systemctl enable smbd + ;; + *) + # All other linux distros + sudo -E systemctl enable nfs-server + sudo -E systemctl enable qemu-guest-agent + sudo -E systemctl enable smb + ;; +esac +echo "##[endgroup]" + +# Setup Kernel cmdline +CMDLINE="console=tty0 console=ttyS0,115200n8" +CMDLINE="$CMDLINE selinux=0" +CMDLINE="$CMDLINE random.trust_cpu=on" +CMDLINE="$CMDLINE no_timer_check" +case "$1" in + almalinux*|centos*|fedora*) + GRUB_CFG="/boot/grub2/grub.cfg" + GRUB_MKCONFIG="grub2-mkconfig" + CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0" + echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \ + | sudo tee -a /etc/default/grub >/dev/null + ;; + ubuntu24) + GRUB_CFG="/boot/grub/grub.cfg" + GRUB_MKCONFIG="grub-mkconfig" + echo 'GRUB_DISABLE_OS_PROBER="false"' \ + | sudo tee -a /etc/default/grub >/dev/null + ;; + *) + GRUB_CFG="/boot/grub/grub.cfg" + GRUB_MKCONFIG="grub-mkconfig" + ;; +esac + +case "$1" in + archlinux|freebsd*) + true + ;; + *) + echo "##[group]Edit 
kernel cmdline" + sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true + echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \ + | sudo tee -a /etc/default/grub >/dev/null + sudo $GRUB_MKCONFIG -o $GRUB_CFG + echo "##[endgroup]" + ;; +esac + +# reset cloud-init configuration and poweroff +sudo cloud-init clean --logs +sleep 2 && sudo poweroff & +exit 0 diff --git a/.github/workflows/scripts/qemu-3-deps.sh b/.github/workflows/scripts/qemu-3-deps.sh index 96979cd02e09..267ae4ad3c7b 100755 --- a/.github/workflows/scripts/qemu-3-deps.sh +++ b/.github/workflows/scripts/qemu-3-deps.sh @@ -1,229 +1,28 @@ -#!/usr/bin/env bash - ###################################################################### -# 3) install dependencies for compiling and loading +# 3) Wait for VM to boot from previous step and launch dependencies +# script on it. +# +# $1: OS name (like 'fedora41') +# $2: (optional) Experimental kernel version to install on fedora, +# like "6.14". ###################################################################### -set -eu - -function archlinux() { - echo "##[group]Running pacman -Syu" - sudo btrfs filesystem resize max / - sudo pacman -Syu --noconfirm - echo "##[endgroup]" - - echo "##[group]Install Development Tools" - sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \ - fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \ - parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \ - samba sysstat rng-tools rsync wget xxhash - echo "##[endgroup]" -} - -function debian() { - export DEBIAN_FRONTEND="noninteractive" - - echo "##[group]Running apt-get update+upgrade" - sudo apt-get update -y - sudo apt-get upgrade -y - echo "##[endgroup]" - - echo "##[group]Install Development Tools" - sudo apt-get install -y \ - acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \ - fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \ - libaio-dev libattr1-dev libblkid-dev 
libcurl4-openssl-dev libdevmapper-dev \ - libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \ - libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \ - lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \ - python3-cffi python3-dev python3-distlib python3-packaging \ - python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \ - rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev - echo "##[endgroup]" -} - -function freebsd() { - export ASSUME_ALWAYS_YES="YES" - - echo "##[group]Install Development Tools" - sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \ - gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \ - pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash - sudo pkg install -xy \ - '^samba4[[:digit:]]+$' \ - '^py3[[:digit:]]+-cffi$' \ - '^py3[[:digit:]]+-sysctl$' \ - '^py3[[:digit:]]+-packaging$' - echo "##[endgroup]" -} - -# common packages for: almalinux, centos, redhat -function rhel() { - echo "##[group]Running dnf update" - echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf - sudo dnf clean all - sudo dnf update -y --setopt=fastestmirror=1 --refresh - echo "##[endgroup]" - - echo "##[group]Install Development Tools" - - # Alma wants "Development Tools", Fedora 41 wants "development-tools" - if ! 
sudo dnf group install -y "Development Tools" ; then - echo "Trying 'development-tools' instead of 'Development Tools'" - sudo dnf group install -y development-tools - fi - - sudo dnf install -y \ - acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \ - gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \ - libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \ - ncompress libselinux-devel libtirpc-devel libtool libudev-devel \ - libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \ - parted perf python3 python3-cffi python3-devel python3-packaging \ - kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \ - rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \ - zlib-devel - echo "##[endgroup]" -} - -function tumbleweed() { - echo "##[group]Running zypper is TODO!" - sleep 23456 - echo "##[endgroup]" -} - -# Install dependencies -case "$1" in - almalinux8) - echo "##[group]Enable epel and powertools repositories" - sudo dnf config-manager -y --set-enabled powertools - sudo dnf install -y epel-release - echo "##[endgroup]" - rhel - echo "##[group]Install kernel-abi-whitelists" - sudo dnf install -y kernel-abi-whitelists - echo "##[endgroup]" - ;; - almalinux9|centos-stream9) - echo "##[group]Enable epel and crb repositories" - sudo dnf config-manager -y --set-enabled crb - sudo dnf install -y epel-release - echo "##[endgroup]" - rhel - echo "##[group]Install kernel-abi-stablelists" - sudo dnf install -y kernel-abi-stablelists - echo "##[endgroup]" - ;; - archlinux) - archlinux - ;; - debian*) - echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections - debian - echo "##[group]Install Debian specific" - sudo apt-get install -yq linux-perf dh-sequence-dkms - echo "##[endgroup]" - ;; - fedora*) - rhel - ;; - freebsd*) - freebsd - ;; - tumbleweed) - tumbleweed - ;; - ubuntu*) - debian - echo "##[group]Install Ubuntu specific" - sudo apt-get 
install -yq linux-tools-common libtirpc-dev \ - linux-modules-extra-$(uname -r) - if [ "$1" != "ubuntu20" ]; then - sudo apt-get install -yq dh-sequence-dkms - fi - echo "##[endgroup]" - echo "##[group]Delete Ubuntu OpenZFS modules" - for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done - echo "##[endgroup]" - ;; -esac - -# This script is used for checkstyle + zloop deps also. -# Install only the needed packages and exit - when used this way. -test -z "${ONLY_DEPS:-}" || exit 0 - -# Start services -echo "##[group]Enable services" -case "$1" in - freebsd*) - # add virtio things - echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf - for i in balloon blk console random scsi; do - echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf - done - echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab - sudo -E mount /dev/fd - sudo -E touch /etc/zfs/exports - sudo -E sysrc mountd_flags="/etc/zfs/exports" - echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null - sudo -E service nfsd enable - sudo -E service qemu-guest-agent enable - sudo -E service samba_server enable - ;; - debian*|ubuntu*) - sudo -E systemctl enable nfs-kernel-server - sudo -E systemctl enable qemu-guest-agent - sudo -E systemctl enable smbd - ;; - *) - # All other linux distros - sudo -E systemctl enable nfs-server - sudo -E systemctl enable qemu-guest-agent - sudo -E systemctl enable smb - ;; -esac -echo "##[endgroup]" - -# Setup Kernel cmdline -CMDLINE="console=tty0 console=ttyS0,115200n8" -CMDLINE="$CMDLINE selinux=0" -CMDLINE="$CMDLINE random.trust_cpu=on" -CMDLINE="$CMDLINE no_timer_check" -case "$1" in - almalinux*|centos*|fedora*) - GRUB_CFG="/boot/grub2/grub.cfg" - GRUB_MKCONFIG="grub2-mkconfig" - CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0" - echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \ - | sudo tee -a /etc/default/grub >/dev/null - ;; - ubuntu24) - GRUB_CFG="/boot/grub/grub.cfg" - GRUB_MKCONFIG="grub-mkconfig" - echo 
'GRUB_DISABLE_OS_PROBER="false"' \ - | sudo tee -a /etc/default/grub >/dev/null - ;; - *) - GRUB_CFG="/boot/grub/grub.cfg" - GRUB_MKCONFIG="grub-mkconfig" - ;; -esac - -case "$1" in - archlinux|freebsd*) - true - ;; - *) - echo "##[group]Edit kernel cmdline" - sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true - echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \ - | sudo tee -a /etc/default/grub >/dev/null - sudo $GRUB_MKCONFIG -o $GRUB_CFG - echo "##[endgroup]" - ;; -esac - -# reset cloud-init configuration and poweroff -sudo cloud-init clean --logs -sleep 2 && sudo poweroff & -exit 0 +.github/workflows/scripts/qemu-wait-for-vm.sh vm0 + +# SPECIAL CASE: +# +# If the user passed in an experimental kernel version to test on Fedora, +# we need to update the kernel version in zfs's META file to allow the +# build to happen. We update our local copy of META here, since we know +# it will be rsync'd up in the next step. +if [ -n "${2:-}" ] ; then + sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META +fi + +scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh +PID=`pidof /usr/bin/qemu-system-x86_64` +ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@" +# wait for poweroff to succeed +tail --pid=$PID -f /dev/null +sleep 5 # avoid this: "error: Domain is already active" +rm -f $HOME/.ssh/known_hosts diff --git a/.github/workflows/scripts/qemu-4-build-vm.sh b/.github/workflows/scripts/qemu-4-build-vm.sh new file mode 100755 index 000000000000..17e976ebcc39 --- /dev/null +++ b/.github/workflows/scripts/qemu-4-build-vm.sh @@ -0,0 +1,379 @@ +#!/usr/bin/env bash + +###################################################################### +# 4) configure and build openzfs modules. This is run on the VMs. 
+# +# Usage: +# +# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--poweroff] +# [--release][--repo][--tarball] +# +# OS: OS name like 'fedora41' +# --enable-debug: Build RPMs with '--enable-debug' (for testing) +# --dkms: Build DKMS RPMs as well +# --poweroff: Power-off the VM after building +# --release Build zfs-release*.rpm as well +# --repo After building everything, copy RPMs into /tmp/repo +# in the ZFS RPM repository file structure. Also +# copy tarballs if they were built. +# --tarball: Also build a tarball of ZFS source +###################################################################### + +ENABLE_DEBUG="" +DKMS="" +POWEROFF="" +RELEASE="" +REPO="" +TARBALL="" +while [[ $# -gt 0 ]]; do + case $1 in + --enable-debug) + ENABLE_DEBUG=1 + shift + ;; + --dkms) + DKMS=1 + shift + ;; + --poweroff) + POWEROFF=1 + shift + ;; + --release) + RELEASE=1 + shift + ;; + --repo) + REPO=1 + shift + ;; + --tarball) + TARBALL=1 + shift + ;; + *) + OS=$1 + shift + ;; + esac +done + +set -eu + +function run() { + LOG="/var/tmp/build-stderr.txt" + echo "****************************************************" + echo "$(date) ($*)" + echo "****************************************************" + ($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG + if [ -f /tmp/rv ]; then + RV=$(cat /tmp/rv) + echo "****************************************************" + echo "exit with value=$RV ($*)" + echo "****************************************************" + echo 1 > /var/tmp/build-exitcode.txt + exit $RV + fi +} + +# Look at the RPMs in the current directory and copy/move them to +# /tmp/repo, using the directory structure we use for the ZFS RPM repos. 
+# +# For example: +# /tmp/repo/epel-testing/9.5 +# /tmp/repo/epel-testing/9.5/SRPMS +# /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm +# /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm +# /tmp/repo/epel-testing/9.5/kmod +# /tmp/repo/epel-testing/9.5/kmod/x86_64 +# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug +# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm +# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm +# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm +# ... +function copy_rpms_to_repo { + # Pick a RPM to query. It doesn't matter which one - we just want to extract + # the 'Build Host' value from it. + rpm=$(ls zfs-*.rpm | head -n 1) + + # Get zfs version '2.2.99' + zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}') + + # Get "2.1" or "2.2" + zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+) + + # Get 'almalinux9.5' or 'fedora41' type string + build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}') + + # Get '9.5' or '41' OS version + os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$') + + # Our ZFS version and OS name will determine which repo the RPMs + # will go in (regular or testing). Fedora always gets the newest + # releases, and Alma gets the older releases. + case $build_host in + almalinux*) + case $zfs_major in + 2.2) + d="epel" + ;; + *) + d="epel-testing" + ;; + esac + ;; + fedora*) + d="fedora" + ;; + esac + + prefix=/tmp/repo + dst="$prefix/$d/$os_ver" + + # Special case: move zfs-release*.rpm out of the way first (if we built them). + # This will make filtering the other RPMs easier. 
+ mkdir -p $dst + mv zfs-release*.rpm $dst || true + + # Copy source RPMs + mkdir -p $dst/SRPMS + cp $(ls *.src.rpm) $dst/SRPMS/ + + if [[ "$build_host" =~ "almalinux" ]] ; then + # Copy kmods+userspace + mkdir -p $dst/kmod/x86_64/debug + cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64 + cp *debuginfo*.rpm $dst/kmod/x86_64/debug + fi + + if [ -n "$DKMS" ] ; then + # Copy dkms+userspace + mkdir -p $dst/x86_64 + cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64 + fi + + # Copy debug + mkdir -p $dst/x86_64/debug + cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug +} + +function freebsd() { + extra="${1:-}" + + export MAKE="gmake" + echo "##[group]Autogen.sh" + run ./autogen.sh + echo "##[endgroup]" + + echo "##[group]Configure" + run ./configure \ + --prefix=/usr/local \ + --with-libintl-prefix=/usr/local \ + --enable-pyzfs \ + --enable-debuginfo $extra + echo "##[endgroup]" + + echo "##[group]Build" + run gmake -j$(sysctl -n hw.ncpu) + echo "##[endgroup]" + + echo "##[group]Install" + run sudo gmake install + echo "##[endgroup]" +} + +function linux() { + extra="${1:-}" + + echo "##[group]Autogen.sh" + run ./autogen.sh + echo "##[endgroup]" + + echo "##[group]Configure" + run ./configure \ + --prefix=/usr \ + --enable-pyzfs \ + --enable-debuginfo $extra + echo "##[endgroup]" + + echo "##[group]Build" + run make -j$(nproc) + echo "##[endgroup]" + + echo "##[group]Install" + run sudo make install + echo "##[endgroup]" +} + +function rpm_build_and_install() { + extra="${1:-}" + + # Build RPMs with XZ compression by default (since gzip decompression is slow) + echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros + + echo "##[group]Autogen.sh" + run ./autogen.sh + echo "##[endgroup]" + + echo "##[group]Configure" + run ./configure --enable-debuginfo $extra + echo "##[endgroup]" + + echo "##[group]Build" + run make pkg-kmod pkg-utils + echo "##[endgroup]" + + if [ -n "$DKMS" ] ; then + echo "##[group]DKMS" + make rpm-dkms + echo 
"##[endgroup]" + fi + + if [ -n "$REPO" ] ; then + echo "Skipping install since we're only building RPMs and nothing else" + else + echo "##[group]Install" + run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm') + echo "##[endgroup]" + fi + + # Optionally build the zfs-release.*.rpm + if [ -n "$RELEASE" ] ; then + echo "##[group]Release" + pushd ~ + sudo dnf -y install rpm-build || true + # Check out a sparse copy of zfsonlinux.github.com.git so we don't get + # all the binaries. We just need a few kilobytes of files to build RPMs. + git clone --depth 1 --no-checkout \ + https://github.com/zfsonlinux/zfsonlinux.github.com.git + + cd zfsonlinux.github.com + git sparse-checkout set zfs-release + git checkout + cd zfs-release + + mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD} + cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES + cp zfs-release.spec ~/rpmbuild/SPECS/ + rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec + + # ZFS release RPMs are built. Copy them to the ~/zfs directory just to + # keep all the RPMs in the same place. + cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs + cp ~/rpmbuild/SRPMS/*.rpm ~/zfs + + popd + rm -fr ~/rpmbuild + echo "##[endgroup]" + fi + + if [ -n "$REPO" ] ; then + echo "##[group]Repo" + copy_rpms_to_repo + echo "##[endgroup]" + fi +} + +function deb_build_and_install() { + extra="${1:-}" + + echo "##[group]Autogen.sh" + run ./autogen.sh + echo "##[endgroup]" + + echo "##[group]Configure" + run ./configure \ + --prefix=/usr \ + --enable-pyzfs \ + --enable-debuginfo $extra + echo "##[endgroup]" + + echo "##[group]Build" + run make native-deb-kmod native-deb-utils + echo "##[endgroup]" + + echo "##[group]Install" + # Do kmod install. Note that when you build the native debs, the + # packages themselves are placed in parent directory '../' rather than + # in the source directory like the rpms are. 
+ run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \ + | grep -Ev 'dkms|dracut') + echo "##[endgroup]" +} + +function build_tarball { + if [ -n "$REPO" ] ; then + ./autogen.sh + ./configure --with-config=srpm + make dist + mkdir -p /tmp/repo/releases + # The tarball name is based off of 'Version' field in the META file. + mv *.tar.gz /tmp/repo/releases/ + fi +} + +# Debug: show kernel cmdline +if [ -f /proc/cmdline ] ; then + cat /proc/cmdline || true +fi + +# Set our hostname to our OS name and version number. Specifically, we set the +# major and minor number so that when we query the Build Host field in the RPMs +# we build, we can see what specific version of Fedora/Almalinux we were using +# to build them. This is helpful for matching up KMOD versions. +# +# Examples: +# +# rhel8.10 +# almalinux9.5 +# fedora42 +source /etc/os-release +sudo hostname "$ID$VERSION_ID" + +# save some sysinfo +uname -a > /var/tmp/uname.txt + +cd $HOME/zfs +export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" + +extra="" +if [ -n "$ENABLE_DEBUG" ] ; then + extra="--enable-debug" +fi + +# build +case "$OS" in + freebsd*) + freebsd "$extra" + ;; + alma*|centos*) + rpm_build_and_install "--with-spec=redhat $extra" + ;; + fedora*) + rpm_build_and_install "$extra" + + # Historically, we've always built the release tarballs on Fedora, since + # there was one instance long ago where we built them on CentOS 7, and they + # didn't work correctly for everyone. 
+ if [ -n "$TARBALL" ] ; then + build_tarball + fi + ;; + debian*|ubuntu*) + deb_build_and_install "$extra" + ;; + *) + linux "$extra" + ;; +esac + + +# building the zfs module was ok +echo 0 > /var/tmp/build-exitcode.txt + +# reset cloud-init configuration and poweroff +if [ -n "$POWEROFF" ] ; then + sudo cloud-init clean --logs + sync && sleep 2 && sudo poweroff & +fi +exit 0 diff --git a/.github/workflows/scripts/qemu-4-build.sh b/.github/workflows/scripts/qemu-4-build.sh index 955f605f5bce..63c9bccaa446 100755 --- a/.github/workflows/scripts/qemu-4-build.sh +++ b/.github/workflows/scripts/qemu-4-build.sh @@ -3,151 +3,9 @@ ###################################################################### # 4) configure and build openzfs modules ###################################################################### +echo "Build modules in QEMU machine" -set -eu +# Bring our VM back up and copy over ZFS source +.github/workflows/scripts/qemu-prepare-for-build.sh -function run() { - LOG="/var/tmp/build-stderr.txt" - echo "****************************************************" - echo "$(date) ($*)" - echo "****************************************************" - ($@ || echo $? 
> /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG - if [ -f /tmp/rv ]; then - RV=$(cat /tmp/rv) - echo "****************************************************" - echo "exit with value=$RV ($*)" - echo "****************************************************" - echo 1 > /var/tmp/build-exitcode.txt - exit $RV - fi -} - -function freebsd() { - export MAKE="gmake" - echo "##[group]Autogen.sh" - run ./autogen.sh - echo "##[endgroup]" - - echo "##[group]Configure" - run ./configure \ - --prefix=/usr/local \ - --with-libintl-prefix=/usr/local \ - --enable-pyzfs \ - --enable-debug \ - --enable-debuginfo - echo "##[endgroup]" - - echo "##[group]Build" - run gmake -j$(sysctl -n hw.ncpu) - echo "##[endgroup]" - - echo "##[group]Install" - run sudo gmake install - echo "##[endgroup]" -} - -function linux() { - echo "##[group]Autogen.sh" - run ./autogen.sh - echo "##[endgroup]" - - echo "##[group]Configure" - run ./configure \ - --prefix=/usr \ - --enable-pyzfs \ - --enable-debug \ - --enable-debuginfo - echo "##[endgroup]" - - echo "##[group]Build" - run make -j$(nproc) - echo "##[endgroup]" - - echo "##[group]Install" - run sudo make install - echo "##[endgroup]" -} - -function rpm_build_and_install() { - EXTRA_CONFIG="${1:-}" - echo "##[group]Autogen.sh" - run ./autogen.sh - echo "##[endgroup]" - - echo "##[group]Configure" - run ./configure --enable-debug --enable-debuginfo $EXTRA_CONFIG - echo "##[endgroup]" - - echo "##[group]Build" - run make pkg-kmod pkg-utils - echo "##[endgroup]" - - echo "##[group]Install" - run sudo dnf -y --nobest install $(ls *.rpm | grep -v src.rpm) - echo "##[endgroup]" - -} - -function deb_build_and_install() { -echo "##[group]Autogen.sh" - run ./autogen.sh - echo "##[endgroup]" - - echo "##[group]Configure" - run ./configure \ - --prefix=/usr \ - --enable-pyzfs \ - --enable-debug \ - --enable-debuginfo - echo "##[endgroup]" - - echo "##[group]Build" - run make native-deb-kmod native-deb-utils - echo "##[endgroup]" - - echo "##[group]Install" - 
# Do kmod install. Note that when you build the native debs, the - # packages themselves are placed in parent directory '../' rather than - # in the source directory like the rpms are. - run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \ - | grep -Ev 'dkms|dracut') - echo "##[endgroup]" -} - -# Debug: show kernel cmdline -if [ -f /proc/cmdline ] ; then - cat /proc/cmdline || true -fi - -# save some sysinfo -uname -a > /var/tmp/uname.txt - -cd $HOME/zfs -export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" - -# build -case "$1" in - freebsd*) - freebsd - ;; - alma*|centos*) - rpm_build_and_install "--with-spec=redhat" - ;; - fedora*) - rpm_build_and_install - ;; - debian*|ubuntu*) - deb_build_and_install - ;; - *) - linux - ;; -esac - -# building the zfs module was ok -echo 0 > /var/tmp/build-exitcode.txt - -# reset cloud-init configuration and poweroff -sudo cloud-init clean --logs -sync && sleep 2 && sudo poweroff & -exit 0 +ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@ diff --git a/.github/workflows/scripts/qemu-5-setup.sh b/.github/workflows/scripts/qemu-5-setup.sh index bc40e8894b22..6bf10024a1a6 100755 --- a/.github/workflows/scripts/qemu-5-setup.sh +++ b/.github/workflows/scripts/qemu-5-setup.sh @@ -14,39 +14,33 @@ PID=$(pidof /usr/bin/qemu-system-x86_64) tail --pid=$PID -f /dev/null sudo virsh undefine openzfs -# default values per test vm: -VMs=2 -CPU=2 - # cpu pinning CPUSET=("0,1" "2,3") case "$OS" in freebsd*) - # FreeBSD can't be optimized via ksmtuned + # FreeBSD needs only 6GiB RAM=6 ;; *) - # Linux can be optimized via ksmtuned + # Linux needs more memory, but can be optimized to share it via KSM RAM=8 ;; esac -# this can be different for each distro -echo "VMs=$VMs" >> $ENV - # create snapshot we can clone later sudo zfs snapshot zpool/openzfs@now # setup the testing vm's PUBKEY=$(cat ~/.ssh/id_ed25519.pub) -for i in $(seq 1 $VMs); do +# start testing VMs +for ((i=1; i<=VMs; i++)); do echo "Creating disk for 
vm$i..." DISK="/dev/zvol/zpool/vm$i" FORMAT="raw" - sudo zfs clone zpool/openzfs@now zpool/vm$i - sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2 + sudo zfs clone zpool/openzfs@now zpool/vm$i-system + sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests cat < /tmp/user-data #cloud-config @@ -83,23 +77,21 @@ EOF --graphics none \ --cloud-init user-data=/tmp/user-data \ --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \ - --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \ - --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \ + --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \ + --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \ --import --noautoconsole >/dev/null done -# check the memory state from time to time +# generate some memory stats cat < cronjob.sh -# $OS exec 1>>/var/tmp/stats.txt exec 2>&1 -echo "*******************************************************" -date +echo "********************************************************************************" uptime free -m -df -h /mnt/tests zfs list EOF + sudo chmod +x cronjob.sh sudo mv -f cronjob.sh /root/cronjob.sh echo '*/5 * * * * /root/cronjob.sh' > crontab.txt @@ -108,17 +100,15 @@ rm crontab.txt # check if the machines are okay echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)" -for i in $(seq 1 $VMs); do - while true; do - ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break - done +for ((i=1; i<=VMs; i++)); do + .github/workflows/scripts/qemu-wait-for-vm.sh vm$i done echo "All $VMs VMs are up now." 
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt # - ttyS0 on the VM corresponds to a local /dev/pty/N entry # - use 'virsh ttyconsole' to lookup the /dev/pty/N entry -for i in $(seq 1 $VMs); do +for ((i=1; i<=VMs; i++)); do mkdir -p $RESPATH/vm$i read "pty" <<< $(sudo virsh ttyconsole vm$i) sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" & diff --git a/.github/workflows/scripts/qemu-6-tests.sh b/.github/workflows/scripts/qemu-6-tests.sh index 2f023198bbf6..e8e6adecd62f 100755 --- a/.github/workflows/scripts/qemu-6-tests.sh +++ b/.github/workflows/scripts/qemu-6-tests.sh @@ -45,7 +45,7 @@ if [ -z ${1:-} ]; then echo 0 > /tmp/ctr date "+%s" > /tmp/tsstart - for i in $(seq 1 $VMs); do + for ((i=1; i<=VMs; i++)); do IP="192.168.122.1$i" daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \ $SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE @@ -58,7 +58,7 @@ if [ -z ${1:-} ]; then done # wait for all vm's to finish - for i in $(seq 1 $VMs); do + for ((i=1; i<=VMs; i++)); do tail --pid=$(cat vm${i}.pid) -f /dev/null pid=$(cat vm${i}log.pid) rm -f vm${i}log.pid @@ -72,19 +72,31 @@ fi export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin" case "$1" in freebsd*) + TDIR="/usr/local/share/zfs" sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs sudo -E ./zfs/scripts/zfs.sh - TDIR="/usr/local/share/zfs" + sudo mv -f /var/tmp/*.txt /tmp + sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null + sudo mount -o noatime /dev/vtbd1 /var/tmp + sudo chmod 1777 /var/tmp + sudo mv -f /tmp/*.txt /var/tmp ;; *) # use xfs @ /var/tmp for all distros + TDIR="/usr/share/zfs" + sudo -E modprobe zfs sudo mv -f /var/tmp/*.txt /tmp sudo mkfs.xfs -fq /dev/vdb sudo mount -o noatime /dev/vdb /var/tmp sudo chmod 1777 /var/tmp sudo mv -f /tmp/*.txt /var/tmp - sudo -E modprobe zfs - TDIR="/usr/share/zfs" + ;; +esac + +# enable io_uring on el9/el10 +case "$1" in + almalinux9|almalinux10|centos-stream*) + sudo sysctl kernel.io_uring_disabled=0 > /dev/null ;; esac diff 
--git a/.github/workflows/scripts/qemu-7-prepare.sh b/.github/workflows/scripts/qemu-7-prepare.sh index a5fbd7213161..98a5c24c2521 100755 --- a/.github/workflows/scripts/qemu-7-prepare.sh +++ b/.github/workflows/scripts/qemu-7-prepare.sh @@ -28,15 +28,16 @@ BASE="$HOME/work/zfs/zfs" MERGE="$BASE/.github/workflows/scripts/merge_summary.awk" # catch result files of testings (vm's should be there) -for i in $(seq 1 $VMs); do - rsync -arL zfs@192.168.122.1$i:$RESPATH/current $RESPATH/vm$i || true - scp zfs@192.168.122.1$i:"/var/tmp/*.txt" $RESPATH/vm$i || true +for ((i=1; i<=VMs; i++)); do + rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true + scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true + scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true done cp -f /var/tmp/*.txt $RESPATH || true cd $RESPATH # prepare result files for summary -for i in $(seq 1 $VMs); do +for ((i=1; i<=VMs; i++)); do file="vm$i/build-stderr.txt" test -s $file && mv -f $file build-stderr.txt diff --git a/.github/workflows/scripts/qemu-8-summary.sh b/.github/workflows/scripts/qemu-8-summary.sh index 26dbab28323b..7d1e16567ab4 100755 --- a/.github/workflows/scripts/qemu-8-summary.sh +++ b/.github/workflows/scripts/qemu-8-summary.sh @@ -45,7 +45,7 @@ fi echo -e "\nFull logs for download:\n $1\n" -for i in $(seq 1 $VMs); do +for ((i=1; i<=VMs; i++)); do rv=$(cat vm$i/tests-exitcode.txt) if [ $rv = 0 ]; then diff --git a/.github/workflows/scripts/qemu-prepare-for-build.sh b/.github/workflows/scripts/qemu-prepare-for-build.sh new file mode 100755 index 000000000000..a5a9e422ba6e --- /dev/null +++ b/.github/workflows/scripts/qemu-prepare-for-build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Helper script to run after installing dependencies. This brings the VM back +# up and copies over the zfs source directory. 
+echo "Build modules in QEMU machine" +sudo virsh start openzfs +.github/workflows/scripts/qemu-wait-for-vm.sh vm0 +rsync -ar $HOME/work/zfs/zfs zfs@vm0:./ diff --git a/.github/workflows/scripts/qemu-test-repo-vm.sh b/.github/workflows/scripts/qemu-test-repo-vm.sh new file mode 100755 index 000000000000..e3cafcbb67cc --- /dev/null +++ b/.github/workflows/scripts/qemu-test-repo-vm.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# +# Do a test install of ZFS from an external repository. +# +# USAGE: +# +# ./qemu-test-repo-vm [URL] +# +# URL: URL to use instead of http://download.zfsonlinux.org +# If blank, use the default repo from zfs-release RPM. + +set -e + +source /etc/os-release +OS="$ID" +VERSION="$VERSION_ID" + +ALTHOST="" +if [ -n "$1" ] ; then + ALTHOST="$1" +fi + +# Write summary to /tmp/repo so our artifacts scripts pick it up +mkdir /tmp/repo +SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt + +# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod' +# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to +# install from. Blank means use default from zfs-release RPM. +function test_install { + repo=$1 + host="" + if [ -n "$2" ] ; then + host=$2 + fi + + args="--disablerepo=zfs --enablerepo=$repo" + + # If we supplied an alternate repo URL, and have not already edited + # zfs.repo, then update the repo file. + if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then + sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo + fi + + sudo dnf -y install $args zfs zfs-test + + # Load modules and create a simple pool as a sanity test. 
+ sudo /usr/share/zfs/zfs.sh -r + truncate -s 100M /tmp/file + sudo zpool create tank /tmp/file + sudo zpool status + + # Print out repo name, rpm installed (kmod or dkms), and repo URL + baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}') + package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms') + + echo "$repo $package $baseurl" >> $SUMMARY + + sudo zpool destroy tank + sudo rm /tmp/file + sudo dnf -y remove zfs +} + +echo "##[group]Installing from repo" +# The openzfs docs are the authoritative instructions for the install. Use +# the specific version of zfs-release RPM it recommends. +case $OS in +almalinux*) + url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst' + name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+') + sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1 + sudo rpm -qi zfs-release + test_install zfs $ALTHOST + test_install zfs-kmod $ALTHOST + test_install zfs-testing $ALTHOST + test_install zfs-testing-kmod $ALTHOST + ;; +fedora*) + url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst' + name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+') + sudo dnf -y install -y https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm + test_install zfs $ALTHOST + ;; +esac +echo "##[endgroup]" + +# Write out a simple version of the summary here. Later on we will collate all +# the summaries and put them into a nice table in the workflow Summary page. 
+echo "Summary: " +cat $SUMMARY diff --git a/.github/workflows/scripts/qemu-wait-for-vm.sh b/.github/workflows/scripts/qemu-wait-for-vm.sh new file mode 100755 index 000000000000..e8afdb3f7b98 --- /dev/null +++ b/.github/workflows/scripts/qemu-wait-for-vm.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Wait for a VM to boot up and become active. This is used in a number of our +# scripts. +# +# $1: VM hostname or IP address + +while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do + ssh 2>/dev/null zfs@$1 "uname -a" && break +done diff --git a/.github/workflows/scripts/replace-dupes-with-symlinks.sh b/.github/workflows/scripts/replace-dupes-with-symlinks.sh new file mode 100755 index 000000000000..5412c954ad2f --- /dev/null +++ b/.github/workflows/scripts/replace-dupes-with-symlinks.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# +# Recursively go through a directory structure and replace duplicate files with +# symlinks. This cuts down our RPM repo size by ~25%. +# +# replace-dupes-with-symlinks.sh [DIR] +# +# DIR: Directory to traverse. Defaults to current directory if not specified. +# + +src="$1" +if [ -z "$src" ] ; then + src="." +fi + +declare -A db + +pushd "$src" +while read line ; do + bn="$(basename $line)" + if [ -z "${db[$bn]}" ] ; then + # First time this file has been seen + db[$bn]="$line" + else + if diff -b "$line" "${db[$bn]}" &>/dev/null ; then + # Files are the same, make a symlink + rm "$line" + ln -sr "${db[$bn]}" "$line" + fi + fi +done <<< "$(find . -type f)" +popd diff --git a/.github/workflows/zfs-qemu-packages.yml b/.github/workflows/zfs-qemu-packages.yml new file mode 100644 index 000000000000..5b5afe746859 --- /dev/null +++ b/.github/workflows/zfs-qemu-packages.yml @@ -0,0 +1,140 @@ +# This workflow is used to build and test RPM packages. It is a +# 'workflow_dispatch' workflow, which means it gets run manually. 
+# +# The workflow has a dropdown menu with two options: +# +# Build RPMs - Build release RPMs and tarballs and put them into an artifact +# ZIP file. The directory structure used in the ZIP file mirrors +# the ZFS yum repo. +# +# Test repo - Test install the ZFS RPMs from the ZFS repo. On EL distros, this +# will do a DKMS and KMOD test install from both the regular and +# testing repos. On Fedora, it will do a DKMS install from the +# regular repo. All test install results will be displayed in the +# Summary page. Note that the workflow provides an optional +# text box where you can specify the full URL to an alternate repo. +# If left blank, it will install from the default repo from the +# zfs-release RPM (http://download.zfsonlinux.org). +# +# Most users will never need to use this workflow. It will be used primarily by +# ZFS admins for building and testing releases. +# +name: zfs-qemu-packages + +on: + workflow_dispatch: + inputs: + test_type: + type: choice + required: false + default: "Build RPMs" + description: "Build RPMs or test the repo?" 
+ options: + - "Build RPMs" + - "Test repo" + repo_url: + type: string + required: false + default: "" + description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)" +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + zfs-qemu-packages-jobs: + name: qemu-VMs + strategy: + fail-fast: false + matrix: + os: ['almalinux8', 'almalinux9', 'almalinux10', 'fedora41', 'fedora42'] + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Setup QEMU + timeout-minutes: 10 + run: .github/workflows/scripts/qemu-1-setup.sh + + - name: Start build machine + timeout-minutes: 10 + run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }} + + - name: Install dependencies + timeout-minutes: 20 + run: | + .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} + + - name: Build modules or Test repo + timeout-minutes: 30 + run: | + set -e + if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then + # Bring VM back up and copy over zfs source + .github/workflows/scripts/qemu-prepare-for-build.sh + + mkdir -p /tmp/repo + ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }} + else + .github/workflows/scripts/qemu-4-build.sh --repo --release --dkms --tarball ${{ matrix.os }} + fi + + - name: Prepare artifacts + if: always() + timeout-minutes: 10 + run: | + rsync -a zfs@vm0:/tmp/repo /tmp || true + .github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo + tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo + + - uses: actions/upload-artifact@v4 + id: artifact-upload + if: always() + with: + name: ${{ matrix.os }}-repo + path: ${{ matrix.os }}-repo.tar + compression-level: 0 + retention-days: 2 + if-no-files-found: ignore + + combine_repos: + if: always() + needs: [zfs-qemu-packages-jobs] + name: "Results" + runs-on: ubuntu-latest + steps: + - uses: 
actions/download-artifact@v4 + id: artifact-download + if: always() + - name: Test Summary + if: always() + run: | + for i in $(find . -type f -iname "*.tar") ; do + tar -xf $i -C /tmp + done + tar -cf all-repo.tar -C /tmp repo + + # If we're installing from a repo, print out the summary of the versions + # that got installed using Markdown. + if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then + cd /tmp/repo + for i in $(ls *.txt) ; do + nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')" + echo "### $nicename" >> $GITHUB_STEP_SUMMARY + echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY + echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY + awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY + done + fi + + - uses: actions/upload-artifact@v4 + id: artifact-upload2 + if: always() + with: + name: all-repo + path: all-repo.tar + compression-level: 0 + retention-days: 5 + if-no-files-found: ignore diff --git a/.github/workflows/zfs-qemu.yml b/.github/workflows/zfs-qemu.yml index e90030f4c02e..1d9899ae895f 100644 --- a/.github/workflows/zfs-qemu.yml +++ b/.github/workflows/zfs-qemu.yml @@ -3,6 +3,23 @@ name: zfs-qemu on: push: pull_request: + workflow_dispatch: + inputs: + include_stream9: + type: boolean + required: false + default: false + description: 'Test on CentOS 9 stream' + include_stream10: + type: boolean + required: false + default: false + description: 'Test on CentOS 10 stream' + fedora_kernel_ver: + type: string + required: false + default: "" + description: "(optional) Experimental kernel version to install on Fedora (like '6.14' or '6.13.3-0.rc3')" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -22,8 +39,8 @@ jobs: - name: Generate OS config and CI type id: os run: | - FULL_OS='["almalinux8", "almalinux9", "centos-stream9", "debian11", "debian12", "fedora40", "fedora41", "freebsd13-4r", "freebsd14-0r", "freebsd14-1s", "ubuntu20", "ubuntu22", "ubuntu24"]' - QUICK_OS='["almalinux8", "almalinux9", 
"debian12", "fedora41", "freebsd13-3r", "freebsd14-1r", "ubuntu24"]' + FULL_OS='["almalinux8", "almalinux9", "almalinux10", "debian11", "debian12", "fedora41", "fedora42", "freebsd13-4r", "freebsd14-2s", "freebsd15-0c", "ubuntu22", "ubuntu24"]' + QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-2r", "ubuntu24"]' # determine CI type when running on PR ci_type="full" if ${{ github.event_name == 'pull_request' }}; then @@ -36,7 +53,25 @@ jobs: else os_selection="$FULL_OS" fi - os_json=$(echo ${os_selection} | jq -c) + + if [ ${{ github.event.inputs.fedora_kernel_ver }} != "" ] ; then + # They specified a custom kernel version for Fedora. Use only + # Fedora runners. + os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]') + else + # Normal case + os_json=$(echo ${os_selection} | jq -c) + fi + + # Add optional runners + if [ "${{ github.event.inputs.include_stream9 }}" == 'true' ]; then + os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]') + fi + if [ "${{ github.event.inputs.include_stream10 }}" == 'true' ]; then + os_json=$(echo $os_json | jq -c '. 
+= ["centos-stream10"]') + fi + + echo $os_json echo "os=$os_json" >> $GITHUB_OUTPUT echo "ci_type=$ci_type" >> $GITHUB_OUTPUT @@ -46,11 +81,12 @@ jobs: strategy: fail-fast: false matrix: - # rhl: almalinux8, almalinux9, centos-stream9, fedora40, fedora41 - # debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24 + # rhl: almalinux8, almalinux9, centos-stream9, fedora41 + # debian: debian11, debian12, ubuntu22, ubuntu24 # misc: archlinux, tumbleweed - # FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-0r, freebsd14-1r - # FreeBSD Stable: freebsd13-4s, freebsd14-1s + # FreeBSD variants of 2024-12: + # FreeBSD Release: freebsd13-4r, freebsd14-2r + # FreeBSD Stable: freebsd13-4s, freebsd14-2s # FreeBSD Current: freebsd15-0c os: ${{ fromJson(needs.test-config.outputs.test_os) }} runs-on: ubuntu-24.04 @@ -69,31 +105,11 @@ jobs: - name: Install dependencies timeout-minutes: 20 - run: | - echo "Install dependencies in QEMU machine" - IP=192.168.122.10 - while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do - ssh 2>/dev/null zfs@$IP "uname -a" && break - done - scp .github/workflows/scripts/qemu-3-deps.sh zfs@$IP:qemu-3-deps.sh - PID=`pidof /usr/bin/qemu-system-x86_64` - ssh zfs@$IP '$HOME/qemu-3-deps.sh' ${{ matrix.os }} - # wait for poweroff to succeed - tail --pid=$PID -f /dev/null - sleep 5 # avoid this: "error: Domain is already active" - rm -f $HOME/.ssh/known_hosts + run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} ${{ github.event.inputs.fedora_kernel_ver }} - name: Build modules timeout-minutes: 30 - run: | - echo "Build modules in QEMU machine" - sudo virsh start openzfs - IP=192.168.122.10 - while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do - ssh 2>/dev/null zfs@$IP "uname -a" && break - done - rsync -ar $HOME/work/zfs/zfs zfs@$IP:./ - ssh zfs@$IP '$HOME/zfs/.github/workflows/scripts/qemu-4-build.sh' ${{ matrix.os }} + run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }} - name: Setup testing 
machines timeout-minutes: 5 diff --git a/.github/workflows/zloop.yml b/.github/workflows/zloop.yml index 90d93c48e4bd..7b3bf49d90d5 100644 --- a/.github/workflows/zloop.yml +++ b/.github/workflows/zloop.yml @@ -20,7 +20,7 @@ jobs: - name: Install dependencies run: | sudo apt-get purge -y snapd google-chrome-stable firefox - ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu24 + ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24 - name: Autogen.sh run: | sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4 diff --git a/META b/META index 0e852d300e8f..9e971a564912 100644 --- a/META +++ b/META @@ -1,10 +1,10 @@ Meta: 1 Name: zfs Branch: 1.0 -Version: 2.2.7 +Version: 2.2.8 Release: 1 Release-Tags: relext License: CDDL Author: OpenZFS -Linux-Maximum: 6.12 +Linux-Maximum: 6.15 Linux-Minimum: 4.18 diff --git a/cmd/zpool/os/linux/zpool_vdev_os.c b/cmd/zpool/os/linux/zpool_vdev_os.c index f194d28c55a9..862b1e61967b 100644 --- a/cmd/zpool/os/linux/zpool_vdev_os.c +++ b/cmd/zpool/os/linux/zpool_vdev_os.c @@ -87,7 +87,8 @@ typedef struct vdev_disk_db_entry { - char id[24]; + /* 24 byte name + 1 byte NULL terminator to make GCC happy */ + char id[25]; int sector_size; } vdev_disk_db_entry_t; diff --git a/config/kernel-automount.m4 b/config/kernel-automount.m4 index 52f1931b748e..b5f1392d0fcd 100644 --- a/config/kernel-automount.m4 +++ b/config/kernel-automount.m4 @@ -5,7 +5,7 @@ dnl # solution to handling automounts. Prior to this cifs/nfs clients dnl # which required automount support would abuse the follow_link() dnl # operation on directories for this purpose. 
dnl # -AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [ +AC_DEFUN([ZFS_AC_KERNEL_SRC_D_AUTOMOUNT], [ ZFS_LINUX_TEST_SRC([dentry_operations_d_automount], [ #include static struct vfsmount *d_automount(struct path *p) { return NULL; } @@ -15,7 +15,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [ ]) ]) -AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [ +AC_DEFUN([ZFS_AC_KERNEL_D_AUTOMOUNT], [ AC_MSG_CHECKING([whether dops->d_automount() exists]) ZFS_LINUX_TEST_RESULT([dentry_operations_d_automount], [ AC_MSG_RESULT(yes) @@ -23,3 +23,40 @@ AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [ ZFS_LINUX_TEST_ERROR([dops->d_automount()]) ]) ]) + +dnl # +dnl # 6.14 API change +dnl # dops->d_revalidate now has four args. +dnl # +AC_DEFUN([ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS], [ + ZFS_LINUX_TEST_SRC([dentry_operations_d_revalidate_4args], [ + #include + static int d_revalidate(struct inode *dir, + const struct qstr *name, struct dentry *dentry, + unsigned int fl) { return 0; } + struct dentry_operations dops __attribute__ ((unused)) = { + .d_revalidate = d_revalidate, + }; + ]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_4ARGS], [ + AC_MSG_CHECKING([whether dops->d_revalidate() takes 4 args]) + ZFS_LINUX_TEST_RESULT([dentry_operations_d_revalidate_4args], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_D_REVALIDATE_4ARGS, 1, + [dops->d_revalidate() takes 4 args]) + ],[ + AC_MSG_RESULT(no) + ]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [ + ZFS_AC_KERNEL_SRC_D_AUTOMOUNT + ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS +]) + +AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [ + ZFS_AC_KERNEL_D_AUTOMOUNT + ZFS_AC_KERNEL_D_REVALIDATE_4ARGS +]) diff --git a/config/kernel-kthread.m4 b/config/kernel-kthread.m4 index 4d580efead6b..607953146323 100644 --- a/config/kernel-kthread.m4 +++ b/config/kernel-kthread.m4 @@ -17,14 +17,21 @@ AC_DEFUN([ZFS_AC_KERNEL_KTHREAD_COMPLETE_AND_EXIT], [ AC_DEFUN([ZFS_AC_KERNEL_KTHREAD_DEQUEUE_SIGNAL], [ dnl # - dnl # 5.17 API: enum pid_type * as new 4th dequeue_signal() argument, - dnl # 
5768d8906bc23d512b1a736c1e198aa833a6daa4 ("signal: Requeue signals in the appropriate queue") + dnl # prehistory: + dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, + dnl # siginfo_t *info) dnl # - dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info); - dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type); + dnl # 4.20: kernel_siginfo_t introduced, replaces siginfo_t + dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, + dnl kernel_siginfo_t *info) dnl # - dnl # 6.12 API: first arg struct_task* removed - dnl # int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type); + dnl # 5.17: enum pid_type introduced as 4th arg + dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, + dnl # kernel_siginfo_t *info, enum pid_type *type) + dnl # + dnl # 6.12: first arg struct_task* removed + dnl # int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, + dnl # enum pid_type *type) dnl # AC_MSG_CHECKING([whether dequeue_signal() takes 4 arguments]) ZFS_LINUX_TEST_RESULT([kthread_dequeue_signal_4arg], [ @@ -33,11 +40,11 @@ AC_DEFUN([ZFS_AC_KERNEL_KTHREAD_DEQUEUE_SIGNAL], [ [dequeue_signal() takes 4 arguments]) ], [ AC_MSG_RESULT(no) - AC_MSG_CHECKING([whether dequeue_signal() a task argument]) - ZFS_LINUX_TEST_RESULT([kthread_dequeue_signal_3arg_task], [ + AC_MSG_CHECKING([whether 3-arg dequeue_signal() takes a type argument]) + ZFS_LINUX_TEST_RESULT([kthread_dequeue_signal_3arg_type], [ AC_MSG_RESULT(yes) - AC_DEFINE(HAVE_DEQUEUE_SIGNAL_3ARG_TASK, 1, - [dequeue_signal() takes a task argument]) + AC_DEFINE(HAVE_DEQUEUE_SIGNAL_3ARG_TYPE, 1, + [3-arg dequeue_signal() takes a type argument]) ], [ AC_MSG_RESULT(no) ]) @@ -56,27 +63,27 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_KTHREAD_COMPLETE_AND_EXIT], [ ]) AC_DEFUN([ZFS_AC_KERNEL_SRC_KTHREAD_DEQUEUE_SIGNAL], [ - ZFS_LINUX_TEST_SRC([kthread_dequeue_signal_3arg_task], [ + 
ZFS_LINUX_TEST_SRC([kthread_dequeue_signal_4arg], [ #include ], [ struct task_struct *task = NULL; sigset_t *mask = NULL; kernel_siginfo_t *info = NULL; + enum pid_type *type = NULL; int error __attribute__ ((unused)); - error = dequeue_signal(task, mask, info); + error = dequeue_signal(task, mask, info, type); ]) - ZFS_LINUX_TEST_SRC([kthread_dequeue_signal_4arg], [ + ZFS_LINUX_TEST_SRC([kthread_dequeue_signal_3arg_type], [ #include ], [ - struct task_struct *task = NULL; sigset_t *mask = NULL; kernel_siginfo_t *info = NULL; enum pid_type *type = NULL; int error __attribute__ ((unused)); - error = dequeue_signal(task, mask, info, type); + error = dequeue_signal(mask, info, type); ]) ]) diff --git a/config/kernel-mkdir.m4 b/config/kernel-mkdir.m4 index 8e084443c7b4..c1aebc387abe 100644 --- a/config/kernel-mkdir.m4 +++ b/config/kernel-mkdir.m4 @@ -2,6 +2,22 @@ dnl # dnl # Supported mkdir() interfaces checked newest to oldest. dnl # AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [ + dnl # + dnl # 6.15 API change + dnl # mkdir() returns struct dentry * + dnl # + ZFS_LINUX_TEST_SRC([mkdir_return_dentry], [ + #include + + static struct dentry *mkdir(struct mnt_idmap *idmap, + struct inode *inode, struct dentry *dentry, + umode_t umode) { return dentry; } + static const struct inode_operations + iops __attribute__ ((unused)) = { + .mkdir = mkdir, + }; + ],[]) + dnl # dnl # 6.3 API change dnl # mkdir() takes struct mnt_idmap * as the first arg @@ -59,29 +75,40 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [ AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [ dnl # - dnl # 6.3 API change - dnl # mkdir() takes struct mnt_idmap * as the first arg + dnl # 6.15 API change + dnl # mkdir() returns struct dentry * dnl # - AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*]) - ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [ + AC_MSG_CHECKING([whether iops->mkdir() returns struct dentry*]) + ZFS_LINUX_TEST_RESULT([mkdir_return_dentry], [ AC_MSG_RESULT(yes) - AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1, - [iops->mkdir() 
takes struct mnt_idmap*]) + AC_DEFINE(HAVE_IOPS_MKDIR_DENTRY, 1, + [iops->mkdir() returns struct dentry*]) ],[ - AC_MSG_RESULT(no) - dnl # - dnl # 5.12 API change - dnl # The struct user_namespace arg was added as the first argument to - dnl # mkdir() of the iops structure. + dnl # 6.3 API change + dnl # mkdir() takes struct mnt_idmap * as the first arg dnl # - AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*]) - ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [ + AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*]) + ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [ AC_MSG_RESULT(yes) - AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1, - [iops->mkdir() takes struct user_namespace*]) + AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1, + [iops->mkdir() takes struct mnt_idmap*]) ],[ AC_MSG_RESULT(no) + + dnl # + dnl # 5.12 API change + dnl # The struct user_namespace arg was added as the first argument to + dnl # mkdir() of the iops structure. + dnl # + AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*]) + ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1, + [iops->mkdir() takes struct user_namespace*]) + ],[ + AC_MSG_RESULT(no) + ]) ]) ]) ]) diff --git a/config/kernel-objtool.m4 b/config/kernel-objtool.m4 index f9f9d657d805..e616ccebcbc0 100644 --- a/config/kernel-objtool.m4 +++ b/config/kernel-objtool.m4 @@ -11,10 +11,12 @@ AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL_HEADER], [ #include ],[ ],[ + objtool_header=$LINUX/include/linux/objtool.h AC_DEFINE(HAVE_KERNEL_OBJTOOL_HEADER, 1, [kernel has linux/objtool.h]) AC_MSG_RESULT(linux/objtool.h) ],[ + objtool_header=$LINUX/include/linux/frame.h AC_MSG_RESULT(linux/frame.h) ]) ]) @@ -62,6 +64,23 @@ AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL], [ AC_MSG_RESULT(yes) AC_DEFINE(HAVE_STACK_FRAME_NON_STANDARD, 1, [STACK_FRAME_NON_STANDARD is defined]) + + dnl # Needed for kernels missing the asm macro. 
We grep + dnl # for it in the header file since there is currently + dnl # no test to check the result of assembling a file. + AC_MSG_CHECKING( + [whether STACK_FRAME_NON_STANDARD asm macro is defined]) + dnl # Escape square brackets. + sp='@<:@@<:@:space:@:>@@:>@' + dotmacro='@<:@.@:>@macro' + regexp="^$sp*$dotmacro$sp+STACK_FRAME_NON_STANDARD$sp" + AS_IF([$EGREP -s -q "$regexp" $objtool_header],[ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_STACK_FRAME_NON_STANDARD_ASM, 1, + [STACK_FRAME_NON_STANDARD asm macro is defined]) + ],[ + AC_MSG_RESULT(no) + ]) ],[ AC_MSG_RESULT(no) ]) diff --git a/config/kernel-sb-dying.m4 b/config/kernel-sb-dying.m4 new file mode 100644 index 000000000000..882f3e542357 --- /dev/null +++ b/config/kernel-sb-dying.m4 @@ -0,0 +1,19 @@ +dnl # +dnl # SB_DYING exists since Linux 6.6 +dnl # +AC_DEFUN([ZFS_AC_KERNEL_SRC_SB_DYING], [ + ZFS_LINUX_TEST_SRC([sb_dying], [ + #include + ],[ + (void) SB_DYING; + ]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_SB_DYING], [ + AC_MSG_CHECKING([whether SB_DYING is defined]) + ZFS_LINUX_TEST_RESULT([sb_dying], [ + AC_MSG_RESULT(yes) + ],[ + AC_MSG_RESULT(no) + ]) +]) diff --git a/config/kernel-timer.m4 b/config/kernel-timer.m4 new file mode 100644 index 000000000000..c89ea204e83d --- /dev/null +++ b/config/kernel-timer.m4 @@ -0,0 +1,32 @@ +dnl # +dnl # 6.2: timer_delete_sync introduced, del_timer_sync deprecated and made +dnl # into a simple wrapper +dnl # 6.15: del_timer_sync removed +dnl # +AC_DEFUN([ZFS_AC_KERNEL_SRC_TIMER_DELETE_SYNC], [ + ZFS_LINUX_TEST_SRC([timer_delete_sync], [ + #include + ],[ + struct timer_list *timer __attribute__((unused)) = NULL; + timer_delete_sync(timer); + ]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_TIMER_DELETE_SYNC], [ + AC_MSG_CHECKING([whether timer_delete_sync() is available]) + ZFS_LINUX_TEST_RESULT([timer_delete_sync], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_TIMER_DELETE_SYNC, 1, + [timer_delete_sync is available]) + ],[ + AC_MSG_RESULT(no) + ]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_SRC_TIMER], [ + 
ZFS_AC_KERNEL_SRC_TIMER_DELETE_SYNC +]) + +AC_DEFUN([ZFS_AC_KERNEL_TIMER], [ + ZFS_AC_KERNEL_TIMER_DELETE_SYNC +]) diff --git a/config/kernel-vfs-migrate_folio.m4 b/config/kernel-vfs-migrate_folio.m4 new file mode 100644 index 000000000000..186cd0581a17 --- /dev/null +++ b/config/kernel-vfs-migrate_folio.m4 @@ -0,0 +1,27 @@ +dnl # +dnl # Linux 6.0 uses migrate_folio in lieu of migrate_page +dnl # +AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_MIGRATE_FOLIO], [ + ZFS_LINUX_TEST_SRC([vfs_has_migrate_folio], [ + #include + #include + + static const struct address_space_operations + aops __attribute__ ((unused)) = { + .migrate_folio = migrate_folio, + }; + ],[]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_VFS_MIGRATE_FOLIO], [ + dnl # + dnl # Linux 6.0 uses migrate_folio in lieu of migrate_page + dnl # + AC_MSG_CHECKING([whether migrate_folio exists]) + ZFS_LINUX_TEST_RESULT([vfs_has_migrate_folio], [ + AC_MSG_RESULT([yes]) + AC_DEFINE(HAVE_VFS_MIGRATE_FOLIO, 1, [migrate_folio exists]) + ],[ + AC_MSG_RESULT([no]) + ]) +]) diff --git a/config/kernel-vfs-migratepage.m4 b/config/kernel-vfs-migratepage.m4 new file mode 100644 index 000000000000..05db3af511eb --- /dev/null +++ b/config/kernel-vfs-migratepage.m4 @@ -0,0 +1,27 @@ +dnl # +dnl # Linux 6.0 gets rid of address_space_operations.migratepage +dnl # +AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_MIGRATEPAGE], [ + ZFS_LINUX_TEST_SRC([vfs_has_migratepage], [ + #include + #include + + static const struct address_space_operations + aops __attribute__ ((unused)) = { + .migratepage = migrate_page, + }; + ],[]) +]) + +AC_DEFUN([ZFS_AC_KERNEL_VFS_MIGRATEPAGE], [ + dnl # + dnl # Linux 6.0 gets rid of address_space_operations.migratepage + dnl # + AC_MSG_CHECKING([whether migratepage exists]) + ZFS_LINUX_TEST_RESULT([vfs_has_migratepage], [ + AC_MSG_RESULT([yes]) + AC_DEFINE(HAVE_VFS_MIGRATEPAGE, 1, [migratepage exists]) + ],[ + AC_MSG_RESULT([no]) + ]) +]) diff --git a/config/kernel.m4 b/config/kernel.m4 index df3bf5293529..f0a4dc0fe430 100644 --- a/config/kernel.m4 
+++ b/config/kernel.m4 @@ -73,10 +73,13 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [ ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE ZFS_AC_KERNEL_SRC_SECURITY_INODE ZFS_AC_KERNEL_SRC_FST_MOUNT + ZFS_AC_KERNEL_SRC_SB_DYING ZFS_AC_KERNEL_SRC_SET_NLINK ZFS_AC_KERNEL_SRC_SGET ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO + ZFS_AC_KERNEL_SRC_VFS_MIGRATE_FOLIO + ZFS_AC_KERNEL_SRC_VFS_MIGRATEPAGE ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO ZFS_AC_KERNEL_SRC_VFS_READPAGES @@ -127,6 +130,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [ ZFS_AC_KERNEL_SRC_MM_PAGE_SIZE ZFS_AC_KERNEL_SRC_MM_PAGE_MAPPING ZFS_AC_KERNEL_SRC_FILE + ZFS_AC_KERNEL_SRC_TIMER case "$host_cpu" in powerpc*) ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE @@ -183,10 +187,13 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [ ZFS_AC_KERNEL_TRUNCATE_SETSIZE ZFS_AC_KERNEL_SECURITY_INODE ZFS_AC_KERNEL_FST_MOUNT + ZFS_AC_KERNEL_SB_DYING ZFS_AC_KERNEL_SET_NLINK ZFS_AC_KERNEL_SGET ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO ZFS_AC_KERNEL_VFS_READ_FOLIO + ZFS_AC_KERNEL_VFS_MIGRATE_FOLIO + ZFS_AC_KERNEL_VFS_MIGRATEPAGE ZFS_AC_KERNEL_VFS_FSYNC_2ARGS ZFS_AC_KERNEL_VFS_DIRECT_IO ZFS_AC_KERNEL_VFS_READPAGES @@ -238,6 +245,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [ ZFS_AC_KERNEL_MM_PAGE_MAPPING ZFS_AC_KERNEL_1ARG_ASSIGN_STR ZFS_AC_KERNEL_FILE + ZFS_AC_KERNEL_TIMER case "$host_cpu" in powerpc*) ZFS_AC_KERNEL_CPU_HAS_FEATURE @@ -629,11 +637,16 @@ AC_DEFUN([ZFS_LINUX_COMPILE], [ building kernel modules]) AC_ARG_VAR([KERNEL_LLVM], [Binary option to build kernel modules with LLVM/CLANG toolchain]) + AC_ARG_VAR([KERNEL_CROSS_COMPILE], [Cross compile prefix + for kernel module builds]) + AC_ARG_VAR([KERNEL_ARCH], [Architecture to build kernel modules for]) AC_TRY_COMMAND([ KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6" make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC} ${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM} CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES + 
${KERNEL_CROSS_COMPILE:+CROSS_COMPILE=$KERNEL_CROSS_COMPILE} + ${KERNEL_ARCH:+ARCH=$KERNEL_ARCH} -C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1]) AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4]) ]) diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index bb5a85d815d1..57582b9d18f5 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -393,6 +393,8 @@ AC_DEFUN([ZFS_AC_RPM], [ RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_cc KERNEL_CC=$(KERNEL_CC)"' RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_ld KERNEL_LD=$(KERNEL_LD)"' RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_llvm KERNEL_LLVM=$(KERNEL_LLVM)"' + RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_cross_compile KERNEL_CROSS_COMPILE=$(KERNEL_CROSS_COMPILE)"' + RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_arch KERNEL_ARCH=$(KERNEL_ARCH)"' ]) RPM_DEFINE_DKMS='' diff --git a/contrib/debian/control b/contrib/debian/control index e56fbf0f1c93..010273397bf9 100644 --- a/contrib/debian/control +++ b/contrib/debian/control @@ -11,6 +11,7 @@ Build-Depends: debhelper-compat (= 12), libelf-dev, libpam0g-dev, libssl-dev | libssl1.0-dev, + libtirpc-dev, libtool, libudev-dev, lsb-release, diff --git a/contrib/initramfs/scripts/local-top/zfs b/contrib/initramfs/scripts/local-top/zfs index 6b80e9f43607..fc455077ec94 100755 --- a/contrib/initramfs/scripts/local-top/zfs +++ b/contrib/initramfs/scripts/local-top/zfs @@ -41,9 +41,9 @@ activate_vg() return 1 fi - # Detect and activate available volume groups + # Detect and auto-activate available volume groups /sbin/lvm vgscan - /sbin/lvm vgchange -a y --sysinit + /sbin/lvm vgchange -aay --sysinit return $? 
} diff --git a/include/os/freebsd/spl/sys/policy.h b/include/os/freebsd/spl/sys/policy.h index 32c10bdca90e..e1747bb14821 100644 --- a/include/os/freebsd/spl/sys/policy.h +++ b/include/os/freebsd/spl/sys/policy.h @@ -38,7 +38,6 @@ struct znode; int secpolicy_nfs(cred_t *cr); int secpolicy_zfs(cred_t *crd); -int secpolicy_zfs_proc(cred_t *cr, proc_t *proc); int secpolicy_sys_config(cred_t *cr, int checkonly); int secpolicy_zinject(cred_t *cr); int secpolicy_fs_unmount(cred_t *cr, struct mount *vfsp); diff --git a/include/os/linux/kernel/linux/blkdev_compat.h b/include/os/linux/kernel/linux/blkdev_compat.h index c0d3770748ea..26e7b0b2a34a 100644 --- a/include/os/linux/kernel/linux/blkdev_compat.h +++ b/include/os/linux/kernel/linux/blkdev_compat.h @@ -356,7 +356,7 @@ bio_set_flush(struct bio *bio) static inline boolean_t bio_is_flush(struct bio *bio) { - return (bio_op(bio) == REQ_OP_FLUSH); + return (bio_op(bio) == REQ_OP_FLUSH || op_is_flush(bio->bi_opf)); } /* diff --git a/include/os/linux/kernel/linux/page_compat.h b/include/os/linux/kernel/linux/page_compat.h index 963b96ba6351..7dcf53bbea47 100644 --- a/include/os/linux/kernel/linux/page_compat.h +++ b/include/os/linux/kernel/linux/page_compat.h @@ -4,8 +4,8 @@ /* * Create our own accessor functions to follow the Linux API changes */ -#define nr_file_pages() global_node_page_state(NR_FILE_PAGES) -#define nr_inactive_anon_pages() global_node_page_state(NR_INACTIVE_ANON) +#define nr_file_pages() (global_node_page_state(NR_ACTIVE_FILE) + \ + global_node_page_state(NR_INACTIVE_FILE)) #define nr_inactive_file_pages() global_node_page_state(NR_INACTIVE_FILE) #endif /* _ZFS_PAGE_COMPAT_H */ diff --git a/include/os/linux/spl/sys/taskq.h b/include/os/linux/spl/sys/taskq.h index 8c4fee5299ff..0c3149b1efc5 100644 --- a/include/os/linux/spl/sys/taskq.h +++ b/include/os/linux/spl/sys/taskq.h @@ -33,6 +33,7 @@ #include #include #include +#include #define TASKQ_NAMELEN 31 diff --git a/include/os/linux/spl/sys/uio.h 
b/include/os/linux/spl/sys/uio.h index 5e6ea8d3c221..82a227d76ca0 100644 --- a/include/os/linux/spl/sys/uio.h +++ b/include/os/linux/spl/sys/uio.h @@ -151,7 +151,7 @@ zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio, struct request *rq) #if defined(HAVE_VFS_IOV_ITER) static inline void zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset, - ssize_t resid, size_t skip) + ssize_t resid) { uio->uio_iter = iter; uio->uio_iovcnt = iter->nr_segs; @@ -161,7 +161,7 @@ zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset, uio->uio_fmode = 0; uio->uio_extflg = 0; uio->uio_resid = resid; - uio->uio_skip = skip; + uio->uio_skip = 0; } #endif diff --git a/include/os/linux/zfs/sys/abd_os.h b/include/os/linux/zfs/sys/abd_os.h index ce4f5a2bdf9b..e38d8b81b20b 100644 --- a/include/os/linux/zfs/sys/abd_os.h +++ b/include/os/linux/zfs/sys/abd_os.h @@ -30,6 +30,8 @@ extern "C" { #endif +struct abd; + struct abd_scatter { uint_t abd_offset; uint_t abd_nents; @@ -41,10 +43,8 @@ struct abd_linear { struct scatterlist *abd_sgl; /* for LINEAR_PAGE */ }; -typedef struct abd abd_t; - typedef int abd_iter_page_func_t(struct page *, size_t, size_t, void *); -int abd_iterate_page_func(abd_t *, size_t, size_t, abd_iter_page_func_t *, +int abd_iterate_page_func(struct abd *, size_t, size_t, abd_iter_page_func_t *, void *); /* @@ -52,8 +52,8 @@ int abd_iterate_page_func(abd_t *, size_t, size_t, abd_iter_page_func_t *, * Note: these are only needed to support vdev_classic. See comment in * vdev_disk.c. 
*/ -unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t); -unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t); +unsigned int abd_bio_map_off(struct bio *, struct abd *, unsigned int, size_t); +unsigned long abd_nr_pages_off(struct abd *, unsigned int, size_t); #ifdef __cplusplus } diff --git a/include/os/linux/zfs/sys/policy.h b/include/os/linux/zfs/sys/policy.h index 0c265db78591..03069b9947d5 100644 --- a/include/os/linux/zfs/sys/policy.h +++ b/include/os/linux/zfs/sys/policy.h @@ -51,7 +51,6 @@ int secpolicy_vnode_setids_setgids(const cred_t *, gid_t, zidmap_t *, struct user_namespace *); int secpolicy_zinject(const cred_t *); int secpolicy_zfs(const cred_t *); -int secpolicy_zfs_proc(const cred_t *, proc_t *); void secpolicy_setid_clear(vattr_t *, cred_t *); int secpolicy_setid_setsticky_clear(struct inode *, vattr_t *, const vattr_t *, cred_t *, zidmap_t *, struct user_namespace *); diff --git a/include/sys/dmu_recv.h b/include/sys/dmu_recv.h index 3390ca1089f8..3ca73f7ea05c 100644 --- a/include/sys/dmu_recv.h +++ b/include/sys/dmu_recv.h @@ -59,7 +59,6 @@ typedef struct dmu_recv_cookie { uint64_t drc_ivset_guid; void *drc_owner; cred_t *drc_cred; - proc_t *drc_proc; nvlist_t *drc_begin_nvl; objset_t *drc_os; diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h index 3450527af7e0..047c062386a7 100644 --- a/include/sys/dsl_dataset.h +++ b/include/sys/dsl_dataset.h @@ -283,7 +283,6 @@ typedef struct dsl_dataset_promote_arg { uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap; nvlist_t *err_ds; cred_t *cr; - proc_t *proc; } dsl_dataset_promote_arg_t; typedef struct dsl_dataset_rollback_arg { @@ -298,7 +297,6 @@ typedef struct dsl_dataset_snapshot_arg { nvlist_t *ddsa_props; nvlist_t *ddsa_errors; cred_t *ddsa_cr; - proc_t *ddsa_proc; } dsl_dataset_snapshot_arg_t; typedef struct dsl_dataset_rename_snapshot_arg { @@ -458,7 +456,7 @@ int dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone, void 
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone, dsl_dataset_t *origin_head, dmu_tx_t *tx); int dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname, - dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc); + dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr); void dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname, dmu_tx_t *tx); diff --git a/include/sys/dsl_dir.h b/include/sys/dsl_dir.h index f7c0d9acd10d..a338615e0e88 100644 --- a/include/sys/dsl_dir.h +++ b/include/sys/dsl_dir.h @@ -184,11 +184,11 @@ int dsl_dir_set_reservation(const char *ddname, zprop_source_t source, uint64_t reservation); int dsl_dir_activate_fs_ss_limit(const char *); int dsl_fs_ss_limit_check(dsl_dir_t *, uint64_t, zfs_prop_t, dsl_dir_t *, - cred_t *, proc_t *); + cred_t *); void dsl_fs_ss_count_adjust(dsl_dir_t *, int64_t, const char *, dmu_tx_t *); int dsl_dir_rename(const char *oldname, const char *newname); int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, - uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *, proc_t *); + uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *); boolean_t dsl_dir_is_clone(dsl_dir_t *dd); void dsl_dir_new_refreservation(dsl_dir_t *dd, struct dsl_dataset *ds, uint64_t reservation, cred_t *cr, dmu_tx_t *tx); diff --git a/include/sys/frame.h b/include/sys/frame.h index caae851421d8..0250f47b61e0 100644 --- a/include/sys/frame.h +++ b/include/sys/frame.h @@ -30,8 +30,16 @@ extern "C" { #else #include #endif +#if defined(_ASM) && ! 
defined(HAVE_STACK_FRAME_NON_STANDARD_ASM) +.macro STACK_FRAME_NON_STANDARD func:req +.endm +#endif #else #define STACK_FRAME_NON_STANDARD(func) +#if defined(_ASM) +.macro STACK_FRAME_NON_STANDARD func:req +.endm +#endif #endif #ifdef __cplusplus diff --git a/include/sys/zcp.h b/include/sys/zcp.h index 6301cc08e7ea..dafbc741690a 100644 --- a/include/sys/zcp.h +++ b/include/sys/zcp.h @@ -75,7 +75,6 @@ typedef struct zcp_run_info { * rather than the 'current' thread's. */ cred_t *zri_cred; - proc_t *zri_proc; /* * The tx in which this channel program is running. diff --git a/include/sys/zfs_context.h b/include/sys/zfs_context.h index bd123435cdb1..5dedb14c7fb5 100644 --- a/include/sys/zfs_context.h +++ b/include/sys/zfs_context.h @@ -629,6 +629,9 @@ extern void delay(clock_t ticks); #define kcred NULL #define CRED() NULL +#define crhold(cr) ((void)cr) +#define crfree(cr) ((void)cr) + #define ptob(x) ((x) * PAGESIZE) #define NN_DIVISOR_1000 (1U << 0) @@ -741,7 +744,6 @@ extern int zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr); extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr); extern int secpolicy_zfs(const cred_t *cr); -extern int secpolicy_zfs_proc(const cred_t *cr, proc_t *proc); extern zoneid_t getzoneid(void); /* SID stuff */ diff --git a/lib/libspl/os/linux/zone.c b/lib/libspl/os/linux/zone.c index 622d04cbc14a..f8a10bfa167a 100644 --- a/lib/libspl/os/linux/zone.c +++ b/lib/libspl/os/linux/zone.c @@ -42,20 +42,20 @@ getzoneid(void) int c = snprintf(path, sizeof (path), "/proc/self/ns/user"); /* This API doesn't have any error checking... 
*/ if (c < 0 || c >= sizeof (path)) - return (0); + return (GLOBAL_ZONEID); ssize_t r = readlink(path, buf, sizeof (buf) - 1); if (r < 0) - return (0); + return (GLOBAL_ZONEID); cp = strchr(buf, '['); if (cp == NULL) - return (0); + return (GLOBAL_ZONEID); cp++; unsigned long n = strtoul(cp, NULL, 10); if (n == ULONG_MAX && errno == ERANGE) - return (0); + return (GLOBAL_ZONEID); zoneid_t z = (zoneid_t)n; return (z); diff --git a/lib/libzpool/kernel.c b/lib/libzpool/kernel.c index a02bee72b8df..148c765753dd 100644 --- a/lib/libzpool/kernel.c +++ b/lib/libzpool/kernel.c @@ -914,13 +914,6 @@ secpolicy_zfs(const cred_t *cr) return (0); } -int -secpolicy_zfs_proc(const cred_t *cr, proc_t *proc) -{ - (void) cr, (void) proc; - return (0); -} - ksiddomain_t * ksid_lookupdomain(const char *dom) { diff --git a/man/man4/zfs.4 b/man/man4/zfs.4 index 9d3fc1055732..1d734d865a16 100644 --- a/man/man4/zfs.4 +++ b/man/man4/zfs.4 @@ -793,7 +793,9 @@ pressure on the pagecache, yet still allows the ARC to be reclaimed down to .Sy zfs_arc_min if necessary. This value is specified as percent of pagecache size (as measured by -.Sy NR_FILE_PAGES ) , +.Sy NR_ACTIVE_FILE ++ +.Sy NR_INACTIVE_FILE ) , where that percent may exceed .Sy 100 . This @@ -1388,30 +1390,17 @@ This parameter only applies on Linux. This parameter is ignored if .Sy zfs_vdev_disk_classic Ns = Ns Sy 1 . . -.It Sy zfs_vdev_disk_classic Ns = Ns 0 Ns | Ns Sy 1 Pq uint -Controls the method used to submit IO to the Linux block layer -(default -.Sy 1 "classic" Ns -) -.Pp -If set to 1, the "classic" method is used. -This is the method that has been in use since the earliest versions of -ZFS-on-Linux. -It has known issues with highly fragmented IO requests and is less efficient on -many workloads, but it well known and well understood. -.Pp -If set to 0, the "new" method is used. -This method is available since 2.2.4 and should resolve all known issues and be -far more efficient, but has not had as much testing. 
-In the 2.2.x series, this parameter defaults to 1, to use the "classic" method. -.Pp -It is not recommended that you change it except on advice from the OpenZFS -developers. -If you do change it, please also open a bug report describing why you did so, +.It Sy zfs_vdev_disk_classic Ns = Ns Sy 0 Ns | Ns 1 Pq uint +If set to 1, OpenZFS will submit IO to Linux using the method it used in 2.2 +and earlier. +This "classic" method has known issues with highly fragmented IO requests and +is slower on many workloads, but it has been in use for many years and is known +to be very stable. +If you set this parameter, please also open a bug report why you did so, including the workload involved and any error messages. .Pp -This parameter and the "classic" submission method will be removed in a future -release of OpenZFS once we have total confidence in the new method. +This parameter and the classic submission method will be removed once we have +total confidence in the new method. .Pp This parameter only applies on Linux, and can only be set at module load time. . diff --git a/module/Kbuild.in b/module/Kbuild.in index 4afd064930a3..e7790c6b920d 100644 --- a/module/Kbuild.in +++ b/module/Kbuild.in @@ -180,14 +180,6 @@ $(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \ $(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \ $(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include) -# Suppress objtool "return with modified stack frame" warnings. -OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y - -# Suppress objtool "unsupported stack pointer realignment" warnings. -# See #6950 for the reasoning. 
-OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y -OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y - LUA_OBJS := \ lapi.o \ lauxlib.o \ @@ -498,11 +490,6 @@ UBSAN_SANITIZE_sa.o := n UBSAN_SANITIZE_zfs/zap_micro.o := n UBSAN_SANITIZE_zfs/sa.o := n -# Suppress incorrect warnings from versions of objtool which are not -# aware of x86 EVEX prefix instructions used for AVX512. -OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y -OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y - ifeq ($(CONFIG_ALTIVEC),y) $(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec endif diff --git a/module/Makefile.in b/module/Makefile.in index 9b34b3dfaec7..529ab81dcec5 100644 --- a/module/Makefile.in +++ b/module/Makefile.in @@ -55,6 +55,8 @@ modules-Linux: mkdir -p $(sort $(dir $(zfs-objs) $(zfs-))) $(MAKE) -C @LINUX_OBJ@ $(if @KERNEL_CC@,CC=@KERNEL_CC@) \ $(if @KERNEL_LD@,LD=@KERNEL_LD@) $(if @KERNEL_LLVM@,LLVM=@KERNEL_LLVM@) \ + $(if @KERNEL_CROSS_COMPILE@,CROSS_COMPILE=@KERNEL_CROSS_COMPILE@) \ + $(if @KERNEL_ARCH@,ARCH=@KERNEL_ARCH@) \ M="$$PWD" @KERNEL_MAKE@ CONFIG_ZFS=m modules modules-FreeBSD: diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c index 6f6649b3b58b..87b4e3e16979 100644 --- a/module/icp/algs/modes/modes.c +++ b/module/icp/algs/modes/modes.c @@ -182,12 +182,12 @@ gcm_clear_ctx(gcm_ctx_t *ctx) #if defined(CAN_USE_GCM_ASM) if (ctx->gcm_use_avx == B_TRUE) { ASSERT3P(ctx->gcm_Htable, !=, NULL); - memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len); + explicit_memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len); kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len); } #endif if (ctx->gcm_pt_buf != NULL) { - memset(ctx->gcm_pt_buf, 0, ctx->gcm_pt_buf_len); + explicit_memset(ctx->gcm_pt_buf, 0, ctx->gcm_pt_buf_len); vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); } /* Optional */ diff --git a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S index 909b2147dff9..cb75043a49a7 100644 --- 
a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S +++ b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S @@ -49,6 +49,7 @@ #define _ASM #include +#include /* Windows userland links with OpenSSL */ #if !defined (_WIN32) || defined (_KERNEL) @@ -377,6 +378,7 @@ FUNCTION(_aesni_ctr32_ghash_6x) RET .cfi_endproc SET_SIZE(_aesni_ctr32_ghash_6x) +STACK_FRAME_NON_STANDARD _aesni_ctr32_ghash_6x #endif /* ifdef HAVE_MOVBE */ .balign 32 @@ -705,6 +707,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x) RET .cfi_endproc SET_SIZE(_aesni_ctr32_ghash_no_movbe_6x) +STACK_FRAME_NON_STANDARD _aesni_ctr32_ghash_no_movbe_6x ENTRY_ALIGN(aesni_gcm_decrypt, 32) .cfi_startproc @@ -822,6 +825,7 @@ ENTRY_ALIGN(aesni_gcm_decrypt, 32) RET .cfi_endproc SET_SIZE(aesni_gcm_decrypt) +STACK_FRAME_NON_STANDARD aesni_gcm_decrypt .balign 32 FUNCTION(_aesni_ctr32_6x) @@ -1197,6 +1201,7 @@ ENTRY_ALIGN(aesni_gcm_encrypt, 32) RET .cfi_endproc SET_SIZE(aesni_gcm_encrypt) +STACK_FRAME_NON_STANDARD aesni_gcm_encrypt #endif /* !_WIN32 || _KERNEL */ @@ -1256,6 +1261,18 @@ SECTION_STATIC .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .balign 64 +/* Workaround for missing asm macro in RHEL 8. */ +#if defined(__linux__) && defined(HAVE_STACK_FRAME_NON_STANDARD) && \ + ! defined(HAVE_STACK_FRAME_NON_STANDARD_ASM) +.section .discard.func_stack_frame_non_standard, "aw" +#ifdef HAVE_MOVBE + .long _aesni_ctr32_ghash_6x - . +#endif + .long _aesni_ctr32_ghash_no_movbe_6x - . + .long aesni_gcm_decrypt - . + .long aesni_gcm_encrypt - . +#endif + /* Mark the stack non-executable. 
*/ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits diff --git a/module/icp/asm-x86_64/sha2/sha256-x86_64.S b/module/icp/asm-x86_64/sha2/sha256-x86_64.S index d3e5e3f0d080..edd68979a5f4 100644 --- a/module/icp/asm-x86_64/sha2/sha256-x86_64.S +++ b/module/icp/asm-x86_64/sha2/sha256-x86_64.S @@ -23,6 +23,7 @@ #define _ASM #include +#include SECTION_STATIC @@ -1419,6 +1420,7 @@ ENTRY_ALIGN(zfs_sha256_transform_x64, 16) RET .cfi_endproc SET_SIZE(zfs_sha256_transform_x64) +STACK_FRAME_NON_STANDARD zfs_sha256_transform_x64 ENTRY_ALIGN(zfs_sha256_transform_shani, 64) .cfi_startproc @@ -1627,6 +1629,7 @@ ENTRY_ALIGN(zfs_sha256_transform_shani, 64) RET .cfi_endproc SET_SIZE(zfs_sha256_transform_shani) +STACK_FRAME_NON_STANDARD zfs_sha256_transform_shani ENTRY_ALIGN(zfs_sha256_transform_ssse3, 64) .cfi_startproc @@ -2738,6 +2741,7 @@ ENTRY_ALIGN(zfs_sha256_transform_ssse3, 64) RET .cfi_endproc SET_SIZE(zfs_sha256_transform_ssse3) +STACK_FRAME_NON_STANDARD zfs_sha256_transform_ssse3 ENTRY_ALIGN(zfs_sha256_transform_avx, 64) .cfi_startproc @@ -3812,6 +3816,7 @@ ENTRY_ALIGN(zfs_sha256_transform_avx, 64) RET .cfi_endproc SET_SIZE(zfs_sha256_transform_avx) +STACK_FRAME_NON_STANDARD zfs_sha256_transform_avx ENTRY_ALIGN(zfs_sha256_transform_avx2, 64) .cfi_startproc @@ -5097,6 +5102,18 @@ ENTRY_ALIGN(zfs_sha256_transform_avx2, 64) RET .cfi_endproc SET_SIZE(zfs_sha256_transform_avx2) +STACK_FRAME_NON_STANDARD zfs_sha256_transform_avx2 + +/* Workaround for missing asm macro in RHEL 8. */ +#if defined(__linux__) && defined(HAVE_STACK_FRAME_NON_STANDARD) && \ + ! defined(HAVE_STACK_FRAME_NON_STANDARD_ASM) +.section .discard.func_stack_frame_non_standard, "aw" + .long zfs_sha256_transform_x64 - . + .long zfs_sha256_transform_shani - . + .long zfs_sha256_transform_ssse3 - . + .long zfs_sha256_transform_avx - . + .long zfs_sha256_transform_avx2 - . 
+#endif #if defined(__ELF__) .section .note.GNU-stack,"",%progbits diff --git a/module/icp/asm-x86_64/sha2/sha512-x86_64.S b/module/icp/asm-x86_64/sha2/sha512-x86_64.S index fbbcca650d10..4af77a3decbc 100644 --- a/module/icp/asm-x86_64/sha2/sha512-x86_64.S +++ b/module/icp/asm-x86_64/sha2/sha512-x86_64.S @@ -23,6 +23,7 @@ #define _ASM #include +#include SECTION_STATIC @@ -1462,6 +1463,7 @@ ENTRY_ALIGN(zfs_sha512_transform_x64, 16) RET .cfi_endproc SET_SIZE(zfs_sha512_transform_x64) +STACK_FRAME_NON_STANDARD zfs_sha512_transform_x64 ENTRY_ALIGN(zfs_sha512_transform_avx, 64) .cfi_startproc @@ -2626,6 +2628,7 @@ ENTRY_ALIGN(zfs_sha512_transform_avx, 64) RET .cfi_endproc SET_SIZE(zfs_sha512_transform_avx) +STACK_FRAME_NON_STANDARD zfs_sha512_transform_avx ENTRY_ALIGN(zfs_sha512_transform_avx2, 64) .cfi_startproc @@ -4004,6 +4007,16 @@ ENTRY_ALIGN(zfs_sha512_transform_avx2, 64) RET .cfi_endproc SET_SIZE(zfs_sha512_transform_avx2) +STACK_FRAME_NON_STANDARD zfs_sha512_transform_avx2 + +/* Workaround for missing asm macro in RHEL 8. */ +#if defined(__linux__) && defined(HAVE_STACK_FRAME_NON_STANDARD) && \ + ! defined(HAVE_STACK_FRAME_NON_STANDARD_ASM) +.section .discard.func_stack_frame_non_standard, "aw" + .long zfs_sha512_transform_x64 - . + .long zfs_sha512_transform_avx - . + .long zfs_sha512_transform_avx2 - . 
+#endif #if defined(__ELF__) .section .note.GNU-stack,"",%progbits diff --git a/module/os/freebsd/spl/spl_policy.c b/module/os/freebsd/spl/spl_policy.c index 42a693b073d1..5f59934f22e8 100644 --- a/module/os/freebsd/spl/spl_policy.c +++ b/module/os/freebsd/spl/spl_policy.c @@ -51,13 +51,6 @@ secpolicy_zfs(cred_t *cr) return (priv_check_cred(cr, PRIV_VFS_MOUNT)); } -int -secpolicy_zfs_proc(cred_t *cr, proc_t *proc) -{ - - return (priv_check_cred(cr, PRIV_VFS_MOUNT)); -} - int secpolicy_sys_config(cred_t *cr, int checkonly __unused) { diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c index 7f6780909973..5ebd7a6256f2 100644 --- a/module/os/linux/spl/spl-taskq.c +++ b/module/os/linux/spl/spl-taskq.c @@ -30,6 +30,11 @@ #include #include +/* Linux 6.2 renamed timer_delete_sync(); point it at its old name for those. */ +#ifndef HAVE_TIMER_DELETE_SYNC +#define timer_delete_sync(t) del_timer_sync(t) +#endif + static int spl_taskq_thread_bind = 0; module_param(spl_taskq_thread_bind, int, 0644); MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default"); @@ -547,7 +552,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id) */ if (timer_pending(&t->tqent_timer)) { spin_unlock_irqrestore(&tq->tq_lock, flags); - del_timer_sync(&t->tqent_timer); + timer_delete_sync(&t->tqent_timer); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); } diff --git a/module/os/linux/spl/spl-thread.c b/module/os/linux/spl/spl-thread.c index 7f74d44f91ff..7b0ce30c7884 100644 --- a/module/os/linux/spl/spl-thread.c +++ b/module/os/linux/spl/spl-thread.c @@ -171,11 +171,11 @@ issig(void) #if defined(HAVE_DEQUEUE_SIGNAL_4ARG) enum pid_type __type; if (dequeue_signal(current, &set, &__info, &__type) != 0) { -#elif defined(HAVE_DEQUEUE_SIGNAL_3ARG_TASK) - if (dequeue_signal(current, &set, &__info) != 0) { -#else +#elif defined(HAVE_DEQUEUE_SIGNAL_3ARG_TYPE) enum pid_type __type; if (dequeue_signal(&set, &__info, &__type) != 0) { +#else + if 
(dequeue_signal(current, &set, &__info) != 0) { #endif spin_unlock_irq(¤t->sighand->siglock); kernel_signal_stop(); diff --git a/module/os/linux/zfs/policy.c b/module/os/linux/zfs/policy.c index d21bc667ba69..b2916f4217c4 100644 --- a/module/os/linux/zfs/policy.c +++ b/module/os/linux/zfs/policy.c @@ -23,6 +23,7 @@ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2013, Joyent, Inc. All rights reserved. * Copyright (C) 2016 Lawrence Livermore National Security, LLC. + * Copyright (c) 2025, Rob Norris * * For Linux the vast majority of this enforcement is already handled via * the standard Linux VFS permission checks. However certain administrative @@ -34,28 +35,32 @@ #include #include -/* - * The passed credentials cannot be directly verified because Linux only - * provides and interface to check the *current* process credentials. In - * order to handle this the capable() test is only run when the passed - * credentials match the current process credentials or the kcred. In - * all other cases this function must fail and return the passed err. - */ static int priv_policy_ns(const cred_t *cr, int capability, int err, struct user_namespace *ns) { - if (cr != CRED() && (cr != kcred)) - return (err); + /* + * The passed credentials cannot be directly verified because Linux + * only provides an interface to check the *current* process + * credentials. In order to handle this we check if the passed in + * creds match the current process credentials or the kcred. If not, + * we swap the passed credentials into the current task, perform the + * check, and then revert it before returning. + */ + const cred_t *old = + (cr != CRED() && cr != kcred) ? override_creds(cr) : NULL; #if defined(CONFIG_USER_NS) - if (!(ns ? ns_capable(ns, capability) : capable(capability))) + if (ns ? 
ns_capable(ns, capability) : capable(capability)) #else - if (!capable(capability)) + if (capable(capability)) #endif - return (err); + err = 0; - return (0); + if (old) + revert_creds(old); + + return (err); } static int @@ -248,19 +253,6 @@ secpolicy_zfs(const cred_t *cr) return (priv_policy(cr, CAP_SYS_ADMIN, EACCES)); } -/* - * Equivalent to secpolicy_zfs(), but works even if the cred_t is not that of - * the current process. Takes both cred_t and proc_t so that this can work - * easily on all platforms. - */ -int -secpolicy_zfs_proc(const cred_t *cr, proc_t *proc) -{ - if (!has_capability(proc, CAP_SYS_ADMIN)) - return (EACCES); - return (0); -} - void secpolicy_setid_clear(vattr_t *vap, cred_t *cr) { diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c index a1d03188d826..e27d7346a665 100644 --- a/module/os/linux/zfs/vdev_disk.c +++ b/module/os/linux/zfs/vdev_disk.c @@ -963,10 +963,8 @@ vdev_disk_io_rw(zio_t *zio) /* * This is the classic, battle-tested BIO submission code. Until we're totally * sure that the new code is safe and correct in all cases, this will remain - * available. - * - * It is enabled by setting zfs_vdev_disk_classic=1 at module load time. It is - * enabled (=1) by default since 2.2.4, and disabled by default (=0) on master. + * available and can be enabled by setting zfs_vdev_disk_classic=1 at module + * load time. * * These functions have been renamed to vdev_classic_* to make it clear what * they belong to, but their implementations are unchanged. @@ -1516,7 +1514,7 @@ vdev_disk_rele(vdev_t *vd) * BIO submission method. See comment above about vdev_classic. 
* Set zfs_vdev_disk_classic=0 for new, =1 for classic */ -static uint_t zfs_vdev_disk_classic = 1; /* default classic */ +static uint_t zfs_vdev_disk_classic = 0; /* default new */ /* Set submission function from module parameter */ static int diff --git a/module/os/linux/zfs/vdev_file.c b/module/os/linux/zfs/vdev_file.c index 6d5841a2f0c9..8fec5970aad5 100644 --- a/module/os/linux/zfs/vdev_file.c +++ b/module/os/linux/zfs/vdev_file.c @@ -33,11 +33,13 @@ #include #include #include -#include #include #include #ifdef _KERNEL #include +#include +#else +#include #endif /* * Virtual device vector for files. diff --git a/module/os/linux/zfs/zfs_uio.c b/module/os/linux/zfs/zfs_uio.c index feba18fdaf4d..5f8d3efdc902 100644 --- a/module/os/linux/zfs/zfs_uio.c +++ b/module/os/linux/zfs/zfs_uio.c @@ -268,9 +268,6 @@ zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, { size_t cnt = MIN(n, uio->uio_resid); - if (uio->uio_skip) - iov_iter_advance(uio->uio_iter, uio->uio_skip); - if (rw == UIO_READ) cnt = copy_to_iter(p, cnt, uio->uio_iter); else diff --git a/module/os/linux/zfs/zpl_ctldir.c b/module/os/linux/zfs/zpl_ctldir.c index 56a30be5110c..a7fdb8f28009 100644 --- a/module/os/linux/zfs/zpl_ctldir.c +++ b/module/os/linux/zfs/zpl_ctldir.c @@ -185,8 +185,14 @@ zpl_snapdir_automount(struct path *path) * as of the 3.18 kernel revaliding the mountpoint dentry will result in * the snapshot being immediately unmounted. 
*/ +#ifdef HAVE_D_REVALIDATE_4ARGS +static int +zpl_snapdir_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) +#else static int zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags) +#endif { return (!!dentry->d_inode); } @@ -330,14 +336,20 @@ zpl_snapdir_rmdir(struct inode *dip, struct dentry *dentry) return (error); } +#if defined(HAVE_IOPS_MKDIR_USERNS) static int -#ifdef HAVE_IOPS_MKDIR_USERNS zpl_snapdir_mkdir(struct user_namespace *user_ns, struct inode *dip, struct dentry *dentry, umode_t mode) #elif defined(HAVE_IOPS_MKDIR_IDMAP) +static int +zpl_snapdir_mkdir(struct mnt_idmap *user_ns, struct inode *dip, + struct dentry *dentry, umode_t mode) +#elif defined(HAVE_IOPS_MKDIR_DENTRY) +static struct dentry * zpl_snapdir_mkdir(struct mnt_idmap *user_ns, struct inode *dip, struct dentry *dentry, umode_t mode) #else +static int zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) #endif { @@ -365,7 +377,11 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) ASSERT3S(error, <=, 0); crfree(cr); +#if defined(HAVE_IOPS_MKDIR_DENTRY) + return (ERR_PTR(error)); +#else return (error); +#endif } /* diff --git a/module/os/linux/zfs/zpl_file.c b/module/os/linux/zfs/zpl_file.c index 4d1bf1d5477f..7a1e7eee79de 100644 --- a/module/os/linux/zfs/zpl_file.c +++ b/module/os/linux/zfs/zpl_file.c @@ -28,6 +28,7 @@ #include #endif #include +#include #include #include #include @@ -227,7 +228,8 @@ zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to, loff_t pos, ssize_t count, size_t skip) { #if defined(HAVE_VFS_IOV_ITER) - zfs_uio_iov_iter_init(uio, to, pos, count, skip); + (void) skip; + zfs_uio_iov_iter_init(uio, to, pos, count); #else zfs_uio_iovec_init(uio, zfs_uio_iter_iov(to), to->nr_segs, pos, zfs_uio_iov_iter_type(to) & ITER_KVEC ? 
@@ -1090,6 +1092,11 @@ const struct address_space_operations zpl_address_space_operations = { #ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO .dirty_folio = filemap_dirty_folio, #endif +#ifdef HAVE_VFS_MIGRATE_FOLIO + .migrate_folio = migrate_folio, +#elif defined(HAVE_VFS_MIGRATEPAGE) + .migratepage = migrate_page, +#endif }; const struct file_operations zpl_file_operations = { diff --git a/module/os/linux/zfs/zpl_inode.c b/module/os/linux/zfs/zpl_inode.c index 8386fc2ae0ce..56ef3a7d1212 100644 --- a/module/os/linux/zfs/zpl_inode.c +++ b/module/os/linux/zfs/zpl_inode.c @@ -333,14 +333,20 @@ zpl_unlink(struct inode *dir, struct dentry *dentry) return (error); } +#if defined(HAVE_IOPS_MKDIR_USERNS) static int -#ifdef HAVE_IOPS_MKDIR_USERNS zpl_mkdir(struct user_namespace *user_ns, struct inode *dir, struct dentry *dentry, umode_t mode) #elif defined(HAVE_IOPS_MKDIR_IDMAP) +static int +zpl_mkdir(struct mnt_idmap *user_ns, struct inode *dir, + struct dentry *dentry, umode_t mode) +#elif defined(HAVE_IOPS_MKDIR_DENTRY) +static struct dentry * zpl_mkdir(struct mnt_idmap *user_ns, struct inode *dir, struct dentry *dentry, umode_t mode) #else +static int zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) #endif { @@ -349,7 +355,8 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) znode_t *zp; int error; fstrans_cookie_t cookie; -#if !(defined(HAVE_IOPS_MKDIR_USERNS) || defined(HAVE_IOPS_MKDIR_IDMAP)) +#if !(defined(HAVE_IOPS_MKDIR_USERNS) || \ + defined(HAVE_IOPS_MKDIR_IDMAP) || defined(HAVE_IOPS_MKDIR_DENTRY)) zidmap_t *user_ns = kcred->user_ns; #endif @@ -377,9 +384,13 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) spl_fstrans_unmark(cookie); kmem_free(vap, sizeof (vattr_t)); crfree(cr); - ASSERT3S(error, <=, 0); + ASSERT3S(error, <=, 0); +#if defined(HAVE_IOPS_MKDIR_DENTRY) + return (error != 0 ? 
ERR_PTR(error) : NULL); +#else return (error); +#endif } static int diff --git a/module/os/linux/zfs/zpl_super.c b/module/os/linux/zfs/zpl_super.c index 19a9ab9c7e3f..4c05b14c0f7e 100644 --- a/module/os/linux/zfs/zpl_super.c +++ b/module/os/linux/zfs/zpl_super.c @@ -370,17 +370,25 @@ zpl_prune_sb(uint64_t nr_to_scan, void *arg) int objects = 0; /* - * deactivate_locked_super calls shrinker_free and only then - * sops->kill_sb cb, resulting in UAF on umount when trying to reach - * for the shrinker functions in zpl_prune_sb of in-umount dataset. - * Increment if s_active is not zero, but don't prune if it is - - * umount could be underway. + * Ensure the superblock is not in the process of being torn down. */ - if (atomic_inc_not_zero(&sb->s_active)) { - (void) -zfs_prune(sb, nr_to_scan, &objects); - atomic_dec(&sb->s_active); +#ifdef HAVE_SB_DYING + if (down_read_trylock(&sb->s_umount)) { + if (!(sb->s_flags & SB_DYING) && sb->s_root && + (sb->s_flags & SB_BORN)) { + (void) zfs_prune(sb, nr_to_scan, &objects); + } + up_read(&sb->s_umount); } - +#else + if (down_read_trylock(&sb->s_umount)) { + if (!hlist_unhashed(&sb->s_instances) && + sb->s_root && (sb->s_flags & SB_BORN)) { + (void) zfs_prune(sb, nr_to_scan, &objects); + } + up_read(&sb->s_umount); + } +#endif } const struct super_operations zpl_super_operations = { diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c index 01f812b8e814..790babd3888e 100644 --- a/module/os/linux/zfs/zvol_os.c +++ b/module/os/linux/zfs/zvol_os.c @@ -202,7 +202,16 @@ static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv) * We need BLK_MQ_F_BLOCKING here since we do blocking calls in * zvol_request_impl() */ - zso->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; + zso->tag_set.flags = BLK_MQ_F_BLOCKING; + +#ifdef BLK_MQ_F_SHOULD_MERGE + /* + * Linux 6.14 removed BLK_MQ_F_SHOULD_MERGE and made it implicit. + * For older kernels, we set it. 
+ */ + zso->tag_set.flags |= BLK_MQ_F_SHOULD_MERGE; +#endif + zso->tag_set.driver_data = zv; return (blk_mq_alloc_tag_set(&zso->tag_set)); @@ -1386,13 +1395,15 @@ zvol_alloc(dev_t dev, const char *name, uint64_t volblocksize) */ if (zv->zv_zso->use_blk_mq) { ret = zvol_alloc_blk_mq(zv, &limits); + if (ret != 0) + goto out_kmem; zso->zvo_disk->fops = &zvol_ops_blk_mq; } else { ret = zvol_alloc_non_blk_mq(zso, &limits); + if (ret != 0) + goto out_kmem; zso->zvo_disk->fops = &zvol_ops; } - if (ret != 0) - goto out_kmem; /* Limit read-ahead to a single page to prevent over-prefetching. */ blk_queue_set_read_ahead(zso->zvo_queue, 1); diff --git a/module/zcommon/zfs_valstr.c b/module/zcommon/zfs_valstr.c index e2d4d1aefefb..f810d206cfab 100644 --- a/module/zcommon/zfs_valstr.c +++ b/module/zcommon/zfs_valstr.c @@ -38,7 +38,9 @@ */ typedef struct { const char vb_bit; - const char vb_pair[2]; + + /* 2 byte name + 1 byte NULL terminator to make GCC happy */ + const char vb_pair[3]; const char *vb_name; } valstr_bit_t; diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 4013b2d90fd5..5c6e92f0f8b3 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -4205,15 +4205,17 @@ static uint64_t arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down, uint_t balance) { - if (total < 8 || up + down == 0) + if (total < 32 || up + down == 0) return (frac); /* - * We should not have more ghost hits than ghost size, but they - * may get close. Restrict maximum adjustment in that case. + * We should not have more ghost hits than ghost size, but they may + * get close. To avoid overflows below up/down should not be bigger + * than 1/5 of total. But to limit maximum adjustment speed restrict + * it some more. 
*/ - if (up + down >= total / 4) { - uint64_t scale = (up + down) / (total / 8); + if (up + down >= total / 16) { + uint64_t scale = (up + down) / (total / 32); up /= scale; down /= scale; } @@ -4222,6 +4224,7 @@ arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down, int s = highbit64(total); s = MIN(64 - s, 32); + ASSERT3U(frac, <=, 1ULL << 32); uint64_t ofrac = (1ULL << 32) - frac; if (frac >= 4 * ofrac) @@ -4232,6 +4235,8 @@ arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down, down = (down << s) / (total >> (32 - s)); down = down * 100 / balance; + ASSERT3U(up, <=, (1ULL << 32) - frac); + ASSERT3U(down, <=, frac); return (frac + up - down); } diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c index 1fb198219904..770a96785c6b 100644 --- a/module/zfs/ddt.c +++ b/module/zfs/ddt.c @@ -438,7 +438,8 @@ ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg) ddt_stat_generate(ddt, dde, &dds); bucket = highbit64(dds.dds_ref_blocks) - 1; - ASSERT(bucket >= 0); + if (unlikely(bucket >= 0)) /* if() needed for GCC bounds check */ + ASSERT(bucket >= 0); ddh = &ddt->ddt_histogram[dde->dde_type][dde->dde_class]; diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 5ea2fca9db1e..ff3af5721a04 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -33,6 +33,7 @@ * Copyright (c) 2019, Klara Inc. * Copyright (c) 2019, Allan Jude * Copyright (c) 2022 Hewlett Packard Enterprise Development LP. 
+ * Copyright (c) 2025, Rob Norris */ /* Portions Copyright 2010 Robert Milkowski */ @@ -66,6 +67,7 @@ #include "zfs_namecheck.h" #include #include +#include /* * Needed to close a window in dnode_move() that allows the objset to be freed @@ -756,7 +758,7 @@ dmu_objset_hold_flags(const char *name, boolean_t decrypt, const void *tag, err = dmu_objset_from_ds(ds, osp); if (err != 0) { - dsl_dataset_rele(ds, tag); + dsl_dataset_rele_flags(ds, flags, tag); dsl_pool_rele(dp, tag); } @@ -1172,7 +1174,6 @@ dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, typedef struct dmu_objset_create_arg { const char *doca_name; cred_t *doca_cred; - proc_t *doca_proc; void (*doca_userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); void *doca_userarg; @@ -1216,7 +1217,7 @@ dmu_objset_create_check(void *arg, dmu_tx_t *tx) } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, - doca->doca_cred, doca->doca_proc); + doca->doca_cred); if (error != 0) { dsl_dir_rele(pdd, FTAG); return (error); @@ -1343,9 +1344,11 @@ dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, dmu_objset_create_arg_t doca; dsl_crypto_params_t tmp_dcp = { 0 }; + cred_t *cr = CRED(); + crhold(cr); + doca.doca_name = name; - doca.doca_cred = CRED(); - doca.doca_proc = curproc; + doca.doca_cred = cr; doca.doca_flags = flags; doca.doca_userfunc = func; doca.doca_userarg = arg; @@ -1367,6 +1370,9 @@ dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, if (rv == 0) zvol_create_minor(name); + + crfree(cr); + return (rv); } @@ -1374,7 +1380,6 @@ typedef struct dmu_objset_clone_arg { const char *doca_clone; const char *doca_origin; cred_t *doca_cred; - proc_t *doca_proc; } dmu_objset_clone_arg_t; static int @@ -1402,7 +1407,7 @@ dmu_objset_clone_check(void *arg, dmu_tx_t *tx) } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, - doca->doca_cred, doca->doca_proc); + doca->doca_cred); if (error != 0) { 
dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EDQUOT)); @@ -1458,10 +1463,12 @@ dmu_objset_clone(const char *clone, const char *origin) { dmu_objset_clone_arg_t doca; + cred_t *cr = CRED(); + crhold(cr); + doca.doca_clone = clone; doca.doca_origin = origin; - doca.doca_cred = CRED(); - doca.doca_proc = curproc; + doca.doca_cred = cr; int rv = dsl_sync_task(clone, dmu_objset_clone_check, dmu_objset_clone_sync, &doca, @@ -1470,6 +1477,8 @@ dmu_objset_clone(const char *clone, const char *origin) if (rv == 0) zvol_create_minor(clone); + crfree(cr); + return (rv); } diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c index 883ee192f07f..41b279d48464 100644 --- a/module/zfs/dmu_recv.c +++ b/module/zfs/dmu_recv.c @@ -29,6 +29,7 @@ * Copyright (c) 2019, Allan Jude * Copyright (c) 2019 Datto Inc. * Copyright (c) 2022 Axcient. + * Copyright (c) 2025, Rob Norris */ #include @@ -67,6 +68,7 @@ #include #endif #include +#include static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE; static uint_t zfs_recv_queue_ff = 20; @@ -144,7 +146,6 @@ typedef struct dmu_recv_begin_arg { const char *drba_origin; dmu_recv_cookie_t *drba_cookie; cred_t *drba_cred; - proc_t *drba_proc; dsl_crypto_params_t *drba_dcp; } dmu_recv_begin_arg_t; @@ -410,7 +411,7 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds, * against that limit. */ error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, - NULL, drba->drba_cred, drba->drba_proc); + NULL, drba->drba_cred); if (error != 0) return (error); @@ -739,16 +740,14 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx) * filesystems and increment those counts during begin_sync). 
*/ error = dsl_fs_ss_limit_check(ds->ds_dir, 1, - ZFS_PROP_FILESYSTEM_LIMIT, NULL, - drba->drba_cred, drba->drba_proc); + ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred); if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); } error = dsl_fs_ss_limit_check(ds->ds_dir, 1, - ZFS_PROP_SNAPSHOT_LIMIT, NULL, - drba->drba_cred, drba->drba_proc); + ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred); if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); @@ -1226,6 +1225,9 @@ dmu_recv_begin(const char *tofs, const char *tosnap, dmu_recv_begin_arg_t drba = { 0 }; int err = 0; + cred_t *cr = CRED(); + crhold(cr); + memset(drc, 0, sizeof (dmu_recv_cookie_t)); drc->drc_drr_begin = drr_begin; drc->drc_drrb = &drr_begin->drr_u.drr_begin; @@ -1234,8 +1236,7 @@ dmu_recv_begin(const char *tofs, const char *tosnap, drc->drc_force = force; drc->drc_heal = heal; drc->drc_resumable = resumable; - drc->drc_cred = CRED(); - drc->drc_proc = curproc; + drc->drc_cred = cr; drc->drc_clone = (origin != NULL); if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { @@ -1247,6 +1248,8 @@ dmu_recv_begin(const char *tofs, const char *tosnap, (void) fletcher_4_incremental_native(drr_begin, sizeof (dmu_replay_record_t), &drc->drc_cksum); } else { + crfree(cr); + drc->drc_cred = NULL; return (SET_ERROR(EINVAL)); } @@ -1263,9 +1266,11 @@ dmu_recv_begin(const char *tofs, const char *tosnap, * upper limit. Systems with less than 1GB of RAM will see a lower * limit from `arc_all_memory() / 4`. 
*/ - if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4))) - return (E2BIG); - + if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4))) { + crfree(cr); + drc->drc_cred = NULL; + return (SET_ERROR(E2BIG)); + } if (payloadlen != 0) { void *payload = vmem_alloc(payloadlen, KM_SLEEP); @@ -1281,6 +1286,8 @@ dmu_recv_begin(const char *tofs, const char *tosnap, payload); if (err != 0) { vmem_free(payload, payloadlen); + crfree(cr); + drc->drc_cred = NULL; return (err); } err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl, @@ -1289,6 +1296,8 @@ dmu_recv_begin(const char *tofs, const char *tosnap, if (err != 0) { kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); + crfree(cr); + drc->drc_cred = NULL; return (err); } } @@ -1298,8 +1307,7 @@ dmu_recv_begin(const char *tofs, const char *tosnap, drba.drba_origin = origin; drba.drba_cookie = drc; - drba.drba_cred = CRED(); - drba.drba_proc = curproc; + drba.drba_cred = drc->drc_cred; if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) { err = dsl_sync_task(tofs, @@ -1334,6 +1342,8 @@ dmu_recv_begin(const char *tofs, const char *tosnap, if (err != 0) { kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); nvlist_free(drc->drc_begin_nvl); + crfree(cr); + drc->drc_cred = NULL; } return (err); } @@ -3483,6 +3493,8 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp) */ dmu_recv_cleanup_ds(drc); nvlist_free(drc->drc_keynvl); + crfree(drc->drc_cred); + drc->drc_cred = NULL; } objlist_destroy(drc->drc_ignore_objlist); @@ -3557,8 +3569,7 @@ dmu_recv_end_check(void *arg, dmu_tx_t *tx) return (error); } error = dsl_dataset_snapshot_check_impl(origin_head, - drc->drc_tosnap, tx, B_TRUE, 1, - drc->drc_cred, drc->drc_proc); + drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); dsl_dataset_rele(origin_head, FTAG); if (error != 0) return (error); @@ -3566,8 +3577,7 @@ dmu_recv_end_check(void *arg, dmu_tx_t *tx) error = dsl_destroy_head_check_impl(drc->drc_ds, 1); } else { error = 
dsl_dataset_snapshot_check_impl(drc->drc_ds, - drc->drc_tosnap, tx, B_TRUE, 1, - drc->drc_cred, drc->drc_proc); + drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); } return (error); } @@ -3779,6 +3789,10 @@ dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) zvol_create_minor(snapname); kmem_strfree(snapname); } + + crfree(drc->drc_cred); + drc->drc_cred = NULL; + return (error); } diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index 30d2ca5d6aa2..716b1b1f778c 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -2669,8 +2669,8 @@ dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, } if (fromsnap != 0) { - err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags, - FTAG, &fromds); + err = dsl_dataset_hold_obj(dspp.dp, fromsnap, FTAG, &fromds); + if (err != 0) { dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); dsl_pool_rele(dspp.dp, FTAG); @@ -2722,7 +2722,7 @@ dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, kmem_free(dspp.fromredactsnaps, dspp.numfromredactsnaps * sizeof (uint64_t)); - dsl_dataset_rele(dspp.to_ds, FTAG); + dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); return (err); } diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index 85b598df3f49..c65f7ca03daf 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -31,6 +31,7 @@ * Copyright (c) 2019, Klara Inc. * Copyright (c) 2019, Allan Jude * Copyright (c) 2020 The FreeBSD Foundation [1] + * Copyright (c) 2025, Rob Norris * * [1] Portions of this software were developed by Allan Jude * under sponsorship from the FreeBSD Foundation. 
@@ -1486,7 +1487,7 @@ dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx) int dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname, - dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc) + dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr) { int error; uint64_t value; @@ -1531,7 +1532,7 @@ dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname, */ if (cnt != 0 && cr != NULL) { error = dsl_fs_ss_limit_check(ds->ds_dir, cnt, - ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc); + ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr); if (error != 0) return (error); } @@ -1632,7 +1633,7 @@ dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx) if (error == 0) { error = dsl_fs_ss_limit_check(ds->ds_dir, cnt, ZFS_PROP_SNAPSHOT_LIMIT, NULL, - ddsa->ddsa_cr, ddsa->ddsa_proc); + ddsa->ddsa_cr); dsl_dataset_rele(ds, FTAG); } @@ -1670,7 +1671,7 @@ dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx) if (error == 0) { /* passing 0/NULL skips dsl_fs_ss_limit_check */ error = dsl_dataset_snapshot_check_impl(ds, - atp + 1, tx, B_FALSE, 0, NULL, NULL); + atp + 1, tx, B_FALSE, 0, NULL); dsl_dataset_rele(ds, FTAG); } @@ -1944,11 +1945,13 @@ dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors) } } + cred_t *cr = CRED(); + crhold(cr); + ddsa.ddsa_snaps = snaps; ddsa.ddsa_props = props; ddsa.ddsa_errors = errors; - ddsa.ddsa_cr = CRED(); - ddsa.ddsa_proc = curproc; + ddsa.ddsa_cr = cr; if (error == 0) { error = dsl_sync_task(firstname, dsl_dataset_snapshot_check, @@ -1956,6 +1959,8 @@ dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors) fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL); } + crfree(cr); + if (suspended != NULL) { for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL; pair = nvlist_next_nvpair(suspended, pair)) { @@ -1996,7 +2001,7 @@ dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx) /* NULL cred means no limit check for tmp snapshot */ error = 
dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname, - tx, B_FALSE, 0, NULL, NULL); + tx, B_FALSE, 0, NULL); if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); @@ -3454,7 +3459,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx) /* Check that there is enough space and limit headroom here */ err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir, - 0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc); + 0, ss_mv_cnt, ddpa->used, ddpa->cr); if (err != 0) goto out; @@ -3889,15 +3894,19 @@ dsl_dataset_promote(const char *name, char *conflsnap) if (error != 0) return (error); + cred_t *cr = CRED(); + crhold(cr); + ddpa.ddpa_clonename = name; ddpa.err_ds = fnvlist_alloc(); - ddpa.cr = CRED(); - ddpa.proc = curproc; + ddpa.cr = cr; error = dsl_sync_task(name, dsl_dataset_promote_check, dsl_dataset_promote_sync, &ddpa, 2 + numsnaps, ZFS_SPACE_CHECK_RESERVED); + crfree(cr); + /* * Return the first conflicting snapshot found. */ diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c index baf970121a61..4839fb39f5b3 100644 --- a/module/zfs/dsl_dir.c +++ b/module/zfs/dsl_dir.c @@ -27,6 +27,7 @@ * Copyright (c) 2016 Actifio, Inc. All rights reserved. * Copyright (c) 2018, loli10K . All rights reserved. * Copyright (c) 2023 Hewlett Packard Enterprise Development LP. + * Copyright (c) 2025, Rob Norris */ #include @@ -755,7 +756,7 @@ typedef enum { static enforce_res_t dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, - cred_t *cr, proc_t *proc) + cred_t *cr) { enforce_res_t enforce = ENFORCE_ALWAYS; uint64_t obj; @@ -770,16 +771,8 @@ dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, if (crgetzoneid(cr) != GLOBAL_ZONEID) return (ENFORCE_ALWAYS); - /* - * We are checking the saved credentials of the user process, which is - * not the current process. Note that we can't use secpolicy_zfs(), - * because it only works if the cred is that of the current process (on - * Linux). 
- */ - if (secpolicy_zfs_proc(cr, proc) == 0) + if (secpolicy_zfs(cr) == 0) return (ENFORCE_NEVER); -#else - (void) proc; #endif if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0) @@ -813,7 +806,7 @@ dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, */ int dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop, - dsl_dir_t *ancestor, cred_t *cr, proc_t *proc) + dsl_dir_t *ancestor, cred_t *cr) { objset_t *os = dd->dd_pool->dp_meta_objset; uint64_t limit, count; @@ -845,7 +838,7 @@ dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop, * are allowed to change the limit on the current dataset, but there * is another limit in the tree above. */ - enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc); + enforce = dsl_enforce_ds_ss_limits(dd, prop, cr); if (enforce == ENFORCE_NEVER) return (0); @@ -889,7 +882,7 @@ dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop, if (dd->dd_parent != NULL) err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop, - ancestor, cr, proc); + ancestor, cr); return (err); } @@ -1912,7 +1905,6 @@ typedef struct dsl_dir_rename_arg { const char *ddra_oldname; const char *ddra_newname; cred_t *ddra_cred; - proc_t *ddra_proc; } dsl_dir_rename_arg_t; typedef struct dsl_valid_rename_arg { @@ -2091,8 +2083,7 @@ dsl_dir_rename_check(void *arg, dmu_tx_t *tx) } error = dsl_dir_transfer_possible(dd->dd_parent, - newparent, fs_cnt, ss_cnt, myspace, - ddra->ddra_cred, ddra->ddra_proc); + newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred); if (error != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); @@ -2209,22 +2200,27 @@ dsl_dir_rename_sync(void *arg, dmu_tx_t *tx) int dsl_dir_rename(const char *oldname, const char *newname) { + cred_t *cr = CRED(); + crhold(cr); + dsl_dir_rename_arg_t ddra; ddra.ddra_oldname = oldname; ddra.ddra_newname = newname; - ddra.ddra_cred = CRED(); - ddra.ddra_proc = curproc; + ddra.ddra_cred = cr; - return (dsl_sync_task(oldname, + int err = 
dsl_sync_task(oldname, dsl_dir_rename_check, dsl_dir_rename_sync, &ddra, - 3, ZFS_SPACE_CHECK_RESERVED)); + 3, ZFS_SPACE_CHECK_RESERVED); + + crfree(cr); + return (err); } int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, - cred_t *cr, proc_t *proc) + cred_t *cr) { dsl_dir_t *ancestor; int64_t adelta; @@ -2238,11 +2234,11 @@ dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, return (SET_ERROR(ENOSPC)); err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT, - ancestor, cr, proc); + ancestor, cr); if (err != 0) return (err); err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT, - ancestor, cr, proc); + ancestor, cr); if (err != 0) return (err); diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 3876b1014973..7eda653a810e 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -27,6 +27,7 @@ */ #include +#include #include #include #include @@ -5506,6 +5507,13 @@ spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) if (BP_GET_NDVAS(bp) < 1) return (B_FALSE); + /* + * Cloned blocks can not be remapped since BRT depends on specific + * vdev id and offset in the DVA[0] for its reference counting. + */ + if (!BP_IS_METADATA(bp) && brt_maybe_exists(spa, bp)) + return (B_FALSE); + /* * Note: we only remap dva[0]. If we remapped other dvas, we * would no longer know what their phys birth txg is. 
diff --git a/module/zfs/zcp.c b/module/zfs/zcp.c index 7c279162a9d1..c92067fbd9a2 100644 --- a/module/zfs/zcp.c +++ b/module/zfs/zcp.c @@ -1140,12 +1140,14 @@ zcp_eval(const char *poolname, const char *program, boolean_t sync, } VERIFY3U(3, ==, lua_gettop(state)); + cred_t *cr = CRED(); + crhold(cr); + runinfo.zri_state = state; runinfo.zri_allocargs = &allocargs; runinfo.zri_outnvl = outnvl; runinfo.zri_result = 0; - runinfo.zri_cred = CRED(); - runinfo.zri_proc = curproc; + runinfo.zri_cred = cr; runinfo.zri_timed_out = B_FALSE; runinfo.zri_canceled = B_FALSE; runinfo.zri_sync = sync; @@ -1164,6 +1166,8 @@ zcp_eval(const char *poolname, const char *program, boolean_t sync, } lua_close(state); + crfree(cr); + /* * Create device minor nodes for any new zvols. */ diff --git a/module/zfs/zcp_synctask.c b/module/zfs/zcp_synctask.c index 058910054d97..af94569e0865 100644 --- a/module/zfs/zcp_synctask.c +++ b/module/zfs/zcp_synctask.c @@ -192,7 +192,6 @@ zcp_synctask_promote(lua_State *state, boolean_t sync, nvlist_t *err_details) ddpa.ddpa_clonename = dsname; ddpa.err_ds = err_details; ddpa.cr = ri->zri_cred; - ddpa.proc = ri->zri_proc; /* * If there was a snapshot name conflict, then err_ds will be filled @@ -276,7 +275,6 @@ zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details) ddsa.ddsa_errors = NULL; ddsa.ddsa_props = NULL; ddsa.ddsa_cr = ri->zri_cred; - ddsa.ddsa_proc = ri->zri_proc; ddsa.ddsa_snaps = fnvlist_alloc(); fnvlist_add_boolean(ddsa.ddsa_snaps, dsname); diff --git a/rpm/generic/zfs-dkms.spec.in b/rpm/generic/zfs-dkms.spec.in index cd85dd28cf56..fe127025e860 100644 --- a/rpm/generic/zfs-dkms.spec.in +++ b/rpm/generic/zfs-dkms.spec.in @@ -29,9 +29,16 @@ Requires(post): dkms >= 2.2.0.3 Requires(preun): dkms >= 2.2.0.3 Requires: gcc, make, perl, diffutils Requires(post): gcc, make, perl, diffutils + +# Hold back kernel upgrades if kernel is not supported by ZFS %if 0%{?rhel}%{?fedora}%{?mageia}%{?suse_version}%{?openEuler} Requires: 
kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999 Requires(post): kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999 +Conflicts: kernel-devel < @ZFS_META_KVER_MIN@, kernel-devel > @ZFS_META_KVER_MAX@.999 +Requires: kernel-uname-r >= @ZFS_META_KVER_MIN@, kernel-uname-r <= @ZFS_META_KVER_MAX@.999 +Requires(post): kernel-uname-r >= @ZFS_META_KVER_MIN@, kernel-uname-r <= @ZFS_META_KVER_MAX@.999 +Conflicts: kernel-uname-r < @ZFS_META_KVER_MIN@, kernel-uname-r > @ZFS_META_KVER_MAX@.999 + Obsoletes: spl-dkms <= %{version} %endif Provides: %{module}-kmod = %{version} diff --git a/rpm/generic/zfs-kmod.spec.in b/rpm/generic/zfs-kmod.spec.in index 30524474d1ac..7ed828bd0c9c 100644 --- a/rpm/generic/zfs-kmod.spec.in +++ b/rpm/generic/zfs-kmod.spec.in @@ -144,7 +144,9 @@ for kernel_version in %{?kernel_versions}; do %{debuginfo} \ %{?kernel_cc} \ %{?kernel_ld} \ - %{?kernel_llvm} + %{?kernel_llvm} \ + %{?kernel_cross_compile} \ + %{?kernel_arch} # Pre-6.10 kernel builds didn't need to copy over the source files to the # build directory. 
However we do need to do it though post-6.10 due to diff --git a/rpm/redhat/zfs-kmod.spec.in b/rpm/redhat/zfs-kmod.spec.in index 876c198c64de..a95bdf20f873 100644 --- a/rpm/redhat/zfs-kmod.spec.in +++ b/rpm/redhat/zfs-kmod.spec.in @@ -69,7 +69,9 @@ fi %{debuginfo} \ %{?kernel_cc} \ %{?kernel_ld} \ - %{?kernel_llvm} + %{?kernel_llvm} \ + %{?kernel_cross_compile} \ + %{?kernel_arch} make %{?_smp_mflags} # Module signing (modsign) diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run index f302df81b919..b49e1979bcbc 100644 --- a/tests/runfiles/common.run +++ b/tests/runfiles/common.run @@ -895,7 +895,7 @@ tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos', 'send_spill_block', 'send_holds', 'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol', 'send_partial_dataset', 'send_invalid', 'send_doall', 'send_raw_spill_block', 'send_raw_ashift', - 'send_raw_large_blocks'] + 'send_raw_large_blocks', 'send_leak_keymaps'] tags = ['functional', 'rsend'] [tests/functional/scrub_mirror] diff --git a/tests/zfs-tests/cmd/file/largest_file.c b/tests/zfs-tests/cmd/file/largest_file.c index d7252556b3cf..4cf04a7681eb 100644 --- a/tests/zfs-tests/cmd/file/largest_file.c +++ b/tests/zfs-tests/cmd/file/largest_file.c @@ -61,7 +61,7 @@ main(int argc, char **argv) offset_t llseek_ret = 0; int write_ret = 0; int err = 0; - char mybuf[5] = "aaaa\0"; + char mybuf[5] = "aaaa"; char *testfile; mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; struct sigaction sa; diff --git a/tests/zfs-tests/cmd/getversion.c b/tests/zfs-tests/cmd/getversion.c index 62c1c5b6abc0..1e026b92d17d 100644 --- a/tests/zfs-tests/cmd/getversion.c +++ b/tests/zfs-tests/cmd/getversion.c @@ -19,9 +19,9 @@ */ #include -#include #include #include +#include #include #include #include diff --git a/tests/zfs-tests/include/blkdev.shlib b/tests/zfs-tests/include/blkdev.shlib index 6b83b10d604d..e9721bd9fc5e 100644 --- a/tests/zfs-tests/include/blkdev.shlib +++ 
b/tests/zfs-tests/include/blkdev.shlib @@ -556,27 +556,15 @@ function list_file_blocks # input_file # 512B blocks for ease of use with dd. # typeset level vdev path offset length - if awk -n '' 2>/dev/null; then - # gawk needs -n to decode hex - AWK='awk -n' - else - AWK='awk' - fi sync_all_pools true - zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 ' + zdb -dddddd $ds $objnum | awk ' /^$/ { looking = 0 } looking { level = $2 field = 3 while (split($field, dva, ":") == 3) { - # top level vdev id - vdev = int(dva[1]) - # offset + 4M label/boot pad in 512B blocks - offset = (int("0x"dva[2]) + pad) / bs - # length in 512B blocks - len = int("0x"dva[3]) / bs - print level, vdev, offset, len + print level, int(dva[1]), "0x"dva[2], "0x"dva[3] ++field } @@ -585,7 +573,8 @@ function list_file_blocks # input_file ' | \ while read level vdev offset length; do for path in ${VDEV_MAP[$vdev][@]}; do - echo "$level $path $offset $length" + echo "$level $path $(( ($offset + (4<<20)) / 512 ))" \ + "$(( $length / 512 ))" done done 2>/dev/null } diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am index 2747e24cad9c..f9005769cff2 100644 --- a/tests/zfs-tests/tests/Makefile.am +++ b/tests/zfs-tests/tests/Makefile.am @@ -1907,6 +1907,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \ functional/rsend/send_holds.ksh \ functional/rsend/send_hole_birth.ksh \ functional/rsend/send_invalid.ksh \ + functional/rsend/send_leak_keymaps.ksh \ functional/rsend/send-L_toggle.ksh \ functional/rsend/send_mixed_raw.ksh \ functional/rsend/send_partial_dataset.ksh \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh index 52b22dd833f0..dd0084816855 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh @@ -58,7 
+58,7 @@ log_must fio --rw=write --name=job --size=10M --filename=/$TESTPOOL2/10m_file log_must zinject -t data -e checksum -f 100 -am /$TESTPOOL2/10m_file # Try to read the 2nd megabyte of 10m_file -dd if=/$TESTPOOL2/10m_file bs=1M || true +dd if=/$TESTPOOL2/10m_file bs=1M of=/dev/null || true log_must zfs snapshot $TESTPOOL2@snap log_must zfs clone $TESTPOOL2@snap $TESTPOOL2/clone diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_004_pos.ksh index 111d598dfb7d..e80835821a83 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_004_pos.ksh @@ -62,7 +62,7 @@ log_must fio --rw=write --name=job --size=10M --filename=/$TESTPOOL2/10m_file log_must zinject -t data -e checksum -f 100 -am /$TESTPOOL2/10m_file # Try to read the file -dd if=/$TESTPOOL2/10m_file bs=1M || true +dd if=/$TESTPOOL2/10m_file bs=1M of=/dev/null || true log_must zfs snapshot $TESTPOOL2@snap log_must zfs clone $TESTPOOL2@snap $TESTPOOL2/clone diff --git a/tests/zfs-tests/tests/functional/io/io_uring.ksh b/tests/zfs-tests/tests/functional/io/io_uring.ksh index f14b9f450826..6db2b3ae554b 100755 --- a/tests/zfs-tests/tests/functional/io/io_uring.ksh +++ b/tests/zfs-tests/tests/functional/io/io_uring.ksh @@ -44,13 +44,6 @@ if ! 
$(grep -q "CONFIG_IO_URING=y" /boot/config-$(uname -r)); then log_unsupported "Requires io_uring support within Kernel" fi -if [ -e /etc/os-release ] ; then - source /etc/os-release - if [ $PLATFORM_ID = "platform:el9" ]; then - log_unsupported "Disabled on RHEL 9 variants: fails with 'Operation not permitted'" - fi -fi - fio --ioengine=io_uring --parse-only || log_unsupported "fio io_uring support required" function cleanup diff --git a/tests/zfs-tests/tests/functional/rsend/send_leak_keymaps.ksh b/tests/zfs-tests/tests/functional/rsend/send_leak_keymaps.ksh new file mode 100755 index 000000000000..6ab8da6fd740 --- /dev/null +++ b/tests/zfs-tests/tests/functional/rsend/send_leak_keymaps.ksh @@ -0,0 +1,82 @@ +#!/bin/ksh -p +# SPDX-License-Identifier: CDDL-1.0 +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2025 by George Amanakis. All rights reserved. +# + +. $STF_SUITE/tests/functional/rsend/rsend.kshlib + +# +# DESCRIPTION: +# Verify that an incremental non-raw zfs send from an encrypted filesystem +# does not leak any keys or key mappings. +# +# STRATEGY: +# 1. Create a new encrypted filesystem +# 2. Write some files and create snapshots. +# 3. Send to a new filesystem +# 4. Do an incremental (-I) send and before that access all properties on the +# sending filesystem (emulate sanoid) +# 5. Export and re-import the pool. Upon exporting the pool if any keys/key +# mappings leaked a panic will occur. 
+# + +verify_runnable "both" + +function cleanup +{ + datasetexists $TESTPOOL/$TESTFS2 && \ + destroy_dataset $TESTPOOL/$TESTFS2 -r + datasetexists $TESTPOOL/recv && \ + destroy_dataset $TESTPOOL/recv -r + [[ -f $keyfile ]] && log_must rm $keyfile +} +log_onexit cleanup + +log_assert "Verify non-raw send with encryption does not leak any key mappings" + +typeset keyfile=/$TESTPOOL/pkey + +# Create an encrypted dataset +log_must eval "echo 'password' > $keyfile" +log_must zfs create -o encryption=on -o keyformat=passphrase \ + -o keylocation=file://$keyfile $TESTPOOL/$TESTFS2 + +log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/testfile bs=128K count=4 \ + status=none + +for i in $(seq 0 20); do + log_note "Taking snapshots" + log_must zfs snapshot $TESTPOOL/$TESTFS2@snap_$i + log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/testfile bs=128K \ + count=4 status=none +done + +log_must eval "zfs send $TESTPOOL/$TESTFS2@snap_0 | zfs recv $TESTPOOL/recv" + +for i in $(seq 3 3 20); do + log_note "Sending incremental snapshot snap_$((i - 3)) -> snap_$i" + log_must zfs get -Hpd 1 -t snapshot all $TESTPOOL/$TESTFS2 &>/dev/null + log_must eval "zfs send -I $TESTPOOL/$TESTFS2@snap_$((i - 3)) \ + $TESTPOOL/$TESTFS2@snap_$i | zfs recv $TESTPOOL/recv" +done + +log_must zpool export $TESTPOOL +log_must zpool import $TESTPOOL + +log_pass "Verify non-raw send with encryption does not leak any key mappings"